summaryrefslogtreecommitdiff
path: root/legacy_data/src/data
diff options
context:
space:
mode:
Diffstat (limited to 'legacy_data/src/data')
-rw-r--r--legacy_data/src/data/local.rs266
-rw-r--r--legacy_data/src/data/local/align_tasks.rs110
-rw-r--r--legacy_data/src/data/local/cached_sheet.rs94
-rw-r--r--legacy_data/src/data/local/latest_file_data.rs103
-rw-r--r--legacy_data/src/data/local/latest_info.rs81
-rw-r--r--legacy_data/src/data/local/local_files.rs148
-rw-r--r--legacy_data/src/data/local/local_sheet.rs439
-rw-r--r--legacy_data/src/data/local/modified_status.rs30
-rw-r--r--legacy_data/src/data/local/workspace_analyzer.rs359
-rw-r--r--legacy_data/src/data/local/workspace_config.rs374
-rw-r--r--legacy_data/src/data/member.rs71
-rw-r--r--legacy_data/src/data/sheet.rs278
-rw-r--r--legacy_data/src/data/user.rs28
-rw-r--r--legacy_data/src/data/user/accounts.rs162
-rw-r--r--legacy_data/src/data/vault.rs132
-rw-r--r--legacy_data/src/data/vault/lock_status.rs40
-rw-r--r--legacy_data/src/data/vault/mapping_share.rs422
-rw-r--r--legacy_data/src/data/vault/member_manage.rs144
-rw-r--r--legacy_data/src/data/vault/sheet_manage.rs274
-rw-r--r--legacy_data/src/data/vault/vault_config.rs233
-rw-r--r--legacy_data/src/data/vault/virtual_file.rs500
21 files changed, 4288 insertions, 0 deletions
diff --git a/legacy_data/src/data/local.rs b/legacy_data/src/data/local.rs
new file mode 100644
index 0000000..d4115c6
--- /dev/null
+++ b/legacy_data/src/data/local.rs
@@ -0,0 +1,266 @@
+use std::{
+ collections::HashMap,
+ env::current_dir,
+ path::{Path, PathBuf},
+ sync::Arc,
+};
+
+use cfg_file::config::ConfigFile;
+use string_proc::format_path::format_path;
+use tokio::{fs, sync::Mutex};
+use vcs_docs::docs::READMES_LOCAL_WORKSPACE_TODOLIST;
+
+use crate::{
+ constants::{
+ CLIENT_CONTENT_GITIGNORE, CLIENT_FILE_GITIGNORE, CLIENT_FILE_LOCAL_SHEET,
+ CLIENT_FILE_TODOLIST, CLIENT_FILE_WORKSPACE, CLIENT_FOLDER_WORKSPACE_ROOT_NAME,
+ CLIENT_PATH_LOCAL_SHEET, CLIENT_SUFFIX_LOCAL_SHEET_FILE, KEY_ACCOUNT, KEY_SHEET_NAME,
+ },
+ data::{
+ local::{
+ local_sheet::{LocalSheet, LocalSheetData, LocalSheetPathBuf},
+ workspace_config::LocalConfig,
+ },
+ member::MemberId,
+ sheet::SheetName,
+ },
+ env::{current_local_path, find_local_path},
+};
+
+pub mod align_tasks;
+pub mod cached_sheet;
+pub mod latest_file_data;
+pub mod latest_info;
+pub mod local_files;
+pub mod local_sheet;
+pub mod modified_status;
+pub mod workspace_analyzer;
+pub mod workspace_config;
+
/// Handle to an opened local workspace: the shared, lock-protected
/// configuration plus the resolved workspace root directory on disk.
pub struct LocalWorkspace {
    // Shared configuration; handed out to callers via `config()` as a cloned Arc.
    config: Arc<Mutex<LocalConfig>>,
    // Root directory of the workspace on disk.
    local_path: PathBuf,
}
+
impl LocalWorkspace {
    /// Get the path of the local workspace root directory.
    pub fn local_path(&self) -> &PathBuf {
        &self.local_path
    }

    /// Initialize local workspace.
    ///
    /// Returns `None` when no workspace root can be resolved from `local_path`
    /// (semantics of the search are defined by `find_local_path`).
    pub fn init(config: LocalConfig, local_path: impl Into<PathBuf>) -> Option<Self> {
        let local_path = find_local_path(local_path)?;
        Some(Self {
            config: Arc::new(Mutex::new(config)),
            local_path,
        })
    }

    /// Initialize local workspace in the current directory.
    ///
    /// Returns `None` when the current directory is not inside a workspace.
    pub fn init_current_dir(config: LocalConfig) -> Option<Self> {
        let local_path = current_local_path()?;
        Some(Self {
            config: Arc::new(Mutex::new(config)),
            local_path,
        })
    }

    /// Setup local workspace: writes the default config, the TODO-list readme,
    /// and a `.gitignore`, then tries to hide the workspace metadata folder
    /// (best effort, Windows only).
    ///
    /// # Errors
    /// Fails with `DirectoryNotEmpty` if `local_path` exists and is non-empty,
    /// or propagates any I/O error from the file writes.
    pub async fn setup_local_workspace(
        local_path: impl Into<PathBuf>,
    ) -> Result<(), std::io::Error> {
        let local_path: PathBuf = local_path.into();

        // Ensure directory is empty
        // NOTE(review): `read_dir` here is the synchronous std API inside an
        // async fn — acceptable for a one-shot setup, but confirm this is not
        // called on a latency-sensitive runtime thread.
        if local_path.exists() && local_path.read_dir()?.next().is_some() {
            return Err(std::io::Error::new(
                std::io::ErrorKind::DirectoryNotEmpty,
                "DirectoryNotEmpty",
            ));
        }

        // 1. Setup config
        let config = LocalConfig::default();
        LocalConfig::write_to(&config, local_path.join(CLIENT_FILE_WORKSPACE)).await?;

        // 2. Setup SETUP.md
        let readme_content = READMES_LOCAL_WORKSPACE_TODOLIST.trim().to_string();
        fs::write(local_path.join(CLIENT_FILE_TODOLIST), readme_content).await?;

        // 3. Setup .gitignore
        fs::write(
            local_path.join(CLIENT_FILE_GITIGNORE),
            CLIENT_CONTENT_GITIGNORE,
        )
        .await?;

        // On Windows, set the .jv directory as hidden.
        // Failure is deliberately ignored: hiding the folder is cosmetic.
        let jv_dir = local_path.join(CLIENT_FOLDER_WORKSPACE_ROOT_NAME);
        let _ = hide_folder::hide_folder(&jv_dir);

        Ok(())
    }

    /// Get a reference to the local configuration (cloned `Arc`; callers share
    /// the same underlying mutex-protected config).
    pub fn config(&self) -> Arc<Mutex<LocalConfig>> {
        self.config.clone()
    }

    /// Setup local workspace in current directory.
    pub async fn setup_local_workspace_current_dir() -> Result<(), std::io::Error> {
        Self::setup_local_workspace(current_dir()?).await?;
        Ok(())
    }

    /// Get the path to a local sheet, derived from the per-member / per-sheet
    /// file-name template in `CLIENT_FILE_LOCAL_SHEET`.
    pub fn local_sheet_path(&self, member: &MemberId, sheet: &SheetName) -> PathBuf {
        self.local_path.join(
            CLIENT_FILE_LOCAL_SHEET
                .replace(KEY_ACCOUNT, member)
                .replace(KEY_SHEET_NAME, sheet),
        )
    }

    /// Read or initialize a local sheet.
    ///
    /// If the sheet file does not exist yet, an empty `LocalSheetData` is
    /// written to disk first, so a sheet file always exists after this call.
    pub async fn local_sheet(
        &self,
        member: &MemberId,
        sheet: &SheetName,
    ) -> Result<LocalSheet<'_>, std::io::Error> {
        let local_sheet_path = self.local_sheet_path(member, sheet);

        if !local_sheet_path.exists() {
            let sheet_data = LocalSheetData {
                mapping: HashMap::new(),
                vfs: HashMap::new(),
            };
            LocalSheetData::write_to(&sheet_data, local_sheet_path).await?;
            return Ok(LocalSheet {
                local_workspace: self,
                member: member.clone(),
                sheet_name: sheet.clone(),
                data: sheet_data,
            });
        }

        let data = LocalSheetData::read_from(&local_sheet_path).await?;
        let local_sheet = LocalSheet {
            local_workspace: self,
            member: member.clone(),
            sheet_name: sheet.clone(),
            data,
        };

        Ok(local_sheet)
    }

    /// Collect all local sheet file paths by walking the sheet directory
    /// recursively and matching on the configured file extension.
    pub async fn local_sheet_paths(&self) -> Result<Vec<LocalSheetPathBuf>, std::io::Error> {
        let local_sheet_path = self.local_path.join(CLIENT_PATH_LOCAL_SHEET);
        let mut sheet_paths = Vec::new();

        // Recursive async walk; `Box::pin` is required because an async fn
        // cannot recurse with an unboxed (infinitely-sized) future.
        async fn collect_sheet_paths(
            dir: &Path,
            suffix: &str,
            paths: &mut Vec<LocalSheetPathBuf>,
        ) -> Result<(), std::io::Error> {
            if dir.is_dir() {
                let mut entries = fs::read_dir(dir).await?;
                while let Some(entry) = entries.next_entry().await? {
                    let path = entry.path();

                    if path.is_dir() {
                        Box::pin(collect_sheet_paths(&path, suffix, paths)).await?;
                    } else if path.is_file()
                        && let Some(extension) = path.extension()
                        && extension == suffix.trim_start_matches('.')
                    {
                        let formatted_path = format_path(path)?;
                        paths.push(formatted_path);
                    }
                }
            }
            Ok(())
        }

        collect_sheet_paths(
            &local_sheet_path,
            CLIENT_SUFFIX_LOCAL_SHEET_FILE,
            &mut sheet_paths,
        )
        .await?;
        Ok(sheet_paths)
    }
}
+
mod hide_folder {
    //! Best-effort, platform-specific hiding of dot-directories.
    //! On Windows this sets FILE_ATTRIBUTE_HIDDEN; on Unix a leading '.' is
    //! already enough, so the impl is a no-op.
    use std::io;
    use std::path::Path;

    #[cfg(windows)]
    use std::os::windows::ffi::OsStrExt;
    #[cfg(windows)]
    use winapi::um::fileapi::{GetFileAttributesW, INVALID_FILE_ATTRIBUTES, SetFileAttributesW};

    /// Hide an existing directory whose name starts with '.'.
    ///
    /// # Errors
    /// `InvalidInput` if `path` is not a directory, its name does not start
    /// with '.', or the name is not valid UTF-8; otherwise any OS error from
    /// the platform attribute calls.
    pub fn hide_folder(path: &Path) -> io::Result<()> {
        if !path.is_dir() {
            return Err(io::Error::new(
                io::ErrorKind::InvalidInput,
                "Path must be a directory",
            ));
        }

        if let Some(file_name) = path.file_name().and_then(|n| n.to_str()) {
            if !file_name.starts_with('.') {
                return Err(io::Error::new(
                    io::ErrorKind::InvalidInput,
                    "Directory name must start with '.'",
                ));
            }
        } else {
            return Err(io::Error::new(
                io::ErrorKind::InvalidInput,
                "Invalid directory name",
            ));
        }

        hide_folder_impl(path)
    }

    #[cfg(windows)]
    fn hide_folder_impl(path: &Path) -> io::Result<()> {
        // Convert to Windows wide string format (UTF-16, NUL-terminated).
        let path_str: Vec<u16> = path.as_os_str().encode_wide().chain(Some(0)).collect();

        // Get current attributes.
        // SAFETY: `path_str` is a valid, NUL-terminated UTF-16 buffer that
        // outlives the call; GetFileAttributesW only reads it.
        let attrs = unsafe { GetFileAttributesW(path_str.as_ptr()) };
        if attrs == INVALID_FILE_ATTRIBUTES {
            return Err(io::Error::last_os_error());
        }

        // Add hidden attribute flag, preserving all existing attributes.
        let new_attrs = attrs | winapi::um::winnt::FILE_ATTRIBUTE_HIDDEN;

        // Set new attributes.
        // SAFETY: same buffer validity argument as above; SetFileAttributesW
        // only reads the pointer.
        let success = unsafe { SetFileAttributesW(path_str.as_ptr(), new_attrs) };
        if success == 0 {
            return Err(io::Error::last_os_error());
        }

        Ok(())
    }

    // On Unix a leading '.' already hides the directory — nothing to do.
    #[cfg(unix)]
    fn hide_folder_impl(_path: &Path) -> io::Result<()> {
        Ok(())
    }

    #[cfg(not(any(windows, unix)))]
    fn hide_folder_impl(_path: &Path) -> io::Result<()> {
        Err(io::Error::new(
            io::ErrorKind::Unsupported,
            "Unsupported operating system",
        ))
    }
}
diff --git a/legacy_data/src/data/local/align_tasks.rs b/legacy_data/src/data/local/align_tasks.rs
new file mode 100644
index 0000000..b72804c
--- /dev/null
+++ b/legacy_data/src/data/local/align_tasks.rs
@@ -0,0 +1,110 @@
+use std::{
+ collections::{HashMap, HashSet},
+ path::PathBuf,
+};
+
+use data_struct::data_sort::quick_sort_with_cmp;
+
+use crate::data::local::workspace_analyzer::AnalyzeResult;
+
+pub type AlignTaskName = String;
+pub type AlignPathBuf = PathBuf;
+pub type AlignLostPathBuf = PathBuf;
+pub type AlignCreatedPathBuf = PathBuf;
+
/// Deterministically ordered, named alignment work items derived from an
/// `AnalyzeResult`. Each entry pairs a stable task name of the form
/// `"<kind>:<8-char-sha1-prefix>"` with the path(s) it affects.
pub struct AlignTasks {
    pub created: Vec<(AlignTaskName, AlignPathBuf)>,
    pub lost: Vec<(AlignTaskName, AlignPathBuf)>,
    // Moved entries carry (old path, new path).
    pub moved: Vec<(AlignTaskName, (AlignLostPathBuf, AlignCreatedPathBuf))>,
    pub erased: Vec<(AlignTaskName, AlignPathBuf)>,
}
+
+impl AlignTasks {
+ pub fn clone_from_analyze_result(result: &AnalyzeResult) -> Self {
+ AlignTasks {
+ created: path_hash_set_sort_helper(result.created.clone(), "created"),
+ lost: path_hash_set_sort_helper(result.lost.clone(), "lost"),
+ moved: path_hash_map_sort_helper(result.moved.clone(), "moved"),
+ erased: path_hash_set_sort_helper(result.erased.clone(), "erased"),
+ }
+ }
+
+ pub fn from_analyze_result(result: AnalyzeResult) -> Self {
+ AlignTasks {
+ created: path_hash_set_sort_helper(result.created, "created"),
+ lost: path_hash_set_sort_helper(result.lost, "lost"),
+ moved: path_hash_map_sort_helper(result.moved, "moved"),
+ erased: path_hash_set_sort_helper(result.erased, "erased"),
+ }
+ }
+}
+
+fn path_hash_set_sort_helper(
+ hash_set: HashSet<PathBuf>,
+ prefix: impl Into<String>,
+) -> Vec<(String, PathBuf)> {
+ let prefix_str = prefix.into();
+ let mut vec: Vec<(String, PathBuf)> = hash_set
+ .into_iter()
+ .map(|path| {
+ let hash = sha1_hash::calc_sha1_string(path.to_string_lossy());
+ let hash_prefix: String = hash.chars().take(8).collect();
+ let name = format!("{}:{}", prefix_str, hash_prefix);
+ (name, path)
+ })
+ .collect();
+
+ quick_sort_with_cmp(&mut vec, false, |a, b| {
+ // Compare by path depth first
+ let a_depth = a.1.components().count();
+ let b_depth = b.1.components().count();
+
+ if a_depth != b_depth {
+ return if a_depth < b_depth { -1 } else { 1 };
+ }
+
+ // If same depth, compare lexicographically
+ match a.1.cmp(&b.1) {
+ std::cmp::Ordering::Less => -1,
+ std::cmp::Ordering::Equal => 0,
+ std::cmp::Ordering::Greater => 1,
+ }
+ });
+
+ vec
+}
+
+fn path_hash_map_sort_helper(
+ hash_map: HashMap<String, (PathBuf, PathBuf)>,
+ prefix: impl Into<String>,
+) -> Vec<(String, (PathBuf, PathBuf))> {
+ let prefix_str = prefix.into();
+ let mut vec: Vec<(String, (PathBuf, PathBuf))> = hash_map
+ .into_values()
+ .map(|(path1, path2)| {
+ let hash = sha1_hash::calc_sha1_string(path1.to_string_lossy());
+ let hash_prefix: String = hash.chars().take(8).collect();
+ let name = format!("{}:{}", prefix_str, hash_prefix);
+ (name, (path1, path2))
+ })
+ .collect();
+
+ quick_sort_with_cmp(&mut vec, false, |a, b| {
+ // Compare by first PathBuf's path depth first
+ let a_depth = a.1.0.components().count();
+ let b_depth = b.1.0.components().count();
+
+ if a_depth != b_depth {
+ return if a_depth < b_depth { -1 } else { 1 };
+ }
+
+ // If same depth, compare lexicographically by first PathBuf
+ match a.1.0.cmp(&b.1.0) {
+ std::cmp::Ordering::Less => -1,
+ std::cmp::Ordering::Equal => 0,
+ std::cmp::Ordering::Greater => 1,
+ }
+ });
+
+ vec
+}
diff --git a/legacy_data/src/data/local/cached_sheet.rs b/legacy_data/src/data/local/cached_sheet.rs
new file mode 100644
index 0000000..46b390f
--- /dev/null
+++ b/legacy_data/src/data/local/cached_sheet.rs
@@ -0,0 +1,94 @@
+use std::{io::Error, path::PathBuf};
+
+use cfg_file::config::ConfigFile;
+use string_proc::{format_path::format_path, snake_case};
+use tokio::fs;
+
+use crate::{
+ constants::{
+ CLIENT_FILE_CACHED_SHEET, CLIENT_PATH_CACHED_SHEET, CLIENT_SUFFIX_CACHED_SHEET_FILE,
+ KEY_SHEET_NAME,
+ },
+ data::sheet::{SheetData, SheetName},
+ env::current_local_path,
+};
+
+pub type CachedSheetPathBuf = PathBuf;
+
/// # Cached Sheet
/// The cached sheet is a read-only version cloned from the upstream repository to the local environment,
/// automatically generated during update operations,
/// which records the latest Sheet information stored locally to accelerate data access and reduce network requests.
///
/// Zero-sized namespace type: all functionality lives in associated functions.
pub struct CachedSheet;
+
impl CachedSheet {
    /// Read the cached sheet data.
    ///
    /// The sheet name is normalized to snake_case before the path lookup.
    ///
    /// # Errors
    /// `NotFound` if no local workspace can be resolved, or any I/O error
    /// from reading/parsing the cached sheet file.
    pub async fn cached_sheet_data(sheet_name: &SheetName) -> Result<SheetData, std::io::Error> {
        let sheet_name = snake_case!(sheet_name.clone());

        let Some(path) = Self::cached_sheet_path(sheet_name) else {
            return Err(Error::new(
                std::io::ErrorKind::NotFound,
                "Local workspace not found!",
            ));
        };
        let data = SheetData::read_from(path).await?;
        Ok(data)
    }

    /// Get the path to the cached sheet file, or `None` if the current
    /// directory is not inside a workspace.
    pub fn cached_sheet_path(sheet_name: SheetName) -> Option<PathBuf> {
        let current_workspace = current_local_path()?;
        Some(
            current_workspace
                .join(CLIENT_FILE_CACHED_SHEET.replace(KEY_SHEET_NAME, &sheet_name.to_string())),
        )
    }

    /// Get all cached sheet names (file names with the cache suffix stripped).
    ///
    /// NOTE(review): `CLIENT_PATH_CACHED_SHEET` is read relative to the
    /// process working directory, not the workspace root resolved elsewhere
    /// in this impl — confirm callers always run from the workspace root.
    pub async fn cached_sheet_names() -> Result<Vec<SheetName>, std::io::Error> {
        let mut dir = fs::read_dir(CLIENT_PATH_CACHED_SHEET).await?;
        let mut sheet_names = Vec::new();

        while let Some(entry) = dir.next_entry().await? {
            let path = entry.path();

            if path.is_file()
                && let Some(file_name) = path.file_name().and_then(|n| n.to_str())
                && file_name.ends_with(CLIENT_SUFFIX_CACHED_SHEET_FILE)
            {
                let name_without_ext = file_name
                    .trim_end_matches(CLIENT_SUFFIX_CACHED_SHEET_FILE)
                    .to_string();
                sheet_names.push(name_without_ext);
            }
        }

        Ok(sheet_names)
    }

    /// Get all cached sheet paths, joined onto the workspace root and
    /// normalized via `format_path`.
    ///
    /// # Errors
    /// `NotFound` if no local workspace can be resolved, or any I/O error
    /// while listing the cache directory.
    pub async fn cached_sheet_paths() -> Result<Vec<CachedSheetPathBuf>, std::io::Error> {
        let mut dir = fs::read_dir(CLIENT_PATH_CACHED_SHEET).await?;
        let mut sheet_paths = Vec::new();
        let Some(workspace_path) = current_local_path() else {
            return Err(Error::new(
                std::io::ErrorKind::NotFound,
                "Local workspace not found!",
            ));
        };

        while let Some(entry) = dir.next_entry().await? {
            let path = entry.path();

            if path.is_file()
                && let Some(file_name) = path.file_name().and_then(|n| n.to_str())
                && file_name.ends_with(CLIENT_SUFFIX_CACHED_SHEET_FILE)
            {
                sheet_paths.push(format_path(workspace_path.join(path))?);
            }
        }

        Ok(sheet_paths)
    }
}
diff --git a/legacy_data/src/data/local/latest_file_data.rs b/legacy_data/src/data/local/latest_file_data.rs
new file mode 100644
index 0000000..f9b3aeb
--- /dev/null
+++ b/legacy_data/src/data/local/latest_file_data.rs
@@ -0,0 +1,103 @@
+use std::{collections::HashMap, io::Error, path::PathBuf};
+
+use cfg_file::ConfigFile;
+use serde::{Deserialize, Serialize};
+
+use crate::{
+ constants::{CLIENT_FILE_LATEST_DATA, CLIENT_FILE_MEMBER_HELD_NOSET, KEY_ACCOUNT},
+ data::{
+ member::MemberId,
+ vault::virtual_file::{VirtualFileId, VirtualFileVersion, VirtualFileVersionDescription},
+ },
+ env::current_local_path,
+};
+
/// # Latest file data
/// Records the file holder and the latest version for permission and update checks.
///
/// All three maps are keyed by [`VirtualFileId`]; an id may be present in one
/// map and absent from another (the getters return `Option` accordingly).
#[derive(Debug, Default, Clone, Serialize, Deserialize, ConfigFile)]
#[cfg_file(path = CLIENT_FILE_MEMBER_HELD_NOSET)]
pub struct LatestFileData {
    /// File holding status
    #[serde(rename = "held")]
    held_status: HashMap<VirtualFileId, HeldStatus>,

    /// File version (latest known upstream version per file)
    #[serde(rename = "ver")]
    versions: HashMap<VirtualFileId, VirtualFileVersion>,

    /// File histories and descriptions, ordered as received from upstream
    #[serde(rename = "his")]
    histories: HashMap<VirtualFileId, Vec<(VirtualFileVersion, VirtualFileVersionDescription)>>,
}
+
/// Holding state of a single virtual file as known to this client.
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub enum HeldStatus {
    /// Held by the given member; status changes are synced to the client.
    #[serde(rename = "Hold")]
    HeldWith(MemberId),

    /// Not held by anyone; status changes are synced to the client.
    #[serde(rename = "None")]
    NotHeld,

    /// Holding status is unknown; the server must inform the client.
    /// Default so that freshly-seen files start in the "ask the server" state.
    #[default]
    #[serde(rename = "Unknown")]
    WantedToKnow,
}
+
+impl LatestFileData {
+ /// Get the path to the file holding the held status information for the given member.
+ pub fn data_path(account: &MemberId) -> Result<PathBuf, std::io::Error> {
+ let Some(local_path) = current_local_path() else {
+ return Err(Error::new(
+ std::io::ErrorKind::NotFound,
+ "Workspace not found.",
+ ));
+ };
+ Ok(local_path.join(CLIENT_FILE_LATEST_DATA.replace(KEY_ACCOUNT, account)))
+ }
+
+ /// Get the member who holds the file with the given ID.
+ pub fn file_holder(&self, vfid: &VirtualFileId) -> Option<&MemberId> {
+ self.held_status.get(vfid).and_then(|status| match status {
+ HeldStatus::HeldWith(id) => Some(id),
+ _ => None,
+ })
+ }
+
+ /// Get the version of the file with the given ID.
+ pub fn file_version(&self, vfid: &VirtualFileId) -> Option<&VirtualFileVersion> {
+ self.versions.get(vfid)
+ }
+
+ /// Get the version of the file with the given ID.
+ pub fn file_histories(
+ &self,
+ vfid: &VirtualFileId,
+ ) -> Option<&Vec<(VirtualFileVersion, VirtualFileVersionDescription)>> {
+ self.histories.get(vfid)
+ }
+
+ /// Update the held status of the files.
+ pub fn update_info(
+ &mut self,
+ map: HashMap<
+ VirtualFileId,
+ (
+ Option<MemberId>,
+ VirtualFileVersion,
+ Vec<(VirtualFileVersion, VirtualFileVersionDescription)>,
+ ),
+ >,
+ ) {
+ for (vfid, (member_id, version, desc)) in map {
+ self.held_status.insert(
+ vfid.clone(),
+ match member_id {
+ Some(member_id) => HeldStatus::HeldWith(member_id),
+ None => HeldStatus::NotHeld,
+ },
+ );
+ self.versions.insert(vfid.clone(), version);
+ self.histories.insert(vfid, desc);
+ }
+ }
+}
diff --git a/legacy_data/src/data/local/latest_info.rs b/legacy_data/src/data/local/latest_info.rs
new file mode 100644
index 0000000..5748793
--- /dev/null
+++ b/legacy_data/src/data/local/latest_info.rs
@@ -0,0 +1,81 @@
+use std::{
+ collections::{HashMap, HashSet},
+ path::{Path, PathBuf},
+ time::SystemTime,
+};
+
+use cfg_file::ConfigFile;
+use serde::{Deserialize, Serialize};
+
+use crate::{
+ constants::{CLIENT_FILE_LATEST_INFO, CLIENT_FILE_LATEST_INFO_NOSET, KEY_ACCOUNT},
+ data::{
+ member::{Member, MemberId},
+ sheet::{SheetData, SheetName, SheetPathBuf},
+ vault::{
+ mapping_share::{Share, SheetShareId},
+ virtual_file::VirtualFileId,
+ },
+ },
+};
+
/// # Latest Info
/// Locally cached latest information,
/// used to cache personal information from upstream for querying and quickly retrieving member information.
#[derive(Default, Serialize, Deserialize, ConfigFile)]
#[cfg_file(path = CLIENT_FILE_LATEST_INFO_NOSET)]
pub struct LatestInfo {
    // Sheets
    /// Visible sheets,
    /// indicating which sheets I can edit
    #[serde(rename = "my")]
    pub visible_sheets: Vec<SheetName>,

    /// Invisible sheets,
    /// indicating which sheets I can export files to (these sheets are not readable to me)
    #[serde(rename = "others")]
    pub invisible_sheets: Vec<SheetInfo>,

    /// Reference sheets,
    /// indicating sheets owned by the host, visible to everyone,
    /// but only the host can modify or add mappings within them
    #[serde(rename = "refsheets")]
    pub reference_sheets: HashSet<SheetName>,

    /// Reference sheet data, indicating what files I can get from the reference sheet
    #[serde(rename = "ref")]
    pub ref_sheet_content: SheetData,

    /// Reverse mapping from virtual file IDs to actual paths in reference sheets
    #[serde(rename = "ref_vfs")]
    pub ref_sheet_vfs_mapping: HashMap<VirtualFileId, SheetPathBuf>,

    /// Shares in my sheets, indicating which external merge requests have entries that I can view
    #[serde(rename = "shares")]
    pub shares_in_my_sheets: HashMap<SheetName, HashMap<SheetShareId, Share>>,

    /// Instant of the last successful refresh from upstream;
    /// `None` until the first update completes.
    #[serde(rename = "update")]
    pub update_instant: Option<SystemTime>,

    // Members
    /// All member information of the vault, allowing me to contact them more conveniently
    #[serde(rename = "members")]
    pub vault_members: Vec<Member>,
}
+
+impl LatestInfo {
+ /// Get the path to the latest info file for a given workspace and member ID
+ pub fn latest_info_path(local_workspace_path: &Path, member_id: &MemberId) -> PathBuf {
+ local_workspace_path.join(CLIENT_FILE_LATEST_INFO.replace(KEY_ACCOUNT, member_id))
+ }
+}
+
/// Minimal description of a sheet that is not readable to the current member:
/// its name and, when known, who currently holds it.
#[derive(Default, Serialize, Deserialize)]
pub struct SheetInfo {
    #[serde(rename = "name")]
    pub sheet_name: SheetName,

    /// `None` when the sheet is not currently held by anyone.
    #[serde(rename = "holder")]
    pub holder_name: Option<MemberId>,
}
diff --git a/legacy_data/src/data/local/local_files.rs b/legacy_data/src/data/local/local_files.rs
new file mode 100644
index 0000000..9cc244f
--- /dev/null
+++ b/legacy_data/src/data/local/local_files.rs
@@ -0,0 +1,148 @@
+use std::path::{Path, PathBuf};
+
+use string_proc::format_path::format_path;
+use tokio::fs;
+
+use crate::constants::CLIENT_FOLDER_WORKSPACE_ROOT_NAME;
+
/// Workspace-relative file paths produced by [`get_relative_paths`]:
/// files only (directories already expanded), relative to the workspace root.
pub struct RelativeFiles {
    pub(crate) files: Vec<PathBuf>,
}
+
/// Consume the collection, yielding each relative path by value.
impl IntoIterator for RelativeFiles {
    type Item = PathBuf;
    type IntoIter = std::vec::IntoIter<Self::Item>;

    fn into_iter(self) -> Self::IntoIter {
        self.files.into_iter()
    }
}
+
impl RelativeFiles {
    /// Borrowing iterator over the relative paths.
    pub fn iter(&self) -> std::slice::Iter<'_, PathBuf> {
        self.files.iter()
    }
}
+
+/// Read the relative paths within the project from the input file list
+pub async fn get_relative_paths(local_path: &PathBuf, paths: &[PathBuf]) -> Option<RelativeFiles> {
+ // Get Relative Paths
+ let Ok(paths) = format_input_paths_and_ignore_outside_paths(local_path, paths).await else {
+ return None;
+ };
+ let files: Vec<PathBuf> = abs_paths_to_abs_files(paths).await;
+ let Ok(files) = parse_to_relative(local_path, files) else {
+ return None;
+ };
+ Some(RelativeFiles { files })
+}
+
+/// Normalize the input paths
+async fn format_input_paths(
+ local_path: &Path,
+ track_files: &[PathBuf],
+) -> Result<Vec<PathBuf>, std::io::Error> {
+ let current_dir = local_path;
+
+ let mut real_paths = Vec::new();
+ for file in track_files {
+ let path = current_dir.join(file);
+
+ // Skip paths that contain .jv directories
+ if path.components().any(|component| {
+ if let std::path::Component::Normal(name) = component {
+ name.to_str() == Some(CLIENT_FOLDER_WORKSPACE_ROOT_NAME)
+ } else {
+ false
+ }
+ }) {
+ continue;
+ }
+
+ match format_path(path) {
+ Ok(path) => real_paths.push(path),
+ Err(e) => {
+ return Err(std::io::Error::new(
+ std::io::ErrorKind::InvalidData,
+ format!("Failed to format path: {}", e),
+ ));
+ }
+ }
+ }
+
+ Ok(real_paths)
+}
+
+/// Ignore files outside the workspace
+async fn format_input_paths_and_ignore_outside_paths(
+ local_path: &PathBuf,
+ files: &[PathBuf],
+) -> Result<Vec<PathBuf>, std::io::Error> {
+ let result = format_input_paths(local_path, files).await?;
+ let result: Vec<PathBuf> = result
+ .into_iter()
+ .filter(|path| path.starts_with(local_path))
+ .collect();
+ Ok(result)
+}
+
+/// Normalize the input paths to relative paths
+fn parse_to_relative(
+ local_dir: &PathBuf,
+ files: Vec<PathBuf>,
+) -> Result<Vec<PathBuf>, std::io::Error> {
+ let result: Result<Vec<PathBuf>, _> = files
+ .iter()
+ .map(|p| {
+ p.strip_prefix(local_dir)
+ .map(|relative| relative.to_path_buf())
+ .map_err(|_| {
+ std::io::Error::new(
+ std::io::ErrorKind::InvalidInput,
+ "Path prefix stripping failed",
+ )
+ })
+ })
+ .collect();
+
+ result
+}
+
+/// Convert absolute paths to absolute file paths, expanding directories to their contained files
+async fn abs_paths_to_abs_files(paths: Vec<PathBuf>) -> Vec<PathBuf> {
+ let mut files = Vec::new();
+
+ for path in paths {
+ if !path.exists() {
+ continue;
+ }
+
+ let metadata = match fs::metadata(&path).await {
+ Ok(meta) => meta,
+ Err(_) => continue,
+ };
+
+ if metadata.is_file() {
+ files.push(path);
+ } else if metadata.is_dir() {
+ let walker = walkdir::WalkDir::new(&path);
+ for entry in walker.into_iter().filter_map(|e| e.ok()) {
+ if entry.path().components().any(|component| {
+ if let std::path::Component::Normal(name) = component {
+ name == CLIENT_FOLDER_WORKSPACE_ROOT_NAME
+ } else {
+ false
+ }
+ }) {
+ continue;
+ }
+
+ if entry.file_type().is_file() {
+ files.push(entry.path().to_path_buf());
+ }
+ }
+ }
+ }
+
+ files
+}
diff --git a/legacy_data/src/data/local/local_sheet.rs b/legacy_data/src/data/local/local_sheet.rs
new file mode 100644
index 0000000..b9c29f5
--- /dev/null
+++ b/legacy_data/src/data/local/local_sheet.rs
@@ -0,0 +1,439 @@
+use std::{collections::HashMap, io::Error, path::PathBuf, time::SystemTime};
+
+use ::serde::{Deserialize, Serialize};
+use cfg_file::{ConfigFile, config::ConfigFile};
+use string_proc::format_path::format_path;
+
+use crate::{
+ constants::CLIENT_FILE_LOCAL_SHEET_NOSET,
+ data::{
+ local::LocalWorkspace,
+ member::MemberId,
+ sheet::SheetName,
+ vault::virtual_file::{VirtualFileId, VirtualFileVersion, VirtualFileVersionDescription},
+ },
+};
+
+pub type LocalFilePathBuf = PathBuf;
+pub type LocalSheetPathBuf = PathBuf;
+
/// # Local Sheet
/// Local sheet information, used to record metadata of actual local files,
/// to compare with upstream information for more optimized file submission,
/// and to determine whether files need to be updated or submitted.
pub struct LocalSheet<'a> {
    // Workspace this sheet belongs to (borrowed for the sheet's lifetime).
    pub(crate) local_workspace: &'a LocalWorkspace,
    // Member that owns this sheet.
    pub(crate) member: MemberId,
    // Name of the sheet within the workspace.
    pub(crate) sheet_name: String,
    // The persisted mapping data backing this sheet.
    pub(crate) data: LocalSheetData,
}
+
impl<'a> LocalSheet<'a> {
    /// Create a new LocalSheet instance from its parts (no I/O is performed;
    /// `data` is taken as-is).
    pub fn new(
        local_workspace: &'a LocalWorkspace,
        member: MemberId,
        sheet_name: String,
        data: LocalSheetData,
    ) -> Self {
        Self {
            local_workspace,
            member,
            sheet_name,
            data,
        }
    }
}
+
/// Persisted mapping data of a local sheet.
///
/// Invariant: `mapping` and `vfs` are inverse indexes of each other — every
/// entry `path -> meta` in `mapping` has a matching `meta.mapping_vfid -> path`
/// in `vfs`. The mutation methods on this type maintain that invariant.
#[derive(Debug, Default, Serialize, Deserialize, ConfigFile, Clone)]
#[cfg_file(path = CLIENT_FILE_LOCAL_SHEET_NOSET)] // Do not use LocalSheet::write or LocalSheet::read
pub struct LocalSheetData {
    /// Local file path to metadata mapping.
    #[serde(rename = "map")]
    pub(crate) mapping: HashMap<LocalFilePathBuf, LocalMappingMetadata>,

    /// Reverse index: virtual file ID to the local path it is mapped at.
    #[serde(rename = "vfs")]
    pub(crate) vfs: HashMap<VirtualFileId, LocalFilePathBuf>,
}
+
/// Metadata recorded for one local file mapping: the state captured when the
/// file was downloaded, plus the result of the most recent modification check.
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct LocalMappingMetadata {
    /// Hash value generated immediately after the file is downloaded to the local workspace
    #[serde(rename = "base_hash")]
    pub(crate) hash_when_updated: String,

    /// Time when the file was downloaded to the local workspace
    #[serde(rename = "time")]
    pub(crate) time_when_updated: SystemTime,

    /// Size of the file when downloaded to the local workspace
    #[serde(rename = "size")]
    pub(crate) size_when_updated: u64,

    /// Version description when the file was downloaded to the local workspace
    #[serde(rename = "desc")]
    pub(crate) version_desc_when_updated: VirtualFileVersionDescription,

    /// Version when the file was downloaded to the local workspace
    #[serde(rename = "ver")]
    pub(crate) version_when_updated: VirtualFileVersion,

    /// Virtual file ID corresponding to the local path
    #[serde(rename = "id")]
    pub(crate) mapping_vfid: VirtualFileId,

    /// Latest modify check time
    #[serde(rename = "check_time")]
    pub(crate) last_modify_check_time: SystemTime,

    /// Latest modify check result (true = file was modified locally)
    #[serde(rename = "modified")]
    pub(crate) last_modify_check_result: bool,

    /// Latest modify check hash result; `None` until a check computes one
    #[serde(rename = "current_hash")]
    pub(crate) last_modify_check_hash: Option<String>,
}
+
impl LocalSheetData {
    /// Wrap LocalSheetData into LocalSheet with workspace, member, and sheet
    /// name (pure construction; no validation or I/O).
    pub fn wrap_to_local_sheet<'a>(
        self,
        workspace: &'a LocalWorkspace,
        member: MemberId,
        sheet_name: SheetName,
    ) -> LocalSheet<'a> {
        LocalSheet {
            local_workspace: workspace,
            member,
            sheet_name,
            data: self,
        }
    }
}
+
impl LocalMappingMetadata {
    /// Create a new MappingMetaData instance.
    /// `last_modify_check_hash` starts as `None` (no check has computed one yet).
    ///
    /// NOTE(review): several parameter and method names below misspell
    /// "modify" as "modifiy"; they are public API, so renaming them would
    /// break callers — left as-is.
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        hash_when_updated: String,
        time_when_updated: SystemTime,
        size_when_updated: u64,
        version_desc_when_updated: VirtualFileVersionDescription,
        version_when_updated: VirtualFileVersion,
        mapping_vfid: VirtualFileId,
        last_modifiy_check_time: SystemTime,
        last_modifiy_check_result: bool,
    ) -> Self {
        Self {
            hash_when_updated,
            time_when_updated,
            size_when_updated,
            version_desc_when_updated,
            version_when_updated,
            mapping_vfid,
            last_modify_check_time: last_modifiy_check_time,
            last_modify_check_result: last_modifiy_check_result,
            last_modify_check_hash: None,
        }
    }

    /// Getter for hash_when_updated
    pub fn hash_when_updated(&self) -> &String {
        &self.hash_when_updated
    }

    /// Setter for hash_when_updated
    pub fn set_hash_when_updated(&mut self, hash: String) {
        self.hash_when_updated = hash;
    }

    /// Getter for time_when_updated
    pub fn time_when_updated(&self) -> &SystemTime {
        &self.time_when_updated
    }

    /// Setter for time_when_updated
    pub fn set_time_when_updated(&mut self, time: SystemTime) {
        self.time_when_updated = time;
    }

    /// Getter for size_when_updated
    pub fn size_when_updated(&self) -> u64 {
        self.size_when_updated
    }

    /// Setter for size_when_updated
    pub fn set_size_when_updated(&mut self, size: u64) {
        self.size_when_updated = size;
    }

    /// Getter for version_desc_when_updated
    pub fn version_desc_when_updated(&self) -> &VirtualFileVersionDescription {
        &self.version_desc_when_updated
    }

    /// Setter for version_desc_when_updated
    pub fn set_version_desc_when_updated(&mut self, version_desc: VirtualFileVersionDescription) {
        self.version_desc_when_updated = version_desc;
    }

    /// Getter for version_when_updated
    pub fn version_when_updated(&self) -> &VirtualFileVersion {
        &self.version_when_updated
    }

    /// Setter for version_when_updated
    pub fn set_version_when_updated(&mut self, version: VirtualFileVersion) {
        self.version_when_updated = version;
    }

    /// Getter for mapping_vfid
    pub fn mapping_vfid(&self) -> &VirtualFileId {
        &self.mapping_vfid
    }

    /// Setter for mapping_vfid
    pub fn set_mapping_vfid(&mut self, vfid: VirtualFileId) {
        self.mapping_vfid = vfid;
    }

    /// Getter for the last modify check time
    pub fn last_modifiy_check_time(&self) -> &SystemTime {
        &self.last_modify_check_time
    }

    /// Setter for the last modify check time
    pub fn set_last_modifiy_check_time(&mut self, time: SystemTime) {
        self.last_modify_check_time = time;
    }

    /// Getter for the last modify check result
    pub fn last_modifiy_check_result(&self) -> bool {
        self.last_modify_check_result
    }

    /// Setter for the last modify check result
    pub fn set_last_modifiy_check_result(&mut self, result: bool) {
        self.last_modify_check_result = result;
    }

    /// Getter for the last modify check hash
    pub fn last_modifiy_check_hash(&self) -> &Option<String> {
        &self.last_modify_check_hash
    }

    /// Setter for the last modify check hash
    pub fn set_last_modifiy_check_hash(&mut self, hash: Option<String>) {
        self.last_modify_check_hash = hash;
    }
}
+
/// Manual `Default` so the two timestamps default to the construction time
/// (`SystemTime::now()`) rather than the epoch a derived impl would give.
impl Default for LocalMappingMetadata {
    fn default() -> Self {
        Self {
            hash_when_updated: Default::default(),
            time_when_updated: SystemTime::now(),
            size_when_updated: Default::default(),
            version_desc_when_updated: Default::default(),
            version_when_updated: Default::default(),
            mapping_vfid: Default::default(),
            last_modify_check_time: SystemTime::now(),
            last_modify_check_result: false,
            last_modify_check_hash: None,
        }
    }
}
+
+mod instant_serde {
+ use serde::{self, Deserialize, Deserializer, Serializer};
+ use tokio::time::Instant;
+
+ pub fn serialize<S>(instant: &Instant, serializer: S) -> Result<S::Ok, S::Error>
+ where
+ S: Serializer,
+ {
+ serializer.serialize_u64(instant.elapsed().as_secs())
+ }
+
+ pub fn deserialize<'de, D>(deserializer: D) -> Result<Instant, D::Error>
+ where
+ D: Deserializer<'de>,
+ {
+ let secs = u64::deserialize(deserializer)?;
+ Ok(Instant::now() - std::time::Duration::from_secs(secs))
+ }
+}
+
/// Cheap view conversion: borrow the underlying `LocalSheetData` of a sheet.
impl<'a> From<&'a LocalSheet<'a>> for &'a LocalSheetData {
    fn from(sheet: &'a LocalSheet<'a>) -> Self {
        &sheet.data
    }
}
+
+impl LocalSheetData {
+ /// Add mapping to local sheet data
+ pub fn add_mapping(
+ &mut self,
+ path: &LocalFilePathBuf,
+ mapping: LocalMappingMetadata,
+ ) -> Result<(), std::io::Error> {
+ let path = format_path(path)?;
+ if self.mapping.contains_key(&path) || self.vfs.contains_key(&mapping.mapping_vfid) {
+ return Err(Error::new(
+ std::io::ErrorKind::AlreadyExists,
+ "Mapping already exists",
+ ));
+ }
+
+ self.mapping.insert(path.clone(), mapping.clone());
+ self.vfs.insert(mapping.mapping_vfid.clone(), path);
+ Ok(())
+ }
+
+ /// Move mapping to other path
+ pub fn move_mapping(
+ &mut self,
+ from: &LocalFilePathBuf,
+ to: &LocalFilePathBuf,
+ ) -> Result<(), std::io::Error> {
+ let from = format_path(from)?;
+ let to = format_path(to)?;
+ if self.mapping.contains_key(&to) {
+ return Err(Error::new(
+ std::io::ErrorKind::AlreadyExists,
+ "To path already exists.",
+ ));
+ }
+
+ let Some(old_value) = self.mapping.remove(&from) else {
+ return Err(Error::new(
+ std::io::ErrorKind::NotFound,
+ "From path is not found.",
+ ));
+ };
+
+ // Update vfs mapping
+ self.vfs.insert(old_value.mapping_vfid.clone(), to.clone());
+ self.mapping.insert(to, old_value);
+
+ Ok(())
+ }
+
+ /// Remove mapping from local sheet
+ pub fn remove_mapping(
+ &mut self,
+ path: &LocalFilePathBuf,
+ ) -> Result<LocalMappingMetadata, std::io::Error> {
+ let path = format_path(path)?;
+ match self.mapping.remove(&path) {
+ Some(mapping) => {
+ self.vfs.remove(&mapping.mapping_vfid);
+ Ok(mapping)
+ }
+ None => Err(Error::new(
+ std::io::ErrorKind::NotFound,
+ "Path is not found.",
+ )),
+ }
+ }
+
+ /// Get immutable mapping data
+ pub fn mapping_data(
+ &self,
+ path: &LocalFilePathBuf,
+ ) -> Result<&LocalMappingMetadata, std::io::Error> {
+ let path = format_path(path)?;
+ let Some(data) = self.mapping.get(&path) else {
+ return Err(Error::new(
+ std::io::ErrorKind::NotFound,
+ "Path is not found.",
+ ));
+ };
+ Ok(data)
+ }
+
+ /// Get mutable mapping data
+ pub fn mapping_data_mut(
+ &mut self,
+ path: &LocalFilePathBuf,
+ ) -> Result<&mut LocalMappingMetadata, std::io::Error> {
+ let path = format_path(path)?;
+ let Some(data) = self.mapping.get_mut(&path) else {
+ return Err(Error::new(
+ std::io::ErrorKind::NotFound,
+ "Path is not found.",
+ ));
+ };
+ Ok(data)
+ }
+
+ /// Get path by VirtualFileId
+ pub fn path_by_id(&self, vfid: &VirtualFileId) -> Option<&PathBuf> {
+ self.vfs.get(vfid)
+ }
+}
+
+impl<'a> LocalSheet<'a> {
+ /// Add mapping to local sheet data
+ pub fn add_mapping(
+ &mut self,
+ path: &LocalFilePathBuf,
+ mapping: LocalMappingMetadata,
+ ) -> Result<(), std::io::Error> {
+ self.data.add_mapping(path, mapping)
+ }
+
+ /// Move mapping to other path
+ pub fn move_mapping(
+ &mut self,
+ from: &LocalFilePathBuf,
+ to: &LocalFilePathBuf,
+ ) -> Result<(), std::io::Error> {
+ self.data.move_mapping(from, to)
+ }
+
+ /// Remove mapping from local sheet
+ pub fn remove_mapping(
+ &mut self,
+ path: &LocalFilePathBuf,
+ ) -> Result<LocalMappingMetadata, std::io::Error> {
+ self.data.remove_mapping(path)
+ }
+
+ /// Get immutable mapping data
+ pub fn mapping_data(
+ &self,
+ path: &LocalFilePathBuf,
+ ) -> Result<&LocalMappingMetadata, std::io::Error> {
+ self.data.mapping_data(path)
+ }
+
+ /// Get mutable mapping data
+ pub fn mapping_data_mut(
+ &mut self,
+ path: &LocalFilePathBuf,
+ ) -> Result<&mut LocalMappingMetadata, std::io::Error> {
+ self.data.mapping_data_mut(path)
+ }
+
+ /// Write the sheet to disk
+ pub async fn write(&mut self) -> Result<(), std::io::Error> {
+ let path = self
+ .local_workspace
+ .local_sheet_path(&self.member, &self.sheet_name);
+ self.write_to_path(path).await
+ }
+
+ /// Write the sheet to custom path
+ pub async fn write_to_path(&mut self, path: impl Into<PathBuf>) -> Result<(), std::io::Error> {
+ let path = path.into();
+ LocalSheetData::write_to(&self.data, path).await?;
+ Ok(())
+ }
+
+ /// Get path by VirtualFileId
+ pub fn path_by_id(&self, vfid: &VirtualFileId) -> Option<&PathBuf> {
+ self.data.path_by_id(vfid)
+ }
+}
diff --git a/legacy_data/src/data/local/modified_status.rs b/legacy_data/src/data/local/modified_status.rs
new file mode 100644
index 0000000..e0e6dd5
--- /dev/null
+++ b/legacy_data/src/data/local/modified_status.rs
@@ -0,0 +1,30 @@
+use crate::{constants::CLIENT_FILE_VAULT_MODIFIED, env::current_local_path};
+
+pub async fn check_vault_modified() -> bool {
+ let Some(current_dir) = current_local_path() else {
+ return false;
+ };
+
+ let record_file = current_dir.join(CLIENT_FILE_VAULT_MODIFIED);
+ if !record_file.exists() {
+ return false;
+ }
+
+ let Ok(contents) = tokio::fs::read_to_string(&record_file).await else {
+ return false;
+ };
+
+ matches!(contents.trim().to_lowercase().as_str(), "true")
+}
+
+pub async fn sign_vault_modified(modified: bool) {
+ let Some(current_dir) = current_local_path() else {
+ return;
+ };
+
+ let record_file = current_dir.join(CLIENT_FILE_VAULT_MODIFIED);
+
+ let contents = if modified { "true" } else { "false" };
+
+ let _ = tokio::fs::write(&record_file, contents).await;
+}
diff --git a/legacy_data/src/data/local/workspace_analyzer.rs b/legacy_data/src/data/local/workspace_analyzer.rs
new file mode 100644
index 0000000..5d73e03
--- /dev/null
+++ b/legacy_data/src/data/local/workspace_analyzer.rs
@@ -0,0 +1,359 @@
+use std::{
+ collections::{HashMap, HashSet},
+ io::Error,
+ path::PathBuf,
+};
+
+use serde::Serialize;
+use sha1_hash::calc_sha1_multi;
+use string_proc::format_path::format_path;
+use walkdir::WalkDir;
+
+use crate::data::{
+ local::{LocalWorkspace, cached_sheet::CachedSheet, local_sheet::LocalSheet},
+ member::MemberId,
+ sheet::{SheetData, SheetName},
+ vault::virtual_file::VirtualFileId,
+};
+
// Documentation-only aliases: each is a plain `PathBuf` holding a path
// relative to the workspace root; the names record the role a path plays.
pub type FromRelativePathBuf = PathBuf;
pub type ToRelativePathBuf = PathBuf;
pub type CreatedRelativePathBuf = PathBuf;
pub type LostRelativePathBuf = PathBuf;
pub type ModifiedRelativePathBuf = PathBuf;
+
/// Outcome of scanning the workspace: local files grouped by what happened
/// to them since the last recorded state. Borrows the workspace so results
/// cannot outlive it.
pub struct AnalyzeResult<'a> {
    // Workspace the analysis was produced from (also ties the lifetime).
    local_workspace: &'a LocalWorkspace,

    /// Moved local files
    pub moved: HashMap<VirtualFileId, (FromRelativePathBuf, ToRelativePathBuf)>,

    /// Newly created local files
    pub created: HashSet<CreatedRelativePathBuf>,

    /// Lost local files
    pub lost: HashSet<LostRelativePathBuf>,

    /// Erased local files
    // NOTE(review): reuses `LostRelativePathBuf`; a dedicated alias would be clearer.
    pub erased: HashSet<LostRelativePathBuf>,

    /// Modified local files (excluding moved files)
    /// For files that were both moved and modified, changes can only be detected after LocalSheet mapping is aligned with actual files
    pub modified: HashSet<ModifiedRelativePathBuf>,
}

/// Owned, serializable mirror of `AnalyzeResult` without the workspace borrow.
#[derive(Serialize, Default)]
pub struct AnalyzeResultPure {
    /// Moved local files
    pub moved: HashMap<VirtualFileId, (FromRelativePathBuf, ToRelativePathBuf)>,

    /// Newly created local files
    pub created: HashSet<CreatedRelativePathBuf>,

    /// Lost local files
    pub lost: HashSet<LostRelativePathBuf>,

    /// Erased local files
    pub erased: HashSet<LostRelativePathBuf>,

    /// Modified local files (excluding moved files)
    /// For files that were both moved and modified, changes can only be detected after LocalSheet mapping is aligned with actual files
    pub modified: HashSet<ModifiedRelativePathBuf>,
}
+
+impl<'a> From<AnalyzeResult<'a>> for AnalyzeResultPure {
+ fn from(result: AnalyzeResult<'a>) -> Self {
+ AnalyzeResultPure {
+ moved: result.moved,
+ created: result.created,
+ lost: result.lost,
+ erased: result.erased,
+ modified: result.modified,
+ }
+ }
+}
+
/// Per-analysis inputs threaded through the analyze passes.
struct AnalyzeContext<'a> {
    // Member the analysis runs as.
    member: MemberId,
    // Sheet currently in use.
    sheet_name: SheetName,
    // Local sheet, when one could be read (`analyze_moved` tolerates `None`).
    local_sheet: Option<LocalSheet<'a>>,
    // Cached sheet data, when available.
    cached_sheet_data: Option<SheetData>,
}
+
+impl<'a> AnalyzeResult<'a> {
    /// Analyze all files, calculate the file information provided
    ///
    /// Walks the working tree, loads the local and cached sheets, then runs
    /// the move-detection and modification-detection passes in order.
    pub async fn analyze_local_status(
        local_workspace: &'a LocalWorkspace,
    ) -> Result<AnalyzeResult<'a>, std::io::Error> {
        // Workspace
        let workspace = local_workspace;

        // Current member, sheet — the config lock is held only for this scope
        let (member, sheet_name) = {
            let mut_workspace = workspace.config.lock().await;
            let member = mut_workspace.current_account();
            let Some(sheet) = mut_workspace.sheet_in_use().clone() else {
                return Err(Error::new(std::io::ErrorKind::NotFound, "Sheet not found"));
            };
            (member, sheet)
        };

        // Local files (RelativePaths)
        let local_path = workspace.local_path();
        let file_relative_paths = {
            let mut paths = HashSet::new();
            for entry in WalkDir::new(local_path) {
                let entry = match entry {
                    // Unreadable directory entries are skipped silently.
                    Ok(entry) => entry,
                    Err(_) => continue,
                };

                // Skip entries that contain ".jv" in their path
                // NOTE(review): substring match also skips e.g. "foo.jvx" or a
                // folder named "a.jv.b", not only the workspace folder — confirm.
                if entry.path().to_string_lossy().contains(".jv") {
                    continue;
                }

                if entry.file_type().is_file()
                    && let Ok(relative_path) = entry.path().strip_prefix(local_path)
                {
                    let format = format_path(relative_path.to_path_buf());
                    let Ok(format) = format else {
                        continue;
                    };
                    paths.insert(format);
                }
            }

            paths
        };

        // Read local sheet — optional: the analyze passes tolerate its absence
        let local_sheet = (workspace.local_sheet(&member, &sheet_name).await).ok();

        // Read cached sheet — required; its absence aborts the analysis
        let cached_sheet_data = match CachedSheet::cached_sheet_data(&sheet_name).await {
            Ok(v) => Some(v),
            Err(_) => {
                return Err(Error::new(
                    std::io::ErrorKind::NotFound,
                    "Cached sheet not found",
                ));
            }
        };

        // Create new result
        let mut result = Self::none_result(workspace);

        // Analyze entry
        let mut analyze_ctx = AnalyzeContext {
            member,
            sheet_name,
            local_sheet,
            cached_sheet_data,
        };
        Self::analyze_moved(&mut result, &file_relative_paths, &analyze_ctx, workspace).await?;
        Self::analyze_modified(
            &mut result,
            &file_relative_paths,
            &mut analyze_ctx,
            workspace,
        )
        .await?;

        Ok(result)
    }
+
+ /// Track file moves by comparing recorded SHA1 hashes with actual file SHA1 hashes
+ /// For files that cannot be directly matched, continue searching using fuzzy matching algorithms
+ async fn analyze_moved(
+ result: &mut AnalyzeResult<'_>,
+ file_relative_paths: &HashSet<PathBuf>,
+ analyze_ctx: &AnalyzeContext<'a>,
+ workspace: &LocalWorkspace,
+ ) -> Result<(), std::io::Error> {
+ let local_sheet_paths: HashSet<&PathBuf> = match &analyze_ctx.local_sheet {
+ Some(local_sheet) => local_sheet.data.mapping.keys().collect(),
+ None => HashSet::new(),
+ };
+ let file_relative_paths_ref: HashSet<&PathBuf> = file_relative_paths.iter().collect();
+
+ // Files that exist locally but not in remote
+ let mut erased_files: HashSet<PathBuf> = HashSet::new();
+
+ if let Some(cached_data) = &analyze_ctx.cached_sheet_data {
+ if let Some(local_sheet) = &analyze_ctx.local_sheet {
+ let cached_sheet_mapping = cached_data.mapping();
+ let local_sheet_mapping = &local_sheet.data.mapping;
+
+ // Find paths that exist in local sheet but not in cached sheet
+ for local_path in local_sheet_mapping.keys() {
+ if !cached_sheet_mapping.contains_key(local_path) {
+ erased_files.insert(local_path.clone());
+ }
+ }
+ }
+ }
+
+ // Files that exist in the local sheet but not in reality are considered lost
+ let mut lost_files: HashSet<&PathBuf> = local_sheet_paths
+ .difference(&file_relative_paths_ref)
+ .filter(|&&path| !erased_files.contains(path))
+ .cloned()
+ .collect();
+
+ // Files that exist in reality but not in the local sheet are recorded as newly created
+ let mut new_files: HashSet<&PathBuf> = file_relative_paths_ref
+ .difference(&local_sheet_paths)
+ .cloned()
+ .collect();
+
+ // Calculate hashes for new files
+ let new_files_for_hash: Vec<PathBuf> = new_files
+ .iter()
+ .map(|p| workspace.local_path.join(p))
+ .collect();
+ let file_hashes: HashSet<(PathBuf, String)> =
+ match calc_sha1_multi::<PathBuf, Vec<PathBuf>>(new_files_for_hash, 8192).await {
+ Ok(hash) => hash,
+ Err(e) => return Err(Error::other(e)),
+ }
+ .iter()
+ .map(|r| (r.file_path.clone(), r.hash.to_string()))
+ .collect();
+
+ // Build hash mapping table for lost files
+ let mut lost_files_hash_mapping: HashMap<String, FromRelativePathBuf> =
+ match &analyze_ctx.local_sheet {
+ Some(local_sheet) => lost_files
+ .iter()
+ .filter_map(|f| {
+ local_sheet.mapping_data(f).ok().map(|mapping_data| {
+ (
+ // Using the most recently recorded Hash can more accurately identify moved items,
+ // but if it doesn't exist, fall back to the initially recorded Hash
+ mapping_data
+ .last_modify_check_hash
+ .as_ref()
+ .cloned()
+ .unwrap_or(mapping_data.hash_when_updated.clone()),
+ (*f).clone(),
+ )
+ })
+ })
+ .collect(),
+ None => HashMap::new(),
+ };
+
+ // If these hashes correspond to the hashes of missing files, then this pair of new and lost items will be merged into moved items
+ let mut moved_files: HashSet<(FromRelativePathBuf, ToRelativePathBuf)> = HashSet::new();
+ for (new_path, new_hash) in file_hashes {
+ let new_path = new_path
+ .strip_prefix(&workspace.local_path)
+ .map(|p| p.to_path_buf())
+ .unwrap_or(new_path);
+
+ // If the new hash value hits the mapping, add a moved item
+ if let Some(lost_path) = lost_files_hash_mapping.remove(&new_hash) {
+ // Remove this new item and lost item
+ lost_files.remove(&lost_path);
+ new_files.remove(&new_path);
+
+ // Create moved item
+ moved_files.insert((lost_path.clone(), new_path));
+ }
+ }
+
+ // Enter fuzzy matching to match other potentially moved items that haven't been matched
+ // If the total number of new and lost files is divisible by 2, it indicates there might still be files that have been moved, consider trying fuzzy matching
+ if new_files.len() + lost_files.len() % 2 == 0 {
+ // Try fuzzy matching
+ // ...
+ }
+
+ // Collect results and set the result
+ result.created = new_files.iter().map(|p| (*p).clone()).collect();
+ result.lost = lost_files.iter().map(|p| (*p).clone()).collect();
+ result.moved = moved_files
+ .iter()
+ .filter_map(|(from, to)| {
+ let vfid = analyze_ctx
+ .local_sheet
+ .as_ref()
+ .and_then(|local_sheet| local_sheet.mapping_data(from).ok())
+ .map(|mapping_data| mapping_data.mapping_vfid.clone());
+ vfid.map(|vfid| (vfid, (from.clone(), to.clone())))
+ })
+ .collect();
+ result.erased = erased_files;
+
+ Ok(())
+ }
+
+ /// Compare using file modification time and SHA1 hash values.
+ /// Note: For files that have been both moved and modified, they can only be recognized as modified after their location is matched.
+ async fn analyze_modified(
+ result: &mut AnalyzeResult<'_>,
+ file_relative_paths: &HashSet<PathBuf>,
+ analyze_ctx: &mut AnalyzeContext<'a>,
+ workspace: &LocalWorkspace,
+ ) -> Result<(), std::io::Error> {
+ let local_sheet = &mut analyze_ctx.local_sheet.as_mut().unwrap();
+ let local_path = local_sheet.local_workspace.local_path().clone();
+
+ for path in file_relative_paths {
+ // Get mapping data
+ let Ok(mapping_data) = local_sheet.mapping_data_mut(path) else {
+ continue;
+ };
+
+ // If modified time not changed, skip
+ let modified_time = std::fs::metadata(local_path.join(path))?.modified()?;
+ if &modified_time == mapping_data.last_modifiy_check_time() {
+ if mapping_data.last_modifiy_check_result() {
+ result.modified.insert(path.clone());
+ }
+ continue;
+ }
+
+ // Calculate hash
+ let hash_calc = match sha1_hash::calc_sha1(workspace.local_path.join(path), 2048).await
+ {
+ Ok(hash) => hash,
+ Err(e) => return Err(Error::other(e)),
+ };
+
+ // If hash not match, mark as modified
+ if &hash_calc.hash != mapping_data.hash_when_updated() {
+ result.modified.insert(path.clone());
+
+ // Update last modified check time to modified time
+ mapping_data.last_modify_check_time = modified_time;
+ mapping_data.last_modify_check_result = true;
+ } else {
+ // Update last modified check time to modified time
+ mapping_data.last_modify_check_time = modified_time;
+ mapping_data.last_modify_check_result = false;
+ }
+
+ // Record latest hash
+ mapping_data.last_modify_check_hash = Some(hash_calc.hash)
+ }
+
+ // Persist the local sheet data
+ LocalSheet::write(local_sheet).await?;
+
+ Ok(())
+ }
+
+ /// Generate a empty AnalyzeResult
+ fn none_result(local_workspace: &'a LocalWorkspace) -> AnalyzeResult<'a> {
+ AnalyzeResult {
+ local_workspace,
+ moved: HashMap::new(),
+ created: HashSet::new(),
+ lost: HashSet::new(),
+ modified: HashSet::new(),
+ erased: HashSet::new(),
+ }
+ }
+}
diff --git a/legacy_data/src/data/local/workspace_config.rs b/legacy_data/src/data/local/workspace_config.rs
new file mode 100644
index 0000000..f97d049
--- /dev/null
+++ b/legacy_data/src/data/local/workspace_config.rs
@@ -0,0 +1,374 @@
+use cfg_file::ConfigFile;
+use cfg_file::config::ConfigFile;
+use serde::{Deserialize, Serialize};
+use std::io::Error;
+use std::net::SocketAddr;
+use std::path::Path;
+use std::path::PathBuf;
+use string_proc::snake_case;
+
+use crate::constants::CLIENT_FILE_WORKSPACE;
+use crate::constants::CLIENT_FOLDER_WORKSPACE_ROOT_NAME;
+use crate::constants::CLIENT_PATH_LOCAL_DRAFT;
+use crate::constants::CLIENT_PATH_WORKSPACE_ROOT;
+use crate::constants::KEY_ACCOUNT;
+use crate::constants::KEY_SHEET_NAME;
+use crate::constants::PORT;
+use crate::data::local::latest_info::LatestInfo;
+use crate::data::member::MemberId;
+use crate::data::sheet::SheetName;
+use crate::data::vault::vault_config::VaultUuid;
+use crate::env::current_local_path;
+
/// Per-workspace client configuration, persisted at the path given by the
/// `cfg_file` attribute. Field names are shortened on disk via `serde(rename)`.
#[derive(Serialize, Deserialize, ConfigFile, Clone)]
#[cfg_file(path = CLIENT_FILE_WORKSPACE)]
pub struct LocalConfig {
    /// The upstream address, representing the upstream address of the local workspace,
    /// to facilitate timely retrieval of new updates from the upstream source.
    #[serde(rename = "addr")]
    upstream_addr: SocketAddr,

    /// The member ID used by the current local workspace.
    /// This ID will be used to verify access permissions when connecting to the upstream server.
    #[serde(rename = "as")]
    using_account: MemberId,

    /// Whether the current member is interacting as a host.
    /// In host mode, full Vault operation permissions are available except for adding new content.
    #[serde(rename = "host")]
    using_host_mode: bool,

    /// Whether the local workspace is stained.
    ///
    /// If stained, it can only set an upstream server with the same identifier.
    ///
    /// If the value is None, it means not stained;
    /// otherwise, it contains the stain identifier (i.e., the upstream vault's unique ID)
    #[serde(rename = "up_uid")]
    stained_uuid: Option<VaultUuid>,

    /// The name of the sheet currently in use.
    /// `None` means no sheet is open (see `use_sheet` / `exit_sheet`).
    #[serde(rename = "use")]
    sheet_in_use: Option<SheetName>,
}
+
+impl Default for LocalConfig {
+ fn default() -> Self {
+ Self {
+ upstream_addr: SocketAddr::V4(std::net::SocketAddrV4::new(
+ std::net::Ipv4Addr::new(127, 0, 0, 1),
+ PORT,
+ )),
+ using_account: "unknown".to_string(),
+ using_host_mode: false,
+ stained_uuid: None,
+ sheet_in_use: None,
+ }
+ }
+}
+
+impl LocalConfig {
    /// Set the vault address.
    ///
    /// Only updates the in-memory value; persisting the config is the caller's job.
    pub fn set_vault_addr(&mut self, addr: SocketAddr) {
        self.upstream_addr = addr;
    }

    /// Get the vault address.
    pub fn vault_addr(&self) -> SocketAddr {
        self.upstream_addr
    }

    /// Set the currently used account
    ///
    /// Refuses (with `DirectoryNotEmpty`) while a sheet is in use: drafts are
    /// keyed by (account, sheet), so the sheet must be exited first.
    pub fn set_current_account(&mut self, account: MemberId) -> Result<(), std::io::Error> {
        if self.sheet_in_use().is_some() {
            return Err(Error::new(
                std::io::ErrorKind::DirectoryNotEmpty,
                "Please exit the current sheet before switching accounts",
            ));
        }
        self.using_account = account;
        Ok(())
    }

    /// Set the host mode
    pub fn set_host_mode(&mut self, host_mode: bool) {
        self.using_host_mode = host_mode;
    }
+
    /// Set the currently used sheet
    ///
    /// Validates (no sheet already open, workspace resolvable, sheet visible
    /// in the member's latest info, working tree empty), restores any saved
    /// draft for (account, sheet), then persists the config.
    pub async fn use_sheet(&mut self, sheet: SheetName) -> Result<(), std::io::Error> {
        // Sheet names are normalised to snake_case throughout.
        let sheet = snake_case!(sheet);

        // Check if the sheet is already in use
        if self.sheet_in_use().is_some() {
            return Err(std::io::Error::new(
                std::io::ErrorKind::AlreadyExists,
                "Sheet already in use",
            ));
        };

        // Check if the local path exists
        let local_path = self.get_local_path().await?;

        // Get latest info
        let Ok(latest_info) = LatestInfo::read_from(LatestInfo::latest_info_path(
            &local_path,
            &self.current_account(),
        ))
        .await
        else {
            return Err(std::io::Error::new(
                std::io::ErrorKind::NotFound,
                "No latest info found",
            ));
        };

        // Check if the sheet exists (is visible to this member)
        if !latest_info.visible_sheets.contains(&sheet) {
            return Err(std::io::Error::new(
                std::io::ErrorKind::NotFound,
                "Sheet not found",
            ));
        }

        // Check if there are any files or folders other than .jv
        self.check_local_path_empty(&local_path).await?;

        // Get the draft folder path
        let draft_folder = self.draft_folder(&self.using_account, &sheet, &local_path);

        if draft_folder.exists() {
            // Exists
            // Move the contents of the draft folder to the local path with rollback support
            self.move_draft_to_local(&draft_folder, &local_path).await?;
        }

        // Only mark and persist once the files are in place, so a failed move
        // leaves the config untouched.
        self.sheet_in_use = Some(sheet);
        LocalConfig::write(self).await?;

        Ok(())
    }

    /// Exit the currently used sheet
    ///
    /// Stashes the working tree (everything except the .jv folder) into the
    /// (account, sheet) draft folder, then clears and persists the config.
    /// A no-op when no sheet is in use.
    pub async fn exit_sheet(&mut self) -> Result<(), std::io::Error> {
        // Check if the sheet is already in use
        if self.sheet_in_use().is_none() {
            return Ok(());
        }

        // Check if the local path exists
        let local_path = self.get_local_path().await?;

        // Get the current sheet name (unwrap is safe: checked above)
        let sheet_name = self.sheet_in_use().as_ref().unwrap().clone();

        // Get the draft folder path
        let draft_folder = self.draft_folder(&self.using_account, &sheet_name, &local_path);

        // Create the draft folder if it doesn't exist
        if !draft_folder.exists() {
            std::fs::create_dir_all(&draft_folder).map_err(std::io::Error::other)?;
        }

        // Move all files and folders (except .jv folder) to the draft folder with rollback support
        self.move_local_to_draft(&local_path, &draft_folder).await?;

        // Clear the sheet in use only after the stash succeeded, then persist
        self.sheet_in_use = None;
        LocalConfig::write(self).await?;

        Ok(())
    }
+
+ /// Get local path or return error
+ async fn get_local_path(&self) -> Result<PathBuf, std::io::Error> {
+ current_local_path().ok_or_else(|| {
+ std::io::Error::new(std::io::ErrorKind::NotFound, "Fail to get local path")
+ })
+ }
+
    /// Check if local path is empty (except for .jv folder)
    ///
    /// Returns `DirectoryNotEmpty` if any entry other than the workspace
    /// folder is present.
    async fn check_local_path_empty(&self, local_path: &Path) -> Result<(), std::io::Error> {
        let jv_folder = local_path.join(CLIENT_PATH_WORKSPACE_ROOT);
        // `mut` because `Iterator::any` advances the iterator.
        let mut entries = std::fs::read_dir(local_path).map_err(std::io::Error::other)?;

        if entries.any(|entry| {
            if let Ok(entry) = entry {
                let path = entry.path();
                // Not the .jv path and not named like the workspace root folder
                // => counts as "not empty".
                path != jv_folder
                    && path.file_name().and_then(|s| s.to_str())
                        != Some(CLIENT_FOLDER_WORKSPACE_ROOT_NAME)
            } else {
                // NOTE(review): unreadable entries are treated as absent —
                // confirm this best-effort behaviour is intended.
                false
            }
        }) {
            return Err(std::io::Error::new(
                std::io::ErrorKind::DirectoryNotEmpty,
                "Local path is not empty!",
            ));
        }

        Ok(())
    }

    /// Move contents from draft folder to local path with rollback support
    ///
    /// Renames each entry one by one; if any rename (or the final removal of
    /// the emptied draft folder) fails, all prior renames are undone.
    async fn move_draft_to_local(
        &self,
        draft_folder: &Path,
        local_path: &Path,
    ) -> Result<(), std::io::Error> {
        let draft_entries: Vec<_> = std::fs::read_dir(draft_folder)
            .map_err(std::io::Error::other)?
            .collect::<Result<Vec<_>, _>>()
            .map_err(std::io::Error::other)?;

        // Renames performed so far, remembered for rollback.
        let mut moved_items: Vec<MovedItem> = Vec::new();

        for entry in &draft_entries {
            let entry_path = entry.path();
            let target_path = local_path.join(entry_path.file_name().unwrap());

            // Move each file/directory from draft folder to local path
            std::fs::rename(&entry_path, &target_path).map_err(|e| {
                // Rollback all previously moved items (best-effort)
                for moved_item in &moved_items {
                    let _ = std::fs::rename(&moved_item.target, &moved_item.source);
                }
                std::io::Error::other(e)
            })?;

            moved_items.push(MovedItem {
                source: entry_path.clone(),
                target: target_path.clone(),
            });
        }

        // Remove the now-empty draft folder
        std::fs::remove_dir(draft_folder).map_err(|e| {
            // Rollback all moved items if folder removal fails
            for moved_item in &moved_items {
                let _ = std::fs::rename(&moved_item.target, &moved_item.source);
            }
            std::io::Error::other(e)
        })?;

        Ok(())
    }

    /// Move contents from local path to draft folder with rollback support (except .jv folder)
    ///
    /// Mirror of `move_draft_to_local`; the source folder is the workspace
    /// root, so it is deliberately not removed afterwards.
    async fn move_local_to_draft(
        &self,
        local_path: &Path,
        draft_folder: &Path,
    ) -> Result<(), std::io::Error> {
        let jv_folder = local_path.join(CLIENT_PATH_WORKSPACE_ROOT);
        let entries: Vec<_> = std::fs::read_dir(local_path)
            .map_err(std::io::Error::other)?
            .collect::<Result<Vec<_>, _>>()
            .map_err(std::io::Error::other)?;

        // Renames performed so far, remembered for rollback.
        let mut moved_items: Vec<MovedItem> = Vec::new();

        for entry in &entries {
            let entry_path = entry.path();

            // Skip the .jv folder
            if entry_path == jv_folder
                || entry_path.file_name().and_then(|s| s.to_str())
                    == Some(CLIENT_FOLDER_WORKSPACE_ROOT_NAME)
            {
                continue;
            }

            let target_path = draft_folder.join(entry_path.file_name().unwrap());

            // Move each file/directory from local path to draft folder
            std::fs::rename(&entry_path, &target_path).map_err(|e| {
                // Rollback all previously moved items (best-effort)
                for moved_item in &moved_items {
                    let _ = std::fs::rename(&moved_item.target, &moved_item.source);
                }
                std::io::Error::other(e)
            })?;

            moved_items.push(MovedItem {
                source: entry_path.clone(),
                target: target_path.clone(),
            });
        }

        Ok(())
    }
+
    /// Get the currently used account (returns an owned clone).
    pub fn current_account(&self) -> MemberId {
        self.using_account.clone()
    }

    /// Check if the current member is interacting as a host.
    pub fn is_host_mode(&self) -> bool {
        self.using_host_mode
    }

    /// Check if the local workspace is stained.
    pub fn stained(&self) -> bool {
        self.stained_uuid.is_some()
    }

    /// Get the UUID of the vault that the local workspace is stained with.
    // NOTE(review): returning `Option<VaultUuid>` by value out of `&self`
    // relies on `VaultUuid: Copy` — confirm.
    pub fn stained_uuid(&self) -> Option<VaultUuid> {
        self.stained_uuid
    }

    /// Stain the local workspace with the given UUID.
    pub fn stain(&mut self, uuid: VaultUuid) {
        self.stained_uuid = Some(uuid);
    }

    /// Unstain the local workspace.
    pub fn unstain(&mut self) {
        self.stained_uuid = None;
    }

    /// Get the upstream address (same value as `vault_addr`).
    pub fn upstream_addr(&self) -> SocketAddr {
        self.upstream_addr
    }

    /// Get the currently used sheet (`None` when no sheet is open).
    pub fn sheet_in_use(&self) -> &Option<SheetName> {
        &self.sheet_in_use
    }
+
+ /// Get draft folder
+ pub fn draft_folder(
+ &self,
+ account: &MemberId,
+ sheet_name: &SheetName,
+ local_workspace_path: impl Into<PathBuf>,
+ ) -> PathBuf {
+ let account_str = snake_case!(account.as_str());
+ let sheet_name_str = snake_case!(sheet_name.as_str());
+ let draft_path = CLIENT_PATH_LOCAL_DRAFT
+ .replace(KEY_ACCOUNT, &account_str)
+ .replace(KEY_SHEET_NAME, &sheet_name_str);
+ local_workspace_path.into().join(draft_path)
+ }
+
+ /// Get current draft folder
+ pub fn current_draft_folder(&self) -> Option<PathBuf> {
+ let Some(sheet_name) = self.sheet_in_use() else {
+ return None;
+ };
+
+ let current_dir = current_local_path()?;
+
+ Some(self.draft_folder(&self.using_account, sheet_name, current_dir))
+ }
+}
+
/// A single file-system rename performed during a draft move, recorded so
/// the whole batch can be rolled back if a later step fails.
#[derive(Clone)]
struct MovedItem {
    // Original location (restore target on rollback).
    source: PathBuf,
    // Where the entry was moved to.
    target: PathBuf,
}
diff --git a/legacy_data/src/data/member.rs b/legacy_data/src/data/member.rs
new file mode 100644
index 0000000..7e99488
--- /dev/null
+++ b/legacy_data/src/data/member.rs
@@ -0,0 +1,71 @@
+use std::collections::HashMap;
+
+use cfg_file::ConfigFile;
+use serde::{Deserialize, Serialize};
+use string_proc::snake_case;
+
/// Identifier of a member; plain `String` alias (normalised to snake_case
/// by `Member::new`).
pub type MemberId = String;

/// A vault member: a unique id plus free-form string metadata.
#[derive(Debug, Eq, Clone, ConfigFile, Serialize, Deserialize)]
pub struct Member {
    /// Member ID, the unique identifier of the member
    #[serde(rename = "id")]
    id: String,

    /// Member metadata
    #[serde(rename = "meta")]
    metadata: HashMap<String, String>,
}

/// Default member uses the placeholder id "default_user".
impl Default for Member {
    fn default() -> Self {
        Self::new("default_user")
    }
}

/// Members compare by `id` only; `metadata` is deliberately ignored.
impl PartialEq for Member {
    fn eq(&self, other: &Self) -> bool {
        self.id == other.id
    }
}
+
+impl std::fmt::Display for Member {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ write!(f, "{}", self.id)
+ }
+}
+
/// Borrow a member as its id string.
impl std::convert::AsRef<str> for Member {
    fn as_ref(&self) -> &str {
        &self.id
    }
}
+
+impl Member {
+ /// Create member struct by id
+ pub fn new(new_id: impl Into<String>) -> Self {
+ Self {
+ id: snake_case!(new_id.into()),
+ metadata: HashMap::new(),
+ }
+ }
+
+ /// Get member id
+ pub fn id(&self) -> String {
+ self.id.clone()
+ }
+
+ /// Get metadata
+ pub fn metadata(&self, key: impl Into<String>) -> Option<&String> {
+ self.metadata.get(&key.into())
+ }
+
+ /// Set metadata
+ pub fn set_metadata(
+ &mut self,
+ key: impl AsRef<str>,
+ value: impl Into<String>,
+ ) -> Option<String> {
+ self.metadata.insert(key.as_ref().to_string(), value.into())
+ }
+}
diff --git a/legacy_data/src/data/sheet.rs b/legacy_data/src/data/sheet.rs
new file mode 100644
index 0000000..8b427e9
--- /dev/null
+++ b/legacy_data/src/data/sheet.rs
@@ -0,0 +1,278 @@
+use std::{collections::HashMap, path::PathBuf};
+
+use cfg_file::{ConfigFile, config::ConfigFile};
+use serde::{Deserialize, Serialize};
+
+use crate::{
+ constants::{KEY_SHEET_NAME, SERVER_FILE_SHEET},
+ data::{
+ member::MemberId,
+ vault::{
+ Vault,
+ virtual_file::{VirtualFileId, VirtualFileVersion},
+ },
+ },
+};
+
/// Name of a sheet within a vault.
pub type SheetName = String;
/// A path used as a key in a sheet's mapping table.
pub type SheetPathBuf = PathBuf;
+
/// A sheet loaded from a vault, borrowing the vault it belongs to.
pub struct Sheet<'a> {
    /// The name of the current sheet
    pub(crate) name: SheetName,

    /// Sheet data (the serializable payload)
    pub(crate) data: SheetData,

    /// The vault this sheet belongs to, used for path resolution and
    /// virtual-file lookups
    pub(crate) vault_reference: &'a Vault,
}
+
/// Serializable payload of a sheet.
#[derive(Default, Serialize, Deserialize, ConfigFile, Clone)]
pub struct SheetData {
    /// The write count of the current sheet (serialized as `v`)
    #[serde(rename = "v")]
    pub(crate) write_count: i32,

    /// The holder of the current sheet, who has full operation rights to the sheet mapping
    #[serde(rename = "holder")]
    pub(crate) holder: Option<MemberId>,

    /// Mapping of sheet paths to virtual file metadata (id + version)
    #[serde(rename = "map")]
    pub(crate) mapping: HashMap<SheetPathBuf, SheetMappingMetadata>,

    /// Reverse index from virtual file IDs to sheet paths; rebuilt from
    /// `mapping` on every persist
    #[serde(rename = "id_map")]
    pub(crate) id_mapping: Option<HashMap<VirtualFileId, SheetPathBuf>>,
}
+
/// Per-path entry of a sheet mapping: which virtual file the path points
/// to, and at which version.
#[derive(Debug, Default, Serialize, Deserialize, ConfigFile, Clone, Eq, PartialEq)]
pub struct SheetMappingMetadata {
    /// Virtual file the sheet path is mapped to
    #[serde(rename = "id")]
    pub id: VirtualFileId,
    /// Version of the virtual file the mapping refers to
    #[serde(rename = "ver")]
    pub version: VirtualFileVersion,
}
+
+impl<'a> Sheet<'a> {
+ pub fn name(&self) -> &SheetName {
+ &self.name
+ }
+
+ /// Get the holder of this sheet
+ pub fn holder(&self) -> Option<&MemberId> {
+ self.data.holder.as_ref()
+ }
+
+ /// Get the mapping of this sheet
+ pub fn mapping(&self) -> &HashMap<SheetPathBuf, SheetMappingMetadata> {
+ &self.data.mapping
+ }
+
+ /// Get the muttable mapping of this sheet
+ pub fn mapping_mut(&mut self) -> &mut HashMap<SheetPathBuf, SheetMappingMetadata> {
+ &mut self.data.mapping
+ }
+
+ /// Get the id_mapping of this sheet data
+ pub fn id_mapping(&self) -> &Option<HashMap<VirtualFileId, SheetPathBuf>> {
+ &self.data.id_mapping
+ }
+
+ /// Get the write count of this sheet
+ pub fn write_count(&self) -> i32 {
+ self.data.write_count
+ }
+
+ /// Forget the holder of this sheet
+ pub fn forget_holder(&mut self) {
+ self.data.holder = None;
+ }
+
+ /// Set the holder of this sheet
+ pub fn set_holder(&mut self, holder: MemberId) {
+ self.data.holder = Some(holder);
+ }
+
+ /// Add (or Edit) a mapping entry to the sheet
+ ///
+ /// This operation performs safety checks to ensure the member has the right to add the mapping:
+ /// 1. The sheet must have a holder (member) to perform this operation
+ /// 2. If the virtual file ID doesn't exist in the vault, the mapping is added directly
+ /// 3. If the virtual file exists, the mapping is added regardless of member edit rights
+ ///
+ /// Note: Full validation adds overhead - avoid frequent calls
+ pub async fn add_mapping(
+ &mut self,
+ sheet_path: SheetPathBuf,
+ virtual_file_id: VirtualFileId,
+ version: VirtualFileVersion,
+ ) -> Result<(), std::io::Error> {
+ // Check if the virtual file exists in the vault
+ if self.vault_reference.virtual_file(&virtual_file_id).is_err() {
+ // Virtual file doesn't exist, add the mapping directly
+ self.data.mapping.insert(
+ sheet_path,
+ SheetMappingMetadata {
+ id: virtual_file_id,
+ version,
+ },
+ );
+ return Ok(());
+ }
+
+ // Check if the sheet has a holder
+ let Some(_) = self.holder() else {
+ return Err(std::io::Error::new(
+ std::io::ErrorKind::PermissionDenied,
+ "This sheet has no holder",
+ ));
+ };
+
+ self.data.mapping.insert(
+ sheet_path,
+ SheetMappingMetadata {
+ id: virtual_file_id,
+ version,
+ },
+ );
+
+ Ok(())
+ }
+
+ /// Remove a mapping entry from the sheet
+ ///
+ /// This operation performs safety checks to ensure the member has the right to remove the mapping:
+ /// 1. The sheet must have a holder (member) to perform this operation
+ /// 2. Member must NOT have edit rights to the virtual file to release it (ensuring clear ownership)
+ /// 3. If the virtual file doesn't exist, the mapping is removed but no ID is returned
+ /// 4. If member has no edit rights and the file exists, returns the removed virtual file ID
+ ///
+ /// Note: Full validation adds overhead - avoid frequent calls
+ pub async fn remove_mapping(
+ &mut self,
+ sheet_path: &SheetPathBuf,
+ ) -> Option<SheetMappingMetadata> {
+ let virtual_file_meta = match self.data.mapping.get(sheet_path) {
+ Some(id) => id,
+ None => {
+ // The mapping entry doesn't exist, nothing to remove
+ return None;
+ }
+ };
+
+ // Check if the virtual file exists in the vault
+ if self
+ .vault_reference
+ .virtual_file(&virtual_file_meta.id)
+ .is_err()
+ {
+ // Virtual file doesn't exist, remove the mapping and return None
+ self.data.mapping.remove(sheet_path);
+ return None;
+ }
+
+ // Check if the sheet has a holder
+ let holder = self.holder()?;
+
+ // Check if the holder has edit rights to the virtual file
+ match self
+ .vault_reference
+ .has_virtual_file_edit_right(holder, &virtual_file_meta.id)
+ .await
+ {
+ Ok(false) => {
+ // Holder doesn't have rights, remove and return the virtual file ID
+ self.data.mapping.remove(sheet_path)
+ }
+ Ok(true) => {
+ // Holder has edit rights, don't remove the mapping
+ None
+ }
+ Err(_) => {
+ // Error checking rights, don't remove the mapping
+ None
+ }
+ }
+ }
+
+ /// Persist the sheet to disk
+ ///
+ /// Why not use a reference?
+ /// Because I don't want a second instance of the sheet to be kept in memory.
+ /// If needed, please deserialize and reload it.
+ pub async fn persist(mut self) -> Result<(), std::io::Error> {
+ self.data.write_count += 1;
+
+ // Update id mapping
+ self.data.id_mapping = Some(HashMap::new());
+ for map in self.data.mapping.iter() {
+ self.data
+ .id_mapping
+ .as_mut()
+ .unwrap()
+ .insert(map.1.id.clone(), map.0.clone());
+ }
+
+ // Add write count
+ if self.data.write_count >= i32::MAX - 1 {
+ self.data.write_count = 0;
+ }
+ SheetData::write_to(&self.data, self.sheet_path()).await
+ }
+
+ /// Get the path to the sheet file
+ pub fn sheet_path(&self) -> PathBuf {
+ Sheet::sheet_path_with_name(self.vault_reference, &self.name)
+ }
+
+ /// Get the path to the sheet file with the given name
+ pub fn sheet_path_with_name(vault: &Vault, name: impl AsRef<str>) -> PathBuf {
+ vault
+ .vault_path()
+ .join(SERVER_FILE_SHEET.replace(KEY_SHEET_NAME, name.as_ref()))
+ }
+
+ /// Clone the data of the sheet
+ pub fn clone_data(&self) -> SheetData {
+ self.data.clone()
+ }
+
+ /// Convert the sheet into its data representation
+ pub fn to_data(self) -> SheetData {
+ self.data
+ }
+}
+
impl SheetData {
    /// Get the write count of this sheet data
    pub fn write_count(&self) -> i32 {
        self.write_count
    }

    /// Get the holder of this sheet data
    pub fn holder(&self) -> Option<&MemberId> {
        self.holder.as_ref()
    }

    /// Get the mapping of this sheet data
    pub fn mapping(&self) -> &HashMap<SheetPathBuf, SheetMappingMetadata> {
        &self.mapping
    }

    /// Get the mutable mapping of this sheet data
    pub fn mapping_mut(&mut self) -> &mut HashMap<SheetPathBuf, SheetMappingMetadata> {
        &mut self.mapping
    }

    /// Get the id_mapping (virtual-file-id → path reverse index) of this sheet data
    pub fn id_mapping(&self) -> &Option<HashMap<VirtualFileId, SheetPathBuf>> {
        &self.id_mapping
    }

    /// Get the mutable id_mapping of this sheet data
    pub fn id_mapping_mut(&mut self) -> &mut Option<HashMap<VirtualFileId, SheetPathBuf>> {
        &mut self.id_mapping
    }
}
diff --git a/legacy_data/src/data/user.rs b/legacy_data/src/data/user.rs
new file mode 100644
index 0000000..a2326fa
--- /dev/null
+++ b/legacy_data/src/data/user.rs
@@ -0,0 +1,28 @@
+use crate::env::current_cfg_dir;
+use std::path::PathBuf;
+
+pub mod accounts;
+
/// Handle to the per-user configuration directory on the local machine.
pub struct UserDirectory {
    // Root of the user's configuration directory
    local_path: PathBuf,
}
+
+impl UserDirectory {
+ /// Create a user ditectory struct from the current system's document directory
+ pub fn current_cfg_dir() -> Option<Self> {
+ Some(UserDirectory {
+ local_path: current_cfg_dir()?,
+ })
+ }
+
+ /// Create a user directory struct from a specified directory path
+ /// Returns None if the directory does not exist
+ pub fn from_path<P: Into<PathBuf>>(path: P) -> Option<Self> {
+ let local_path = path.into();
+ if local_path.exists() {
+ Some(UserDirectory { local_path })
+ } else {
+ None
+ }
+ }
+}
diff --git a/legacy_data/src/data/user/accounts.rs b/legacy_data/src/data/user/accounts.rs
new file mode 100644
index 0000000..def2677
--- /dev/null
+++ b/legacy_data/src/data/user/accounts.rs
@@ -0,0 +1,162 @@
+use std::{
+ fs,
+ io::{Error, ErrorKind},
+ path::PathBuf,
+};
+
+use cfg_file::config::ConfigFile;
+
+use crate::{
+ constants::{KEY_SELF_ID, USER_FILE_ACCOUNTS, USER_FILE_KEY, USER_FILE_MEMBER},
+ data::{
+ member::{Member, MemberId},
+ user::UserDirectory,
+ },
+};
+
+/// Account Management
+impl UserDirectory {
+ /// Read account from configuration file
+ pub async fn account(&self, id: &MemberId) -> Result<Member, std::io::Error> {
+ if let Some(cfg_file) = self.account_cfg(id) {
+ let member = Member::read_from(cfg_file).await?;
+ return Ok(member);
+ }
+
+ Err(Error::new(ErrorKind::NotFound, "Account not found!"))
+ }
+
+ /// List all account IDs in the user directory
+ pub fn account_ids(&self) -> Result<Vec<MemberId>, std::io::Error> {
+ let accounts_path = self
+ .local_path
+ .join(USER_FILE_ACCOUNTS.replace(KEY_SELF_ID, ""));
+
+ if !accounts_path.exists() {
+ return Ok(Vec::new());
+ }
+
+ let mut account_ids = Vec::new();
+
+ for entry in fs::read_dir(accounts_path)? {
+ let entry = entry?;
+ let path = entry.path();
+
+ if path.is_file()
+ && let Some(file_name) = path.file_stem().and_then(|s| s.to_str())
+ && path.extension().and_then(|s| s.to_str()) == Some("toml")
+ {
+ // Remove the "_private" suffix from key files if present
+ let account_id = file_name.replace("_private", "");
+ account_ids.push(account_id);
+ }
+ }
+
+ Ok(account_ids)
+ }
+
+ /// Get all accounts
+ /// This method will read and deserialize account information, please pay attention to performance issues
+ pub async fn accounts(&self) -> Result<Vec<Member>, std::io::Error> {
+ let mut accounts = Vec::new();
+
+ for account_id in self.account_ids()? {
+ if let Ok(account) = self.account(&account_id).await {
+ accounts.push(account);
+ }
+ }
+
+ Ok(accounts)
+ }
+
+ /// Update account info
+ pub async fn update_account(&self, member: Member) -> Result<(), std::io::Error> {
+ // Ensure account exist
+ if self.account_cfg(&member.id()).is_some() {
+ let account_cfg_path = self.account_cfg_path(&member.id());
+ Member::write_to(&member, account_cfg_path).await?;
+ return Ok(());
+ }
+
+ Err(Error::new(ErrorKind::NotFound, "Account not found!"))
+ }
+
+ /// Register an account to user directory
+ pub async fn register_account(&self, member: Member) -> Result<(), std::io::Error> {
+ // Ensure account not exist
+ if self.account_cfg(&member.id()).is_some() {
+ return Err(Error::new(
+ ErrorKind::DirectoryNotEmpty,
+ format!("Account `{}` already registered!", member.id()),
+ ));
+ }
+
+ // Ensure accounts directory exists
+ let accounts_dir = self
+ .local_path
+ .join(USER_FILE_ACCOUNTS.replace(KEY_SELF_ID, ""));
+ if !accounts_dir.exists() {
+ fs::create_dir_all(&accounts_dir)?;
+ }
+
+ // Write config file to accounts dir
+ let account_cfg_path = self.account_cfg_path(&member.id());
+ Member::write_to(&member, account_cfg_path).await?;
+
+ Ok(())
+ }
+
+ /// Remove account from user directory
+ pub fn remove_account(&self, id: &MemberId) -> Result<(), std::io::Error> {
+ // Remove config file if exists
+ if let Some(account_cfg_path) = self.account_cfg(id) {
+ fs::remove_file(account_cfg_path)?;
+ }
+
+ // Remove private key file if exists
+ if let Some(private_key_path) = self.account_private_key(id)
+ && private_key_path.exists()
+ {
+ fs::remove_file(private_key_path)?;
+ }
+
+ Ok(())
+ }
+
+ /// Try to get the account's configuration file to determine if the account exists
+ pub fn account_cfg(&self, id: &MemberId) -> Option<PathBuf> {
+ let cfg_file = self.account_cfg_path(id);
+ if cfg_file.exists() {
+ Some(cfg_file)
+ } else {
+ None
+ }
+ }
+
+ /// Try to get the account's private key file to determine if the account has a private key
+ pub fn account_private_key(&self, id: &MemberId) -> Option<PathBuf> {
+ let key_file = self.account_private_key_path(id);
+ if key_file.exists() {
+ Some(key_file)
+ } else {
+ None
+ }
+ }
+
+ /// Check if account has private key
+ pub fn has_private_key(&self, id: &MemberId) -> bool {
+ self.account_private_key(id).is_some()
+ }
+
+ /// Get the account's configuration file path, but do not check if the file exists
+ pub fn account_cfg_path(&self, id: &MemberId) -> PathBuf {
+ self.local_path
+ .join(USER_FILE_MEMBER.replace(KEY_SELF_ID, id.to_string().as_str()))
+ }
+
+ /// Get the account's private key file path, but do not check if the file exists
+ pub fn account_private_key_path(&self, id: &MemberId) -> PathBuf {
+ self.local_path
+ .join(USER_FILE_KEY.replace(KEY_SELF_ID, id.to_string().as_str()))
+ }
+}
diff --git a/legacy_data/src/data/vault.rs b/legacy_data/src/data/vault.rs
new file mode 100644
index 0000000..0f93016
--- /dev/null
+++ b/legacy_data/src/data/vault.rs
@@ -0,0 +1,132 @@
+use std::{env::current_dir, path::PathBuf, sync::Arc};
+
+use tokio::fs::create_dir_all;
+use vcs_docs::docs::READMES_VAULT_README;
+
+use crate::{
+ constants::{
+ REF_SHEET_NAME, SERVER_FILE_README, SERVER_FILE_VAULT, SERVER_PATH_MEMBER_PUB,
+ SERVER_PATH_MEMBERS, SERVER_PATH_SHEETS, SERVER_PATH_VF_ROOT, VAULT_HOST_NAME,
+ },
+ data::{member::Member, vault::vault_config::VaultConfig},
+ env::{current_vault_path, find_vault_path},
+};
+
+pub mod lock_status;
+pub mod mapping_share;
+pub mod member_manage;
+pub mod sheet_manage;
+pub mod vault_config;
+pub mod virtual_file;
+
/// A vault on disk: its parsed configuration plus its root path.
pub struct Vault {
    // Shared, read-only vault configuration
    config: Arc<VaultConfig>,
    // Root directory of the vault
    vault_path: PathBuf,
}
+
+impl Vault {
+ /// Get vault path
+ pub fn vault_path(&self) -> &PathBuf {
+ &self.vault_path
+ }
+
+ /// Initialize vault
+ pub fn init(config: VaultConfig, vault_path: impl Into<PathBuf>) -> Option<Self> {
+ let vault_path = find_vault_path(vault_path)?;
+ Some(Self {
+ config: Arc::new(config),
+ vault_path,
+ })
+ }
+
+ /// Initialize vault
+ pub fn init_current_dir(config: VaultConfig) -> Option<Self> {
+ let vault_path = current_vault_path()?;
+ Some(Self {
+ config: Arc::new(config),
+ vault_path,
+ })
+ }
+
+ /// Setup vault
+ pub async fn setup_vault(
+ vault_path: impl Into<PathBuf>,
+ vault_name: impl AsRef<str>,
+ ) -> Result<(), std::io::Error> {
+ let vault_path: PathBuf = vault_path.into();
+
+ // Ensure directory is empty
+ if vault_path.exists() && vault_path.read_dir()?.next().is_some() {
+ return Err(std::io::Error::new(
+ std::io::ErrorKind::DirectoryNotEmpty,
+ "DirectoryNotEmpty",
+ ));
+ }
+
+ // 1. Setup main config
+ let config = VaultConfig::default();
+
+ // NOTE:
+ // Do not use the write_to method provided by the ConfigFile trait to store the Vault configuration file
+ // Instead, use the PROFILES_VAULT content provided by the Documents Repository for writing
+
+ // VaultConfig::write_to(&config, vault_path.join(SERVER_FILE_VAULT)).await?;
+ let config_content = vcs_docs::docs::PROFILES_VAULT
+ .replace("{vault_name}", vault_name.as_ref())
+ .replace("{user_name}", whoami::username().as_str())
+ .replace(
+ "{date_format}",
+ chrono::Local::now()
+ .format("%Y-%m-%d %H:%M")
+ .to_string()
+ .as_str(),
+ )
+ .replace("{vault_uuid}", &config.vault_uuid().to_string());
+ tokio::fs::write(vault_path.join(SERVER_FILE_VAULT), config_content).await?;
+
+ // 2. Setup sheets directory
+ create_dir_all(vault_path.join(SERVER_PATH_SHEETS)).await?;
+
+ // 3. Setup key directory
+ create_dir_all(vault_path.join(SERVER_PATH_MEMBER_PUB)).await?;
+
+ // 4. Setup member directory
+ create_dir_all(vault_path.join(SERVER_PATH_MEMBERS)).await?;
+
+ // 5. Setup storage directory
+ create_dir_all(vault_path.join(SERVER_PATH_VF_ROOT)).await?;
+
+ let Some(vault) = Vault::init(config, &vault_path) else {
+ return Err(std::io::Error::other("Failed to initialize vault"));
+ };
+
+ // 6. Create host member
+ vault
+ .register_member_to_vault(Member::new(VAULT_HOST_NAME))
+ .await?;
+
+ // 7. Setup reference sheet
+ vault
+ .create_sheet(&REF_SHEET_NAME.to_string(), &VAULT_HOST_NAME.to_string())
+ .await?;
+
+ // Final, generate README.md
+ let readme_content = READMES_VAULT_README;
+ tokio::fs::write(vault_path.join(SERVER_FILE_README), readme_content).await?;
+
+ Ok(())
+ }
+
+ /// Setup vault in current directory
+ pub async fn setup_vault_current_dir(
+ vault_name: impl AsRef<str>,
+ ) -> Result<(), std::io::Error> {
+ Self::setup_vault(current_dir()?, vault_name).await?;
+ Ok(())
+ }
+
+ /// Get vault configuration
+ pub fn config(&self) -> &Arc<VaultConfig> {
+ &self.config
+ }
+}
diff --git a/legacy_data/src/data/vault/lock_status.rs b/legacy_data/src/data/vault/lock_status.rs
new file mode 100644
index 0000000..3f59c30
--- /dev/null
+++ b/legacy_data/src/data/vault/lock_status.rs
@@ -0,0 +1,40 @@
+use std::path::PathBuf;
+
+use crate::{constants::SERVER_FILE_LOCKFILE, data::vault::Vault};
+
+impl Vault {
+ /// Get the path of the lock file for the current Vault
+ pub fn lock_file_path(&self) -> PathBuf {
+ self.vault_path().join(SERVER_FILE_LOCKFILE)
+ }
+
+ /// Check if the current Vault is locked
+ pub fn is_locked(&self) -> bool {
+ self.lock_file_path().exists()
+ }
+
+ /// Lock the current Vault
+ pub fn lock(&self) -> Result<(), std::io::Error> {
+ if self.is_locked() {
+ return Err(std::io::Error::new(
+ std::io::ErrorKind::AlreadyExists,
+ format!(
+ "Vault is locked! This indicates a service is already running here.\nPlease stop other services or delete the lock file at the vault root directory: {}",
+ self.lock_file_path().display()
+ ),
+ ));
+ }
+ std::fs::File::create(self.lock_file_path())?;
+ Ok(())
+ }
+
+ /// Unlock the current Vault
+ pub fn unlock(&self) -> Result<(), std::io::Error> {
+ if let Err(e) = std::fs::remove_file(self.lock_file_path())
+ && e.kind() != std::io::ErrorKind::NotFound
+ {
+ return Err(e);
+ }
+ Ok(())
+ }
+}
diff --git a/legacy_data/src/data/vault/mapping_share.rs b/legacy_data/src/data/vault/mapping_share.rs
new file mode 100644
index 0000000..5d27859
--- /dev/null
+++ b/legacy_data/src/data/vault/mapping_share.rs
@@ -0,0 +1,422 @@
+use std::{collections::HashMap, io::Error, path::PathBuf};
+
+use cfg_file::{ConfigFile, config::ConfigFile};
+use rand::{Rng, rng};
+use serde::{Deserialize, Serialize};
+use string_proc::{format_path, snake_case};
+use tokio::fs;
+
+use crate::{
+ constants::{
+ KEY_SHARE_ID, KEY_SHEET_NAME, SERVER_FILE_SHEET_SHARE, SERVER_PATH_SHARES,
+ SERVER_SUFFIX_SHEET_SHARE_FILE_NO_DOT,
+ },
+ data::{
+ member::MemberId,
+ sheet::{Sheet, SheetMappingMetadata, SheetName, SheetPathBuf},
+ vault::Vault,
+ },
+};
+
+pub type SheetShareId = String;
+
/// A share item: a set of sheet mappings exported from one sheet so that
/// another sheet can merge them.
#[derive(Default, Serialize, Deserialize, ConfigFile, Clone, Debug)]
pub struct Share {
    /// Sharer: the member who created this share item
    #[serde(rename = "sharer")]
    pub sharer: MemberId,

    /// Description of the share item
    #[serde(rename = "desc")]
    pub description: String,

    /// Path the share was read from; filled after deserialization and
    /// never serialized (only needed to delete the share after merging)
    #[serde(skip)]
    pub path: Option<PathBuf>,

    /// From: which sheet the member exported the file from
    #[serde(rename = "from")]
    pub from_sheet: SheetName,

    /// Mappings: the sheet mappings contained in the share item
    #[serde(rename = "map")]
    pub mappings: HashMap<SheetPathBuf, SheetMappingMetadata>,
}
+
/// Conflict-resolution policy used when merging a `Share` into a sheet.
#[derive(Default, Serialize, Deserialize, ConfigFile, Clone, PartialEq, Eq)]
pub enum ShareMergeMode {
    /// If a path or file already exists during merge, prioritize the incoming share
    /// Path conflict: replace the mapping content at the local path with the incoming content
    /// File conflict: delete the original file mapping and create a new one
    Overwrite,

    /// If a path or file already exists during merge, skip overwriting this entry
    Skip,

    /// Pre-check for conflicts, prohibit merging if any conflicts are found
    #[default]
    Safe,

    /// Reject all shares
    RejectAll,
}
+
/// Result of pre-checking a share merge: which entries collide with the
/// target sheet, by path and by virtual file id.
#[derive(Default, Serialize, Deserialize, ConfigFile, Clone)]
pub struct ShareMergeConflict {
    /// Share paths that already exist in the target sheet's mapping
    pub duplicate_mapping: Vec<PathBuf>,

    /// Share paths whose virtual file id is already mapped in the target sheet
    pub duplicate_file: Vec<PathBuf>,
}
+
+impl ShareMergeConflict {
+ /// Check if there are no conflicts
+ pub fn ok(&self) -> bool {
+ self.duplicate_mapping.is_empty() && self.duplicate_file.is_empty()
+ }
+}
+
+impl Vault {
+ /// Get the path of a share item in a sheet
+ pub fn share_file_path(&self, sheet_name: &SheetName, share_id: &SheetShareId) -> PathBuf {
+ let sheet_name = snake_case!(sheet_name.clone());
+ let share_id = share_id.clone();
+
+ // Format the path to remove "./" prefix and normalize it
+ let path_str = SERVER_FILE_SHEET_SHARE
+ .replace(KEY_SHEET_NAME, &sheet_name)
+ .replace(KEY_SHARE_ID, &share_id);
+
+ // Use format_path to normalize the path
+ match format_path::format_path_str(&path_str) {
+ Ok(normalized_path) => self.vault_path().join(normalized_path),
+ Err(_) => {
+ // Fallback to original behavior if formatting fails
+ self.vault_path().join(path_str)
+ }
+ }
+ }
+
+ /// Get the actual paths of all share items in a sheet
+ pub async fn share_file_paths(&self, sheet_name: &SheetName) -> Vec<PathBuf> {
+ let sheet_name = snake_case!(sheet_name.clone());
+ let shares_dir = self
+ .vault_path()
+ .join(SERVER_PATH_SHARES.replace(KEY_SHEET_NAME, &sheet_name));
+
+ let mut result = Vec::new();
+ if let Ok(mut entries) = fs::read_dir(shares_dir).await {
+ while let Ok(Some(entry)) = entries.next_entry().await {
+ let path = entry.path();
+ if path.is_file()
+ && path.extension().and_then(|s| s.to_str())
+ == Some(SERVER_SUFFIX_SHEET_SHARE_FILE_NO_DOT)
+ {
+ result.push(path);
+ }
+ }
+ }
+ result
+ }
+}
+
+impl<'a> Sheet<'a> {
+ /// Get the shares of a sheet
+ pub async fn get_shares(&self) -> Result<Vec<Share>, std::io::Error> {
+ let paths = self.vault_reference.share_file_paths(&self.name).await;
+ let mut shares = Vec::new();
+
+ for path in paths {
+ match Share::read_from(&path).await {
+ Ok(mut share) => {
+ share.path = Some(path);
+ shares.push(share);
+ }
+ Err(e) => return Err(e),
+ }
+ }
+
+ Ok(shares)
+ }
+
+ /// Get a share of a sheet
+ pub async fn get_share(&self, share_id: &SheetShareId) -> Result<Share, std::io::Error> {
+ let path = self.vault_reference.share_file_path(&self.name, share_id);
+ let mut share = Share::read_from(&path).await?;
+ share.path = Some(path);
+ Ok(share)
+ }
+
+ /// Import a share of a sheet by its ID
+ pub async fn merge_share_by_id(
+ self,
+ share_id: &SheetShareId,
+ share_merge_mode: ShareMergeMode,
+ ) -> Result<(), std::io::Error> {
+ let share = self.get_share(share_id).await?;
+ self.merge_share(share, share_merge_mode).await
+ }
+
+ /// Import a share of a sheet
+ pub async fn merge_share(
+ mut self,
+ share: Share,
+ share_merge_mode: ShareMergeMode,
+ ) -> Result<(), std::io::Error> {
+ // Backup original data and edit based on this backup
+ let mut copy_share = share.clone();
+ let mut copy_sheet = self.clone_data();
+
+ // Pre-check
+ let conflicts = self.precheck(&copy_share);
+ let mut reject_mode = false;
+
+ match share_merge_mode {
+ // Safe mode: conflicts are not allowed
+ ShareMergeMode::Safe => {
+ // Conflicts found
+ if !conflicts.ok() {
+ // Do nothing, return Error
+ return Err(Error::new(
+ std::io::ErrorKind::AlreadyExists,
+ "Mappings or files already exist!",
+ ));
+ }
+ }
+ // Overwrite mode: when conflicts occur, prioritize the share item
+ ShareMergeMode::Overwrite => {
+ // Handle duplicate mappings
+ for path in conflicts.duplicate_mapping {
+ // Get the share data
+ let Some(share_value) = copy_share.mappings.remove(&path) else {
+ return Err(Error::new(
+ std::io::ErrorKind::NotFound,
+ format!("Share value `{}` not found!", &path.display()),
+ ));
+ };
+ // Overwrite
+ copy_sheet.mapping_mut().insert(path, share_value);
+ }
+
+ // Handle duplicate IDs
+ for path in conflicts.duplicate_file {
+ // Get the share data
+ let Some(share_value) = copy_share.mappings.remove(&path) else {
+ return Err(Error::new(
+ std::io::ErrorKind::NotFound,
+ format!("Share value `{}` not found!", &path.display()),
+ ));
+ };
+
+ // Extract the file ID
+ let conflict_vfid = &share_value.id;
+
+ // Through the sheet's ID mapping
+ let Some(id_mapping) = copy_sheet.id_mapping_mut() else {
+ return Err(Error::new(
+ std::io::ErrorKind::NotFound,
+ "Id mapping not found!",
+ ));
+ };
+
+ // Get the original path from the ID mapping
+ let Some(raw_path) = id_mapping.remove(conflict_vfid) else {
+ return Err(Error::new(
+ std::io::ErrorKind::NotFound,
+ format!("The path of virtual file `{}' not found!", conflict_vfid),
+ ));
+ };
+
+ // Remove the original path mapping
+ if copy_sheet.mapping_mut().remove(&raw_path).is_none() {
+ return Err(Error::new(
+ std::io::ErrorKind::NotFound,
+ format!("Remove mapping `{}` failed!", &raw_path.display()),
+ ));
+ }
+ // Insert the new item
+ copy_sheet.mapping_mut().insert(path, share_value);
+ }
+ }
+ // Skip mode: when conflicts occur, prioritize the local sheet
+ ShareMergeMode::Skip => {
+ // Directly remove conflicting items
+ for path in conflicts.duplicate_mapping {
+ copy_share.mappings.remove(&path);
+ }
+ for path in conflicts.duplicate_file {
+ copy_share.mappings.remove(&path);
+ }
+ }
+ // Reject all mode: reject all shares
+ ShareMergeMode::RejectAll => {
+ reject_mode = true; // Only mark as rejected
+ }
+ }
+
+ if !reject_mode {
+ // Subsequent merging
+ copy_sheet
+ .mapping_mut()
+ .extend(copy_share.mappings.into_iter());
+
+ // Merge completed
+ self.data = copy_sheet; // Write the result
+
+ // Merge completed, consume the sheet
+ self.persist().await.map_err(|err| {
+ Error::new(
+ std::io::ErrorKind::NotFound,
+ format!("Write sheet failed: {}", err),
+ )
+ })?;
+ }
+
+ // Persistence succeeded, continue to consume the share item
+ share.remove().await.map_err(|err| {
+ Error::new(
+ std::io::ErrorKind::NotFound,
+ format!("Remove share failed: {}", err.1),
+ )
+ })
+ }
+
+ // Pre-check whether the share can be imported into the current sheet without conflicts
+ fn precheck(&self, share: &Share) -> ShareMergeConflict {
+ let mut conflicts = ShareMergeConflict::default();
+
+ for (mapping, metadata) in &share.mappings {
+ // Check for duplicate mappings
+ if self.mapping().contains_key(mapping.as_path()) {
+ conflicts.duplicate_mapping.push(mapping.clone());
+ continue;
+ }
+
+ // Check for duplicate IDs
+ if let Some(id_mapping) = self.id_mapping() {
+ if id_mapping.contains_key(&metadata.id) {
+ conflicts.duplicate_file.push(mapping.clone());
+ continue;
+ }
+ }
+ }
+
+ conflicts
+ }
+
+ /// Share mappings with another sheet
+ pub async fn share_mappings(
+ &self,
+ other_sheet: &SheetName,
+ mappings: Vec<PathBuf>,
+ sharer: &MemberId,
+ description: String,
+ ) -> Result<Share, std::io::Error> {
+ let other_sheet = snake_case!(other_sheet.clone());
+ let sharer = snake_case!(sharer.clone());
+
+ // Check if the sheet exists
+ let sheet_names = self.vault_reference.sheet_names()?;
+ if !sheet_names.contains(&other_sheet) {
+ return Err(Error::new(
+ std::io::ErrorKind::NotFound,
+ format!("Sheet `{}` not found!", &other_sheet),
+ ));
+ }
+
+ // Check if the target file exists, regenerate ID if path already exists, up to 20 attempts
+ let target_path = {
+ let mut id;
+ let mut share_path;
+ let mut attempts = 0;
+
+ loop {
+ id = Share::gen_share_id(&sharer);
+ share_path = self.vault_reference.share_file_path(&other_sheet, &id);
+
+ if !share_path.exists() {
+ break share_path;
+ }
+
+ attempts += 1;
+ if attempts >= 20 {
+ return Err(Error::new(
+ std::io::ErrorKind::AlreadyExists,
+ "Failed to generate unique share ID after 20 attempts!",
+ ));
+ }
+ }
+ };
+
+ // Validate that the share is valid
+ let mut share_mappings = HashMap::new();
+ for mapping_path in &mappings {
+ if let Some(metadata) = self.mapping().get(mapping_path) {
+ share_mappings.insert(mapping_path.clone(), metadata.clone());
+ } else {
+ return Err(Error::new(
+ std::io::ErrorKind::NotFound,
+ format!("Mapping `{}` not found in sheet!", mapping_path.display()),
+ ));
+ }
+ }
+
+ // Build share data
+ let share_data = Share {
+ sharer,
+ description,
+ path: None, // This is only needed during merging (reading), no need to serialize now
+ from_sheet: self.name.clone(),
+ mappings: share_mappings,
+ };
+
+ // Write data
+ Share::write_to(&share_data, target_path).await?;
+
+ Ok(share_data)
+ }
+}
+
+impl Share {
+ /// Generate a share ID for a given sharer
+ pub fn gen_share_id(sharer: &MemberId) -> String {
+ let sharer_snake = snake_case!(sharer.clone());
+ let random_part: String = rng()
+ .sample_iter(&rand::distr::Alphanumeric)
+ .take(8)
+ .map(char::from)
+ .collect();
+ format!("{}@{}", sharer_snake, random_part)
+ }
+
+ /// Delete a share (reject or remove the share item)
+ /// If deletion succeeds, returns `Ok(())`;
+ /// If deletion fails, returns `Err((self, std::io::Error))`, containing the original share object and the error information.
+ pub async fn remove(self) -> Result<(), (Self, std::io::Error)> {
+ let Some(path) = &self.path else {
+ return Err((
+ self,
+ Error::new(std::io::ErrorKind::NotFound, "No share path recorded!"),
+ ));
+ };
+
+ if !path.exists() {
+ return Err((
+ self,
+ Error::new(std::io::ErrorKind::NotFound, "No share file exists!"),
+ ));
+ }
+
+ match fs::remove_file(path).await {
+ Err(err) => Err((
+ self,
+ Error::new(
+ std::io::ErrorKind::Other,
+ format!("Failed to delete share file: {}", err),
+ ),
+ )),
+ Ok(_) => Ok(()),
+ }
+ }
+}
diff --git a/legacy_data/src/data/vault/member_manage.rs b/legacy_data/src/data/vault/member_manage.rs
new file mode 100644
index 0000000..9d22d09
--- /dev/null
+++ b/legacy_data/src/data/vault/member_manage.rs
@@ -0,0 +1,144 @@
+use std::{
+ fs,
+ io::{Error, ErrorKind},
+ path::PathBuf,
+};
+
+use cfg_file::config::ConfigFile;
+
+use crate::{
+ constants::{
+ SERVER_FILE_MEMBER_INFO, SERVER_FILE_MEMBER_PUB, SERVER_PATH_MEMBERS,
+ SERVER_SUFFIX_MEMBER_INFO_NO_DOT,
+ },
+ data::{
+ member::{Member, MemberId},
+ vault::Vault,
+ },
+};
+
+const ID_PARAM: &str = "{member_id}";
+
+/// Member Manage
+impl Vault {
+ /// Read member from configuration file
+ pub async fn member(&self, id: &MemberId) -> Result<Member, std::io::Error> {
+ if let Some(cfg_file) = self.member_cfg(id) {
+ let member = Member::read_from(cfg_file).await?;
+ return Ok(member);
+ }
+
+ Err(Error::new(ErrorKind::NotFound, "Member not found!"))
+ }
+
+ /// List all member IDs in the vault
+ pub fn member_ids(&self) -> Result<Vec<MemberId>, std::io::Error> {
+ let members_path = self.vault_path.join(SERVER_PATH_MEMBERS);
+
+ if !members_path.exists() {
+ return Ok(Vec::new());
+ }
+
+ let mut member_ids = Vec::new();
+
+ for entry in fs::read_dir(members_path)? {
+ let entry = entry?;
+ let path = entry.path();
+
+ if path.is_file()
+ && let Some(file_name) = path.file_stem().and_then(|s| s.to_str())
+ && path.extension().and_then(|s| s.to_str())
+ == Some(SERVER_SUFFIX_MEMBER_INFO_NO_DOT)
+ {
+ member_ids.push(file_name.to_string());
+ }
+ }
+
+ Ok(member_ids)
+ }
+
+ /// Get all members
+ /// This method will read and deserialize member information, please pay attention to performance issues
+ pub async fn members(&self) -> Result<Vec<Member>, std::io::Error> {
+ let mut members = Vec::new();
+
+ for member_id in self.member_ids()? {
+ if let Ok(member) = self.member(&member_id).await {
+ members.push(member);
+ }
+ }
+
+ Ok(members)
+ }
+
+ /// Update member info
+ pub async fn update_member(&self, member: Member) -> Result<(), std::io::Error> {
+ // Ensure member exist
+ if self.member_cfg(&member.id()).is_some() {
+ let member_cfg_path = self.member_cfg_path(&member.id());
+ Member::write_to(&member, member_cfg_path).await?;
+ return Ok(());
+ }
+
+ Err(Error::new(ErrorKind::NotFound, "Member not found!"))
+ }
+
+ /// Register a member to vault
+ pub async fn register_member_to_vault(&self, member: Member) -> Result<(), std::io::Error> {
+ // Ensure member not exist
+ if self.member_cfg(&member.id()).is_some() {
+ return Err(Error::new(
+ ErrorKind::DirectoryNotEmpty,
+ format!("Member `{}` already registered!", member.id()),
+ ));
+ }
+
+ // Wrtie config file to member dir
+ let member_cfg_path = self.member_cfg_path(&member.id());
+ Member::write_to(&member, member_cfg_path).await?;
+
+ Ok(())
+ }
+
+ /// Remove member from vault
+ pub fn remove_member_from_vault(&self, id: &MemberId) -> Result<(), std::io::Error> {
+ // Ensure member exist
+ if let Some(member_cfg_path) = self.member_cfg(id) {
+ fs::remove_file(member_cfg_path)?;
+ }
+
+ Ok(())
+ }
+
+ /// Try to get the member's configuration file to determine if the member exists
+ pub fn member_cfg(&self, id: &MemberId) -> Option<PathBuf> {
+ let cfg_file = self.member_cfg_path(id);
+ if cfg_file.exists() {
+ Some(cfg_file)
+ } else {
+ None
+ }
+ }
+
+ /// Try to get the member's public key file to determine if the member has login permission
+ pub fn member_key(&self, id: &MemberId) -> Option<PathBuf> {
+ let key_file = self.member_key_path(id);
+ if key_file.exists() {
+ Some(key_file)
+ } else {
+ None
+ }
+ }
+
+ /// Get the member's configuration file path, but do not check if the file exists
+ pub fn member_cfg_path(&self, id: &MemberId) -> PathBuf {
+ self.vault_path
+ .join(SERVER_FILE_MEMBER_INFO.replace(ID_PARAM, id.to_string().as_str()))
+ }
+
+ /// Get the member's public key file path, but do not check if the file exists
+ pub fn member_key_path(&self, id: &MemberId) -> PathBuf {
+ self.vault_path
+ .join(SERVER_FILE_MEMBER_PUB.replace(ID_PARAM, id.to_string().as_str()))
+ }
+}
diff --git a/legacy_data/src/data/vault/sheet_manage.rs b/legacy_data/src/data/vault/sheet_manage.rs
new file mode 100644
index 0000000..c22c849
--- /dev/null
+++ b/legacy_data/src/data/vault/sheet_manage.rs
@@ -0,0 +1,274 @@
+use std::{collections::HashMap, io::Error};
+
+use cfg_file::config::ConfigFile;
+use string_proc::snake_case;
+use tokio::fs;
+
+use crate::{
+ constants::{SERVER_PATH_SHEETS, SERVER_SUFFIX_SHEET_FILE_NO_DOT},
+ data::{
+ member::MemberId,
+ sheet::{Sheet, SheetData, SheetName},
+ vault::Vault,
+ },
+};
+
+/// Vault Sheets Management
+impl Vault {
+    /// Load every sheet stored in the vault.
+    ///
+    /// It is generally not recommended to call this frequently: although a
+    /// vault typically won't contain many sheets, large sheet contents make
+    /// this full scan a potential performance bottleneck.
+    pub async fn sheets<'a>(&'a self) -> Result<Vec<Sheet<'a>>, std::io::Error> {
+        let names = self.sheet_names()?;
+        let mut loaded = Vec::with_capacity(names.len());
+
+        for name in &names {
+            loaded.push(self.sheet(name).await?);
+        }
+
+        Ok(loaded)
+    }
+
+    /// List the names of all sheets in the vault.
+    ///
+    /// Cost is proportional to the number of sheet files, but a vault
+    /// generally won't contain many sheets. A missing sheets directory is
+    /// treated as "no sheets", not an error.
+    pub fn sheet_names(&self) -> Result<Vec<SheetName>, std::io::Error> {
+        let sheets_dir = self.vault_path.join(SERVER_PATH_SHEETS);
+
+        // No directory yet means no sheets have ever been created.
+        if !sheets_dir.exists() {
+            return Ok(Vec::new());
+        }
+
+        let mut names = Vec::new();
+
+        for entry in std::fs::read_dir(sheets_dir)? {
+            let path = entry?.path();
+
+            // Only regular files count as sheet candidates.
+            if !path.is_file() {
+                continue;
+            }
+
+            // A sheet file is recognized by its extension; its stem is the name.
+            let is_sheet = path
+                .extension()
+                .is_some_and(|ext| ext == SERVER_SUFFIX_SHEET_FILE_NO_DOT);
+            if is_sheet && let Some(stem) = path.file_stem().and_then(|s| s.to_str()) {
+                names.push(stem.to_owned());
+            }
+        }
+
+        Ok(names)
+    }
+
+ /// Read a sheet from its name
+ ///
+ /// If the sheet information is successfully found in the vault,
+ /// it will be deserialized and read as a sheet.
+ /// This is the only correct way to obtain a sheet instance.
+ ///
+ /// The name is normalized to snake_case before lookup. If the sheet file is
+ /// missing, an automatic restore from the trash directory is attempted
+ /// (see `delete_sheet_safely` / `restore_sheet`); only when that also fails
+ /// is `ErrorKind::NotFound` returned.
+ pub async fn sheet<'a>(&'a self, sheet_name: &SheetName) -> Result<Sheet<'a>, std::io::Error> {
+ let sheet_name = snake_case!(sheet_name.clone());
+
+ // Get the path to the sheet file
+ let sheet_path = Sheet::sheet_path_with_name(self, &sheet_name);
+
+ // Ensure the sheet file exists
+ if !sheet_path.exists() {
+ // If the sheet does not exist, try to restore it from the trash
+ if self.restore_sheet(&sheet_name).await.is_err() {
+ // If restoration fails, return an error
+ return Err(Error::new(
+ std::io::ErrorKind::NotFound,
+ format!("Sheet `{}` not found!", sheet_name),
+ ));
+ }
+ }
+
+ // Read the sheet data from the file
+ let data = SheetData::read_from(sheet_path).await?;
+
+ Ok(Sheet {
+ name: sheet_name.clone(),
+ data,
+ vault_reference: self,
+ })
+ }
+
+    /// Create a sheet locally and return the sheet instance
+    ///
+    /// Creates a new sheet in the vault with the given (snake_cased) name
+    /// and holder. The holder must be a registered member and the sheet must
+    /// not already exist; the new sheet starts with empty data.
+    pub async fn create_sheet<'a>(
+        &'a self,
+        sheet_name: &SheetName,
+        holder: &MemberId,
+    ) -> Result<Sheet<'a>, std::io::Error> {
+        let sheet_name = snake_case!(sheet_name.clone());
+
+        // The holder must be a registered member of this vault.
+        if !self.member_cfg_path(holder).exists() {
+            return Err(Error::new(
+                std::io::ErrorKind::NotFound,
+                format!("Member `{}` not found!", &holder),
+            ));
+        }
+
+        // Refuse to overwrite an existing sheet file.
+        let path = Sheet::sheet_path_with_name(self, &sheet_name);
+        if path.exists() {
+            return Err(Error::new(
+                std::io::ErrorKind::AlreadyExists,
+                format!("Sheet `{}` already exists!", &sheet_name),
+            ));
+        }
+
+        // Persist a fresh, empty sheet held by `holder`.
+        let data = SheetData {
+            holder: Some(holder.clone()),
+            mapping: HashMap::new(),
+            id_mapping: None,
+            write_count: 0,
+        };
+        SheetData::write_to(&data, path).await?;
+
+        Ok(Sheet {
+            name: sheet_name,
+            data,
+            vault_reference: self,
+        })
+    }
+
+ /// Delete the sheet file from local disk by name
+ ///
+ /// This method will remove the sheet file with the given name from the vault.
+ /// It will verify that the sheet exists before attempting to delete it.
+ /// If the sheet is successfully deleted, it will return Ok(()).
+ ///
+ /// Warning: This operation is dangerous. Deleting a sheet will cause local workspaces
+ /// using this sheet to become invalid. Please ensure the sheet is not currently in use
+ /// and will not be used in the future.
+ ///
+ /// For a safer deletion method, consider using `delete_sheet_safely`.
+ ///
+ /// Note: This function is intended for server-side use only and should not be
+ /// arbitrarily called by other members to prevent unauthorized data deletion.
+ pub async fn delete_sheet(&self, sheet_name: &SheetName) -> Result<(), std::io::Error> {
+ let sheet_name = snake_case!(sheet_name.clone());
+
+ // Ensure sheet exists
+ let sheet_file_path = Sheet::sheet_path_with_name(self, &sheet_name);
+ if !sheet_file_path.exists() {
+ return Err(Error::new(
+ std::io::ErrorKind::NotFound,
+ format!("Sheet `{}` not found!", &sheet_name),
+ ));
+ }
+
+ // Delete the sheet file
+ fs::remove_file(sheet_file_path).await?;
+
+ Ok(())
+ }
+
+    /// Safely delete the sheet
+    ///
+    /// The sheet file is moved into the vault's `.trash` directory, so it no
+    /// longer appears in the results of `sheets` and `sheet_names`.
+    /// If the sheet's holder later accesses it through the `sheet` method,
+    /// the system automatically restores it from the trash. A sheet therefore
+    /// only stays in the trash permanently — awaiting manual cleanup by an
+    /// administrator — when it is truly no longer in use.
+    ///
+    /// This is the safer deletion method because it allows recovery,
+    /// avoiding irreversible data loss caused by accidental deletion.
+    ///
+    /// Note: This function is intended for server-side use only and should
+    /// not be arbitrarily called by other members to prevent unauthorized
+    /// data deletion.
+    pub async fn delete_sheet_safely(&self, sheet_name: &SheetName) -> Result<(), std::io::Error> {
+        let sheet_name = snake_case!(sheet_name.clone());
+
+        // Ensure the sheet exists.
+        let sheet_file_path = Sheet::sheet_path_with_name(self, &sheet_name);
+        if !sheet_file_path.exists() {
+            return Err(Error::new(
+                std::io::ErrorKind::NotFound,
+                format!("Sheet `{}` not found!", &sheet_name),
+            ));
+        }
+
+        // Create the trash directory on first use.
+        let trash_dir = self.vault_path.join(".trash");
+        if !trash_dir.exists() {
+            fs::create_dir_all(&trash_dir).await?;
+        }
+
+        // Generate a unique trash entry name: `<name>_<millis>.<ext>`.
+        // `unwrap_or_default` instead of `unwrap`: a system clock set before
+        // the UNIX epoch now yields timestamp 0 instead of panicking.
+        let timestamp = std::time::SystemTime::now()
+            .duration_since(std::time::UNIX_EPOCH)
+            .unwrap_or_default()
+            .as_millis();
+        let trash_file_name = format!(
+            "{}_{}.{}",
+            sheet_name, timestamp, SERVER_SUFFIX_SHEET_FILE_NO_DOT
+        );
+        let trash_path = trash_dir.join(trash_file_name);
+
+        // Move the sheet file to the trash.
+        fs::rename(&sheet_file_path, &trash_path).await?;
+
+        Ok(())
+    }
+
+    /// Restore the sheet from the trash
+    ///
+    /// Moves the sheet's trashed file back to its original location so it
+    /// can be accessed normally. Trash entries are named
+    /// `<sheet_name>_<millis>.<ext>` (see `delete_sheet_safely`); when
+    /// several entries exist for the same sheet, the most recent one
+    /// (largest timestamp) is restored.
+    ///
+    /// # Errors
+    /// `ErrorKind::NotFound` when the trash directory does not exist or
+    /// contains no entry for this sheet; otherwise any I/O error from the
+    /// rename.
+    pub async fn restore_sheet(&self, sheet_name: &SheetName) -> Result<(), std::io::Error> {
+        let sheet_name = snake_case!(sheet_name.clone());
+
+        let trash_dir = self.vault_path.join(".trash");
+        if !trash_dir.exists() {
+            return Err(Error::new(
+                std::io::ErrorKind::NotFound,
+                "Trash directory does not exist!".to_string(),
+            ));
+        }
+
+        // Pick the newest entry whose stem is exactly `<sheet_name>_<digits>`.
+        // A plain `starts_with(sheet_name)` check would also match unrelated
+        // sheets sharing a prefix (e.g. `foo` matching `foo_bar_123`).
+        let mut best: Option<(u128, std::path::PathBuf)> = None;
+        for entry in std::fs::read_dir(&trash_dir)? {
+            let entry = entry?;
+            let path = entry.path();
+
+            if path.is_file()
+                && let Some(stem) = path.file_stem().and_then(|s| s.to_str())
+                && let Some((name, ts)) = stem.rsplit_once('_')
+                && name == sheet_name
+                && let Ok(timestamp) = ts.parse::<u128>()
+                && best.as_ref().is_none_or(|(newest, _)| timestamp > *newest)
+            {
+                best = Some((timestamp, path));
+            }
+        }
+
+        let trash_path = best.map(|(_, path)| path).ok_or_else(|| {
+            Error::new(
+                std::io::ErrorKind::NotFound,
+                format!("Sheet `{}` not found in trash!", &sheet_name),
+            )
+        })?;
+
+        // Restore the sheet to its original location.
+        let original_path = Sheet::sheet_path_with_name(self, &sheet_name);
+        fs::rename(&trash_path, &original_path).await?;
+
+        Ok(())
+    }
+}
diff --git a/legacy_data/src/data/vault/vault_config.rs b/legacy_data/src/data/vault/vault_config.rs
new file mode 100644
index 0000000..caa8552
--- /dev/null
+++ b/legacy_data/src/data/vault/vault_config.rs
@@ -0,0 +1,233 @@
+use std::net::{IpAddr, Ipv4Addr};
+
+use cfg_file::ConfigFile;
+use serde::{Deserialize, Serialize};
+use uuid::Uuid;
+
+use crate::constants::{PORT, SERVER_FILE_VAULT};
+use crate::data::member::{Member, MemberId};
+
+/// Human-readable name of a vault; also usable as a project name.
+pub type VaultName = String;
+/// Unique identifier of a vault.
+pub type VaultUuid = Uuid;
+
+/// Authentication mode used when members connect to the vault server.
+/// Serialized in lowercase (`key` / `password` / `noauth`).
+#[derive(Serialize, Deserialize, Clone, PartialEq, Default)]
+#[serde(rename_all = "lowercase")]
+pub enum AuthMode {
+ /// Use asymmetric keys: both client and server need to register keys, after which they can connect
+ Key,
+
+ /// Use password: the password stays on the server, and the client needs to set the password locally for connection
+ #[default]
+ Password,
+
+ /// No authentication: generally used in a strongly secure environment, skipping verification directly
+ NoAuth,
+}
+
+/// Logging verbosity for the vault server (serialized in lowercase).
+/// Defaults to `Info`.
+#[derive(Serialize, Deserialize, Clone, PartialEq, Default)]
+#[serde(rename_all = "lowercase")]
+pub enum LoggerLevel {
+ Debug,
+ Trace,
+
+ #[default]
+ Info,
+}
+
+/// On/off switch for an optional server service (serialized in lowercase).
+/// Defaults to `Disable`.
+#[derive(Serialize, Deserialize, Clone, PartialEq, Default)]
+#[serde(rename_all = "lowercase")]
+pub enum ServiceEnabled {
+ Enable,
+
+ #[default]
+ Disable,
+}
+
+/// Yes/no toggle for an optional server behaviour (serialized in lowercase).
+/// Defaults to `No`.
+#[derive(Serialize, Deserialize, Clone, PartialEq, Default)]
+#[serde(rename_all = "lowercase")]
+pub enum BehaviourEnabled {
+ Yes,
+
+ #[default]
+ No,
+}
+
+/// Convert to `bool` (`Enable` → `true`).
+///
+/// Implemented as `From` rather than `Into`: the standard blanket impl then
+/// provides `Into<bool>` for free, and clippy's `from_over_into` lint
+/// prefers this direction. Existing `.into()` call sites keep working.
+impl From<ServiceEnabled> for bool {
+    fn from(value: ServiceEnabled) -> Self {
+        match value {
+            ServiceEnabled::Enable => true,
+            ServiceEnabled::Disable => false,
+        }
+    }
+}
+
+/// Convert to `bool` (`Yes` → `true`).
+///
+/// Implemented as `From` rather than `Into` (clippy `from_over_into`);
+/// the blanket impl keeps existing `.into()` call sites working.
+impl From<BehaviourEnabled> for bool {
+    fn from(value: BehaviourEnabled) -> Self {
+        match value {
+            BehaviourEnabled::Yes => true,
+            BehaviourEnabled::No => false,
+        }
+    }
+}
+
+/// Top-level vault configuration, persisted at `SERVER_FILE_VAULT`.
+#[derive(Serialize, Deserialize, ConfigFile)]
+#[cfg_file(path = SERVER_FILE_VAULT)]
+pub struct VaultConfig {
+ /// Vault uuid, unique identifier for the vault
+ #[serde(rename = "uuid")]
+ vault_uuid: VaultUuid,
+
+ /// Vault name, which can be used as the project name and generally serves as a hint
+ #[serde(rename = "name")]
+ vault_name: VaultName,
+
+ /// Vault host ids, a list of member id representing administrator identities
+ #[serde(rename = "hosts")]
+ vault_host_list: Vec<MemberId>,
+
+ /// Vault server configuration, which will be loaded when connecting to the server
+ #[serde(rename = "profile")]
+ server_config: VaultServerConfig,
+}
+
+/// Network and runtime settings loaded when the vault server starts.
+/// Optional fields fall back to their enum defaults when absent.
+#[derive(Serialize, Deserialize)]
+pub struct VaultServerConfig {
+ /// Local IP address to bind to when the server starts
+ #[serde(rename = "bind")]
+ local_bind: IpAddr,
+
+ /// TCP port to bind to when the server starts
+ #[serde(rename = "port")]
+ port: u16,
+
+ /// Enable logging
+ #[serde(rename = "logger")]
+ logger: Option<BehaviourEnabled>,
+
+ /// Logger Level
+ #[serde(rename = "logger_level")]
+ logger_level: Option<LoggerLevel>,
+
+ /// Whether to enable LAN discovery, allowing members on the same LAN to more easily find the upstream server
+ #[serde(rename = "lan_discovery")]
+ lan_discovery: Option<ServiceEnabled>, // TODO
+
+ /// Authentication mode for the vault server
+ /// key: Use asymmetric keys for authentication
+ /// password: Use a password for authentication
+ /// noauth: No authentication required, requires a strongly secure environment
+ #[serde(rename = "auth_mode")]
+ auth_mode: Option<AuthMode>, // TODO
+}
+
+impl Default for VaultConfig {
+ /// Fresh configuration: random UUID, name `JustEnoughVault`, no admins,
+ /// server bound to 127.0.0.1 on the default `PORT`.
+ ///
+ /// NOTE(review): `auth_mode` defaults to `AuthMode::Key` here although
+ /// `AuthMode::default()` is `Password` — presumably a stricter default
+ /// for freshly created vaults; confirm this is intentional.
+ fn default() -> Self {
+ Self {
+ vault_uuid: Uuid::new_v4(),
+ vault_name: "JustEnoughVault".to_string(),
+ vault_host_list: Vec::new(),
+ server_config: VaultServerConfig {
+ local_bind: IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)),
+ port: PORT,
+ logger: Some(BehaviourEnabled::default()),
+ logger_level: Some(LoggerLevel::default()),
+ lan_discovery: Some(ServiceEnabled::default()),
+ auth_mode: Some(AuthMode::Key),
+ },
+ }
+ }
+}
+
+/// Vault Management
+impl VaultConfig {
+    /// Rename the vault.
+    pub fn change_name(&mut self, name: impl Into<String>) {
+        self.vault_name = name.into();
+    }
+
+    /// Grant administrator rights to `member` (no-op if already an admin).
+    pub fn add_admin(&mut self, member: &Member) {
+        let id = member.id();
+        if !self.vault_host_list.contains(&id) {
+            self.vault_host_list.push(id);
+        }
+    }
+
+    /// Revoke administrator rights from `member`.
+    pub fn remove_admin(&mut self, member: &Member) {
+        let id = member.id();
+        self.vault_host_list.retain(|admin| admin != &id);
+    }
+
+    /// Unique identifier of the vault.
+    pub fn vault_uuid(&self) -> &VaultUuid {
+        &self.vault_uuid
+    }
+
+    /// Replace the vault's unique identifier.
+    pub fn set_vault_uuid(&mut self, vault_uuid: VaultUuid) {
+        self.vault_uuid = vault_uuid;
+    }
+
+    /// Display name of the vault.
+    pub fn vault_name(&self) -> &VaultName {
+        &self.vault_name
+    }
+
+    /// Replace the vault's display name.
+    pub fn set_vault_name(&mut self, vault_name: VaultName) {
+        self.vault_name = vault_name;
+    }
+
+    /// Member ids with administrator rights.
+    pub fn vault_host_list(&self) -> &Vec<MemberId> {
+        &self.vault_host_list
+    }
+
+    /// Replace the whole administrator list.
+    pub fn set_vault_host_list(&mut self, vault_host_list: Vec<MemberId>) {
+        self.vault_host_list = vault_host_list;
+    }
+
+    /// Server profile loaded when the vault server starts.
+    pub fn server_config(&self) -> &VaultServerConfig {
+        &self.server_config
+    }
+
+    /// Replace the server profile.
+    pub fn set_server_config(&mut self, server_config: VaultServerConfig) {
+        self.server_config = server_config;
+    }
+}
+
+impl VaultServerConfig {
+    /// Local IP address the server binds to.
+    pub fn local_bind(&self) -> &IpAddr {
+        &self.local_bind
+    }
+
+    /// TCP port the server binds to.
+    pub fn port(&self) -> u16 {
+        self.port
+    }
+
+    /// Whether LAN discovery is enabled (absent field → disabled).
+    pub fn is_lan_discovery_enabled(&self) -> bool {
+        // `matches!` tests the variant directly instead of cloning the
+        // Option and converting (same result: only `Some(Enable)` is true).
+        matches!(self.lan_discovery, Some(ServiceEnabled::Enable))
+    }
+
+    /// Whether logging is enabled (absent field → disabled).
+    pub fn is_logger_enabled(&self) -> bool {
+        matches!(self.logger, Some(BehaviourEnabled::Yes))
+    }
+
+    /// Effective logger level (absent field → `LoggerLevel::Info`).
+    pub fn logger_level(&self) -> LoggerLevel {
+        self.logger_level.clone().unwrap_or_default()
+    }
+
+    /// Effective authentication mode (absent field → `AuthMode::Password`).
+    pub fn auth_mode(&self) -> AuthMode {
+        self.auth_mode.clone().unwrap_or_default()
+    }
+}
diff --git a/legacy_data/src/data/vault/virtual_file.rs b/legacy_data/src/data/vault/virtual_file.rs
new file mode 100644
index 0000000..28e9172
--- /dev/null
+++ b/legacy_data/src/data/vault/virtual_file.rs
@@ -0,0 +1,500 @@
+use std::{
+ collections::HashMap,
+ io::{Error, ErrorKind},
+ path::PathBuf,
+};
+
+use cfg_file::{ConfigFile, config::ConfigFile};
+use serde::{Deserialize, Serialize};
+use string_proc::{dot_case, snake_case};
+use tcp_connection::instance::ConnectionInstance;
+use tokio::fs;
+use uuid::Uuid;
+
+use crate::{
+ constants::{
+ DEFAULT_VF_DESCRIPTION, DEFAULT_VF_VERSION, KEY_TEMP_NAME, KEY_VF_ID, KEY_VF_INDEX,
+ KEY_VF_VERSION, SERVER_FILE_VF_META, SERVER_FILE_VF_VERSION_INSTANCE, SERVER_PATH_VF_ROOT,
+ SERVER_PATH_VF_STORAGE, SERVER_PATH_VF_TEMP, VF_PREFIX,
+ },
+ data::{member::MemberId, vault::Vault},
+};
+
+/// Unique identifier of a virtual file (`VF_PREFIX` + a UUID at creation).
+pub type VirtualFileId = String;
+/// Name of one version of a virtual file.
+pub type VirtualFileVersion = String;
+
+/// Handle to one virtual file, borrowed from the vault that stores it.
+pub struct VirtualFile<'a> {
+ /// Unique identifier for the virtual file
+ id: VirtualFileId,
+
+ /// Reference of Vault
+ current_vault: &'a Vault,
+}
+
+/// Persisted metadata of a virtual file: current version, edit-right
+/// holder, per-version descriptions, and the full version history.
+#[derive(Default, Clone, Serialize, Deserialize, ConfigFile)]
+pub struct VirtualFileMeta {
+ /// Current version of the virtual file
+ #[serde(rename = "ver")]
+ current_version: VirtualFileVersion,
+
+ /// The member who holds the edit right of the file
+ #[serde(rename = "holder")]
+ hold_member: MemberId,
+
+ /// Description of each version
+ #[serde(rename = "descs")]
+ version_description: HashMap<VirtualFileVersion, VirtualFileVersionDescription>,
+
+ /// Histories
+ #[serde(rename = "histories")]
+ histories: Vec<VirtualFileVersion>,
+}
+
+/// Who created a version of a virtual file, and a free-text description.
+#[derive(Debug, Default, Clone, Serialize, Deserialize)]
+pub struct VirtualFileVersionDescription {
+ /// The member who created this version
+ #[serde(rename = "creator")]
+ pub creator: MemberId,
+
+ /// The description of this version
+ #[serde(rename = "desc")]
+ pub description: String,
+}
+
+impl VirtualFileVersionDescription {
+    /// Create a new version description.
+    ///
+    /// Generalized to accept anything convertible into a `String`, so
+    /// callers can pass either `String` (as before) or `&str` directly.
+    pub fn new(creator: MemberId, description: impl Into<String>) -> Self {
+        Self {
+            creator,
+            description: description.into(),
+        }
+    }
+}
+
+/// Virtual File Operations
+impl Vault {
+    /// Generate a unique temporary path for receiving a file transfer.
+    pub fn virtual_file_temp_path(&self) -> PathBuf {
+        // `Uuid::to_string` directly — `format!("{}", ..)` for a single
+        // Display value is the same thing with extra machinery.
+        let random_receive_name = Uuid::new_v4().to_string();
+        self.vault_path
+            .join(SERVER_PATH_VF_TEMP.replace(KEY_TEMP_NAME, &random_receive_name))
+    }
+
+    /// Root directory under which all virtual files are stored.
+    pub fn virtual_file_storage_dir(&self) -> PathBuf {
+        let root = self.vault_path();
+        root.join(SERVER_PATH_VF_ROOT)
+    }
+
+    /// Directory where the given virtual file's data is stored.
+    ///
+    /// # Errors
+    /// Propagates `vf_index` failures for malformed ids.
+    pub fn virtual_file_dir(&self, id: &VirtualFileId) -> Result<PathBuf, std::io::Error> {
+        // `id` is already a string — no `to_string` copy needed.
+        let relative = SERVER_PATH_VF_STORAGE
+            .replace(KEY_VF_ID, id)
+            .replace(KEY_VF_INDEX, &Self::vf_index(id)?);
+        Ok(self.vault_path().join(relative))
+    }
+
+    /// Compute the two-level fan-out index (`ab/cd`) for a virtual file id.
+    ///
+    /// The index comes from the first four characters of the id's first
+    /// hyphen-separated segment (after stripping `VF_PREFIX`), split into two
+    /// two-character directory names so files spread across subdirectories.
+    ///
+    /// # Errors
+    /// `ErrorKind::InvalidInput` when the leading segment is shorter than
+    /// four bytes or the first four bytes do not fall on char boundaries.
+    fn vf_index(id: &VirtualFileId) -> Result<String, std::io::Error> {
+        // Remove VF_PREFIX if present.
+        let id_str = id.strip_prefix(VF_PREFIX).unwrap_or(id);
+
+        // `split` always yields at least one item, so there is no error
+        // case here (the former `ok_or_else` branch was unreachable).
+        let first_part = id_str.split('-').next().unwrap_or(id_str);
+
+        // Need at least four bytes; the boundary checks make the slicing
+        // below panic-free even for non-ASCII input.
+        if first_part.len() < 4
+            || !first_part.is_char_boundary(2)
+            || !first_part.is_char_boundary(4)
+        {
+            return Err(std::io::Error::new(
+                std::io::ErrorKind::InvalidInput,
+                "Invalid virtual file ID format: first part must have at least 4 single-byte characters",
+            ));
+        }
+
+        // "abcd" -> "ab/cd"
+        Ok(format!("{}/{}", &first_part[0..2], &first_part[2..4]))
+    }
+
+ /// Get the path of one specific version instance of a virtual file.
+ ///
+ /// NOTE(review): a `vf_index` failure is swallowed by `unwrap_or_default`,
+ /// producing a path with an empty index segment — confirm malformed ids
+ /// cannot reach this call.
+ pub fn virtual_file_real_path(
+ &self,
+ id: &VirtualFileId,
+ version: &VirtualFileVersion,
+ ) -> PathBuf {
+ self.vault_path().join(
+ SERVER_FILE_VF_VERSION_INSTANCE
+ .replace(KEY_VF_ID, &id.to_string())
+ .replace(KEY_VF_INDEX, &Self::vf_index(id).unwrap_or_default())
+ .replace(KEY_VF_VERSION, &version.to_string()),
+ )
+ }
+
+    /// Path of the metadata file for the given virtual file.
+    ///
+    /// NOTE(review): like `virtual_file_real_path`, a `vf_index` failure
+    /// falls back to an empty index segment here.
+    pub fn virtual_file_meta_path(&self, id: &VirtualFileId) -> PathBuf {
+        // `id` is already a string — no `to_string` copy needed.
+        self.vault_path().join(
+            SERVER_FILE_VF_META
+                .replace(KEY_VF_ID, id)
+                .replace(KEY_VF_INDEX, &Self::vf_index(id).unwrap_or_default()),
+        )
+    }
+
+    /// Get a handle to the virtual file with the given ID.
+    ///
+    /// # Errors
+    /// `ErrorKind::NotFound` when no storage directory exists for the id,
+    /// plus any `vf_index` failure for malformed ids.
+    pub fn virtual_file(&self, id: &VirtualFileId) -> Result<VirtualFile<'_>, std::io::Error> {
+        if self.virtual_file_dir(id)?.exists() {
+            Ok(VirtualFile {
+                id: id.clone(),
+                current_vault: self,
+            })
+        } else {
+            Err(std::io::Error::new(
+                std::io::ErrorKind::NotFound,
+                // Fixed error-message grammar ("Cannot found" -> "Cannot find").
+                "Cannot find virtual file!",
+            ))
+        }
+    }
+
+    /// Read the metadata of the virtual file with the given ID.
+    pub async fn virtual_file_meta(
+        &self,
+        id: &VirtualFileId,
+    ) -> Result<VirtualFileMeta, std::io::Error> {
+        let meta_path = self.virtual_file_meta_path(id);
+        VirtualFileMeta::read_from(meta_path).await
+    }
+
+    /// Persist the metadata of the virtual file with the given ID.
+    pub async fn write_virtual_file_meta(
+        &self,
+        id: &VirtualFileId,
+        meta: &VirtualFileMeta,
+    ) -> Result<(), std::io::Error> {
+        let meta_path = self.virtual_file_meta_path(id);
+        VirtualFileMeta::write_to(meta, meta_path).await?;
+        Ok(())
+    }
+
+ /// Create a virtual file from a connection instance
+ ///
+ /// It's the only way to create virtual files!
+ ///
+ /// When the target machine executes `write_file`, use this function instead of `read_file`,
+ /// and provide the member ID of the transmitting member.
+ ///
+ /// The system will automatically receive the file and
+ /// create the virtual file.
+ ///
+ /// On success the new id (`VF_PREFIX` + a fresh UUID) is returned, the
+ /// history starts at `DEFAULT_VF_VERSION`, and the sender becomes the
+ /// holder of the edit right. On a failed transfer the partial temp file
+ /// is removed and the error is propagated.
+ pub async fn create_virtual_file_from_connection(
+ &self,
+ instance: &mut ConnectionInstance,
+ member_id: &MemberId,
+ ) -> Result<VirtualFileId, std::io::Error> {
+ let receive_path = self.virtual_file_temp_path();
+ let new_id = format!("{}{}", VF_PREFIX, Uuid::new_v4());
+ let move_path = self.virtual_file_real_path(&new_id, &DEFAULT_VF_VERSION.to_string());
+
+ match instance.read_file(receive_path.clone()).await {
+ Ok(_) => {
+ // Read successful, create virtual file
+ // Create default version description
+ let mut version_description =
+ HashMap::<VirtualFileVersion, VirtualFileVersionDescription>::new();
+ version_description.insert(
+ DEFAULT_VF_VERSION.to_string(),
+ VirtualFileVersionDescription {
+ creator: member_id.clone(),
+ description: DEFAULT_VF_DESCRIPTION.to_string(),
+ },
+ );
+ // Create metadata
+ let mut meta = VirtualFileMeta {
+ current_version: DEFAULT_VF_VERSION.to_string(),
+ hold_member: member_id.clone(), // The holder of the newly created virtual file is the creator by default
+ version_description,
+ histories: Vec::default(),
+ };
+
+ // Add first version
+ meta.histories.push(DEFAULT_VF_VERSION.to_string());
+
+ // Write metadata to file
+ VirtualFileMeta::write_to(&meta, self.virtual_file_meta_path(&new_id)).await?;
+
+ // Move temp file to virtual file directory
+ // (the fan-out directory may not exist yet on first write)
+ if let Some(parent) = move_path.parent()
+ && !parent.exists()
+ {
+ fs::create_dir_all(parent).await?;
+ }
+ fs::rename(receive_path, move_path).await?;
+
+ Ok(new_id)
+ }
+ Err(e) => {
+ // Read failed, remove temp file.
+ if receive_path.exists() {
+ fs::remove_file(receive_path).await?;
+ }
+
+ Err(Error::other(e))
+ }
+ }
+ }
+
+ /// Update a virtual file from a connection instance
+ ///
+ /// It's the only way to update virtual files!
+ /// When the target machine executes `write_file`, use this function instead of `read_file`,
+ /// and provide the member ID of the transmitting member.
+ ///
+ /// The system will automatically receive the file and
+ /// update the virtual file.
+ ///
+ /// Note: The specified member must hold the edit right of the file,
+ /// otherwise the file reception will not be allowed.
+ ///
+ /// Make sure to obtain the edit right of the file before calling this function.
+ ///
+ /// NOTE(review): the version name is normalized with `dot_case!` here but
+ /// with `snake_case!` in `update_virtual_file_from_exist_version` — confirm
+ /// the inconsistency is intentional.
+ pub async fn update_virtual_file_from_connection(
+ &self,
+ instance: &mut ConnectionInstance,
+ member: &MemberId,
+ virtual_file_id: &VirtualFileId,
+ new_version: &VirtualFileVersion,
+ description: VirtualFileVersionDescription,
+ ) -> Result<(), std::io::Error> {
+ let new_version = dot_case!(new_version.clone());
+ let mut meta = self.virtual_file_meta(virtual_file_id).await?;
+
+ // Check if the member has edit right
+ self.check_virtual_file_edit_right(member, virtual_file_id)
+ .await?;
+
+ // Check if the new version already exists
+ if meta.version_description.contains_key(&new_version) {
+ return Err(Error::new(
+ ErrorKind::AlreadyExists,
+ format!(
+ "Version `{}` already exists for virtual file `{}`",
+ new_version, virtual_file_id
+ ),
+ ));
+ }
+
+ // Verify success
+ let receive_path = self.virtual_file_temp_path();
+ let move_path = self.virtual_file_real_path(virtual_file_id, &new_version);
+
+ match instance.read_file(receive_path.clone()).await {
+ Ok(_) => {
+ // Read success, move temp file to real path.
+ // NOTE(review): unlike the create path, no `create_dir_all` on
+ // the parent here — presumably it exists from file creation;
+ // verify for vaults whose storage was relocated.
+ fs::rename(receive_path, move_path).await?;
+
+ // Update metadata
+ meta.current_version = new_version.clone();
+ meta.version_description
+ .insert(new_version.clone(), description);
+ meta.histories.push(new_version);
+ VirtualFileMeta::write_to(&meta, self.virtual_file_meta_path(virtual_file_id))
+ .await?;
+
+ Ok(())
+ }
+ Err(e) => {
+ // Read failed, remove temp file.
+ if receive_path.exists() {
+ fs::remove_file(receive_path).await?;
+ }
+
+ Err(Error::other(e))
+ }
+ }
+ }
+
+ /// Update virtual file from existing version
+ ///
+ /// This operation creates a new version based on the specified old version file instance.
+ /// The new version will retain the same version name as the old version, but use a different version number.
+ /// After the update, this version will be considered newer than the original version when comparing versions.
+ ///
+ /// No file data is copied: only the metadata changes — the old version's
+ /// name is pushed onto the history again and becomes the current version,
+ /// so its existing file instance is reused.
+ pub async fn update_virtual_file_from_exist_version(
+ &self,
+ member: &MemberId,
+ virtual_file_id: &VirtualFileId,
+ old_version: &VirtualFileVersion,
+ ) -> Result<(), std::io::Error> {
+ let old_version = snake_case!(old_version.clone());
+ // Metadata is read before the existence check below; a missing meta
+ // file surfaces here as the read error.
+ let mut meta = self.virtual_file_meta(virtual_file_id).await?;
+
+ // Check if the member has edit right
+ self.check_virtual_file_edit_right(member, virtual_file_id)
+ .await?;
+
+ // Ensure virtual file exist
+ let Ok(_) = self.virtual_file(virtual_file_id) else {
+ return Err(Error::new(
+ ErrorKind::NotFound,
+ format!("Virtual file `{}` not found!", virtual_file_id),
+ ));
+ };
+
+ // Ensure version exist
+ if !meta.version_exists(&old_version) {
+ return Err(Error::new(
+ ErrorKind::NotFound,
+ format!("Version `{}` not found!", old_version),
+ ));
+ }
+
+ // Ok, Create new version
+ meta.current_version = old_version.clone();
+ meta.histories.push(old_version);
+ VirtualFileMeta::write_to(&meta, self.virtual_file_meta_path(virtual_file_id)).await?;
+
+ Ok(())
+ }
+
+    /// Grant `member_id` the edit right for a virtual file.
+    /// Takes effect as soon as the metadata write succeeds.
+    pub async fn grant_virtual_file_edit_right(
+        &self,
+        member_id: &MemberId,
+        virtual_file_id: &VirtualFileId,
+    ) -> Result<(), std::io::Error> {
+        let mut meta = self.virtual_file_meta(virtual_file_id).await?;
+        meta.hold_member = member_id.to_owned();
+        self.write_virtual_file_meta(virtual_file_id, &meta).await
+    }
+
+    /// Whether `member_id` currently holds the edit right for the file.
+    pub async fn has_virtual_file_edit_right(
+        &self,
+        member_id: &MemberId,
+        virtual_file_id: &VirtualFileId,
+    ) -> Result<bool, std::io::Error> {
+        let meta = self.virtual_file_meta(virtual_file_id).await?;
+        Ok(&meta.hold_member == member_id)
+    }
+
+    /// Assert that `member_id` holds the edit right for the file.
+    ///
+    /// Returns `Ok(())` when the right is held, otherwise a
+    /// `PermissionDenied` error naming the member and file.
+    pub async fn check_virtual_file_edit_right(
+        &self,
+        member_id: &MemberId,
+        virtual_file_id: &VirtualFileId,
+    ) -> Result<(), std::io::Error> {
+        if self
+            .has_virtual_file_edit_right(member_id, virtual_file_id)
+            .await?
+        {
+            Ok(())
+        } else {
+            Err(Error::new(
+                ErrorKind::PermissionDenied,
+                format!(
+                    "Member `{}` not allowed to update virtual file `{}`",
+                    member_id, virtual_file_id
+                ),
+            ))
+        }
+    }
+
+    /// Revoke the edit right from whoever currently holds it.
+    /// Takes effect as soon as the metadata write succeeds; the holder
+    /// field is reset to the empty default id.
+    pub async fn revoke_virtual_file_edit_right(
+        &self,
+        virtual_file_id: &VirtualFileId,
+    ) -> Result<(), std::io::Error> {
+        let mut meta = self.virtual_file_meta(virtual_file_id).await?;
+        meta.hold_member = MemberId::default();
+        self.write_virtual_file_meta(virtual_file_id, &meta).await
+    }
+}
+
+impl<'a> VirtualFile<'a> {
+    /// Identifier of this virtual file.
+    pub fn id(&self) -> VirtualFileId {
+        self.id.to_owned()
+    }
+
+    /// Load this file's metadata from its owning vault.
+    pub async fn read_meta(&self) -> Result<VirtualFileMeta, std::io::Error> {
+        self.current_vault.virtual_file_meta(&self.id).await
+    }
+}
+
+impl VirtualFileMeta {
+    /// All recorded versions, oldest first.
+    pub fn versions(&self) -> &Vec<VirtualFileVersion> {
+        &self.histories
+    }
+
+    /// Latest version of the virtual file.
+    ///
+    /// `histories` gains its first entry when the file is created and
+    /// entries are never removed, so the list is never empty here and
+    /// `unwrap` cannot panic.
+    pub fn version_latest(&self) -> VirtualFileVersion {
+        self.histories.last().unwrap().clone()
+    }
+
+    /// Total number of recorded versions.
+    pub fn version_len(&self) -> i32 {
+        self.histories.len() as i32
+    }
+
+    /// Whether the given version has ever been recorded.
+    pub fn version_exists(&self, version: &VirtualFileVersion) -> bool {
+        self.histories.contains(version)
+    }
+
+    /// History index of the *last* occurrence of `version`,
+    /// or `None` if it was never recorded.
+    pub fn version_num(&self, version: &VirtualFileVersion) -> Option<i32> {
+        self.histories
+            .iter()
+            .rposition(|v| v == version)
+            .map(|idx| idx as i32)
+    }
+
+    /// Version name at history index `version_num`,
+    /// or `None` when the index is negative or out of range.
+    pub fn version_name(&self, version_num: i32) -> Option<VirtualFileVersion> {
+        usize::try_from(version_num)
+            .ok()
+            .and_then(|idx| self.histories.get(idx))
+            .cloned()
+    }
+
+    /// Member currently holding the edit right of the file.
+    pub fn hold_member(&self) -> &MemberId {
+        &self.hold_member
+    }
+
+    /// Descriptions for every recorded version.
+    pub fn version_descriptions(
+        &self,
+    ) -> &HashMap<VirtualFileVersion, VirtualFileVersionDescription> {
+        &self.version_description
+    }
+
+    /// Description of one version, if it exists.
+    pub fn version_description(
+        &self,
+        version: VirtualFileVersion,
+    ) -> Option<&VirtualFileVersionDescription> {
+        self.version_description.get(&version)
+    }
+}