feat(nbstore): improve nbstore (#9512)

This commit is contained in:
EYHN
2025-01-06 09:38:03 +00:00
parent a2563d2180
commit 46c8c4a408
103 changed files with 3337 additions and 3423 deletions

View File

@@ -1,29 +0,0 @@
use thiserror::Error;
/// Errors surfaced across the UniFFI boundary to the mobile host app.
///
/// The `#[error(...)]` message strings double as the user-visible text.
#[derive(uniffi::Error, Error, Debug)]
pub enum UniffiError {
// The platform's user document directory could not be resolved.
#[error("Get user document directory failed")]
GetUserDocumentDirectoryFailed,
// Creating the `.affine` directory tree on disk failed; payload is the path.
#[error("Create affine dir failed: {0}")]
CreateAffineDirFailed(String),
#[error("Empty doc storage path")]
EmptyDocStoragePath,
#[error("Empty space id")]
EmptySpaceId,
// Wrapped database error; see the `From<sqlx::Error>` impl in this file.
#[error("Sqlx error: {0}")]
SqlxError(String),
// Payloads cross the FFI boundary base64-encoded; decode failures land here.
#[error("Base64 decoding error: {0}")]
Base64DecodingError(String),
#[error("Invalid universal storage id: {0}. It should be in format of @peer($peer);@type($type);@id($id);")]
InvalidUniversalId(String),
// The `@type(...)` value was neither "userspace" nor "workspace".
#[error("Invalid space type: {0}")]
InvalidSpaceType(String),
// Joining the space directory path components failed; payload identifies the space.
#[error("Concat space dir failed: {0}")]
ConcatSpaceDirFailed(String),
}
/// Converts low-level SQLx failures into the FFI-safe error type.
///
/// Only the rendered message survives the conversion, since
/// `sqlx::Error` itself cannot cross the UniFFI boundary.
impl From<sqlx::Error> for UniffiError {
    fn from(err: sqlx::Error) -> Self {
        Self::SqlxError(err.to_string())
    }
}

View File

@@ -1,15 +1,23 @@
use std::fmt::Display;
use std::str::FromStr;
use std::time::SystemTime;
use affine_common::hashcash::Stamp;
use affine_nbstore::storage;
use dashmap::{mapref::one::RefMut, DashMap, Entry};
use affine_nbstore::pool::SqliteDocStoragePool;
use crate::error::UniffiError;
/// Errors crossing the UniFFI boundary to the mobile host.
#[derive(uniffi::Error, thiserror::Error, Debug)]
pub enum UniffiError {
// Generic wrapper; storage-layer errors are stringified into this variant
// (see the `From<affine_nbstore::error::Error>` impl in this file).
#[error("Error: {0}")]
Err(String),
// Payloads travel base64-encoded over FFI; decode failures land here.
#[error("Base64 decoding error: {0}")]
Base64DecodingError(String),
// An i64 millisecond timestamp was out of range for a chrono DateTime.
#[error("Timestamp decoding error")]
TimestampDecodingError,
}
mod error;
mod utils;
impl From<affine_nbstore::error::Error> for UniffiError {
fn from(err: affine_nbstore::error::Error) -> Self {
Self::Err(err.to_string())
}
}
/// Crate-local alias so exported methods can spell `Result<T>` with the
/// shared `UniffiError` error type.
type Result<T> = std::result::Result<T, UniffiError>;
// Generates the UniFFI scaffolding for the "affine_mobile_native" module.
uniffi::setup_scaffolding!("affine_mobile_native");
@@ -22,16 +30,16 @@ pub fn hashcash_mint(resource: String, bits: u32) -> String {
pub struct DocRecord {
pub doc_id: String,
// base64 encoded data
pub data: String,
pub timestamp: SystemTime,
pub bin: String,
pub timestamp: i64,
}
impl From<affine_nbstore::DocRecord> for DocRecord {
fn from(record: affine_nbstore::DocRecord) -> Self {
Self {
doc_id: record.doc_id,
data: base64_simd::STANDARD.encode_to_string(&record.data),
timestamp: record.timestamp.and_utc().into(),
bin: base64_simd::STANDARD.encode_to_string(&record.bin),
timestamp: record.timestamp.and_utc().timestamp_millis(),
}
}
}
@@ -39,13 +47,15 @@ impl From<affine_nbstore::DocRecord> for DocRecord {
impl TryFrom<DocRecord> for affine_nbstore::DocRecord {
type Error = UniffiError;
fn try_from(record: DocRecord) -> Result<Self, Self::Error> {
fn try_from(record: DocRecord) -> Result<Self> {
Ok(Self {
doc_id: record.doc_id,
data: base64_simd::STANDARD
.decode_to_vec(record.data)
bin: base64_simd::STANDARD
.decode_to_vec(record.bin)
.map_err(|e| UniffiError::Base64DecodingError(e.to_string()))?,
timestamp: chrono::DateTime::<chrono::Utc>::from(record.timestamp).naive_utc(),
timestamp: chrono::DateTime::<chrono::Utc>::from_timestamp_millis(record.timestamp)
.ok_or(UniffiError::TimestampDecodingError)?
.naive_utc(),
})
}
}
@@ -53,52 +63,60 @@ impl TryFrom<DocRecord> for affine_nbstore::DocRecord {
#[derive(uniffi::Record)]
pub struct DocUpdate {
pub doc_id: String,
pub created_at: SystemTime,
pub timestamp: i64,
// base64 encoded data
pub data: String,
pub bin: String,
}
impl From<affine_nbstore::DocUpdate> for DocUpdate {
fn from(update: affine_nbstore::DocUpdate) -> Self {
Self {
doc_id: update.doc_id,
created_at: update.created_at.and_utc().into(),
data: base64_simd::STANDARD.encode_to_string(&update.data),
timestamp: update.timestamp.and_utc().timestamp_millis(),
bin: base64_simd::STANDARD.encode_to_string(&update.bin),
}
}
}
impl From<DocUpdate> for affine_nbstore::DocUpdate {
fn from(update: DocUpdate) -> Self {
Self {
impl TryFrom<DocUpdate> for affine_nbstore::DocUpdate {
type Error = UniffiError;
fn try_from(update: DocUpdate) -> Result<Self> {
Ok(Self {
doc_id: update.doc_id,
created_at: chrono::DateTime::<chrono::Utc>::from(update.created_at).naive_utc(),
data: update.data.into(),
}
timestamp: chrono::DateTime::<chrono::Utc>::from_timestamp_millis(update.timestamp)
.ok_or(UniffiError::TimestampDecodingError)?
.naive_utc(),
bin: update.bin.into(),
})
}
}
#[derive(uniffi::Record)]
pub struct DocClock {
pub doc_id: String,
pub timestamp: SystemTime,
pub timestamp: i64,
}
impl From<affine_nbstore::DocClock> for DocClock {
fn from(clock: affine_nbstore::DocClock) -> Self {
Self {
doc_id: clock.doc_id,
timestamp: clock.timestamp.and_utc().into(),
timestamp: clock.timestamp.and_utc().timestamp_millis(),
}
}
}
impl From<DocClock> for affine_nbstore::DocClock {
fn from(clock: DocClock) -> Self {
Self {
impl TryFrom<DocClock> for affine_nbstore::DocClock {
type Error = UniffiError;
fn try_from(clock: DocClock) -> Result<Self> {
Ok(Self {
doc_id: clock.doc_id,
timestamp: chrono::DateTime::<chrono::Utc>::from(clock.timestamp).naive_utc(),
}
timestamp: chrono::DateTime::<chrono::Utc>::from_timestamp_millis(clock.timestamp)
.ok_or(UniffiError::TimestampDecodingError)?
.naive_utc(),
})
}
}
@@ -109,7 +127,7 @@ pub struct Blob {
pub data: String,
pub mime: String,
pub size: i64,
pub created_at: SystemTime,
pub created_at: i64,
}
impl From<affine_nbstore::Blob> for Blob {
@@ -119,7 +137,7 @@ impl From<affine_nbstore::Blob> for Blob {
data: base64_simd::STANDARD.encode_to_string(&blob.data),
mime: blob.mime,
size: blob.size,
created_at: blob.created_at.and_utc().into(),
created_at: blob.created_at.and_utc().timestamp_millis(),
}
}
}
@@ -135,7 +153,7 @@ pub struct SetBlob {
impl TryFrom<SetBlob> for affine_nbstore::SetBlob {
type Error = UniffiError;
fn try_from(blob: SetBlob) -> Result<Self, Self::Error> {
fn try_from(blob: SetBlob) -> Result<Self> {
Ok(Self {
key: blob.key,
data: base64_simd::STANDARD
@@ -151,7 +169,7 @@ pub struct ListedBlob {
pub key: String,
pub size: i64,
pub mime: String,
pub created_at: SystemTime,
pub created_at: i64,
}
impl From<affine_nbstore::ListedBlob> for ListedBlob {
@@ -160,76 +178,43 @@ impl From<affine_nbstore::ListedBlob> for ListedBlob {
key: blob.key,
size: blob.size,
mime: blob.mime,
created_at: blob.created_at.and_utc().into(),
created_at: blob.created_at.and_utc().timestamp_millis(),
}
}
}
#[derive(uniffi::Object)]
pub struct DocStoragePool {
inner: DashMap<String, storage::SqliteDocStorage>,
}
impl DocStoragePool {
fn ensure_storage<'a>(
&'a self,
universal_id: &str,
) -> Result<RefMut<'a, String, storage::SqliteDocStorage>, UniffiError> {
let entry = self.inner.entry(universal_id.to_string());
if let Entry::Occupied(storage) = entry {
return Ok(storage.into_ref());
}
let options = parse_universal_id(entry.key())?;
let db_path = utils::get_db_path(&options)?;
if db_path.is_empty() {
return Err(UniffiError::EmptyDocStoragePath);
}
let storage = storage::SqliteDocStorage::new(db_path);
Ok(entry.or_insert(storage))
}
inner: SqliteDocStoragePool,
}
#[uniffi::export]
/// Constructs a `DocStoragePool` with a default (empty) inner
/// `SqliteDocStoragePool`; exported to the mobile host as a free function.
pub fn new_doc_storage_pool() -> DocStoragePool {
DocStoragePool {
inner: Default::default(),
}
}
#[uniffi::export(async_runtime = "tokio")]
impl DocStoragePool {
/// Initialize the database and run migrations.
pub async fn connect(&self, universal_id: String) -> Result<(), UniffiError> {
let storage = self.ensure_storage(&universal_id)?;
Ok(storage.connect().await?)
pub async fn connect(&self, universal_id: String, path: String) -> Result<()> {
Ok(self.inner.connect(universal_id, path).await?)
}
pub async fn close(&self, universal_id: String) -> Result<(), UniffiError> {
let storage = self.ensure_storage(&universal_id)?;
storage.close().await;
self.inner.remove(&universal_id);
pub async fn disconnect(&self, universal_id: String) -> Result<()> {
self.inner.disconnect(universal_id).await?;
Ok(())
}
pub fn is_closed(&self, universal_id: String) -> bool {
let storage = self.ensure_storage(&universal_id).unwrap();
storage.is_closed()
}
pub async fn checkpoint(&self, universal_id: String) -> Result<(), UniffiError> {
let storage = self.ensure_storage(&universal_id)?;
Ok(storage.checkpoint().await?)
}
pub async fn validate(&self, universal_id: String) -> Result<bool, UniffiError> {
let storage = self.ensure_storage(&universal_id)?;
Ok(storage.validate().await?)
}
pub async fn set_space_id(
&self,
universal_id: String,
space_id: String,
) -> Result<(), UniffiError> {
let storage = self.ensure_storage(&universal_id)?;
if space_id.is_empty() {
return Err(UniffiError::EmptySpaceId);
}
Ok(storage.set_space_id(space_id).await?)
pub async fn set_space_id(&self, universal_id: String, space_id: String) -> Result<()> {
Ok(
self
.inner
.ensure_storage(universal_id)?
.set_space_id(space_id)
.await?,
)
}
pub async fn push_update(
@@ -237,10 +222,11 @@ impl DocStoragePool {
universal_id: String,
doc_id: String,
update: String,
) -> Result<SystemTime, UniffiError> {
let storage = self.ensure_storage(&universal_id)?;
) -> Result<i64> {
Ok(
storage
self
.inner
.ensure_storage(universal_id)?
.push_update(
doc_id,
base64_simd::STANDARD
@@ -249,7 +235,7 @@ impl DocStoragePool {
)
.await?
.and_utc()
.into(),
.timestamp_millis(),
)
}
@@ -257,28 +243,36 @@ impl DocStoragePool {
&self,
universal_id: String,
doc_id: String,
) -> Result<Option<DocRecord>, UniffiError> {
let storage = self.ensure_storage(&universal_id)?;
Ok(storage.get_doc_snapshot(doc_id).await?.map(Into::into))
) -> Result<Option<DocRecord>> {
Ok(
self
.inner
.ensure_storage(universal_id)?
.get_doc_snapshot(doc_id)
.await?
.map(Into::into),
)
}
pub async fn set_doc_snapshot(
&self,
universal_id: String,
snapshot: DocRecord,
) -> Result<bool, UniffiError> {
let storage = self.ensure_storage(&universal_id)?;
Ok(storage.set_doc_snapshot(snapshot.try_into()?).await?)
pub async fn set_doc_snapshot(&self, universal_id: String, snapshot: DocRecord) -> Result<bool> {
Ok(
self
.inner
.ensure_storage(universal_id)?
.set_doc_snapshot(snapshot.try_into()?)
.await?,
)
}
pub async fn get_doc_updates(
&self,
universal_id: String,
doc_id: String,
) -> Result<Vec<DocUpdate>, UniffiError> {
let storage = self.ensure_storage(&universal_id)?;
) -> Result<Vec<DocUpdate>> {
Ok(
storage
self
.inner
.ensure_storage(universal_id)?
.get_doc_updates(doc_id)
.await?
.into_iter()
@@ -291,36 +285,55 @@ impl DocStoragePool {
&self,
universal_id: String,
doc_id: String,
updates: Vec<SystemTime>,
) -> Result<u32, UniffiError> {
let storage = self.ensure_storage(&universal_id)?;
updates: Vec<i64>,
) -> Result<u32> {
Ok(
storage
self
.inner
.ensure_storage(universal_id)?
.mark_updates_merged(
doc_id,
updates
.into_iter()
.map(|t| chrono::DateTime::<chrono::Utc>::from(t).naive_utc())
.collect(),
.map(|t| {
chrono::DateTime::<chrono::Utc>::from_timestamp_millis(t)
.ok_or(UniffiError::TimestampDecodingError)
.map(|t| t.naive_utc())
})
.collect::<Result<Vec<_>>>()?,
)
.await?,
)
}
pub async fn delete_doc(&self, universal_id: String, doc_id: String) -> Result<(), UniffiError> {
let storage = self.ensure_storage(&universal_id)?;
Ok(storage.delete_doc(doc_id).await?)
pub async fn delete_doc(&self, universal_id: String, doc_id: String) -> Result<()> {
Ok(
self
.inner
.ensure_storage(universal_id)?
.delete_doc(doc_id)
.await?,
)
}
pub async fn get_doc_clocks(
&self,
universal_id: String,
after: Option<SystemTime>,
) -> Result<Vec<DocClock>, UniffiError> {
let storage = self.ensure_storage(&universal_id)?;
after: Option<i64>,
) -> Result<Vec<DocClock>> {
Ok(
storage
.get_doc_clocks(after.map(|t| chrono::DateTime::<chrono::Utc>::from(t).naive_utc()))
self
.inner
.ensure_storage(universal_id)?
.get_doc_clocks(
after
.map(|t| {
chrono::DateTime::<chrono::Utc>::from_timestamp_millis(t)
.ok_or(UniffiError::TimestampDecodingError)
.map(|t| t.naive_utc())
})
.transpose()?,
)
.await?
.into_iter()
.map(Into::into)
@@ -332,23 +345,36 @@ impl DocStoragePool {
&self,
universal_id: String,
doc_id: String,
) -> Result<Option<DocClock>, UniffiError> {
let storage = self.ensure_storage(&universal_id)?;
Ok(storage.get_doc_clock(doc_id).await?.map(Into::into))
) -> Result<Option<DocClock>> {
Ok(
self
.inner
.ensure_storage(universal_id)?
.get_doc_clock(doc_id)
.await?
.map(Into::into),
)
}
pub async fn get_blob(
&self,
universal_id: String,
key: String,
) -> Result<Option<Blob>, UniffiError> {
let storage = self.ensure_storage(&universal_id)?;
Ok(storage.get_blob(key).await?.map(Into::into))
pub async fn get_blob(&self, universal_id: String, key: String) -> Result<Option<Blob>> {
Ok(
self
.inner
.ensure_storage(universal_id)?
.get_blob(key)
.await?
.map(Into::into),
)
}
pub async fn set_blob(&self, universal_id: String, blob: SetBlob) -> Result<(), UniffiError> {
let storage = self.ensure_storage(&universal_id)?;
Ok(storage.set_blob(blob.try_into()?).await?)
pub async fn set_blob(&self, universal_id: String, blob: SetBlob) -> Result<()> {
Ok(
self
.inner
.ensure_storage(universal_id)?
.set_blob(blob.try_into()?)
.await?,
)
}
pub async fn delete_blob(
@@ -356,20 +382,31 @@ impl DocStoragePool {
universal_id: String,
key: String,
permanently: bool,
) -> Result<(), UniffiError> {
let storage = self.ensure_storage(&universal_id)?;
Ok(storage.delete_blob(key, permanently).await?)
}
pub async fn release_blobs(&self, universal_id: String) -> Result<(), UniffiError> {
let storage = self.ensure_storage(&universal_id)?;
Ok(storage.release_blobs().await?)
}
pub async fn list_blobs(&self, universal_id: String) -> Result<Vec<ListedBlob>, UniffiError> {
let storage = self.ensure_storage(&universal_id)?;
) -> Result<()> {
Ok(
storage
self
.inner
.ensure_storage(universal_id)?
.delete_blob(key, permanently)
.await?,
)
}
pub async fn release_blobs(&self, universal_id: String) -> Result<()> {
Ok(
self
.inner
.ensure_storage(universal_id)?
.release_blobs()
.await?,
)
}
pub async fn list_blobs(&self, universal_id: String) -> Result<Vec<ListedBlob>> {
Ok(
self
.inner
.ensure_storage(universal_id)?
.list_blobs()
.await?
.into_iter()
@@ -382,10 +419,11 @@ impl DocStoragePool {
&self,
universal_id: String,
peer: String,
) -> Result<Vec<DocClock>, UniffiError> {
let storage = self.ensure_storage(&universal_id)?;
) -> Result<Vec<DocClock>> {
Ok(
storage
self
.inner
.ensure_storage(universal_id)?
.get_peer_remote_clocks(peer)
.await?
.into_iter()
@@ -399,9 +437,15 @@ impl DocStoragePool {
universal_id: String,
peer: String,
doc_id: String,
) -> Result<DocClock, UniffiError> {
let storage = self.ensure_storage(&universal_id)?;
Ok(storage.get_peer_remote_clock(peer, doc_id).await?.into())
) -> Result<DocClock> {
Ok(
self
.inner
.ensure_storage(universal_id)?
.get_peer_remote_clock(peer, doc_id)
.await?
.into(),
)
}
pub async fn set_peer_remote_clock(
@@ -409,15 +453,18 @@ impl DocStoragePool {
universal_id: String,
peer: String,
doc_id: String,
clock: SystemTime,
) -> Result<(), UniffiError> {
let storage = self.ensure_storage(&universal_id)?;
clock: i64,
) -> Result<()> {
Ok(
storage
self
.inner
.ensure_storage(universal_id)?
.set_peer_remote_clock(
peer,
doc_id,
chrono::DateTime::<chrono::Utc>::from(clock).naive_utc(),
chrono::DateTime::<chrono::Utc>::from_timestamp_millis(clock)
.ok_or(UniffiError::TimestampDecodingError)?
.naive_utc(),
)
.await?,
)
@@ -427,10 +474,11 @@ impl DocStoragePool {
&self,
universal_id: String,
peer: String,
) -> Result<Vec<DocClock>, UniffiError> {
let storage = self.ensure_storage(&universal_id)?;
) -> Result<Vec<DocClock>> {
Ok(
storage
self
.inner
.ensure_storage(universal_id)?
.get_peer_pulled_remote_clocks(peer)
.await?
.into_iter()
@@ -444,10 +492,11 @@ impl DocStoragePool {
universal_id: String,
peer: String,
doc_id: String,
) -> Result<DocClock, UniffiError> {
let storage = self.ensure_storage(&universal_id)?;
) -> Result<DocClock> {
Ok(
storage
self
.inner
.ensure_storage(universal_id)?
.get_peer_pulled_remote_clock(peer, doc_id)
.await?
.into(),
@@ -459,15 +508,18 @@ impl DocStoragePool {
universal_id: String,
peer: String,
doc_id: String,
clock: SystemTime,
) -> Result<(), UniffiError> {
let storage = self.ensure_storage(&universal_id)?;
clock: i64,
) -> Result<()> {
Ok(
storage
self
.inner
.ensure_storage(universal_id)?
.set_peer_pulled_remote_clock(
peer,
doc_id,
chrono::DateTime::<chrono::Utc>::from(clock).naive_utc(),
chrono::DateTime::<chrono::Utc>::from_timestamp_millis(clock)
.ok_or(UniffiError::TimestampDecodingError)?
.naive_utc(),
)
.await?,
)
@@ -477,10 +529,11 @@ impl DocStoragePool {
&self,
universal_id: String,
peer: String,
) -> Result<Vec<DocClock>, UniffiError> {
let storage = self.ensure_storage(&universal_id)?;
) -> Result<Vec<DocClock>> {
Ok(
storage
self
.inner
.ensure_storage(universal_id)?
.get_peer_pushed_clocks(peer)
.await?
.into_iter()
@@ -494,203 +547,30 @@ impl DocStoragePool {
universal_id: String,
peer: String,
doc_id: String,
clock: SystemTime,
) -> Result<(), UniffiError> {
let storage = self.ensure_storage(&universal_id)?;
clock: i64,
) -> Result<()> {
Ok(
storage
self
.inner
.ensure_storage(universal_id)?
.set_peer_pushed_clock(
peer,
doc_id,
chrono::DateTime::<chrono::Utc>::from(clock).naive_utc(),
chrono::DateTime::<chrono::Utc>::from_timestamp_millis(clock)
.ok_or(UniffiError::TimestampDecodingError)?
.naive_utc(),
)
.await?,
)
}
pub async fn clear_clocks(&self, universal_id: String) -> Result<(), UniffiError> {
let storage = self.ensure_storage(&universal_id)?;
Ok(storage.clear_clocks().await?)
}
}
#[uniffi::export]
pub fn get_db_path(peer: String, space_type: String, id: String) -> Result<String, UniffiError> {
let options = StorageOptions {
peer,
space_type: SpaceType::from_str(&space_type)?,
id,
};
utils::get_db_path(&options)
}
/// Kind of space a storage belongs to; appears in universal ids and in
/// the on-disk layout ("userspaces" / "workspaces" directories).
#[derive(Debug, PartialEq, Eq, Clone, Copy, Default)]
pub enum SpaceType {
#[default]
Userspace,
Workspace,
}
impl Display for SpaceType {
    /// Renders the lowercase token used in universal ids (`@type(...)`).
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let name = match self {
            SpaceType::Userspace => "userspace",
            SpaceType::Workspace => "workspace",
        };
        f.write_str(name)
    }
}
impl FromStr for SpaceType {
    type Err = UniffiError;

    /// Parses the lowercase space-type token; any other input is
    /// rejected with `InvalidSpaceType` carrying the offending string.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "userspace" => Ok(Self::Userspace),
            "workspace" => Ok(Self::Workspace),
            other => Err(UniffiError::InvalidSpaceType(other.to_string())),
        }
    }
}
/// Identity of a storage location, parsed from a universal id string.
pub struct StorageOptions {
// Peer identifier (e.g. a server origin); escaped before use in file paths.
pub peer: String,
pub space_type: SpaceType,
pub id: String,
}
/// Parses a universal id of the form
/// `@peer($peer);@type($type);@id($id);` into `StorageOptions`.
///
/// Each segment is `@key(value);`. A value is terminated by the first
/// `)` that is *immediately* followed by `;`, so values may themselves
/// contain `(`, `)` or `@` (see the unit tests in this file).
///
/// # Errors
/// Returns `InvalidUniversalId` for unknown keys, malformed segments,
/// or when `peer`/`id` end up empty; `InvalidSpaceType` bubbles up from
/// parsing the `type` value.
pub fn parse_universal_id(id: &str) -> Result<StorageOptions, UniffiError> {
let mut result = StorageOptions {
peer: String::new(),
space_type: SpaceType::default(),
id: String::new(),
};
// Hand-rolled scanner state: `key`/`value` accumulate the current
// segment, `is_in_value` flags whether we are inside `(...)`.
let mut key = String::new();
let mut value = String::new();
let mut is_in_value = false;
let mut chars = id.chars().peekable();
while let Some(ch) = chars.next() {
if is_in_value {
if ch == ')' && chars.peek() == Some(&';') {
// Store the collected value in the appropriate field
match key.as_str() {
"peer" => result.peer = value.clone(),
"type" => result.space_type = SpaceType::from_str(&value)?,
"id" => result.id = value.clone(),
_ => return Err(UniffiError::InvalidUniversalId(id.to_string())),
}
key.clear();
value.clear();
is_in_value = false;
chars.next(); // Skip the semicolon
continue;
}
value.push(ch);
continue;
}
if ch == '@' {
// Find the position of next '('
let mut temp_chars = chars.clone();
let mut found_paren = false;
let mut key_chars = Vec::new();
while let Some(next_ch) = temp_chars.next() {
if next_ch == '(' {
found_paren = true;
break;
}
key_chars.push(next_ch);
}
// Invalid format if no '(' found or it's immediately after '@'
if !found_paren || key_chars.is_empty() {
return Err(UniffiError::InvalidUniversalId(id.to_string()));
}
key = key_chars.into_iter().collect();
// Advance the original iterator to the position after the key
// NOTE(review): this advances by `key.len()` (bytes) + 1, which equals
// the char count only for ASCII keys. Fine for peer/type/id; confirm
// that non-ASCII keys are meant to fail via the unknown-key arm above.
for _ in 0..key.len() + 1 {
chars.next();
}
is_in_value = true;
} else {
// Outside a value, only '@' may start a new segment.
return Err(UniffiError::InvalidUniversalId(id.to_string()));
}
}
// Validate the parsed results
if result.peer.is_empty() || result.id.is_empty() {
return Err(UniffiError::InvalidUniversalId(id.to_string()));
}
Ok(result)
}
#[cfg(test)]
mod tests {
use super::*;
// ... existing test functions ...
// Round-trip: format a universal id via the Display impls, then parse
// it back and check every field survives.
#[test]
fn test_universal_id() {
let options = StorageOptions {
peer: "123".to_string(),
space_type: SpaceType::Workspace,
id: "456".to_string(),
};
let id = format!(
"@peer({});@type({});@id({});",
options.peer, options.space_type, options.id
);
let result = parse_universal_id(&id).unwrap();
assert_eq!(result.peer, "123");
assert_eq!(result.space_type, SpaceType::Workspace);
assert_eq!(result.id, "456");
}
// Inputs that must parse, including values containing ':', '@' and
// unbalanced '(' — a value only ends at the first ");" sequence.
#[test]
fn test_parse_universal_id_valid_cases() {
let testcases = vec![
"@peer(123);@type(userspace);@id(456);",
"@peer(123);@type(workspace);@id(456);",
"@peer(https://app.affine.pro);@type(userspace);@id(hello:world);",
"@peer(@name);@type(userspace);@id(@id);",
"@peer(@peer(name);@type(userspace);@id(@id);",
];
for id in testcases {
let result = parse_universal_id(id);
assert!(result.is_ok(), "Failed to parse: {}", id);
let parsed = result.unwrap();
assert!(!parsed.peer.is_empty());
assert!(!parsed.id.is_empty());
}
}
#[test]
fn test_parse_universal_id_invalid_cases() {
let testcases = vec![
// invalid space type
"@peer(123);@type(anyspace);@id(456);",
// invalid peer
"@peer(@peer(name););@type(userspace);@id(@id);",
];
for id in testcases {
let result = parse_universal_id(id);
assert!(result.is_err(), "Should have failed to parse: {}", id);
match result {
Err(UniffiError::InvalidUniversalId(_)) => (),
Err(UniffiError::InvalidSpaceType(_)) => (),
_ => panic!("Expected InvalidUniversalId error for: {}", id),
}
}
pub async fn clear_clocks(&self, universal_id: String) -> Result<()> {
Ok(
self
.inner
.ensure_storage(universal_id)?
.clear_clocks()
.await?,
)
}
}

View File

@@ -1,141 +0,0 @@
use std::fs;
#[cfg(not(any(target_os = "ios", target_os = "macos")))]
use homedir::my_home;
#[cfg(any(target_os = "ios", target_os = "macos"))]
use objc2::rc::autoreleasepool;
#[cfg(any(target_os = "ios", target_os = "macos"))]
use objc2_foundation::{NSFileManager, NSSearchPathDirectory, NSSearchPathDomainMask, NSString};
use crate::{error::UniffiError, SpaceType, StorageOptions};
const DB_FILE_NAME: &str = "storage.db";
/// Builds (and creates on disk) the per-space database path under the
/// user's Documents directory on iOS/macOS:
/// `Documents/.affine/<userspaces|workspaces>/<escaped peer>/<id>/storage.db`.
///
/// # Errors
/// Fails when the Documents directory cannot be resolved, when NSURL
/// concatenation fails, or when the directory cannot be created.
#[cfg(any(target_os = "ios", target_os = "macos"))]
pub(crate) fn get_db_path(options: &StorageOptions) -> Result<String, UniffiError> {
// NOTE(review): the `unsafe` blocks below are Objective-C calls via objc2;
// consider adding SAFETY comments stating the invariants relied upon.
let file_manager = unsafe { NSFileManager::defaultManager() };
// equivalent to Swift:
// ```swift
// guard let documentsPath = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask).first else {
// return nil
// }
// ```
let urls = unsafe {
file_manager.URLsForDirectory_inDomains(
NSSearchPathDirectory::NSDocumentDirectory,
NSSearchPathDomainMask::NSUserDomainMask,
)
};
let document_directory = urls
.first()
.ok_or(UniffiError::GetUserDocumentDirectoryFailed)?;
// Append .affine/<spaces_dir>/<escaped peer>/<id> to the Documents URL.
let affine_dir = unsafe {
let spaces_dir = match options.space_type {
SpaceType::Userspace => "userspaces",
SpaceType::Workspace => "workspaces",
};
let escaped_peer = escape_filename(&options.peer);
document_directory
.URLByAppendingPathComponent(&NSString::from_str(".affine"))
.and_then(|url| url.URLByAppendingPathComponent(&NSString::from_str(spaces_dir)))
.and_then(|url| url.URLByAppendingPathComponent(&NSString::from_str(&escaped_peer)))
.and_then(|url| url.URLByAppendingPathComponent(&NSString::from_str(&options.id)))
}
.ok_or(UniffiError::ConcatSpaceDirFailed(format!(
"{}:{}:{}",
options.peer, options.space_type, options.id
)))?;
// Convert the NSURL into an owned Rust String inside an autorelease pool.
let affine_dir_str = autoreleasepool(|pool| {
Ok::<String, UniffiError>(
unsafe { affine_dir.path() }
.ok_or(UniffiError::GetUserDocumentDirectoryFailed)?
.as_str(pool)
.to_string(),
)
})?;
// Replicate Swift's appending ".affine" subdir, creating it if necessary
fs::create_dir_all(&affine_dir_str)
.map_err(|_| UniffiError::CreateAffineDirFailed(affine_dir_str.clone()))?;
// Finally append the fixed database file name and stringify the result.
let db_path = autoreleasepool(|pool| {
let db_path =
unsafe { affine_dir.URLByAppendingPathComponent(&NSString::from_str(DB_FILE_NAME)) }.ok_or(
UniffiError::ConcatSpaceDirFailed(format!(
"{}:{}:{}/{DB_FILE_NAME}",
options.peer, options.space_type, options.id
)),
)?;
Ok::<String, UniffiError>(
unsafe { db_path.path() }
.ok_or(UniffiError::GetUserDocumentDirectoryFailed)?
.as_str(pool)
.to_string(),
)
})?;
Ok(db_path)
}
/// Builds (and creates on disk) the per-space database path under the
/// user's home directory on non-Apple targets:
/// `~/.affine/<userspaces|workspaces>/<escaped peer>/<id>/storage.db`.
///
/// # Errors
/// Fails when the home directory cannot be determined, the directory
/// cannot be created, or the final path is not valid UTF-8.
#[cfg(not(any(target_os = "ios", target_os = "macos")))]
pub(crate) fn get_db_path(options: &StorageOptions) -> Result<String, UniffiError> {
// `my_home()` errors AND the `None` case both map to the same failure.
let home_dir = my_home()
.map_err(|_| UniffiError::GetUserDocumentDirectoryFailed)?
.ok_or(UniffiError::GetUserDocumentDirectoryFailed)?;
let spaces_dir = match options.space_type {
SpaceType::Userspace => "userspaces",
SpaceType::Workspace => "workspaces",
};
let escaped_peer = escape_filename(&options.peer);
let db_path = home_dir
.join(".affine")
.join(spaces_dir)
.join(&escaped_peer)
.join(&options.id);
// Ensure the whole directory chain exists before handing the path out.
fs::create_dir_all(&db_path)
.map_err(|_| UniffiError::CreateAffineDirFailed(db_path.to_string_lossy().to_string()))?;
db_path
.join(DB_FILE_NAME)
.to_str()
.map(|p| p.to_owned())
.ok_or(UniffiError::GetUserDocumentDirectoryFailed)
}
/// Sanitizes `name` for use as a single path component.
///
/// Behavior (identical to the previous implementation):
/// 1. every character from the special set is replaced with `_`,
/// 2. consecutive underscores (including pre-existing ones) collapse
///    into a single `_`,
/// 3. a trailing `_` is stripped, while a leading `_` survives.
fn escape_filename(name: &str) -> String {
    const SPECIAL: &str = "\\/!@#$%^&*()+~`\"':;,?<>|";
    let mut escaped = String::with_capacity(name.len());
    let mut prev_was_underscore = false;
    for ch in name.chars() {
        let mapped = if SPECIAL.contains(ch) { '_' } else { ch };
        if mapped == '_' {
            // Collapse runs of underscores into a single one.
            if !prev_was_underscore {
                escaped.push('_');
            }
            prev_was_underscore = true;
        } else {
            escaped.push(mapped);
            prev_was_underscore = false;
        }
    }
    // Drop the trailing underscore left by a special char at the end.
    escaped.trim_end_matches('_').to_string()
}
#[cfg(all(test, any(target_os = "ios", target_os = "macos")))]
mod tests {
use super::*;
// Covers replacement, run-collapsing, and leading/trailing underscore rules.
#[test]
fn test_escape_filename() {
assert_eq!(escape_filename("hello@world"), "hello_world");
assert_eq!(escape_filename("test!!file"), "test_file");
assert_eq!(escape_filename("_test_"), "_test"); // Leading underscore preserved
assert_eq!(escape_filename("multi___under"), "multi_under");
assert_eq!(escape_filename("path/to\\file"), "path_to_file");
}
}