chore: add test

DarkSky
2026-02-20 16:06:03 +08:00
parent a0ec463ec2
commit de7876d4a9
8 changed files with 484 additions and 118 deletions

Cargo.lock (generated)
View File

@@ -116,6 +116,7 @@ dependencies = [
"objc2-foundation",
"sqlx",
"thiserror 2.0.17",
"tokio",
"uniffi",
]

View File

@@ -13,6 +13,7 @@
"@capacitor/core": "^7.0.0"
},
"devDependencies": {
"typescript": "^5.7.2"
"typescript": "^5.7.2",
"vitest": "^3.2.4"
}
}

View File

@@ -0,0 +1,80 @@
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
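// vi.hoisted runs before the vi.mock factories below, which vitest hoists
// above the static imports, so the mock functions must be created here first.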
const { mockBase64ToUint8Array, mockConvertFileSrc } = vi.hoisted(() => ({
mockBase64ToUint8Array: vi.fn((data: string) =>
Uint8Array.from(data.split('').map(char => char.charCodeAt(0)))
),
mockConvertFileSrc: vi.fn((path: string) => `capacitor://localhost${path}`),
}));
vi.mock('@affine/core/modules/workspace-engine', () => ({
base64ToUint8Array: mockBase64ToUint8Array,
}));
vi.mock('@capacitor/core', () => ({
Capacitor: {
convertFileSrc: mockConvertFileSrc,
},
}));
import { decodePayload, MOBILE_DOC_FILE_PREFIX } from './payload';
describe('decodePayload', () => {
const fetchMock = vi.fn<typeof fetch>();
beforeEach(() => {
fetchMock.mockReset();
mockBase64ToUint8Array.mockClear();
mockConvertFileSrc.mockClear();
vi.stubGlobal('fetch', fetchMock);
});
afterEach(() => {
vi.unstubAllGlobals();
});
it('decodes inline base64 payloads without file IO', async () => {
const decoded = await decodePayload('ZGF0YQ==', MOBILE_DOC_FILE_PREFIX);
expect(decoded).toEqual(Uint8Array.from([90, 71, 70, 48, 89, 81, 61, 61]));
expect(mockBase64ToUint8Array).toHaveBeenCalledWith('ZGF0YQ==');
expect(fetchMock).not.toHaveBeenCalled();
});
it('reads valid cache file tokens', async () => {
const expected = Uint8Array.from([1, 2, 3, 4]);
fetchMock.mockResolvedValue({
ok: true,
status: 200,
arrayBuffer: async () => expected.buffer,
} as Response);
const path =
'/var/mobile/Containers/Data/Application/abc/Library/Caches/nbstore-blob-cache/0123456789abcdef/fedcba9876543210.docbin';
const decoded = await decodePayload(
`${MOBILE_DOC_FILE_PREFIX}${path}`,
MOBILE_DOC_FILE_PREFIX
);
expect(decoded).toEqual(expected);
expect(mockConvertFileSrc).toHaveBeenCalledWith(`file://${path}`);
expect(fetchMock).toHaveBeenCalledTimes(1);
});
it('rejects suffix-only paths outside expected cache shape', async () => {
const path =
'/attacker/nbstore-blob-cache/0123456789abcdef/fedcba9876543210.docbin';
await expect(
decodePayload(`${MOBILE_DOC_FILE_PREFIX}${path}`, MOBILE_DOC_FILE_PREFIX)
).rejects.toThrow('Refusing to read mobile payload outside cache dir');
expect(fetchMock).not.toHaveBeenCalled();
});
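// '%2E%2E' decodes to '..', so traversal must still be rejected after percent-decoding.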
it('rejects encoded traversal segments', async () => {
const path =
'/var/mobile/Containers/Data/Application/abc/Library/Caches/nbstore-blob-cache/%2E%2E/fedcba9876543210.docbin';
await expect(
decodePayload(`${MOBILE_DOC_FILE_PREFIX}${path}`, MOBILE_DOC_FILE_PREFIX)
).rejects.toThrow('Refusing to read mobile payload outside cache dir');
expect(fetchMock).not.toHaveBeenCalled();
});
});

View File

@@ -3,8 +3,10 @@ import { Capacitor } from '@capacitor/core';
export const MOBILE_BLOB_FILE_PREFIX = '__AFFINE_BLOB_FILE__:';
export const MOBILE_DOC_FILE_PREFIX = '__AFFINE_DOC_FILE__:';
-const MOBILE_PAYLOAD_CACHE_PATH_PATTERN =
-/\/nbstore-blob-cache\/[0-9a-f]{16}\/[0-9a-f]{16}\.(blob|docbin)$/;
const MOBILE_PAYLOAD_CACHE_DIR = 'nbstore-blob-cache';
const MOBILE_PAYLOAD_BUCKET_PATTERN = /^[0-9a-f]{16}$/;
const MOBILE_PAYLOAD_FILE_PATTERN = /^[0-9a-f]{16}\.(blob|docbin)$/;
const MOBILE_PAYLOAD_PARENT_DIRS = new Set(['cache', 'Caches', 'T', 'tmp']);
function normalizeTokenFilePath(rawPath: string): string {
const trimmedPath = rawPath.trim();
@@ -20,15 +22,55 @@ function normalizeTokenFilePath(rawPath: string): string {
function assertMobileCachePath(fileUrl: string): void {
let pathname: string;
try {
-pathname = decodeURIComponent(new URL(fileUrl).pathname);
const parsedUrl = new URL(fileUrl);
if (parsedUrl.protocol !== 'file:') {
throw new Error('unexpected protocol');
}
pathname = parsedUrl.pathname;
} catch {
throw new Error('Invalid mobile payload token: malformed file URL');
}
let decodedSegments: string[];
try {
decodedSegments = pathname
.split('/')
.filter(Boolean)
.map(segment => {
const decoded = decodeURIComponent(segment);
if (
!decoded ||
decoded === '.' ||
decoded === '..' ||
decoded.includes('/') ||
decoded.includes('\\')
) {
throw new Error('path traversal');
}
return decoded;
});
} catch {
throw new Error(
`Refusing to read mobile payload outside cache dir: ${fileUrl}`
);
}
const fileName = decodedSegments.at(-1);
const bucket = decodedSegments.at(-2);
const cacheDir = decodedSegments.at(-3);
const parentDir = decodedSegments.at(-4);
const cacheParent = decodedSegments.at(-5);
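// Expected shape: .../<parent>/nbstore-blob-cache/<16-hex bucket>/<16-hex name>.(blob|docbin),
// where <parent> is a known OS cache dir and 'Caches' must sit under 'Library'.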
if (
-pathname.includes('/../') ||
-pathname.includes('/./') ||
-!MOBILE_PAYLOAD_CACHE_PATH_PATTERN.test(pathname)
!fileName ||
!bucket ||
!cacheDir ||
!parentDir ||
cacheDir !== MOBILE_PAYLOAD_CACHE_DIR ||
!MOBILE_PAYLOAD_BUCKET_PATTERN.test(bucket) ||
!MOBILE_PAYLOAD_FILE_PATTERN.test(fileName) ||
!MOBILE_PAYLOAD_PARENT_DIRS.has(parentDir) ||
(parentDir === 'Caches' && cacheParent !== 'Library')
) {
throw new Error(
`Refusing to read mobile payload outside cache dir: ${fileUrl}`

View File

@@ -23,6 +23,7 @@ base64-simd = { workspace = true }
chrono = { workspace = true }
sqlx = { workspace = true }
thiserror = { workspace = true }
tokio = { workspace = true }
uniffi = { workspace = true, features = ["cli", "tokio"] }
[target.'cfg(any(target_os = "ios", target_os = "macos"))'.dependencies]
@@ -45,4 +46,5 @@ lru = { workspace = true }
uniffi = { workspace = true, features = ["build"] }
[dev-dependencies]
-lru = { workspace = true }
lru = { workspace = true }
tokio = { workspace = true, features = ["full"] }

View File

@@ -1,9 +1,12 @@
#[cfg(any(target_os = "android", target_os = "ios", test))]
use std::sync::Arc;
use affine_common::hashcash::Stamp;
use affine_nbstore::{Data, pool::SqliteDocStoragePool};
#[cfg(any(target_os = "android", target_os = "ios", test))]
#[cfg_attr(all(test, not(any(target_os = "android", target_os = "ios"))), allow(dead_code))]
pub(crate) mod mobile_blob_cache;
-#[cfg(any(target_os = "android", target_os = "ios"))]
#[cfg(any(target_os = "android", target_os = "ios", test))]
use mobile_blob_cache::{MOBILE_BLOB_INLINE_THRESHOLD_BYTES, MobileBlobCache, is_mobile_binary_file_token};
#[derive(uniffi::Error, thiserror::Error, Debug)]
@@ -103,48 +106,6 @@ impl TryFrom<DocUpdate> for affine_nbstore::DocUpdate {
}
}
-#[cfg(test)]
-mod tests {
-use super::*;
-#[test]
-fn doc_update_roundtrip_base64() {
-let timestamp = chrono::DateTime::<chrono::Utc>::from_timestamp_millis(1_700_000_000_000)
-.unwrap()
-.naive_utc();
-let original = affine_nbstore::DocUpdate {
-doc_id: "doc-1".to_string(),
-timestamp,
-bin: vec![1, 2, 3, 4, 5],
-};
-let encoded: DocUpdate = original.into();
-let decoded = affine_nbstore::DocUpdate::try_from(encoded).unwrap();
-assert_eq!(decoded.doc_id, "doc-1");
-assert_eq!(decoded.timestamp, timestamp);
-assert_eq!(decoded.bin, vec![1, 2, 3, 4, 5]);
-}
-#[test]
-fn doc_update_rejects_invalid_base64() {
-let update = DocUpdate {
-doc_id: "doc-2".to_string(),
-timestamp: 0,
-bin: "not-base64!!".to_string(),
-};
-let err = match affine_nbstore::DocUpdate::try_from(update) {
-Ok(_) => panic!("expected base64 decode error"),
-Err(err) => err,
-};
-match err {
-UniffiError::Base64DecodingError(_) => {}
-other => panic!("unexpected error: {other:?}"),
-}
-}
-}
#[derive(uniffi::Record)]
pub struct DocClock {
pub doc_id: String,
@@ -316,44 +277,68 @@ impl From<affine_nbstore::indexer::NativeMatch> for MatchRange {
#[derive(uniffi::Object)]
pub struct DocStoragePool {
inner: SqliteDocStoragePool,
-#[cfg(any(target_os = "android", target_os = "ios"))]
-mobile_blob_cache: MobileBlobCache,
#[cfg(any(target_os = "android", target_os = "ios", test))]
mobile_blob_cache: Arc<MobileBlobCache>,
}
#[uniffi::export]
pub fn new_doc_storage_pool() -> DocStoragePool {
DocStoragePool {
inner: Default::default(),
-#[cfg(any(target_os = "android", target_os = "ios"))]
-mobile_blob_cache: MobileBlobCache::new(),
#[cfg(any(target_os = "android", target_os = "ios", test))]
mobile_blob_cache: Arc::new(MobileBlobCache::new()),
}
}
impl DocStoragePool {
#[cfg(any(target_os = "android", target_os = "ios", test))]
async fn run_mobile_cache_io<T, F>(&self, task: F, context: &'static str) -> Result<T>
where
T: Send + 'static,
F: FnOnce(Arc<MobileBlobCache>) -> std::io::Result<T> + Send + 'static,
{
let cache = Arc::clone(&self.mobile_blob_cache);
tokio::task::spawn_blocking(move || task(cache))
.await
.map_err(|err| UniffiError::Err(format!("{context}: {err}")))?
.map_err(|err| UniffiError::Err(format!("{context}: {err}")))
}
}
#[uniffi::export(async_runtime = "tokio")]
impl DocStoragePool {
-fn decode_mobile_data(&self, universal_id: &str, data: &str) -> Result<Vec<u8>> {
-#[cfg(any(target_os = "android", target_os = "ios"))]
async fn decode_mobile_data(&self, universal_id: &str, data: &str) -> Result<Vec<u8>> {
#[cfg(any(target_os = "android", target_os = "ios", test))]
if is_mobile_binary_file_token(data) {
let universal_id = universal_id.to_string();
let data = data.to_string();
return self
-.mobile_blob_cache
-.read_binary_file(universal_id, data)
-.map_err(|err| UniffiError::Err(format!("Failed to read mobile file token: {err}")));
.run_mobile_cache_io(
move |cache| cache.read_binary_file(&universal_id, &data),
"Failed to read mobile file token",
)
.await;
}
-#[cfg(not(any(target_os = "android", target_os = "ios")))]
#[cfg(not(any(target_os = "android", target_os = "ios", test)))]
let _ = universal_id;
decode_base64_data(data)
}
-fn encode_doc_data(&self, universal_id: &str, doc_id: &str, timestamp: i64, data: &[u8]) -> Result<String> {
-#[cfg(any(target_os = "android", target_os = "ios"))]
async fn encode_doc_data(&self, universal_id: &str, doc_id: &str, timestamp: i64, data: &[u8]) -> Result<String> {
#[cfg(any(target_os = "android", target_os = "ios", test))]
if data.len() >= MOBILE_BLOB_INLINE_THRESHOLD_BYTES {
let universal_id = universal_id.to_string();
let doc_id = doc_id.to_string();
let data = data.to_vec();
return self
-.mobile_blob_cache
-.cache_doc_bin(universal_id, doc_id, timestamp, data)
-.map_err(|err| UniffiError::Err(format!("Failed to cache doc file: {err}")));
.run_mobile_cache_io(
move |cache| cache.cache_doc_bin(&universal_id, &doc_id, timestamp, &data),
"Failed to cache doc file",
)
.await;
}
-#[cfg(not(any(target_os = "android", target_os = "ios")))]
#[cfg(not(any(target_os = "android", target_os = "ios", test)))]
let _ = (universal_id, doc_id, timestamp);
Ok(base64_simd::STANDARD.encode_to_string(data))
@@ -361,18 +346,52 @@ impl DocStoragePool {
/// Initialize the database and run migrations.
pub async fn connect(&self, universal_id: String, path: String) -> Result<()> {
-self.inner.connect(universal_id.clone(), path.clone()).await?;
-#[cfg(any(target_os = "android", target_os = "ios"))]
-self
-.mobile_blob_cache
-.register_workspace(&universal_id, &path)
-.map_err(|err| UniffiError::Err(format!("Failed to initialize mobile blob cache: {err}")))?;
#[cfg(any(target_os = "android", target_os = "ios", test))]
{
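// Register the cache dir before connecting so a failed connect below can roll it back.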
let universal_id_for_cache = universal_id.clone();
let path_for_cache = path.clone();
self
.run_mobile_cache_io(
move |cache| cache.register_workspace(&universal_id_for_cache, &path_for_cache),
"Failed to initialize mobile blob cache",
)
.await?;
}
if let Err(err) = self.inner.connect(universal_id.clone(), path).await {
#[cfg(any(target_os = "android", target_os = "ios", test))]
{
let universal_id_for_cache = universal_id.clone();
let _ = self
.run_mobile_cache_io(
move |cache| {
cache.invalidate_workspace(&universal_id_for_cache);
Ok(())
},
"Failed to rollback mobile blob cache workspace",
)
.await;
}
return Err(err.into());
}
Ok(())
}
pub async fn disconnect(&self, universal_id: String) -> Result<()> {
-#[cfg(any(target_os = "android", target_os = "ios"))]
-self.mobile_blob_cache.invalidate_workspace(&universal_id);
#[cfg(any(target_os = "android", target_os = "ios", test))]
{
let universal_id_for_cache = universal_id.clone();
self
.run_mobile_cache_io(
move |cache| {
cache.invalidate_workspace(&universal_id_for_cache);
Ok(())
},
"Failed to clear mobile blob cache workspace",
)
.await?;
}
self.inner.disconnect(universal_id).await?;
Ok(())
}
@@ -382,7 +401,7 @@ impl DocStoragePool {
}
pub async fn push_update(&self, universal_id: String, doc_id: String, update: String) -> Result<i64> {
-let decoded_update = self.decode_mobile_data(&universal_id, &update)?;
let decoded_update = self.decode_mobile_data(&universal_id, &update).await?;
Ok(
self
.inner
@@ -407,7 +426,9 @@ impl DocStoragePool {
};
let timestamp = record.timestamp.and_utc().timestamp_millis();
-let bin = self.encode_doc_data(&universal_id, &record.doc_id, timestamp, &record.bin)?;
let bin = self
.encode_doc_data(&universal_id, &record.doc_id, timestamp, &record.bin)
.await?;
Ok(Some(DocRecord {
doc_id: record.doc_id,
bin,
@@ -418,7 +439,7 @@ impl DocStoragePool {
pub async fn set_doc_snapshot(&self, universal_id: String, snapshot: DocRecord) -> Result<bool> {
let doc_record = affine_nbstore::DocRecord {
doc_id: snapshot.doc_id,
-bin: Into::<Data>::into(self.decode_mobile_data(&universal_id, &snapshot.bin)?),
bin: Into::<Data>::into(self.decode_mobile_data(&universal_id, &snapshot.bin).await?),
timestamp: chrono::DateTime::<chrono::Utc>::from_timestamp_millis(snapshot.timestamp)
.ok_or(UniffiError::TimestampDecodingError)?
.naive_utc(),
@@ -427,23 +448,26 @@ impl DocStoragePool {
}
pub async fn get_doc_updates(&self, universal_id: String, doc_id: String) -> Result<Vec<DocUpdate>> {
-self
let updates = self
.inner
.get(universal_id.clone())
.await?
.get_doc_updates(doc_id)
-.await?
-.into_iter()
-.map(|update| {
-let timestamp = update.timestamp.and_utc().timestamp_millis();
-let bin = self.encode_doc_data(&universal_id, &update.doc_id, timestamp, &update.bin)?;
-Ok(DocUpdate {
-doc_id: update.doc_id,
-timestamp,
-bin,
-})
-})
-.collect()
.await?;
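// encode_doc_data is async now, so convert sequentially instead of mapping
// through a closure and collecting.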
let mut converted = Vec::with_capacity(updates.len());
for update in updates {
let timestamp = update.timestamp.and_utc().timestamp_millis();
let bin = self
.encode_doc_data(&universal_id, &update.doc_id, timestamp, &update.bin)
.await?;
converted.push(DocUpdate {
doc_id: update.doc_id,
timestamp,
bin,
});
}
Ok(converted)
}
pub async fn mark_updates_merged(&self, universal_id: String, doc_id: String, updates: Vec<i64>) -> Result<u32> {
@@ -506,9 +530,17 @@ impl DocStoragePool {
}
pub async fn get_blob(&self, universal_id: String, key: String) -> Result<Option<Blob>> {
-#[cfg(any(target_os = "android", target_os = "ios"))]
#[cfg(any(target_os = "android", target_os = "ios", test))]
{
-if let Some(blob) = self.mobile_blob_cache.get_blob(&universal_id, &key) {
let universal_id_for_cache = universal_id.clone();
let key_for_cache = key.clone();
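// Serve from the on-disk mobile cache first; fall back to SQLite on a miss.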
if let Some(blob) = self
.run_mobile_cache_io(
move |cache| Ok(cache.get_blob(&universal_id_for_cache, &key_for_cache)),
"Failed to read mobile blob cache",
)
.await?
{
return Ok(Some(blob));
}
@@ -526,30 +558,43 @@ impl DocStoragePool {
return Ok(Some(blob.into()));
}
let universal_id_for_cache = universal_id.clone();
return self
-.mobile_blob_cache
-.cache_blob(&universal_id, &blob)
-.map(Some)
-.map_err(|err| UniffiError::Err(format!("Failed to cache blob file: {err}")));
.run_mobile_cache_io(
move |cache| cache.cache_blob(&universal_id_for_cache, &blob).map(Some),
"Failed to cache blob file",
)
.await;
}
-#[cfg(not(any(target_os = "android", target_os = "ios")))]
#[cfg(not(any(target_os = "android", target_os = "ios", test)))]
{
Ok(self.inner.get(universal_id).await?.get_blob(key).await?.map(Into::into))
}
}
pub async fn set_blob(&self, universal_id: String, blob: SetBlob) -> Result<()> {
-#[cfg(any(target_os = "android", target_os = "ios"))]
#[cfg(any(target_os = "android", target_os = "ios", test))]
let key = blob.key.clone();
let blob = affine_nbstore::SetBlob {
key: blob.key,
-data: Into::<Data>::into(self.decode_mobile_data(&universal_id, &blob.data)?),
data: Into::<Data>::into(self.decode_mobile_data(&universal_id, &blob.data).await?),
mime: blob.mime,
};
self.inner.get(universal_id.clone()).await?.set_blob(blob).await?;
-#[cfg(any(target_os = "android", target_os = "ios"))]
-self.mobile_blob_cache.invalidate_blob(&universal_id, &key);
#[cfg(any(target_os = "android", target_os = "ios", test))]
{
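// Drop any stale file-cache entry for this key now that SQLite holds the new blob.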
let universal_id_for_cache = universal_id;
self
.run_mobile_cache_io(
move |cache| {
cache.invalidate_blob(&universal_id_for_cache, &key);
Ok(())
},
"Failed to invalidate mobile blob cache entry",
)
.await?;
}
Ok(())
}
@@ -560,15 +605,37 @@ impl DocStoragePool {
.await?
.delete_blob(key.clone(), permanently)
.await?;
-#[cfg(any(target_os = "android", target_os = "ios"))]
-self.mobile_blob_cache.invalidate_blob(&universal_id, &key);
#[cfg(any(target_os = "android", target_os = "ios", test))]
{
let universal_id_for_cache = universal_id;
self
.run_mobile_cache_io(
move |cache| {
cache.invalidate_blob(&universal_id_for_cache, &key);
Ok(())
},
"Failed to invalidate mobile blob cache entry",
)
.await?;
}
Ok(())
}
pub async fn release_blobs(&self, universal_id: String) -> Result<()> {
self.inner.get(universal_id.clone()).await?.release_blobs().await?;
-#[cfg(any(target_os = "android", target_os = "ios"))]
-self.mobile_blob_cache.invalidate_workspace(&universal_id);
#[cfg(any(target_os = "android", target_os = "ios", test))]
{
let universal_id_for_cache = universal_id;
self
.run_mobile_cache_io(
move |cache| {
cache.clear_workspace_cache(&universal_id_for_cache);
Ok(())
},
"Failed to clear mobile blob cache workspace",
)
.await?;
}
Ok(())
}
@@ -887,3 +954,119 @@ impl DocStoragePool {
Ok(affine_nbstore::storage::SqliteDocStorage::index_version())
}
}
#[cfg(test)]
mod tests {
use std::{
fs,
sync::atomic::{AtomicU64, Ordering},
time::{SystemTime, UNIX_EPOCH},
};
use super::*;
static TEST_COUNTER: AtomicU64 = AtomicU64::new(0);
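// Combining a process-wide counter with wall-clock nanos keeps ids unique
// even when tests run in parallel.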
fn unique_id(prefix: &str) -> String {
let counter = TEST_COUNTER.fetch_add(1, Ordering::Relaxed);
let now = SystemTime::now()
.duration_since(UNIX_EPOCH)
.expect("system clock before unix epoch")
.as_nanos();
format!("{prefix}-{now}-{counter}")
}
#[test]
fn doc_update_roundtrip_base64() {
let timestamp = chrono::DateTime::<chrono::Utc>::from_timestamp_millis(1_700_000_000_000)
.unwrap()
.naive_utc();
let original = affine_nbstore::DocUpdate {
doc_id: "doc-1".to_string(),
timestamp,
bin: vec![1, 2, 3, 4, 5],
};
let encoded: DocUpdate = original.into();
let decoded = affine_nbstore::DocUpdate::try_from(encoded).unwrap();
assert_eq!(decoded.doc_id, "doc-1");
assert_eq!(decoded.timestamp, timestamp);
assert_eq!(decoded.bin, vec![1, 2, 3, 4, 5]);
}
#[test]
fn doc_update_rejects_invalid_base64() {
let update = DocUpdate {
doc_id: "doc-2".to_string(),
timestamp: 0,
bin: "not-base64!!".to_string(),
};
let err = match affine_nbstore::DocUpdate::try_from(update) {
Ok(_) => panic!("expected base64 decode error"),
Err(err) => err,
};
match err {
UniffiError::Base64DecodingError(_) => {}
other => panic!("unexpected error: {other:?}"),
}
}
#[tokio::test]
async fn encode_large_doc_payload_returns_file_token_and_decodes_back() {
let pool = new_doc_storage_pool();
let universal_id = unique_id("mobile-doc-token");
pool
.connect(universal_id.clone(), ":memory:".to_string())
.await
.expect("connect should succeed");
let data = vec![7_u8; MOBILE_BLOB_INLINE_THRESHOLD_BYTES + 16];
let encoded = pool
.encode_doc_data(&universal_id, "doc", 42, &data)
.await
.expect("encode should succeed");
assert!(encoded.starts_with(mobile_blob_cache::MOBILE_DOC_FILE_PREFIX));
let decoded = pool
.decode_mobile_data(&universal_id, &encoded)
.await
.expect("decode should succeed");
assert_eq!(decoded, data);
pool.disconnect(universal_id).await.expect("disconnect should succeed");
}
#[tokio::test]
async fn decode_mobile_data_rejects_out_of_workspace_path() {
let pool = new_doc_storage_pool();
let universal_id = unique_id("mobile-doc-outside");
pool
.connect(universal_id.clone(), ":memory:".to_string())
.await
.expect("connect should succeed");
let outside_dir = std::env::temp_dir().join(unique_id("mobile-doc-outside-dir"));
fs::create_dir_all(&outside_dir).expect("create outside dir");
let outside_file = outside_dir.join("1234567890abcdef.blob");
fs::write(&outside_file, b"outside").expect("write outside file");
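// The token below is well-formed but points outside the workspace cache dir.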
let token = format!(
"{}{}",
mobile_blob_cache::MOBILE_BLOB_FILE_PREFIX,
outside_file.display()
);
let err = pool
.decode_mobile_data(&universal_id, &token)
.await
.expect_err("decode should reject out-of-workspace token");
let UniffiError::Err(message) = err else {
panic!("unexpected error kind");
};
assert!(message.contains("outside the workspace cache directory"));
pool.disconnect(universal_id).await.expect("disconnect should succeed");
let _ = fs::remove_dir_all(outside_dir);
}
}

View File

@@ -146,7 +146,39 @@ impl MobileBlobCache {
}
}
pub(crate) fn clear_workspace_cache(&self, universal_id: &str) {
self.evict_workspace_entries(universal_id);
let cache_dir = {
self
.workspace_dirs
.lock()
.expect("workspace cache lock poisoned")
.get(universal_id)
.cloned()
};
if let Some(cache_dir) = cache_dir {
let _ = Self::cleanup_cache_dir(&cache_dir);
}
}
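// Unlike clear_workspace_cache above, invalidate_workspace also drops the
// workspace registration and removes the cache directory itself.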
pub(crate) fn invalidate_workspace(&self, universal_id: &str) {
self.evict_workspace_entries(universal_id);
if let Some(cache_dir) = self
.workspace_dirs
.lock()
.expect("workspace cache lock poisoned")
.remove(universal_id)
{
let _ = std::fs::remove_dir_all(&cache_dir);
if let Some(parent) = cache_dir.parent() {
let _ = std::fs::remove_dir(parent);
}
}
}
fn evict_workspace_entries(&self, universal_id: &str) {
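// Cache keys start with "<universal_id>\u{1f}", so this prefix matches every
// entry owned by the workspace.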
let prefix = format!("{universal_id}\u{1f}");
let mut blob_entries = self.blob_entries.lock().expect("blob cache lock poisoned");
@@ -171,18 +203,6 @@ impl MobileBlobCache {
Self::delete_blob_file(&path);
}
}
-if let Some(cache_dir) = self
-.workspace_dirs
-.lock()
-.expect("workspace cache lock poisoned")
-.remove(universal_id)
-{
-let _ = std::fs::remove_dir_all(&cache_dir);
-if let Some(parent) = cache_dir.parent() {
-let _ = std::fs::remove_dir(parent);
-}
-}
}
fn cache_key(universal_id: &str, key: &str) -> String {
@@ -552,6 +572,42 @@ mod tests {
);
}
#[test]
fn clear_workspace_cache_removes_cached_files_and_keeps_workspace_dir() {
let (cache, universal_id, workspace) = setup_cache("clear");
let cached_blob = cache
.cache_blob(&universal_id, &build_blob("blob", vec![1, 2, 3]))
.expect("cache blob");
let blob_path = token_path(&cached_blob.data);
let doc_token = cache
.cache_doc_bin(&universal_id, "doc", 123, b"doc-bytes")
.expect("cache doc bin");
let doc_path = token_path(&doc_token);
assert!(blob_path.exists());
assert!(doc_path.exists());
cache.clear_workspace_cache(&universal_id);
assert!(!blob_path.exists());
assert!(!doc_path.exists());
assert!(workspace.exists());
assert!(
cache
.workspace_dirs
.lock()
.expect("workspace cache lock poisoned")
.contains_key(&universal_id)
);
let recached_blob = cache
.cache_blob(&universal_id, &build_blob("blob-2", vec![7, 8, 9]))
.expect("cache blob after clearing workspace");
assert!(token_path(&recached_blob.data).exists());
cache.invalidate_workspace(&universal_id);
}
#[test]
fn read_binary_file_returns_not_found_for_missing_file() {
let (cache, universal_id, _workspace) = setup_cache("missing-file");

View File

@@ -773,6 +773,7 @@ __metadata:
"@affine/core": "workspace:*"
"@capacitor/core": "npm:^7.0.0"
typescript: "npm:^5.7.2"
vitest: "npm:^3.2.4"
languageName: unknown
linkType: soft