import from pijul repo / WIP

This commit is contained in:
2020-12-24 11:25:50 +01:00
commit 6ce29d99fb
22 changed files with 2369 additions and 0 deletions

21
crdt-enc/Cargo.toml Normal file
View File

@@ -0,0 +1,21 @@
# crdt-enc: encrypted CRDT storage layer (imported from pijul repo / WIP).
[package]
name = "crdt-enc"
version = "0.1.0"
authors = ["Thomas Heck <t@b128.net>"]
edition = "2018"

[dependencies]
# CRDT primitives (MVReg, Orswot, VClock) for the replicated state/metadata.
crdts = "4"
# Serialization: serde + MessagePack (rmp-serde); serde_bytes for efficient
# binary blobs (see `VersionBytes`).
serde = "1"
serde_bytes = "0.11"
rmp-serde = "0.14"
# Async trait methods for the Storage/Cryptor/KeyCryptor backends.
async-trait = "0.1"
# Error handling; NOTE(review): thiserror is declared but not used in the
# sources shown here — confirm it is needed.
anyhow = "1"
thiserror = "1"
futures = "0.3"
# Object-safe cloning bound on the `CoreSubHandle` trait object.
dyn-clone = "1"
# `bytes::Buf` impl for zero-copy wire framing (see `VersionBytesBuf`).
bytes = "0.5"

# Actor ids and data-version ids; serde support plus random (v4) generation.
[dependencies.uuid]
version = "0.8"
features = ["serde", "v4"]

32
crdt-enc/src/cryptor.rs Normal file
View File

@@ -0,0 +1,32 @@
use crate::{
utils::{VersionBytes, VersionBytesRef},
CoreSubHandle, Info, KeyCryptor, Storage,
};
use anyhow::Result;
use async_trait::async_trait;
use crdts::{CmRDT, CvRDT, MVReg};
use serde::{de::DeserializeOwned, Serialize};
use std::{fmt::Debug, sync::Arc};
use uuid::Uuid;
/// Content-encryption backend used by `Core`.
///
/// Implementors must provide `gen_key`/`encrypt`/`decrypt`; the lifecycle
/// hooks (`init`, `set_info`, `set_remote_meta`) default to no-ops so
/// stateless cryptors need not implement them.
#[async_trait]
pub trait Cryptor
where
    Self: 'static + Debug + Send + Sync + Sized,
{
    /// Called once during `Core::open` with a handle back into the core.
    async fn init(&self, _core: &dyn CoreSubHandle) -> Result<()> {
        Ok(())
    }
    /// Receives the replica info (actor id) after the local meta is loaded.
    async fn set_info(&self, _info: &Info) -> Result<()> {
        Ok(())
    }
    /// Receives the merged cryptor-specific remote meta; `None` is passed
    /// when no remote meta exists yet and a notification was forced.
    async fn set_remote_meta(&self, _data: Option<MVReg<VersionBytes, Uuid>>) -> Result<()> {
        Ok(())
    }
    /// Generates fresh key material tagged with a version id.
    async fn gen_key(&self) -> Result<VersionBytes>;
    /// Encrypts `clear_text` with `key`.
    async fn encrypt(&self, key: VersionBytesRef<'_>, clear_text: &[u8]) -> Result<Vec<u8>>;
    /// Decrypts `enc_data` with `key`.
    async fn decrypt(&self, key: VersionBytesRef<'_>, enc_data: &[u8]) -> Result<Vec<u8>>;
}

133
crdt-enc/src/key_cryptor.rs Normal file
View File

@@ -0,0 +1,133 @@
use crate::{
utils::{VersionBytes, VersionBytesRef},
CoreSubHandle, Cryptor, Info, Storage,
};
use anyhow::Result;
use async_trait::async_trait;
use crdts::{CmRDT, CvRDT, MVReg, Orswot};
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use std::{
borrow::Borrow,
cmp::{Eq, Ord, Ordering, PartialEq},
fmt::Debug,
hash::{Hash, Hasher},
sync::Arc,
};
use uuid::Uuid;
/// Key-management backend: receives the replicated `Keys` registry whenever
/// the core updates it (e.g. after generating the initial key in
/// `Core::open`), so it can persist/distribute the keys.
#[async_trait]
pub trait KeyCryptor
where
    Self: 'static + Debug + Send + Sync + Sized,
{
    /// Called once during `Core::open` with a handle back into the core.
    async fn init(&self, _core: &dyn CoreSubHandle) -> Result<()> {
        Ok(())
    }
    /// Receives the replica info (actor id) after the local meta is loaded.
    async fn set_info(&self, _info: &Info) -> Result<()> {
        Ok(())
    }
    /// Receives the merged key-cryptor-specific remote meta; `None` is passed
    /// when no remote meta exists yet and a notification was forced.
    async fn set_remote_meta(&self, _data: Option<MVReg<VersionBytes, Uuid>>) -> Result<()> {
        Ok(())
    }
    /// Receives the current key registry state.
    async fn set_keys(&self, keys: Keys) -> Result<()>;
}
/// Replicated key registry: an observed-remove set of [`Key`]s plus a
/// multi-value register holding the id of the "latest" key.
#[derive(Clone, Debug, Serialize, Deserialize, Default)]
pub struct Keys {
    // Concurrent writes can leave several candidate ids in the register;
    // `latest_key` picks a deterministic winner among them.
    latest_key_id: MVReg<Uuid, Uuid>,
    keys: Orswot<Key, Uuid>,
}
impl CvRDT for Keys {
fn merge(&mut self, other: Keys) {
self.latest_key_id.merge(other.latest_key_id);
self.keys.merge(other.keys);
}
}
impl Keys {
    /// Looks up a key by id (`Key` implements `Borrow<Uuid>`, so the set can
    /// be queried with a bare id).
    pub fn get_key(&self, key_id: Uuid) -> Option<Key> {
        self.keys.read().val.take(&key_id)
    }
    /// Returns the current "latest" key. The register may hold several
    /// candidate ids after concurrent writes; the smallest matching key
    /// (ordered by id) is chosen as a deterministic tie-break.
    pub fn latest_key(&self) -> Option<Key> {
        let candidate_ids = self.latest_key_id.read().val;
        let mut key_set = self.keys.read().val;
        candidate_ids
            .into_iter()
            .filter_map(move |id| key_set.take(&id))
            .min()
    }
    /// Adds `new_key` to the set and points the latest-key register at it,
    /// both as `actor`.
    pub fn insert_latest_key(&mut self, actor: Uuid, new_key: Key) {
        let key_id = new_key.id();
        let add_ctx = self.keys.read_ctx().derive_add_ctx(actor);
        let add_op = self.keys.add(new_key, add_ctx);
        self.keys.apply(add_op);
        let write_ctx = self.latest_key_id.read_ctx().derive_add_ctx(actor);
        let write_op = self.latest_key_id.write(key_id, write_ctx);
        self.latest_key_id.apply(write_op);
    }
}
/// A single encryption key: an id plus versioned key material.
/// Identity (`Eq`/`Ord`/`Hash`/`Borrow<Uuid>`) is based on `id` only; the
/// key material is ignored for comparisons.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct Key {
    id: Uuid,
    key: VersionBytes,
}
impl Key {
    /// Creates a key with a freshly generated random (v4) id.
    pub fn new(key: VersionBytes) -> Key {
        Self::new_with_id(Uuid::new_v4(), key)
    }
    /// Creates a key with a caller-supplied id.
    pub fn new_with_id(id: Uuid, key: VersionBytes) -> Key {
        Key { id, key }
    }
    /// The key's identity.
    pub fn id(&self) -> Uuid {
        self.id
    }
    /// Borrowed view of the versioned key material.
    pub fn key(&self) -> VersionBytesRef<'_> {
        self.key.as_version_bytes_ref()
    }
}
// `Borrow<Uuid>` lets set lookups use a bare id (see `Keys::get_key`); it is
// sound because `Hash` and `Eq` below also consider only `id`.
impl Borrow<Uuid> for Key {
    fn borrow(&self) -> &Uuid {
        &self.id
    }
}
impl Hash for Key {
    // Hash only the id — consistent with `Eq` and `Borrow<Uuid>`.
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.id.hash(state);
    }
}
impl PartialEq for Key {
    // Keys are equal iff their ids are equal; key material is ignored.
    fn eq(&self, other: &Self) -> bool {
        self.id.eq(&other.id)
    }
}
impl Eq for Key {}
impl PartialOrd for Key {
    /// Delegates to `Ord` so `partial_cmp` can never disagree with `cmp`
    /// (keys have a total order on their id); this is the canonical form
    /// when `Ord` is implemented.
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
impl Ord for Key {
    // Total order by id; used by `Keys::latest_key` to break ties between
    // concurrent "latest" candidates deterministically.
    fn cmp(&self, other: &Self) -> Ordering {
        self.id.cmp(&other.id)
    }
}

759
crdt-enc/src/lib.rs Normal file
View File

@@ -0,0 +1,759 @@
pub mod cryptor;
pub mod key_cryptor;
pub mod storage;
pub mod task;
pub mod utils;
use crate::{
cryptor::Cryptor,
key_cryptor::{Key, KeyCryptor, Keys},
storage::Storage,
utils::VersionBytes,
};
use anyhow::{Context, Error, Result};
use async_trait::async_trait;
use crdts::{CmRDT, CvRDT, MVReg, VClock};
use dyn_clone::DynClone;
use futures::{
future::BoxFuture,
lock::Mutex as AsyncMutex,
stream::{self, StreamExt, TryStreamExt},
};
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use std::{
collections::HashSet,
default::Default,
fmt::Debug,
mem,
sync::{Arc, Mutex as SyncMutex},
};
use uuid::Uuid;
/// Object-safe handle into [`Core`], handed to the storage/cryptor/
/// key-cryptor backends so they can trigger reads/compaction and push
/// metadata back without knowing the core's concrete type parameters.
#[async_trait]
pub trait CoreSubHandle
where
    Self: 'static + Debug + Send + Sync + DynClone,
{
    /// Folds all read states/ops into one new state entry, removing old ones.
    async fn compact(&self) -> Result<()>;
    /// Reads unseen remote states and ops into the local state.
    async fn read_remote(&self) -> Result<()>;
    /// Reads unseen remote meta entries and distributes them to the backends.
    async fn read_remote_meta(&self) -> Result<()>;
    /// Merges `keys` into the core's key registry.
    async fn set_keys(&self, keys: Keys) -> Result<()>;
    /// Merges the storage backend's remote meta and persists the result.
    async fn set_remote_meta_storage(&self, remote_meta: MVReg<VersionBytes, Uuid>) -> Result<()>;
    /// Merges the cryptor backend's remote meta and persists the result.
    async fn set_remote_meta_cryptor(&self, remote_meta: MVReg<VersionBytes, Uuid>) -> Result<()>;
    /// Merges the key-cryptor backend's remote meta and persists the result.
    async fn set_remote_meta_key_cryptor(
        &self,
        remote_meta: MVReg<VersionBytes, Uuid>,
    ) -> Result<()>;
}
/// Forwarding impl so an `Arc<Core<…>>` can be passed to backends as a
/// type-erased `&dyn CoreSubHandle`.
#[async_trait]
impl<S, ST, C, KC> CoreSubHandle for Arc<Core<S, ST, C, KC>>
where
    S: 'static
        + CmRDT
        + CvRDT
        + Default
        + Serialize
        + DeserializeOwned
        + Clone
        + Debug
        + Send
        + Sync,
    <S as CmRDT>::Op: 'static + Serialize + DeserializeOwned + Clone + Send,
    ST: Storage,
    C: Cryptor,
    KC: KeyCryptor,
{
    // Each method forwards to `Core`'s inherent `self: &Arc<Self>` method of
    // the same name. Rust prefers inherent methods over trait methods during
    // resolution, so these calls do not recurse into themselves.
    async fn compact(&self) -> Result<()> {
        self.compact().await
    }
    async fn read_remote(&self) -> Result<()> {
        self.read_remote().await
    }
    async fn read_remote_meta(&self) -> Result<()> {
        self.read_remote_meta().await
    }
    async fn set_keys(&self, keys: Keys) -> Result<()> {
        self.set_keys(keys).await
    }
    async fn set_remote_meta_storage(&self, remote_meta: MVReg<VersionBytes, Uuid>) -> Result<()> {
        self.set_remote_meta_storage(remote_meta).await
    }
    async fn set_remote_meta_cryptor(&self, remote_meta: MVReg<VersionBytes, Uuid>) -> Result<()> {
        self.set_remote_meta_cryptor(remote_meta).await
    }
    async fn set_remote_meta_key_cryptor(
        &self,
        remote_meta: MVReg<VersionBytes, Uuid>,
    ) -> Result<()> {
        self.set_remote_meta_key_cryptor(remote_meta).await
    }
}
// #[async_trait]
// pub trait CoreTrait
// where
// Self: 'static + Debug + Send + Sync + Clone,
// <Self::State as CmRDT>::Op: 'static + Serialize + DeserializeOwned + Clone + Send,
// {
// type State: 'static
// + CmRDT
// + CvRDT
// + Default
// + Serialize
// + DeserializeOwned
// + Clone
// + Debug
// + Send
// + Sync;
// async fn compact(&self) -> Result<()>;
// async fn read_remote(&self) -> Result<()>;
// async fn read_remote_meta(&self) -> Result<()>;
// async fn apply_ops(&self, ops: Vec<<Self::State as CmRDT>::Op>) -> Result<()>;
// async fn set_remote_meta_storage(&self, remote_meta: MVReg<VersionBytes, Uuid>) -> Result<()>;
// async fn set_remote_meta_cryptor(&self, remote_meta: MVReg<VersionBytes, Uuid>) -> Result<()>;
// async fn set_remote_meta_key_cryptor(&self, remote_meta: MVReg<VersionBytes, Uuid>)
// -> Result<()>;
// }
// #[async_trait]
// impl<S, ST, C, KC> CoreTrait for Arc<Core<S, ST, C, KC>>
// where
// S: 'static
// + CmRDT
// + CvRDT
// + Default
// + Serialize
// + DeserializeOwned
// + Clone
// + Debug
// + Send
// + Sync,
// <S as CmRDT>::Op: 'static + Serialize + DeserializeOwned + Clone + Send,
// ST: Storage<Self>,
// C: Cryptor<Self>,
// KC: KeyCryptor<Self>,
// {
// type State = S;
// async fn compact(&self) -> Result<()> {
// self.compact_().await
// }
// async fn read_remote(&self) -> Result<()> {
// self.read_remote_().await
// }
// async fn read_remote_meta(&self) -> Result<()> {
// self.read_remote_meta_(false).await
// }
// async fn apply_ops(&self, ops: Vec<<Self::State as CmRDT>::Op>) -> Result<()> {
// self.apply_ops_(ops).await
// }
// async fn set_remote_meta_storage(&self, remote_meta: MVReg<VersionBytes, Uuid>) -> Result<()> {
// self.set_remote_meta_storage_(remote_meta).await
// }
// async fn set_remote_meta_cryptor(&self, remote_meta: MVReg<VersionBytes, Uuid>) -> Result<()> {
// self.set_remote_meta_cryptor_(remote_meta).await
// }
// async fn set_remote_meta_key_cryptor(
// &self,
// remote_meta: MVReg<VersionBytes, Uuid>,
// ) -> Result<()> {
// self.set_remote_meta_key_cryptor_(remote_meta).await
// }
// }
/// Encrypted CRDT store: combines a [`Storage`] backend, a content
/// [`Cryptor`], and a [`KeyCryptor`] around the replicated state `S`.
#[derive(Debug)]
pub struct Core<S, ST, C, KC> {
    storage: ST,
    cryptor: C,
    key_cryptor: KC,
    // use sync `std::sync::Mutex` here because it has less overhead than async mutex, we are
    // holding it for a very short time and do not `.await` while the lock is held.
    data: SyncMutex<CoreMutData<S>>,
    // task_mgr: task::TaskMgr,
    // Sorted in `open` so `VersionBytes::ensure_versions` can binary-search.
    supported_data_versions: Vec<Uuid>,
    // Version id written into every newly stored blob.
    current_data_version: Uuid,
    // Serializes `apply_ops` calls; async mutex because it is held across
    // `.await`s (encrypt + store).
    apply_ops_lock: AsyncMutex<()>,
}
/// All mutable core state, guarded by `Core::data`.
#[derive(Debug)]
struct CoreMutData<S> {
    // `None` until `open` has loaded/created the local meta.
    local_meta: Option<LocalMeta>,
    remote_meta: RemoteMeta,
    keys: Keys,
    state: StateWrapper<S>,
    // Names of state entries already merged into `state`; skipped on re-read
    // and removed by `compact`.
    read_states: HashSet<String>,
    // Names of remote-meta entries already merged into `remote_meta`.
    read_remote_metas: HashSet<String>,
}
impl<S, ST, C, KC> Core<S, ST, C, KC>
where
S: 'static
+ CmRDT
+ CvRDT
+ Default
+ Serialize
+ DeserializeOwned
+ Clone
+ Debug
+ Send
+ Sync,
<S as CmRDT>::Op: 'static + Serialize + DeserializeOwned + Clone + Send,
ST: Storage,
C: Cryptor,
KC: KeyCryptor,
{
pub async fn open(options: OpenOptions<ST, C, KC>) -> Result<(Arc<Self>, Info)> {
let core_data = SyncMutex::new(CoreMutData {
local_meta: None,
remote_meta: RemoteMeta::default(),
keys: Keys::default(),
state: StateWrapper {
next_op_versions: Default::default(),
state: Default::default(),
},
read_states: HashSet::new(),
read_remote_metas: HashSet::new(),
});
let mut supported_data_versions = options.supported_data_versions;
supported_data_versions.sort_unstable();
let core = Arc::new(Core {
storage: options.storage,
cryptor: options.cryptor,
key_cryptor: options.key_cryptor,
supported_data_versions,
current_data_version: options.current_data_version,
data: core_data,
apply_ops_lock: AsyncMutex::new(()),
});
futures::try_join![
core.storage.init(&core),
core.cryptor.init(&core),
core.key_cryptor.init(&core),
]?;
let local_meta = core
.storage
.load_local_meta()
.await
.context("failed getting local meta")?;
let local_meta: LocalMeta = match local_meta {
Some(local_meta) => {
local_meta.ensure_versions(&core.supported_data_versions)?;
rmp_serde::from_read_ref(&local_meta)?
}
None => {
if !options.create {
return Err(Error::msg(
"local meta does not exist, and `create` option is not set",
));
}
let local_meta = LocalMeta {
local_actor_id: Uuid::new_v4(),
};
// TODO: use core version
let vbox = VersionBytes::new(
core.current_data_version,
rmp_serde::to_vec_named(&local_meta)?,
);
core.storage
.store_local_meta(vbox)
.await
.context("failed storing local meta")?;
local_meta
}
};
let info = Info {
actor: local_meta.local_actor_id,
};
core.with_mut_data(|data| {
data.local_meta = Some(local_meta);
Ok(())
})?;
futures::try_join![
core.storage.set_info(&info),
core.cryptor.set_info(&info),
core.key_cryptor.set_info(&info),
]?;
core.read_remote_meta_(true).await?;
let insert_new_key = core.with_mut_data(|data| Ok(data.keys.latest_key().is_none()))?;
if insert_new_key {
let new_key = core.cryptor.gen_key().await?;
let keys = core.with_mut_data(|data| {
data.keys.insert_latest_key(info.actor(), Key::new(new_key));
Ok(data.keys.clone())
})?;
core.key_cryptor.set_keys(keys).await?;
}
Ok((core, info))
}
fn with_mut_data<F, R>(self: &Arc<Self>, f: F) -> Result<R>
where
F: FnOnce(&mut CoreMutData<S>) -> Result<R>,
{
let mut data = self
.data
.lock()
.map_err(|err| Error::msg(format!("unable to lock `CoreMutData`: {}", err)))?;
f(&mut *data)
}
/// Locks cores data, do not call recursivl
pub fn with_state<F, R>(self: &Arc<Self>, f: F) -> Result<R>
where
F: FnOnce(&S) -> Result<R>,
{
self.with_mut_data(|data| f(&data.state.state))
}
pub async fn compact(self: &Arc<Self>) -> Result<()> {
self.read_remote().await?;
let (clear_text, states_to_remove, ops_to_remove, key) = self.with_mut_data(|data| {
let clear_text = rmp_serde::to_vec_named(&data.state)?;
let states_to_remove = data.read_states.iter().cloned().collect();
let ops_to_remove = data
.state
.next_op_versions
.iter()
.map(|dot| (dot.actor.clone(), dot.counter - 1))
.collect();
let key = data.keys.latest_key().context("no latest key")?;
Ok((clear_text, states_to_remove, ops_to_remove, key))
})?;
let data_enc = self.cryptor.encrypt(key.key(), &clear_text).await.unwrap();
let enc_data = VersionBytes::new(self.current_data_version, data_enc);
// first store new state
let new_state_name = self.storage.store_state(enc_data).await?;
// then remove old states and ops
let (removed_states, _) = futures::try_join![
self.storage.remove_states(states_to_remove),
self.storage.remove_ops(ops_to_remove),
]?;
self.with_mut_data(|data| {
for removed_state in removed_states {
data.read_states.remove(&removed_state);
}
data.read_states.insert(new_state_name);
Ok(())
})?;
Ok(())
}
async fn set_keys(self: &Arc<Self>, keys: Keys) -> Result<()> {
self.with_mut_data(|data| {
data.keys.merge(keys);
Ok(())
})?;
Ok(())
}
pub async fn read_remote(self: &Arc<Self>) -> Result<()> {
let states_read = self.read_remote_states().await?;
let ops_read = self.read_remote_ops().await?;
if states_read || ops_read {
// TODO: notify app of state changes
}
Ok(())
}
async fn read_remote_states(self: &Arc<Self>) -> Result<bool> {
let names = self
.storage
.list_state_names()
.await
.context("failed getting state entry names while reading remote states")?;
let (states_to_read, key) = self.with_mut_data(|data| {
let states_to_read: Vec<_> = names
.into_iter()
.filter(|name| !data.read_states.contains(name))
.collect();
let key = data.keys.latest_key().context("no latest key")?;
Ok((states_to_read, key))
})?;
let new_states = self
.storage
.load_states(states_to_read)
.await
.context("failed loading state content while reading remote states")?;
let new_states: Vec<_> = stream::iter(new_states)
.map(|(name, state)| {
let key = key.clone();
async move {
// TODO: use "Core"s version because we are storing the state in a wrapper with
// other data, and also store app version
state.ensure_versions(&self.supported_data_versions)?;
let clear_text = self
.cryptor
.decrypt(key.key(), state.as_ref())
.await
.with_context(|| format!("failed decrypting remote state {}", name))?;
let state_wrapper: StateWrapper<S> = rmp_serde::from_read_ref(&clear_text)?;
Result::<_>::Ok((name, state_wrapper))
}
})
.buffer_unordered(16)
.try_collect()
.await?;
let states_read = !new_states.is_empty();
self.with_mut_data(|data| {
for (name, state_wrapper) in new_states {
data.state.state.merge(state_wrapper.state);
data.state
.next_op_versions
.merge(state_wrapper.next_op_versions);
data.read_states.insert(name);
}
Ok(())
})?;
Ok(states_read)
}
async fn read_remote_ops(self: &Arc<Self>) -> Result<bool> {
let actors = self
.storage
.list_op_actors()
.await
.context("failed getting op actor entries while reading remote ops")?;
let (ops_to_read, key) = self.with_mut_data(|data| {
let ops_to_read: Vec<_> = actors
.into_iter()
.map(|actor| (actor, data.state.next_op_versions.get(&actor)))
.collect();
let key = data.keys.latest_key().context("no latest key")?;
Ok((ops_to_read, key))
})?;
let new_ops = self.storage.load_ops(ops_to_read).await?;
let new_ops: Vec<_> = stream::iter(new_ops)
.map(|(actor, version, data)| {
let key = key.clone();
async move {
// TODO: use cores version
data.ensure_versions(&self.supported_data_versions)?;
let clear_text = self
.cryptor
.decrypt(key.key(), data.as_ref())
.await
.unwrap();
let ops: Vec<_> = rmp_serde::from_read_ref(&clear_text)?;
// TODO: check apps version
Result::<_, Error>::Ok((actor, version, ops))
}
})
.buffered(16)
.try_collect()
.await?;
let ops_read = self.with_mut_data(|data| {
let mut ops_read = false;
for (actor, version, ops) in new_ops {
let expected_version = data.state.next_op_versions.get(&actor);
if version < expected_version {
// already read that version (concurrent call to this fn between us reading
// the ops and processing them)
continue;
}
if expected_version < version {
return Err(Error::msg(
"Unexpected op version. Got ops in the wrong order? Bug in storage?",
));
}
for op in ops {
data.state.state.apply(op);
}
let version_inc = data.state.next_op_versions.inc(actor);
data.state.next_op_versions.apply(version_inc);
ops_read = true;
}
Ok(ops_read)
})?;
Ok(ops_read)
}
async fn read_remote_meta(self: &Arc<Self>) -> Result<()> {
self.read_remote_meta_(false).await
}
async fn read_remote_meta_(self: &Arc<Self>, force_notify: bool) -> Result<()> {
let names = self
.storage
.list_remote_meta_names()
.await
.context("failed getting remote meta entry names while reading remote metas")?;
let remote_metas_to_read = self.with_mut_data(|data| {
let remote_metas_to_read: Vec<_> = names
.into_iter()
.filter(|name| !data.read_remote_metas.contains(name))
.collect();
Ok(remote_metas_to_read)
})?;
let remote_metas = self
.storage
.load_remote_metas(remote_metas_to_read)
.await
.context("failed loading remote meta while reading remote metas")?
.into_iter()
.map(|(name, vbox)| {
// TODO: use "Core"s version because we are storing the state in a wrapper with
// other data, and also store app version
vbox.ensure_versions(&self.supported_data_versions)?;
let remote_meta: RemoteMeta = rmp_serde::from_read_ref(&vbox)?;
Ok((name, remote_meta))
})
.collect::<Result<Vec<_>>>()?;
let remote_meta = if !remote_metas.is_empty() {
self.with_mut_data(|data| {
for (name, meta) in remote_metas {
data.remote_meta.merge(meta);
data.read_remote_metas.insert(name);
}
Ok(Some(data.remote_meta.clone()))
})?
} else {
None
};
if let Some(remote_meta) = remote_meta {
futures::try_join![
self.storage.set_remote_meta(Some(remote_meta.storage)),
self.cryptor.set_remote_meta(Some(remote_meta.cryptor)),
self.key_cryptor
.set_remote_meta(Some(remote_meta.key_cryptor)),
]?;
} else if force_notify {
futures::try_join![
self.storage.set_remote_meta(None),
self.cryptor.set_remote_meta(None),
self.key_cryptor.set_remote_meta(None),
]?;
}
Ok(())
}
async fn set_remote_meta_storage(
self: &Arc<Self>,
remote_meta: MVReg<VersionBytes, Uuid>,
) -> Result<()> {
self.with_mut_data(|data| {
data.remote_meta.storage.merge(remote_meta);
Ok(())
})?;
self.store_remote_meta().await
}
async fn set_remote_meta_cryptor(
self: &Arc<Self>,
remote_meta: MVReg<VersionBytes, Uuid>,
) -> Result<()> {
self.with_mut_data(|data| {
data.remote_meta.cryptor.merge(remote_meta);
Ok(())
})?;
self.store_remote_meta().await
}
async fn set_remote_meta_key_cryptor(
self: &Arc<Self>,
remote_meta: MVReg<VersionBytes, Uuid>,
) -> Result<()> {
self.with_mut_data(|data| {
data.remote_meta.key_cryptor.merge(remote_meta);
Ok(())
})?;
self.store_remote_meta().await
}
async fn store_remote_meta(self: &Arc<Self>) -> Result<()> {
let vbox = self.with_mut_data(|data| {
let bytes = rmp_serde::to_vec_named(&data.remote_meta)?;
// TODO: use core version
Ok(VersionBytes::new(self.current_data_version, bytes))
})?;
let new_name = self.storage.store_remote_meta(vbox).await?;
let names_to_remove = self.with_mut_data(|data| {
let names_to_remove = data.read_remote_metas.drain().collect();
data.read_remote_metas.insert(new_name);
Ok(names_to_remove)
})?;
self.storage.remove_remote_metas(names_to_remove).await?;
Ok(())
}
pub async fn apply_ops(self: &Arc<Self>, ops: Vec<S::Op>) -> Result<()> {
// don't allow concurrent op applies
let apply_ops_lock = self.apply_ops_lock.lock().await;
let clear_text = rmp_serde::to_vec_named(&ops)?;
let key = self.with_mut_data(|data| data.keys.latest_key().context("no latest key"))?;
let data_enc = self.cryptor.encrypt(key.key(), &clear_text).await.unwrap();
// TODO: add key id
// let block = Block {
// data_version: self.current_data_version,
// key_id: Uuid::nil(),
// data_enc,
// };
// TODO: use core version
let data_enc = VersionBytes::new(self.current_data_version, data_enc);
let (actor, version) = self.with_mut_data(|data| {
let actor = data
.local_meta
.as_ref()
.ok_or_else(|| Error::msg("local meta not loaded"))?
.local_actor_id;
let version = data.state.next_op_versions.get(&actor);
Ok((actor, version))
})?;
self.storage.store_ops(actor, version, data_enc).await?;
self.with_mut_data(|data| {
for op in ops {
data.state.state.apply(op);
}
let version_inc = data.state.next_op_versions.inc(actor);
data.state.next_op_versions.apply(version_inc);
Ok(())
})?;
// release lock by hand to prevent an early release by accident
mem::drop(apply_ops_lock);
Ok(())
}
}
/// Options for [`Core::open`].
pub struct OpenOptions<ST, C, KC> {
    pub storage: ST,
    pub cryptor: C,
    pub key_cryptor: KC,
    /// Create a fresh local meta (new replica) when none exists yet.
    pub create: bool,
    /// All data-format versions this build can read; sorted by `open`.
    pub supported_data_versions: Vec<Uuid>,
    /// Version id written into newly stored blobs.
    pub current_data_version: Uuid,
}
/// Per-replica metadata persisted locally (never replicated).
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct LocalMeta {
    // This replica's CRDT actor id, generated on first open.
    pub(crate) local_actor_id: Uuid,
}
/// The replicated state together with, per actor, the version of the next op
/// to read — i.e. everything that has already been folded into `state`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub(crate) struct StateWrapper<S> {
    pub(crate) next_op_versions: VClock<Uuid>,
    pub(crate) state: S,
}
/// Backend-specific replicated metadata: one multi-value register per
/// backend, merged and persisted as a unit by `Core::store_remote_meta`.
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
struct RemoteMeta {
    storage: MVReg<VersionBytes, Uuid>,
    cryptor: MVReg<VersionBytes, Uuid>,
    key_cryptor: MVReg<VersionBytes, Uuid>,
}
impl CvRDT for RemoteMeta {
fn merge(&mut self, other: Self) {
self.storage.merge(other.storage);
self.cryptor.merge(other.cryptor);
self.key_cryptor.merge(other.key_cryptor);
}
}
/// Public info about the opened replica, returned by [`Core::open`] and
/// pushed to the backends via `set_info`.
#[derive(Debug, Clone)]
pub struct Info {
    actor: Uuid,
}
impl Info {
    /// This replica's CRDT actor id.
    pub fn actor(&self) -> Uuid {
        self.actor
    }
}

48
crdt-enc/src/storage.rs Normal file
View File

@@ -0,0 +1,48 @@
use crate::{utils::VersionBytes, CoreSubHandle, Cryptor, Info, KeyCryptor};
use anyhow::Result;
use async_trait::async_trait;
use crdts::{CmRDT, CvRDT, MVReg};
use serde::{de::DeserializeOwned, Serialize};
use std::{fmt::Debug, sync::Arc};
use uuid::Uuid;
/// Persistence backend for the encrypted CRDT store. Manages four kinds of
/// entries: the local meta (single, per replica), remote-meta entries,
/// state entries, and per-actor op entries (fix: renamed misspelled
/// `actor_last_verions` parameter — trait parameter names do not affect
/// implementors or callers).
#[async_trait]
pub trait Storage
where
    Self: 'static + Debug + Send + Sync + Sized,
{
    /// Called once during `Core::open` with a handle back into the core.
    async fn init(&self, _core: &dyn CoreSubHandle) -> Result<()> {
        Ok(())
    }
    /// Receives the replica info (actor id) after the local meta is loaded.
    async fn set_info(&self, _info: &Info) -> Result<()> {
        Ok(())
    }
    /// Receives the merged storage-specific remote meta; `None` is passed
    /// when no remote meta exists yet and a notification was forced.
    async fn set_remote_meta(&self, _data: Option<MVReg<VersionBytes, Uuid>>) -> Result<()> {
        Ok(())
    }
    /// Loads the local meta blob, `None` when this replica was never opened.
    async fn load_local_meta(&self) -> Result<Option<VersionBytes>>;
    async fn store_local_meta(&self, data: VersionBytes) -> Result<()>;
    async fn list_remote_meta_names(&self) -> Result<Vec<String>>;
    async fn load_remote_metas(&self, names: Vec<String>) -> Result<Vec<(String, VersionBytes)>>;
    /// Stores a remote-meta blob and returns its (new, unique) entry name.
    async fn store_remote_meta(&self, data: VersionBytes) -> Result<String>;
    async fn remove_remote_metas(&self, names: Vec<String>) -> Result<()>;
    async fn list_state_names(&self) -> Result<Vec<String>>;
    async fn load_states(&self, names: Vec<String>) -> Result<Vec<(String, VersionBytes)>>;
    /// Stores a state blob and returns its (new, unique) entry name.
    async fn store_state(&self, data: VersionBytes) -> Result<String>;
    /// Removes state entries; returns the names actually removed.
    async fn remove_states(&self, names: Vec<String>) -> Result<Vec<String>>;
    async fn list_op_actors(&self) -> Result<Vec<Uuid>>;
    /// needs to return the ops ordered by version of that actor
    async fn load_ops(
        &self,
        actor_first_versions: Vec<(Uuid, u64)>,
    ) -> Result<Vec<(Uuid, u64, VersionBytes)>>;
    async fn store_ops(&self, actor: Uuid, version: u64, data: VersionBytes) -> Result<()>;
    /// Removes, per actor, all op entries up to and including the given
    /// last version.
    async fn remove_ops(&self, actor_last_versions: Vec<(Uuid, u64)>) -> Result<()>;
}

159
crdt-enc/src/task.rs Normal file
View File

@@ -0,0 +1,159 @@
use anyhow::Result;
use futures::{
channel::mpsc,
future::{self, BoxFuture, Future, FutureExt},
stream::FuturesUnordered,
stream::{FusedStream, StreamExt},
task::{self, Poll, SpawnError},
};
use std::{fmt, pin::Pin, result::Result as StdResult};
// thread_local! {
// // need to use `Box<Any>` here, <https://github.com/rust-lang/rust/issues/57775>
// static TL_DATA: RefCell<Option<Box<dyn Any + 'static>>> = RefCell::new(None);
// }
// pub struct TaskMgrAccessor;
// impl TaskMgrAccessor {
// pub fn with<T, F, R>(f: F) -> R
// where
// T: 'static,
// F: FnOnce(&mut T) -> R,
// {
// TL_DATA.with(|data| {
// let mut data = data.borrow_mut();
// let data = data.as_mut().expect("TaskMgrAccessor data not set");
// if let Some(data) = data.downcast_mut::<T>() {
// f(data)
// } else {
// panic!(format!(
// "Data in TaskMgrAccessor has wrong type, expected type: {}",
// any::type_name::<T>()
// ));
// }
// })
// }
// pub fn set_with<T, F, R>(val: T, f: F) -> (T, R)
// where
// T: 'static,
// F: FnOnce() -> R,
// {
// TL_DATA.with(|data| {
// let mut data = data.borrow_mut();
// *data = Some(Box::new(val));
// });
// let res = f();
// let val = TL_DATA.with(|data| {
// let mut data = data.borrow_mut();
// let data = data.take().expect("TaskMgrAccessor data not set");
// if let Ok(data) = data.downcast::<T>() {
// *data
// } else {
// panic!(format!(
// "Data in TaskMgrAccessor has wrong type, expected type: {}",
// any::type_name::<T>()
// ));
// }
// });
// (val, res)
// }
// }
/// Future that drives all tasks spawned through a [`TaskMgr`]. Resolves with
/// the first task error, or with `Ok(())` once the spawn channel is closed
/// and all running tasks have finished.
pub struct TaskMgrExecutor {
    // Tasks currently being driven.
    futs: FuturesUnordered<BoxFuture<'static, Result<()>>>,
    // Incoming tasks from `TaskMgr::spawn*`.
    rx: mpsc::UnboundedReceiver<BoxFuture<'static, Result<()>>>,
}
impl Future for TaskMgrExecutor {
    type Output = Result<()>;
    /// Drains newly spawned tasks into the run set, then drives the set.
    ///
    /// Fix: previously, when the run set drained (`poll_next` → `None`) the
    /// executor returned `Ready(Ok(()))` even while the spawn channel was
    /// still open, terminating early — inconsistent with the explicit
    /// `rx.is_terminated()` check taken when the set is empty on entry. Now
    /// both paths only finish once the channel is terminated.
    fn poll(mut self: Pin<&mut Self>, ctx: &mut task::Context) -> Poll<Self::Output> {
        // When this loop ends with `Pending`, the waker is registered on
        // `rx`, so later spawns wake us even if `futs` drains below.
        while let Poll::Ready(Some(fut)) = self.rx.poll_next_unpin(ctx) {
            self.futs.push(fut);
        }
        if self.futs.is_empty() {
            return if self.rx.is_terminated() {
                // no running tasks & the receiver closed => exit
                Poll::Ready(Ok(()))
            } else {
                Poll::Pending
            };
        }
        while let Poll::Ready(res) = self.futs.poll_next_unpin(ctx) {
            match res {
                Some(Ok(())) => {}
                Some(Err(err)) => {
                    return Poll::Ready(Err(err));
                }
                None => {
                    // All current tasks finished; only exit when no new tasks
                    // can ever arrive, otherwise wait for the `rx` waker.
                    return if self.rx.is_terminated() {
                        Poll::Ready(Ok(()))
                    } else {
                        Poll::Pending
                    };
                }
            }
        }
        Poll::Pending
    }
}
impl fmt::Debug for TaskMgrExecutor {
    // Manual impl: the boxed futures inside are not `Debug`.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("TaskMgrExecutor").finish()
    }
}
/// Cloneable handle for spawning tasks onto a [`TaskMgrExecutor`].
#[derive(Clone)]
pub struct TaskMgr {
    tx: mpsc::UnboundedSender<BoxFuture<'static, Result<()>>>,
}
impl TaskMgr {
    /// Creates a spawner handle plus the executor future that drives the
    /// spawned tasks. The executor must be polled (e.g. awaited) for any
    /// spawned task to make progress.
    pub fn new() -> (Self, TaskMgrExecutor) {
        let (tx, rx) = mpsc::unbounded();
        let mgr = TaskMgr { tx };
        let exec = TaskMgrExecutor {
            futs: FuturesUnordered::new(),
            rx,
        };
        (mgr, exec)
    }
    /// Queues a fallible task; fails with `SpawnError::shutdown` when the
    /// executor has gone away.
    pub fn spawn<F>(&self, fut: F) -> StdResult<(), SpawnError>
    where
        F: 'static + Send + Future<Output = Result<()>>,
    {
        match self.tx.unbounded_send(fut.boxed()) {
            Ok(()) => Ok(()),
            Err(_) => Err(SpawnError::shutdown()),
        }
    }
    /// Queues a task and returns a handle resolving to its output; dropping
    /// the handle cancels the task (`RemoteHandle` semantics).
    pub fn spawn_with_handle<F>(
        &self,
        fut: F,
    ) -> StdResult<future::RemoteHandle<F::Output>, SpawnError>
    where
        F: 'static + Send + Future,
        F::Output: 'static + Send,
    {
        let (remote, handle) = fut.remote_handle();
        let boxed = remote.map(|()| Result::Ok(())).boxed();
        self.tx
            .unbounded_send(boxed)
            .map_err(|_| SpawnError::shutdown())?;
        Ok(handle)
    }
}
impl fmt::Debug for TaskMgr {
    // Manual impl: the sender's boxed-future payload is not `Debug`.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("TaskMgr").finish()
    }
}

19
crdt-enc/src/utils.rs Normal file
View File

@@ -0,0 +1,19 @@
mod version_bytes;
pub use version_bytes::*;
use crdts::{CmRDT, CvRDT};
use serde::{Deserialize, Serialize};
/// A CRDT that carries no data; usable as a placeholder state type.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct EmptyCrdt;
impl CmRDT for EmptyCrdt {
    type Op = ();
    // Nothing to do: there is no state to mutate.
    fn apply(&mut self, _op: Self::Op) {}
}
impl CvRDT for EmptyCrdt {
    // Nothing to do: all replicas are trivially identical.
    fn merge(&mut self, _other: Self) {}
}

View File

@@ -0,0 +1,273 @@
use serde::{Deserialize, Serialize};
use serde_bytes;
use std::{borrow::Cow, convert::TryFrom, fmt, io::IoSlice};
use uuid::Uuid;
/// Error returned by the `ensure_version(s)` checks: the blob's version tag
/// was not among the expected ones.
#[derive(Debug)]
pub struct VersionError {
    expected: Vec<Uuid>,
    got: Uuid,
}
impl fmt::Display for VersionError {
    /// Renders e.g. `version check failed, got: <uuid>, expected one of:
    /// <uuid>, <uuid>` — the expected ids comma-separated.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(
            f,
            "version check failed, got: {}, expected one of: ",
            self.got
        )?;
        let mut first = true;
        for expected in &self.expected {
            if !first {
                f.write_str(", ")?;
            }
            first = false;
            fmt::Display::fmt(expected, f)?;
        }
        Ok(())
    }
}
impl std::error::Error for VersionError {}
/// Owned byte blob tagged with a format-version `Uuid`.
/// Serialized via `serde_bytes` so MessagePack stores the content as a
/// compact binary blob rather than a sequence of integers.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct VersionBytes(Uuid, #[serde(with = "serde_bytes")] Vec<u8>);
impl VersionBytes {
    /// Wraps owned `content` together with its format-version tag.
    pub fn new(version: Uuid, content: Vec<u8>) -> VersionBytes {
        VersionBytes(version, content)
    }
    /// The format-version tag.
    pub fn version(&self) -> Uuid {
        self.0
    }
    /// Succeeds only when the tag equals `version`.
    pub fn ensure_version(&self, version: Uuid) -> Result<(), VersionError> {
        if self.0 == version {
            Ok(())
        } else {
            Err(VersionError {
                expected: vec![version],
                got: self.0,
            })
        }
    }
    /// Succeeds only when the tag is contained in `versions`.
    /// `versions` needs to be sorted!
    pub fn ensure_versions(&self, versions: &[Uuid]) -> Result<(), VersionError> {
        match versions.binary_search(&self.0) {
            Ok(_) => Ok(()),
            Err(_) => Err(VersionError {
                expected: versions.to_owned(),
                got: self.0,
            }),
        }
    }
    /// Borrowed view of version + content.
    pub fn as_version_bytes_ref(&self) -> VersionBytesRef<'_> {
        VersionBytesRef::new(self.0, &self.1)
    }
    /// Wire-format `bytes::Buf` view (24-byte header + content).
    pub fn buf(&self) -> VersionBytesBuf<'_> {
        VersionBytesBuf::new(self.0, &self.1)
    }
}
impl From<VersionBytes> for Vec<u8> {
    // Drops the version tag, returning only the raw content.
    fn from(v: VersionBytes) -> Vec<u8> {
        v.1
    }
}
impl From<VersionBytesRef<'_>> for VersionBytes {
    // Copies the (possibly borrowed) bytes into an owned value.
    fn from(v: VersionBytesRef<'_>) -> VersionBytes {
        VersionBytes::new(v.0, v.1.into())
    }
}
impl AsRef<[u8]> for VersionBytes {
    // Content bytes only; the version tag is not included.
    fn as_ref(&self) -> &[u8] {
        self.1.as_ref()
    }
}
impl TryFrom<&[u8]> for VersionBytes {
    type Error = ParseError;
    // Parses the wire format (see `VersionBytesRef::try_from`) and copies
    // the content into an owned value.
    fn try_from(buf: &[u8]) -> Result<VersionBytes, ParseError> {
        Ok(VersionBytesRef::try_from(buf)?.into())
    }
}
/// Borrowed (`Cow`) variant of [`VersionBytes`]; `#[serde(borrow)]` lets
/// deserialization borrow the content from the input buffer when possible.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct VersionBytesRef<'a>(
    Uuid,
    #[serde(borrow)]
    #[serde(with = "serde_bytes")]
    Cow<'a, [u8]>,
);
impl<'a> VersionBytesRef<'a> {
    /// Wraps borrowed `content` together with its format-version tag.
    pub fn new(version: Uuid, content: &'a [u8]) -> VersionBytesRef<'a> {
        VersionBytesRef(version, Cow::Borrowed(content))
    }
    /// The format-version tag.
    pub fn version(&self) -> Uuid {
        self.0
    }
    /// Succeeds only when the tag equals `version`.
    pub fn ensure_version(&self, version: Uuid) -> Result<(), VersionError> {
        if self.0 == version {
            Ok(())
        } else {
            Err(VersionError {
                expected: vec![version],
                got: self.0,
            })
        }
    }
    /// Succeeds only when the tag is contained in `versions`.
    /// `versions` needs to be sorted!
    pub fn ensure_versions(&self, versions: &[Uuid]) -> Result<(), VersionError> {
        match versions.binary_search(&self.0) {
            Ok(_) => Ok(()),
            Err(_) => Err(VersionError {
                expected: versions.to_owned(),
                got: self.0,
            }),
        }
    }
    /// Wire-format `bytes::Buf` view (24-byte header + content).
    pub fn buf(&self) -> VersionBytesBuf<'_> {
        VersionBytesBuf::new(self.0, &self.1)
    }
}
impl<'a> AsRef<[u8]> for VersionBytesRef<'a> {
    // Content bytes only; the version tag is not included.
    fn as_ref(&self) -> &[u8] {
        self.1.as_ref()
    }
}
impl<'a> From<&'a VersionBytes> for VersionBytesRef<'a> {
    // Cheap borrow; no copy of the content.
    fn from(v: &'a VersionBytes) -> VersionBytesRef<'a> {
        VersionBytesRef::new(v.0, &v.1)
    }
}
impl<'a> TryFrom<&'a [u8]> for VersionBytesRef<'a> {
    type Error = ParseError;
    /// Parses the wire format produced by `VersionBytesBuf`: a 16-byte
    /// version uuid, an 8-byte little-endian content length, then exactly
    /// that many content bytes (borrowed from `buf`).
    fn try_from(buf: &'a [u8]) -> Result<VersionBytesRef<'a>, ParseError> {
        if buf.len() < BUF_VERSION_LEN_BYTES {
            return Err(ParseError::InvalidLength);
        }
        let mut version = [0; 16];
        version.copy_from_slice(&buf[0..16]);
        let version = Uuid::from_bytes(version);
        let mut len = [0; 8];
        len.copy_from_slice(&buf[16..24]);
        // On 32-bit targets a 64-bit length may not fit into `usize`.
        let len =
            usize::try_from(u64::from_le_bytes(len)).map_err(|_| ParseError::InvalidLength)?;
        // TODO: check for max len?
        // Reject trailing or missing bytes: content must be exactly `len`.
        if buf.len() - BUF_VERSION_LEN_BYTES != len {
            return Err(ParseError::InvalidLength);
        }
        Ok(VersionBytesRef::new(version, &buf[BUF_VERSION_LEN_BYTES..]))
    }
}
/// Error parsing the `VersionBytes` wire format.
#[derive(Debug)]
#[non_exhaustive]
pub enum ParseError {
    // Buffer shorter than the header, or content length mismatch.
    InvalidLength,
}
impl fmt::Display for ParseError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        "invalid length".fmt(f)
    }
}
impl std::error::Error for ParseError {}
/// Header size: 16-byte version uuid + 8-byte little-endian content length.
const BUF_VERSION_LEN_BYTES: usize = 16 + 8;
/// Zero-copy `bytes::Buf` over a versioned blob: yields the 24-byte header
/// (version + length) followed by the borrowed content.
#[derive(Debug, Clone)]
pub struct VersionBytesBuf<'a> {
    // Read cursor over header + content combined.
    pos: usize,
    // Pre-built header: version uuid then content length (LE u64).
    version_len: [u8; BUF_VERSION_LEN_BYTES],
    content: &'a [u8],
}
impl<'a> VersionBytesBuf<'a> {
    /// Builds the 24-byte header (version uuid + content length as a
    /// little-endian u64) up front; the content itself stays borrowed.
    pub fn new(version: Uuid, content: &'a [u8]) -> VersionBytesBuf<'a> {
        let mut version_len = [0; BUF_VERSION_LEN_BYTES];
        version_len[0..16].copy_from_slice(version.as_bytes());
        // Only fails on targets where usize is wider than u64 — a bug there.
        let len = u64::try_from(content.len()).expect("Could not convert len (usize) into u64");
        version_len[16..].copy_from_slice(&len.to_le_bytes());
        VersionBytesBuf {
            pos: 0,
            version_len,
            content,
        }
    }
}
// `bytes` 0.5 `Buf` impl: exposes header then content as one logical buffer.
impl<'a> ::bytes::Buf for VersionBytesBuf<'a> {
    fn remaining(&self) -> usize {
        // Total logical length (header + content) minus the cursor.
        BUF_VERSION_LEN_BYTES + self.content.len() - self.pos
    }
    fn bytes(&self) -> &[u8] {
        // Cursor still inside the header: yield the rest of the header.
        if self.pos < BUF_VERSION_LEN_BYTES {
            &self.version_len[self.pos..]
        } else {
            // Cursor inside (or past the end of) the content.
            let pos = self.pos - BUF_VERSION_LEN_BYTES;
            if self.content.len() <= pos {
                &[]
            } else {
                &self.content[pos..]
            }
        }
    }
    fn advance(&mut self, cnt: usize) {
        // `Buf::advance` contract: panic when advancing past the end.
        assert!(cnt <= self.remaining());
        self.pos += cnt;
    }
    fn bytes_vectored<'b>(&'b self, dst: &mut [IoSlice<'b>]) -> usize {
        // TODO: TESTING!
        // NOTE(review): when the cursor is in the header and `content` is
        // empty, this returns a (harmless) empty second slice — untested.
        if dst.len() == 0 {
            return 0;
        }
        if self.pos < BUF_VERSION_LEN_BYTES {
            // Remainder of the header, then (if room) the whole content.
            dst[0] = IoSlice::new(&self.version_len[self.pos..]);
            if dst.len() == 1 {
                1
            } else {
                dst[1] = IoSlice::new(self.content);
                2
            }
        } else {
            let pos = self.pos - BUF_VERSION_LEN_BYTES;
            if self.content.len() <= pos {
                0
            } else {
                dst[0] = IoSlice::new(&self.content[pos..]);
                1
            }
        }
    }
}