move contents of taskchampion repo to tc/
This commit is contained in:
406
rust/taskchampion/src/taskdb/apply.rs
Normal file
406
rust/taskchampion/src/taskdb/apply.rs
Normal file
@@ -0,0 +1,406 @@
|
||||
use crate::errors::Error;
|
||||
use crate::server::SyncOp;
|
||||
use crate::storage::{ReplicaOp, StorageTxn, TaskMap};
|
||||
|
||||
/// Apply the given SyncOp to the replica, updating both the task data and adding a
/// ReplicaOp to the list of operations. Returns the TaskMap of the task after the
/// operation has been applied (or an empty TaskMap for Delete). It is not an error
/// to create an existing task, nor to delete a nonexistent task.
///
/// On any change, this function commits the transaction itself before returning; in the
/// no-op cases (creating an existing task) the transaction is left uncommitted, since
/// nothing was modified.
pub(super) fn apply_and_record(txn: &mut dyn StorageTxn, op: SyncOp) -> anyhow::Result<TaskMap> {
    match op {
        SyncOp::Create { uuid } => {
            let created = txn.create_task(uuid)?;
            if created {
                // a new (empty) task was created: record the op and commit
                txn.add_operation(ReplicaOp::Create { uuid })?;
                txn.commit()?;
                Ok(TaskMap::new())
            } else {
                // the task already existed; return its current content unchanged,
                // with no new operation recorded and no commit
                Ok(txn
                    .get_task(uuid)?
                    .expect("create_task failed but task does not exist"))
            }
        }
        SyncOp::Delete { uuid } => {
            let task = txn.get_task(uuid)?;
            if let Some(task) = task {
                txn.delete_task(uuid)?;
                // record the deleted content in the op so the deletion can be undone
                txn.add_operation(ReplicaOp::Delete {
                    uuid,
                    old_task: task,
                })?;
                txn.commit()?;
                Ok(TaskMap::new())
            } else {
                // deleting a nonexistent task is not an error; nothing to record
                Ok(TaskMap::new())
            }
        }
        SyncOp::Update {
            uuid,
            property,
            value,
            timestamp,
        } => {
            let task = txn.get_task(uuid)?;
            if let Some(mut task) = task {
                // capture the previous value for the undo information in the op
                let old_value = task.get(&property).cloned();
                // Some(value) sets the property; None removes it
                if let Some(ref v) = value {
                    task.insert(property.clone(), v.clone());
                } else {
                    task.remove(&property);
                }
                txn.set_task(uuid, task.clone())?;
                txn.add_operation(ReplicaOp::Update {
                    uuid,
                    property,
                    old_value,
                    value,
                    timestamp,
                })?;
                txn.commit()?;
                Ok(task)
            } else {
                // unlike Create/Delete, updating a nonexistent task is an error
                Err(Error::Database(format!("Task {} does not exist", uuid)).into())
            }
        }
    }
}
|
||||
|
||||
/// Apply an op to the TaskDb's set of tasks (without recording it in the list of operations)
|
||||
pub(super) fn apply_op(txn: &mut dyn StorageTxn, op: &SyncOp) -> anyhow::Result<()> {
|
||||
// TODO: test
|
||||
// TODO: it'd be nice if this was integrated into apply() somehow, but that clones TaskMaps
|
||||
// unnecessariliy
|
||||
match op {
|
||||
SyncOp::Create { uuid } => {
|
||||
// insert if the task does not already exist
|
||||
if !txn.create_task(*uuid)? {
|
||||
return Err(Error::Database(format!("Task {} already exists", uuid)).into());
|
||||
}
|
||||
}
|
||||
SyncOp::Delete { ref uuid } => {
|
||||
if !txn.delete_task(*uuid)? {
|
||||
return Err(Error::Database(format!("Task {} does not exist", uuid)).into());
|
||||
}
|
||||
}
|
||||
SyncOp::Update {
|
||||
ref uuid,
|
||||
ref property,
|
||||
ref value,
|
||||
timestamp: _,
|
||||
} => {
|
||||
// update if this task exists, otherwise ignore
|
||||
if let Some(mut task) = txn.get_task(*uuid)? {
|
||||
match value {
|
||||
Some(ref val) => task.insert(property.to_string(), val.clone()),
|
||||
None => task.remove(property),
|
||||
};
|
||||
txn.set_task(*uuid, task)?;
|
||||
} else {
|
||||
return Err(Error::Database(format!("Task {} does not exist", uuid)).into());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use crate::storage::TaskMap;
    use crate::taskdb::TaskDb;
    use chrono::Utc;
    use pretty_assertions::assert_eq;
    use uuid::Uuid;

    /// Creating a new task records a Create op and leaves an empty TaskMap.
    #[test]
    fn test_apply_create() -> anyhow::Result<()> {
        let mut db = TaskDb::new_inmemory();
        let uuid = Uuid::new_v4();
        let op = SyncOp::Create { uuid };

        {
            let mut txn = db.storage.txn()?;
            let taskmap = apply_and_record(txn.as_mut(), op)?;
            assert_eq!(taskmap.len(), 0);
            txn.commit()?;
        }

        assert_eq!(db.sorted_tasks(), vec![(uuid, vec![]),]);
        assert_eq!(db.operations(), vec![ReplicaOp::Create { uuid }]);
        Ok(())
    }

    /// Creating a task that already exists returns the existing content and records nothing.
    #[test]
    fn test_apply_create_exists() -> anyhow::Result<()> {
        let mut db = TaskDb::new_inmemory();
        let uuid = Uuid::new_v4();
        {
            let mut txn = db.storage.txn()?;
            txn.create_task(uuid)?;
            let mut taskmap = TaskMap::new();
            taskmap.insert("foo".into(), "bar".into());
            txn.set_task(uuid, taskmap)?;
            txn.commit()?;
        }

        let op = SyncOp::Create { uuid };
        {
            let mut txn = db.storage.txn()?;
            let taskmap = apply_and_record(txn.as_mut(), op.clone())?;

            assert_eq!(taskmap.len(), 1);
            assert_eq!(taskmap.get("foo").unwrap(), "bar");

            txn.commit()?;
        }

        // create did not delete the old task..
        assert_eq!(
            db.sorted_tasks(),
            vec![(uuid, vec![("foo".into(), "bar".into())])]
        );
        // create was done "manually" above, and no new op was added
        assert_eq!(db.operations(), vec![]);
        Ok(())
    }

    /// Create followed by Update records both ops and applies the property change.
    #[test]
    fn test_apply_create_update() -> anyhow::Result<()> {
        let mut db = TaskDb::new_inmemory();
        let uuid = Uuid::new_v4();
        let now = Utc::now();
        let op1 = SyncOp::Create { uuid };

        {
            let mut txn = db.storage.txn()?;
            let taskmap = apply_and_record(txn.as_mut(), op1)?;
            assert_eq!(taskmap.len(), 0);
            txn.commit()?;
        }

        let op2 = SyncOp::Update {
            uuid,
            property: String::from("title"),
            value: Some("my task".into()),
            timestamp: now,
        };
        {
            let mut txn = db.storage.txn()?;
            let mut taskmap = apply_and_record(txn.as_mut(), op2)?;
            assert_eq!(
                taskmap.drain().collect::<Vec<(_, _)>>(),
                vec![("title".into(), "my task".into())]
            );
            txn.commit()?;
        }

        assert_eq!(
            db.sorted_tasks(),
            vec![(uuid, vec![("title".into(), "my task".into())])]
        );
        assert_eq!(
            db.operations(),
            vec![
                ReplicaOp::Create { uuid },
                ReplicaOp::Update {
                    uuid,
                    property: "title".into(),
                    old_value: None,
                    value: Some("my task".into()),
                    timestamp: now
                }
            ]
        );

        Ok(())
    }

    /// Updating a property to None removes it and records the old value for undo.
    #[test]
    fn test_apply_create_update_delete_prop() -> anyhow::Result<()> {
        let mut db = TaskDb::new_inmemory();
        let uuid = Uuid::new_v4();
        let now = Utc::now();
        let op1 = SyncOp::Create { uuid };
        {
            let mut txn = db.storage.txn()?;
            let taskmap = apply_and_record(txn.as_mut(), op1)?;
            assert_eq!(taskmap.len(), 0);
            txn.commit()?;
        }

        let op2 = SyncOp::Update {
            uuid,
            property: String::from("title"),
            value: Some("my task".into()),
            timestamp: now,
        };
        {
            let mut txn = db.storage.txn()?;
            let taskmap = apply_and_record(txn.as_mut(), op2)?;
            assert_eq!(taskmap.get("title"), Some(&"my task".to_owned()));
            txn.commit()?;
        }

        let op3 = SyncOp::Update {
            uuid,
            property: String::from("priority"),
            value: Some("H".into()),
            timestamp: now,
        };
        {
            let mut txn = db.storage.txn()?;
            let taskmap = apply_and_record(txn.as_mut(), op3)?;
            assert_eq!(taskmap.get("priority"), Some(&"H".to_owned()));
            txn.commit()?;
        }

        let op4 = SyncOp::Update {
            uuid,
            property: String::from("title"),
            value: None,
            timestamp: now,
        };
        {
            let mut txn = db.storage.txn()?;
            let taskmap = apply_and_record(txn.as_mut(), op4)?;
            assert_eq!(taskmap.get("title"), None);
            assert_eq!(taskmap.get("priority"), Some(&"H".to_owned()));
            txn.commit()?;
        }

        // only "priority" remains after "title" was removed by op4
        assert_eq!(
            db.sorted_tasks(),
            vec![(uuid, vec![("priority".into(), "H".into())])]
        );
        assert_eq!(
            db.operations(),
            vec![
                ReplicaOp::Create { uuid },
                ReplicaOp::Update {
                    uuid,
                    property: "title".into(),
                    old_value: None,
                    value: Some("my task".into()),
                    timestamp: now,
                },
                ReplicaOp::Update {
                    uuid,
                    property: "priority".into(),
                    old_value: None,
                    value: Some("H".into()),
                    timestamp: now,
                },
                ReplicaOp::Update {
                    uuid,
                    property: "title".into(),
                    old_value: Some("my task".into()),
                    value: None,
                    timestamp: now,
                }
            ]
        );

        Ok(())
    }

    /// Updating a nonexistent task is an error.
    #[test]
    fn test_apply_update_does_not_exist() -> anyhow::Result<()> {
        let mut db = TaskDb::new_inmemory();
        let uuid = Uuid::new_v4();
        let op = SyncOp::Update {
            uuid,
            property: String::from("title"),
            value: Some("my task".into()),
            timestamp: Utc::now(),
        };
        {
            let mut txn = db.storage.txn()?;
            assert_eq!(
                apply_and_record(txn.as_mut(), op)
                    .err()
                    .unwrap()
                    .to_string(),
                format!("Task Database Error: Task {} does not exist", uuid)
            );
            txn.commit()?;
        }

        Ok(())
    }

    /// Delete removes the task and records its last content in the Delete op.
    #[test]
    fn test_apply_create_delete() -> anyhow::Result<()> {
        let mut db = TaskDb::new_inmemory();
        let uuid = Uuid::new_v4();
        let now = Utc::now();

        let op1 = SyncOp::Create { uuid };
        {
            // apply_and_record commits internally on success, so no explicit commit here
            let mut txn = db.storage.txn()?;
            let taskmap = apply_and_record(txn.as_mut(), op1)?;
            assert_eq!(taskmap.len(), 0);
        }

        let op2 = SyncOp::Update {
            uuid,
            property: String::from("priority"),
            value: Some("H".into()),
            timestamp: now,
        };
        {
            let mut txn = db.storage.txn()?;
            let taskmap = apply_and_record(txn.as_mut(), op2)?;
            assert_eq!(taskmap.get("priority"), Some(&"H".to_owned()));
            txn.commit()?;
        }

        let op3 = SyncOp::Delete { uuid };
        {
            let mut txn = db.storage.txn()?;
            let taskmap = apply_and_record(txn.as_mut(), op3)?;
            assert_eq!(taskmap.len(), 0);
            txn.commit()?;
        }

        assert_eq!(db.sorted_tasks(), vec![]);
        let mut old_task = TaskMap::new();
        old_task.insert("priority".into(), "H".into());
        assert_eq!(
            db.operations(),
            vec![
                ReplicaOp::Create { uuid },
                ReplicaOp::Update {
                    uuid,
                    property: "priority".into(),
                    old_value: None,
                    value: Some("H".into()),
                    timestamp: now,
                },
                ReplicaOp::Delete { uuid, old_task },
            ]
        );

        Ok(())
    }

    /// Deleting a nonexistent task is not an error and records nothing.
    #[test]
    fn test_apply_delete_not_present() -> anyhow::Result<()> {
        let mut db = TaskDb::new_inmemory();
        let uuid = Uuid::new_v4();
        let op = SyncOp::Delete { uuid };
        {
            let mut txn = db.storage.txn()?;
            let taskmap = apply_and_record(txn.as_mut(), op)?;
            assert_eq!(taskmap.len(), 0);
            txn.commit()?;
        }

        Ok(())
    }
}
|
||||
263
rust/taskchampion/src/taskdb/mod.rs
Normal file
263
rust/taskchampion/src/taskdb/mod.rs
Normal file
@@ -0,0 +1,263 @@
|
||||
use crate::server::{Server, SyncOp};
|
||||
use crate::storage::{ReplicaOp, Storage, TaskMap};
|
||||
use uuid::Uuid;
|
||||
|
||||
mod apply;
|
||||
mod snapshot;
|
||||
mod sync;
|
||||
mod undo;
|
||||
mod working_set;
|
||||
|
||||
/// A TaskDb is the backend for a replica. It manages the storage, operations, synchronization,
/// and so on, and all the invariants that come with it. It leaves the meaning of particular task
/// properties to the replica and task implementations.
pub struct TaskDb {
    // the underlying key-value storage; all access goes through transactions from this
    storage: Box<dyn Storage>,
}
|
||||
|
||||
impl TaskDb {
|
||||
/// Create a new TaskDb with the given backend storage
|
||||
pub fn new(storage: Box<dyn Storage>) -> TaskDb {
|
||||
TaskDb { storage }
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub fn new_inmemory() -> TaskDb {
|
||||
#[cfg(test)]
|
||||
use crate::storage::InMemoryStorage;
|
||||
|
||||
TaskDb::new(Box::new(InMemoryStorage::new()))
|
||||
}
|
||||
|
||||
/// Apply an operation to the TaskDb. This will update the set of tasks and add a ReplicaOp to
|
||||
/// the set of operations in the TaskDb, and return the TaskMap containing the resulting task's
|
||||
/// properties (or an empty TaskMap for deletion).
|
||||
///
|
||||
/// Aside from synchronization operations, this is the only way to modify the TaskDb. In cases
|
||||
/// where an operation does not make sense, this function will do nothing and return an error
|
||||
/// (but leave the TaskDb in a consistent state).
|
||||
pub fn apply(&mut self, op: SyncOp) -> anyhow::Result<TaskMap> {
|
||||
let mut txn = self.storage.txn()?;
|
||||
apply::apply_and_record(txn.as_mut(), op)
|
||||
}
|
||||
|
||||
/// Add an UndoPoint operation to the list of replica operations.
|
||||
pub fn add_undo_point(&mut self) -> anyhow::Result<()> {
|
||||
let mut txn = self.storage.txn()?;
|
||||
txn.add_operation(ReplicaOp::UndoPoint)?;
|
||||
txn.commit()
|
||||
}
|
||||
|
||||
/// Get all tasks.
|
||||
pub fn all_tasks(&mut self) -> anyhow::Result<Vec<(Uuid, TaskMap)>> {
|
||||
let mut txn = self.storage.txn()?;
|
||||
txn.all_tasks()
|
||||
}
|
||||
|
||||
/// Get the UUIDs of all tasks
|
||||
pub fn all_task_uuids(&mut self) -> anyhow::Result<Vec<Uuid>> {
|
||||
let mut txn = self.storage.txn()?;
|
||||
txn.all_task_uuids()
|
||||
}
|
||||
|
||||
/// Get the working set
|
||||
pub fn working_set(&mut self) -> anyhow::Result<Vec<Option<Uuid>>> {
|
||||
let mut txn = self.storage.txn()?;
|
||||
txn.get_working_set()
|
||||
}
|
||||
|
||||
/// Get a single task, by uuid.
|
||||
pub fn get_task(&mut self, uuid: Uuid) -> anyhow::Result<Option<TaskMap>> {
|
||||
let mut txn = self.storage.txn()?;
|
||||
txn.get_task(uuid)
|
||||
}
|
||||
|
||||
/// Rebuild the working set using a function to identify tasks that should be in the set. This
|
||||
/// renumbers the existing working-set tasks to eliminate gaps, and also adds any tasks that
|
||||
/// are not already in the working set but should be. The rebuild occurs in a single
|
||||
/// trasnsaction against the storage backend.
|
||||
pub fn rebuild_working_set<F>(
|
||||
&mut self,
|
||||
in_working_set: F,
|
||||
renumber: bool,
|
||||
) -> anyhow::Result<()>
|
||||
where
|
||||
F: Fn(&TaskMap) -> bool,
|
||||
{
|
||||
working_set::rebuild(self.storage.txn()?.as_mut(), in_working_set, renumber)
|
||||
}
|
||||
|
||||
/// Add the given uuid to the working set and return its index; if it is already in the working
|
||||
/// set, its index is returned. This does *not* renumber any existing tasks.
|
||||
pub fn add_to_working_set(&mut self, uuid: Uuid) -> anyhow::Result<usize> {
|
||||
let mut txn = self.storage.txn()?;
|
||||
// search for an existing entry for this task..
|
||||
for (i, elt) in txn.get_working_set()?.iter().enumerate() {
|
||||
if *elt == Some(uuid) {
|
||||
// (note that this drops the transaction with no changes made)
|
||||
return Ok(i);
|
||||
}
|
||||
}
|
||||
// and if not found, add one
|
||||
let i = txn.add_to_working_set(uuid)?;
|
||||
txn.commit()?;
|
||||
Ok(i)
|
||||
}
|
||||
|
||||
/// Sync to the given server, pulling remote changes and pushing local changes.
|
||||
///
|
||||
/// If `avoid_snapshots` is true, the sync operations produces a snapshot only when the server
|
||||
/// indicate it is urgent (snapshot urgency "high"). This allows time for other replicas to
|
||||
/// create a snapshot before this one does.
|
||||
///
|
||||
/// Set this to true on systems more constrained in CPU, memory, or bandwidth than a typical desktop
|
||||
/// system
|
||||
pub fn sync(
|
||||
&mut self,
|
||||
server: &mut Box<dyn Server>,
|
||||
avoid_snapshots: bool,
|
||||
) -> anyhow::Result<()> {
|
||||
let mut txn = self.storage.txn()?;
|
||||
sync::sync(server, txn.as_mut(), avoid_snapshots)
|
||||
}
|
||||
|
||||
/// Undo local operations until the most recent UndoPoint, returning false if there are no
|
||||
/// local operations to undo.
|
||||
pub fn undo(&mut self) -> anyhow::Result<bool> {
|
||||
let mut txn = self.storage.txn()?;
|
||||
undo::undo(txn.as_mut())
|
||||
}
|
||||
|
||||
/// Get the number of un-synchronized operations in storage.
|
||||
pub fn num_operations(&mut self) -> anyhow::Result<usize> {
|
||||
let mut txn = self.storage.txn().unwrap();
|
||||
txn.num_operations()
|
||||
}
|
||||
|
||||
// functions for supporting tests
|
||||
|
||||
#[cfg(test)]
|
||||
pub(crate) fn sorted_tasks(&mut self) -> Vec<(Uuid, Vec<(String, String)>)> {
|
||||
let mut res: Vec<(Uuid, Vec<(String, String)>)> = self
|
||||
.all_tasks()
|
||||
.unwrap()
|
||||
.iter()
|
||||
.map(|(u, t)| {
|
||||
let mut t = t
|
||||
.iter()
|
||||
.map(|(p, v)| (p.clone(), v.clone()))
|
||||
.collect::<Vec<(String, String)>>();
|
||||
t.sort();
|
||||
(u.clone(), t)
|
||||
})
|
||||
.collect();
|
||||
res.sort();
|
||||
res
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub(crate) fn operations(&mut self) -> Vec<ReplicaOp> {
|
||||
let mut txn = self.storage.txn().unwrap();
|
||||
txn.operations()
|
||||
.unwrap()
|
||||
.iter()
|
||||
.map(|o| o.clone())
|
||||
.collect()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use crate::server::test::TestServer;
    use crate::storage::{InMemoryStorage, ReplicaOp};
    use chrono::Utc;
    use pretty_assertions::assert_eq;
    use proptest::prelude::*;
    use uuid::Uuid;

    #[test]
    fn test_apply() {
        // this verifies that the operation is both applied and included in the list of
        // operations; more detailed tests are in the `apply` module.
        let mut db = TaskDb::new_inmemory();
        let uuid = Uuid::new_v4();
        let op = SyncOp::Create { uuid };
        db.apply(op.clone()).unwrap();

        assert_eq!(db.sorted_tasks(), vec![(uuid, vec![]),]);
        assert_eq!(db.operations(), vec![ReplicaOp::Create { uuid }]);
    }

    #[test]
    fn test_add_undo_point() {
        let mut db = TaskDb::new_inmemory();
        db.add_undo_point().unwrap();
        assert_eq!(db.operations(), vec![ReplicaOp::UndoPoint]);
    }

    fn newdb() -> TaskDb {
        TaskDb::new(Box::new(InMemoryStorage::new()))
    }

    /// A single step in a generated test scenario: apply an op, or sync.
    #[derive(Debug)]
    enum Action {
        Op(SyncOp),
        Sync,
    }

    fn action_sequence_strategy() -> impl Strategy<Value = Vec<(Action, u8)>> {
        // Create, Update, Delete, or Sync on client 1, 2, .., followed by a round of syncs
        "([CUDS][123])*S1S2S3S1S2".prop_map(|seq| {
            let uuid = Uuid::parse_str("83a2f9ef-f455-4195-b92e-a54c161eebfc").unwrap();
            seq.as_bytes()
                .chunks(2)
                .map(|action_on| {
                    let action = match action_on[0] {
                        b'C' => Action::Op(SyncOp::Create { uuid }),
                        b'U' => Action::Op(SyncOp::Update {
                            uuid,
                            property: "title".into(),
                            value: Some("foo".into()),
                            timestamp: Utc::now(),
                        }),
                        b'D' => Action::Op(SyncOp::Delete { uuid }),
                        b'S' => Action::Sync,
                        _ => unreachable!(),
                    };
                    // the second character selects the zero-based replica index
                    let on_db = action_on[1] - b'1';
                    (action, on_db)
                })
                .collect::<Vec<(Action, u8)>>()
        })
    }

    proptest! {
        #[test]
        // check that various sequences of operations on multiple db's do not get the db's into an
        // incompatible state. The main concern here is that there might be a sequence of create
        // and delete operations that results in a task existing in one TaskDb but not existing in
        // another. So, the generated sequences focus on a single task UUID.
        fn transform_sequences_of_operations(action_sequence in action_sequence_strategy()) {
            let mut server: Box<dyn Server> = Box::new(TestServer::new());
            let mut dbs = [newdb(), newdb(), newdb()];

            for (action, db) in action_sequence {
                println!("{:?} on db {}", action, db);

                let db = &mut dbs[db as usize];
                match action {
                    Action::Op(op) => {
                        if let Err(e) = db.apply(op) {
                            println!(" {:?} (ignored)", e);
                        }
                    },
                    Action::Sync => db.sync(&mut server, false).unwrap(),
                }
            }

            // all three replicas must have converged to the same task set
            // (previously compared dbs[0] with itself, which was a tautology)
            assert_eq!(dbs[0].sorted_tasks(), dbs[1].sorted_tasks());
            assert_eq!(dbs[1].sorted_tasks(), dbs[2].sorted_tasks());
        }
    }
}
|
||||
178
rust/taskchampion/src/taskdb/snapshot.rs
Normal file
178
rust/taskchampion/src/taskdb/snapshot.rs
Normal file
@@ -0,0 +1,178 @@
|
||||
use crate::storage::{StorageTxn, TaskMap, VersionId};
|
||||
use flate2::{read::ZlibDecoder, write::ZlibEncoder, Compression};
|
||||
use serde::de::{Deserialize, Deserializer, MapAccess, Visitor};
|
||||
use serde::ser::{Serialize, SerializeMap, Serializer};
|
||||
use std::fmt;
|
||||
use uuid::Uuid;
|
||||
|
||||
/// A newtype to wrap the result of [`crate::storage::StorageTxn::all_tasks`]
pub(super) struct SnapshotTasks(Vec<(Uuid, TaskMap)>);

// Serialize as a single JSON object mapping each task's UUID to its TaskMap, rather than as
// a sequence of pairs; this is the snapshot wire format.
impl Serialize for SnapshotTasks {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        let mut map = serializer.serialize_map(Some(self.0.len()))?;
        for (k, v) in &self.0 {
            map.serialize_entry(k, v)?;
        }
        map.end()
    }
}

/// Visitor that rebuilds a [`SnapshotTasks`] from its serialized map form.
struct TaskDbVisitor;

impl<'de> Visitor<'de> for TaskDbVisitor {
    type Value = SnapshotTasks;

    fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
        formatter.write_str("a map representing a task snapshot")
    }

    fn visit_map<M>(self, mut access: M) -> Result<Self::Value, M::Error>
    where
        M: MapAccess<'de>,
    {
        // size_hint may be unavailable depending on the format; 0 just skips preallocation
        let mut map = SnapshotTasks(Vec::with_capacity(access.size_hint().unwrap_or(0)));

        while let Some((key, value)) = access.next_entry()? {
            map.0.push((key, value));
        }

        Ok(map)
    }
}

impl<'de> Deserialize<'de> for SnapshotTasks {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        deserializer.deserialize_map(TaskDbVisitor)
    }
}
|
||||
|
||||
impl SnapshotTasks {
|
||||
pub(super) fn encode(&self) -> anyhow::Result<Vec<u8>> {
|
||||
let mut encoder = ZlibEncoder::new(Vec::new(), Compression::default());
|
||||
serde_json::to_writer(&mut encoder, &self)?;
|
||||
Ok(encoder.finish()?)
|
||||
}
|
||||
|
||||
pub(super) fn decode(snapshot: &[u8]) -> anyhow::Result<Self> {
|
||||
let decoder = ZlibDecoder::new(snapshot);
|
||||
Ok(serde_json::from_reader(decoder)?)
|
||||
}
|
||||
|
||||
pub(super) fn into_inner(self) -> Vec<(Uuid, TaskMap)> {
|
||||
self.0
|
||||
}
|
||||
}
|
||||
|
||||
/// Generate a snapshot (compressed, unencrypted) for the current state of the taskdb in the given
/// storage.
pub(super) fn make_snapshot(txn: &mut dyn StorageTxn) -> anyhow::Result<Vec<u8>> {
    // snapshot covers all tasks; operations and working set are not included
    let all_tasks = SnapshotTasks(txn.all_tasks()?);
    all_tasks.encode()
}
|
||||
|
||||
/// Apply the given snapshot (compressed, unencrypted) to the taskdb's storage.
|
||||
pub(super) fn apply_snapshot(
|
||||
txn: &mut dyn StorageTxn,
|
||||
version: VersionId,
|
||||
snapshot: &[u8],
|
||||
) -> anyhow::Result<()> {
|
||||
let all_tasks = SnapshotTasks::decode(snapshot)?;
|
||||
|
||||
// double-check emptiness
|
||||
if !txn.is_empty()? {
|
||||
anyhow::bail!("Cannot apply snapshot to a non-empty task database");
|
||||
}
|
||||
|
||||
for (uuid, task) in all_tasks.into_inner().drain(..) {
|
||||
txn.set_task(uuid, task)?;
|
||||
}
|
||||
txn.set_base_version(version)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod test {
    use super::*;
    use crate::storage::{InMemoryStorage, Storage, TaskMap};
    use pretty_assertions::assert_eq;

    // An empty snapshot serializes to an empty JSON object.
    #[test]
    fn test_serialize_empty() -> anyhow::Result<()> {
        let empty = SnapshotTasks(vec![]);
        assert_eq!(serde_json::to_vec(&empty)?, b"{}".to_owned());
        Ok(())
    }

    // Tasks serialize as a JSON object keyed by uuid.
    #[test]
    fn test_serialize_tasks() -> anyhow::Result<()> {
        let u = Uuid::new_v4();
        let m: TaskMap = vec![("description".to_owned(), "my task".to_owned())]
            .drain(..)
            .collect();
        let all_tasks = SnapshotTasks(vec![(u, m)]);
        assert_eq!(
            serde_json::to_vec(&all_tasks)?,
            format!("{{\"{}\":{{\"description\":\"my task\"}}}}", u).into_bytes(),
        );
        Ok(())
    }

    // make_snapshot followed by apply_snapshot on fresh storage reproduces the tasks and
    // sets the base version, without carrying over operations or the working set.
    #[test]
    fn test_round_trip() -> anyhow::Result<()> {
        let mut storage = InMemoryStorage::new();
        let version = Uuid::new_v4();

        let task1 = (
            Uuid::new_v4(),
            vec![("description".to_owned(), "one".to_owned())]
                .drain(..)
                .collect::<TaskMap>(),
        );
        let task2 = (
            Uuid::new_v4(),
            vec![("description".to_owned(), "two".to_owned())]
                .drain(..)
                .collect::<TaskMap>(),
        );

        {
            let mut txn = storage.txn()?;
            txn.set_task(task1.0, task1.1.clone())?;
            txn.set_task(task2.0, task2.1.clone())?;
            txn.commit()?;
        }

        let snap = {
            let mut txn = storage.txn()?;
            make_snapshot(txn.as_mut())?
        };

        // apply that snapshot to a fresh storage instance
        let mut storage = InMemoryStorage::new();
        {
            let mut txn = storage.txn()?;
            apply_snapshot(txn.as_mut(), version, &snap)?;
            txn.commit()?
        }

        {
            let mut txn = storage.txn()?;
            assert_eq!(txn.get_task(task1.0)?, Some(task1.1));
            assert_eq!(txn.get_task(task2.0)?, Some(task2.1));
            assert_eq!(txn.all_tasks()?.len(), 2);
            assert_eq!(txn.base_version()?, version);
            assert_eq!(txn.operations()?.len(), 0);
            assert_eq!(txn.get_working_set()?.len(), 1);
        }

        Ok(())
    }
}
|
||||
385
rust/taskchampion/src/taskdb/sync.rs
Normal file
385
rust/taskchampion/src/taskdb/sync.rs
Normal file
@@ -0,0 +1,385 @@
|
||||
use super::{apply, snapshot};
|
||||
use crate::server::{AddVersionResult, GetVersionResult, Server, SnapshotUrgency, SyncOp};
|
||||
use crate::storage::StorageTxn;
|
||||
use crate::Error;
|
||||
use log::{info, trace, warn};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::str;
|
||||
|
||||
/// The payload of one version as exchanged with the server: the sequence of sync operations
/// that version contains (JSON-serialized into the history segment).
#[derive(Serialize, Deserialize, Debug)]
struct Version {
    operations: Vec<SyncOp>,
}
|
||||
|
||||
/// Sync to the given server, pulling remote changes and pushing local changes.
|
||||
pub(super) fn sync(
|
||||
server: &mut Box<dyn Server>,
|
||||
txn: &mut dyn StorageTxn,
|
||||
avoid_snapshots: bool,
|
||||
) -> anyhow::Result<()> {
|
||||
// if this taskdb is entirely empty, then start by getting and applying a snapshot
|
||||
if txn.is_empty()? {
|
||||
trace!("storage is empty; attempting to apply a snapshot");
|
||||
if let Some((version, snap)) = server.get_snapshot()? {
|
||||
snapshot::apply_snapshot(txn, version, snap.as_ref())?;
|
||||
trace!("applied snapshot for version {}", version);
|
||||
}
|
||||
}
|
||||
|
||||
// retry synchronizing until the server accepts our version (this allows for races between
|
||||
// replicas trying to sync to the same server). If the server insists on the same base
|
||||
// version twice, then we have diverged.
|
||||
let mut requested_parent_version_id = None;
|
||||
loop {
|
||||
trace!("beginning sync outer loop");
|
||||
let mut base_version_id = txn.base_version()?;
|
||||
|
||||
let mut local_ops: Vec<SyncOp> = txn
|
||||
.operations()?
|
||||
.drain(..)
|
||||
.filter_map(|op| op.into_sync())
|
||||
.collect();
|
||||
|
||||
// first pull changes and "rebase" on top of them
|
||||
loop {
|
||||
trace!("beginning sync inner loop");
|
||||
if let GetVersionResult::Version {
|
||||
version_id,
|
||||
history_segment,
|
||||
..
|
||||
} = server.get_child_version(base_version_id)?
|
||||
{
|
||||
let version_str = str::from_utf8(&history_segment).unwrap();
|
||||
let version: Version = serde_json::from_str(version_str).unwrap();
|
||||
|
||||
// apply this verison and update base_version in storage
|
||||
info!("applying version {:?} from server", version_id);
|
||||
apply_version(txn, &mut local_ops, version)?;
|
||||
txn.set_base_version(version_id)?;
|
||||
base_version_id = version_id;
|
||||
} else {
|
||||
info!("no child versions of {:?}", base_version_id);
|
||||
// at the moment, no more child versions, so we can try adding our own
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if local_ops.is_empty() {
|
||||
info!("no changes to push to server");
|
||||
// nothing to sync back to the server..
|
||||
break;
|
||||
}
|
||||
|
||||
trace!("sending {} operations to the server", local_ops.len());
|
||||
|
||||
// now make a version of our local changes and push those
|
||||
let new_version = Version {
|
||||
operations: local_ops,
|
||||
};
|
||||
let history_segment = serde_json::to_string(&new_version).unwrap().into();
|
||||
info!("sending new version to server");
|
||||
let (res, snapshot_urgency) = server.add_version(base_version_id, history_segment)?;
|
||||
match res {
|
||||
AddVersionResult::Ok(new_version_id) => {
|
||||
info!("version {:?} received by server", new_version_id);
|
||||
txn.set_base_version(new_version_id)?;
|
||||
|
||||
// make a snapshot if the server indicates it is urgent enough
|
||||
let base_urgency = if avoid_snapshots {
|
||||
SnapshotUrgency::High
|
||||
} else {
|
||||
SnapshotUrgency::Low
|
||||
};
|
||||
if snapshot_urgency >= base_urgency {
|
||||
let snapshot = snapshot::make_snapshot(txn)?;
|
||||
server.add_snapshot(new_version_id, snapshot)?;
|
||||
}
|
||||
|
||||
break;
|
||||
}
|
||||
AddVersionResult::ExpectedParentVersion(parent_version_id) => {
|
||||
info!(
|
||||
"new version rejected; must be based on {:?}",
|
||||
parent_version_id
|
||||
);
|
||||
if let Some(requested) = requested_parent_version_id {
|
||||
if parent_version_id == requested {
|
||||
return Err(Error::OutOfSync.into());
|
||||
}
|
||||
}
|
||||
requested_parent_version_id = Some(parent_version_id);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
txn.set_operations(vec![])?;
|
||||
txn.commit()?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Rebase `local_ops` onto a version of operations received from the server, applying the
/// transformed server operations to storage and replacing `local_ops` with their transformed
/// equivalents.
fn apply_version(
    txn: &mut dyn StorageTxn,
    local_ops: &mut Vec<SyncOp>,
    mut version: Version,
) -> anyhow::Result<()> {
    // The situation here is that the server has already applied all server operations, and we
    // have already applied all local operations, so states have diverged by several
    // operations. We need to figure out what operations to apply locally and on the server in
    // order to return to the same state.
    //
    // Operational transforms provide this on an operation-by-operation basis. To break this
    // down, we treat each server operation individually, in order. For each such operation,
    // we start in this state:
    //
    //
    // base state-*
    //           / \-server op
    //          *   *
    // local   / \ /
    // ops    *   *
    //       / \ / new
    //      *   * local
    // local / \ / ops
    // state-*   *
    //  new-\   /
    // server op *-new local state
    //
    // This is slightly complicated by the fact that the transform function can return None,
    // indicating no operation is required. If this happens for a local op, we can just omit
    // it. If it happens for server op, then we must copy the remaining local ops.
    for server_op in version.operations.drain(..) {
        trace!(
            "rebasing local operations onto server operation {:?}",
            server_op
        );
        let mut new_local_ops = Vec::with_capacity(local_ops.len());
        // the server op being threaded through the local ops; None once consumed
        let mut svr_op = Some(server_op);
        for local_op in local_ops.drain(..) {
            if let Some(o) = svr_op {
                let (new_server_op, new_local_op) = SyncOp::transform(o, local_op.clone());
                trace!("local operation {:?} -> {:?}", local_op, new_local_op);
                svr_op = new_server_op;
                if let Some(o) = new_local_op {
                    new_local_ops.push(o);
                }
            } else {
                // server op was consumed by an earlier transform; remaining local ops
                // pass through unchanged
                trace!(
                    "local operation {:?} unchanged (server operation consumed)",
                    local_op
                );
                new_local_ops.push(local_op);
            }
        }
        // apply whatever remains of the server op to local storage; an invalid op (e.g.
        // an update to a task deleted locally) is logged and skipped rather than failing
        // the whole sync
        if let Some(o) = svr_op {
            if let Err(e) = apply::apply_op(txn, &o) {
                warn!("Invalid operation when syncing: {} (ignored)", e);
            }
        }
        *local_ops = new_local_ops;
    }
    Ok(())
}
|
||||
|
||||
#[cfg(test)]
mod test {
    use super::*;
    use crate::server::{test::TestServer, SyncOp};
    use crate::storage::InMemoryStorage;
    use crate::taskdb::{snapshot::SnapshotTasks, TaskDb};
    use chrono::Utc;
    use pretty_assertions::assert_eq;
    use uuid::Uuid;

    /// Create a fresh TaskDb backed by in-memory storage.
    fn newdb() -> TaskDb {
        TaskDb::new(Box::new(InMemoryStorage::new()))
    }

    /// Two replicas make independent changes — first to different tasks, then to the
    /// same task — and converge to identical state after syncing through one server.
    #[test]
    fn test_sync() -> anyhow::Result<()> {
        let mut server: Box<dyn Server> = TestServer::new().server();

        let mut db1 = newdb();
        sync(&mut server, db1.storage.txn()?.as_mut(), false).unwrap();

        let mut db2 = newdb();
        sync(&mut server, db2.storage.txn()?.as_mut(), false).unwrap();

        // make some changes in parallel to db1 and db2..
        let uuid1 = Uuid::new_v4();
        db1.apply(SyncOp::Create { uuid: uuid1 }).unwrap();
        db1.apply(SyncOp::Update {
            uuid: uuid1,
            property: "title".into(),
            value: Some("my first task".into()),
            timestamp: Utc::now(),
        })
        .unwrap();

        let uuid2 = Uuid::new_v4();
        db2.apply(SyncOp::Create { uuid: uuid2 }).unwrap();
        db2.apply(SyncOp::Update {
            uuid: uuid2,
            property: "title".into(),
            value: Some("my second task".into()),
            timestamp: Utc::now(),
        })
        .unwrap();

        // and synchronize those around
        // (sync three times so each replica has seen the other's version)
        sync(&mut server, db1.storage.txn()?.as_mut(), false).unwrap();
        sync(&mut server, db2.storage.txn()?.as_mut(), false).unwrap();
        sync(&mut server, db1.storage.txn()?.as_mut(), false).unwrap();
        assert_eq!(db1.sorted_tasks(), db2.sorted_tasks());

        // now make updates to the same task on both sides
        db1.apply(SyncOp::Update {
            uuid: uuid2,
            property: "priority".into(),
            value: Some("H".into()),
            timestamp: Utc::now(),
        })
        .unwrap();
        db2.apply(SyncOp::Update {
            uuid: uuid2,
            property: "project".into(),
            value: Some("personal".into()),
            timestamp: Utc::now(),
        })
        .unwrap();

        // and synchronize those around
        sync(&mut server, db1.storage.txn()?.as_mut(), false).unwrap();
        sync(&mut server, db2.storage.txn()?.as_mut(), false).unwrap();
        sync(&mut server, db1.storage.txn()?.as_mut(), false).unwrap();
        assert_eq!(db1.sorted_tasks(), db2.sorted_tasks());

        Ok(())
    }

    /// One replica deletes and re-creates a task while the other updates a property of
    /// the same task; the replicas still converge after syncing.
    #[test]
    fn test_sync_create_delete() -> anyhow::Result<()> {
        let mut server: Box<dyn Server> = TestServer::new().server();

        let mut db1 = newdb();
        sync(&mut server, db1.storage.txn()?.as_mut(), false).unwrap();

        let mut db2 = newdb();
        sync(&mut server, db2.storage.txn()?.as_mut(), false).unwrap();

        // create and update a task..
        let uuid = Uuid::new_v4();
        db1.apply(SyncOp::Create { uuid }).unwrap();
        db1.apply(SyncOp::Update {
            uuid,
            property: "title".into(),
            value: Some("my first task".into()),
            timestamp: Utc::now(),
        })
        .unwrap();

        // and synchronize those around
        sync(&mut server, db1.storage.txn()?.as_mut(), false).unwrap();
        sync(&mut server, db2.storage.txn()?.as_mut(), false).unwrap();
        sync(&mut server, db1.storage.txn()?.as_mut(), false).unwrap();
        assert_eq!(db1.sorted_tasks(), db2.sorted_tasks());

        // delete and re-create the task on db1
        db1.apply(SyncOp::Delete { uuid }).unwrap();
        db1.apply(SyncOp::Create { uuid }).unwrap();
        db1.apply(SyncOp::Update {
            uuid,
            property: "title".into(),
            value: Some("my second task".into()),
            timestamp: Utc::now(),
        })
        .unwrap();

        // and on db2, update a property of the task
        db2.apply(SyncOp::Update {
            uuid,
            property: "project".into(),
            value: Some("personal".into()),
            timestamp: Utc::now(),
        })
        .unwrap();

        sync(&mut server, db1.storage.txn()?.as_mut(), false).unwrap();
        sync(&mut server, db2.storage.txn()?.as_mut(), false).unwrap();
        sync(&mut server, db1.storage.txn()?.as_mut(), false).unwrap();
        assert_eq!(db1.sorted_tasks(), db2.sorted_tasks());

        Ok(())
    }

    /// When the server signals high snapshot urgency, sync uploads a snapshot, and a
    /// brand-new replica can then initialize from that snapshot even after the first
    /// version has been deleted from the server.
    #[test]
    fn test_sync_add_snapshot_start_with_snapshot() -> anyhow::Result<()> {
        let mut test_server = TestServer::new();

        let mut server: Box<dyn Server> = test_server.server();
        let mut db1 = newdb();

        let uuid = Uuid::new_v4();
        db1.apply(SyncOp::Create { uuid })?;
        db1.apply(SyncOp::Update {
            uuid,
            property: "title".into(),
            value: Some("my first task".into()),
            timestamp: Utc::now(),
        })?;

        test_server.set_snapshot_urgency(SnapshotUrgency::High);
        sync(&mut server, db1.storage.txn()?.as_mut(), false)?;

        // assert that a snapshot was added
        let base_version = db1.storage.txn()?.base_version()?;
        let (v, s) = test_server
            .snapshot()
            .ok_or_else(|| anyhow::anyhow!("no snapshot"))?;
        assert_eq!(v, base_version);

        let tasks = SnapshotTasks::decode(&s)?.into_inner();
        assert_eq!(tasks[0].0, uuid);

        // update the taskdb and sync again
        db1.apply(SyncOp::Update {
            uuid,
            property: "title".into(),
            value: Some("my first task, updated".into()),
            timestamp: Utc::now(),
        })?;
        sync(&mut server, db1.storage.txn()?.as_mut(), false)?;

        // delete the first version, so that db2 *must* initialize from
        // the snapshot
        test_server.delete_version(Uuid::nil());

        // sync to a new DB and check that we got the expected results
        let mut db2 = newdb();
        sync(&mut server, db2.storage.txn()?.as_mut(), false)?;

        let task = db2.get_task(uuid)?.unwrap();
        assert_eq!(task.get("title").unwrap(), "my first task, updated");

        Ok(())
    }

    /// With `avoid_snapshots` set, a low-urgency snapshot request is ignored and no
    /// snapshot is uploaded.
    #[test]
    fn test_sync_avoids_snapshot() -> anyhow::Result<()> {
        let test_server = TestServer::new();

        let mut server: Box<dyn Server> = test_server.server();
        let mut db1 = newdb();

        let uuid = Uuid::new_v4();
        db1.apply(SyncOp::Create { uuid }).unwrap();

        test_server.set_snapshot_urgency(SnapshotUrgency::Low);
        sync(&mut server, db1.storage.txn()?.as_mut(), true).unwrap();

        // assert that a snapshot was not added, because we indicated
        // we wanted to avoid snapshots and it was only low urgency
        assert_eq!(test_server.snapshot(), None);

        Ok(())
    }
}
|
||||
117
rust/taskchampion/src/taskdb/undo.rs
Normal file
117
rust/taskchampion/src/taskdb/undo.rs
Normal file
@@ -0,0 +1,117 @@
|
||||
use super::apply;
|
||||
use crate::storage::{ReplicaOp, StorageTxn};
|
||||
use log::{debug, trace};
|
||||
|
||||
/// Undo local operations until an UndoPoint.
|
||||
pub(super) fn undo(txn: &mut dyn StorageTxn) -> anyhow::Result<bool> {
|
||||
let mut applied = false;
|
||||
let mut popped = false;
|
||||
let mut local_ops = txn.operations()?;
|
||||
|
||||
while let Some(op) = local_ops.pop() {
|
||||
popped = true;
|
||||
if op == ReplicaOp::UndoPoint {
|
||||
break;
|
||||
}
|
||||
debug!("Reversing operation {:?}", op);
|
||||
let rev_ops = op.reverse_ops();
|
||||
for op in rev_ops {
|
||||
trace!("Applying reversed operation {:?}", op);
|
||||
apply::apply_op(txn, &op)?;
|
||||
applied = true;
|
||||
}
|
||||
}
|
||||
|
||||
if popped {
|
||||
txn.set_operations(local_ops)?;
|
||||
txn.commit()?;
|
||||
}
|
||||
|
||||
Ok(applied)
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use crate::server::SyncOp;
    use crate::taskdb::TaskDb;
    use chrono::Utc;
    use pretty_assertions::assert_eq;
    use uuid::Uuid;

    /// Exercise undo(): the first undo reverses everything back to the undo point, a
    /// second undo reverses the remaining operations (empty db), and a third undo on
    /// an empty operation list returns false.
    #[test]
    fn test_apply_create() -> anyhow::Result<()> {
        let mut db = TaskDb::new_inmemory();
        let uuid1 = Uuid::new_v4();
        let uuid2 = Uuid::new_v4();
        let timestamp = Utc::now();

        // apply a few ops, capture the DB state, make an undo point, and then apply a few more
        // ops.
        db.apply(SyncOp::Create { uuid: uuid1 })?;
        db.apply(SyncOp::Update {
            uuid: uuid1,
            property: "prop".into(),
            value: Some("v1".into()),
            timestamp,
        })?;
        db.apply(SyncOp::Create { uuid: uuid2 })?;
        db.apply(SyncOp::Update {
            uuid: uuid2,
            property: "prop".into(),
            value: Some("v2".into()),
            timestamp,
        })?;
        db.apply(SyncOp::Update {
            uuid: uuid2,
            property: "prop2".into(),
            value: Some("v3".into()),
            timestamp,
        })?;

        let db_state = db.sorted_tasks();

        db.add_undo_point()?;
        db.apply(SyncOp::Delete { uuid: uuid1 })?;
        db.apply(SyncOp::Update {
            uuid: uuid2,
            property: "prop".into(),
            value: None,
            timestamp,
        })?;
        db.apply(SyncOp::Update {
            uuid: uuid2,
            property: "prop2".into(),
            value: Some("new-value".into()),
            timestamp,
        })?;

        // 5 ops before the undo point + the UndoPoint + 3 ops after = 9
        assert_eq!(db.operations().len(), 9);

        {
            let mut txn = db.storage.txn()?;
            assert!(undo(txn.as_mut())?);
        }

        // undo took db back to the snapshot
        assert_eq!(db.operations().len(), 5);
        assert_eq!(db.sorted_tasks(), db_state);

        {
            let mut txn = db.storage.txn()?;
            assert!(undo(txn.as_mut())?);
        }

        // empty db
        assert_eq!(db.operations().len(), 0);
        assert_eq!(db.sorted_tasks(), vec![]);

        {
            let mut txn = db.storage.txn()?;
            // nothing left to undo, so undo() returns false
            assert!(!undo(txn.as_mut())?);
        }

        Ok(())
    }
}
|
||||
167
rust/taskchampion/src/taskdb/working_set.rs
Normal file
167
rust/taskchampion/src/taskdb/working_set.rs
Normal file
@@ -0,0 +1,167 @@
|
||||
use crate::storage::{StorageTxn, TaskMap};
|
||||
use std::collections::HashSet;
|
||||
|
||||
/// Rebuild the working set using a function to identify tasks that should be in the set. This
|
||||
/// renumbers the existing working-set tasks to eliminate gaps, and also adds any tasks that
|
||||
/// are not already in the working set but should be. The rebuild occurs in a single
|
||||
/// trasnsaction against the storage backend.
|
||||
pub fn rebuild<F>(txn: &mut dyn StorageTxn, in_working_set: F, renumber: bool) -> anyhow::Result<()>
|
||||
where
|
||||
F: Fn(&TaskMap) -> bool,
|
||||
{
|
||||
let mut new_ws = vec![None]; // index 0 is always None
|
||||
let mut seen = HashSet::new();
|
||||
|
||||
// The goal here is for existing working-set items to be "compressed' down to index 1, so
|
||||
// we begin by scanning the current working set and inserting any tasks that should still
|
||||
// be in the set into new_ws, implicitly dropping any tasks that are no longer in the
|
||||
// working set.
|
||||
for elt in txn.get_working_set()?.drain(1..) {
|
||||
if let Some(uuid) = elt {
|
||||
if let Some(task) = txn.get_task(uuid)? {
|
||||
if in_working_set(&task) {
|
||||
new_ws.push(Some(uuid));
|
||||
seen.insert(uuid);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// if we are not renumbering, then insert a blank working-set entry here
|
||||
if !renumber {
|
||||
new_ws.push(None);
|
||||
}
|
||||
}
|
||||
|
||||
// if renumbering, clear the working set and re-add
|
||||
if renumber {
|
||||
txn.clear_working_set()?;
|
||||
for elt in new_ws.drain(1..new_ws.len()).flatten() {
|
||||
txn.add_to_working_set(elt)?;
|
||||
}
|
||||
} else {
|
||||
// ..otherwise, just clear the None items determined above from the working set
|
||||
for (i, elt) in new_ws.iter().enumerate().skip(1) {
|
||||
if elt.is_none() {
|
||||
txn.set_working_set_item(i, None)?;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Now go hunting for tasks that should be in this list but are not, adding them at the
|
||||
// end of the list, whether renumbering or not
|
||||
for (uuid, task) in txn.all_tasks()? {
|
||||
if !seen.contains(&uuid) && in_working_set(&task) {
|
||||
txn.add_to_working_set(uuid)?;
|
||||
}
|
||||
}
|
||||
|
||||
txn.commit()?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod test {
    use super::*;
    use crate::server::SyncOp;
    use crate::taskdb::TaskDb;
    use chrono::Utc;
    use uuid::Uuid;

    /// rebuild() with renumbering compresses surviving entries to the top.
    #[test]
    fn rebuild_working_set_renumber() -> anyhow::Result<()> {
        rebuild_working_set(true)
    }

    /// rebuild() without renumbering keeps surviving entries at their old indexes.
    #[test]
    fn rebuild_working_set_no_renumber() -> anyhow::Result<()> {
        rebuild_working_set(false)
    }

    /// Shared body for the two tests above: build a TaskDb containing a mix of pending
    /// and non-pending tasks, seed a working set holding some of each, then rebuild the
    /// working set (keeping only tasks with status "pending") and check the layout.
    fn rebuild_working_set(renumber: bool) -> anyhow::Result<()> {
        let mut db = TaskDb::new_inmemory();
        let mut uuids = vec![];
        uuids.push(Uuid::new_v4());
        println!("uuids[0]: {:?} - pending, not in working set", uuids[0]);
        uuids.push(Uuid::new_v4());
        println!("uuids[1]: {:?} - pending, in working set", uuids[1]);
        uuids.push(Uuid::new_v4());
        println!("uuids[2]: {:?} - not pending, not in working set", uuids[2]);
        uuids.push(Uuid::new_v4());
        println!("uuids[3]: {:?} - not pending, in working set", uuids[3]);
        uuids.push(Uuid::new_v4());
        println!("uuids[4]: {:?} - pending, in working set", uuids[4]);

        // add everything to the TaskDb
        for uuid in &uuids {
            db.apply(SyncOp::Create { uuid: *uuid })?;
        }
        // mark tasks 0, 1, and 4 as pending
        for i in &[0usize, 1, 4] {
            db.apply(SyncOp::Update {
                uuid: uuids[*i], // Uuid is Copy, so no clone is needed
                property: String::from("status"),
                value: Some("pending".into()),
                timestamp: Utc::now(),
            })?;
        }

        // set the existing working_set as we want it
        {
            let mut txn = db.storage.txn()?;
            txn.clear_working_set()?;

            for i in &[1usize, 3, 4] {
                txn.add_to_working_set(uuids[*i])?;
            }

            txn.commit()?;
        }

        assert_eq!(
            db.working_set()?,
            vec![None, Some(uuids[1]), Some(uuids[3]), Some(uuids[4])]
        );

        rebuild(
            db.storage.txn()?.as_mut(),
            |t| {
                if let Some(status) = t.get("status") {
                    status == "pending"
                } else {
                    false
                }
            },
            renumber,
        )?;

        let exp = if renumber {
            // uuids[1] and uuids[4] are already in the working set, so are compressed
            // to the top, and then uuids[0] is added.
            vec![None, Some(uuids[1]), Some(uuids[4]), Some(uuids[0])]
        } else {
            // uuids[1] and uuids[4] are already in the working set, at indexes 1 and 3,
            // and then uuids[0] is added.
            vec![
                None,
                Some(uuids[1]),
                None,
                Some(uuids[4]),
                Some(uuids[0]),
            ]
        };

        assert_eq!(db.working_set()?, exp);

        Ok(())
    }
}
|
||||
Reference in New Issue
Block a user