throw error when scheduling with past expired_at
v9n committed Oct 11, 2023
1 parent 89b5bac commit 3e4b3af
Showing 3 changed files with 98 additions and 45 deletions.
28 changes: 28 additions & 0 deletions pallets/automation-price/src/benchmarking.rs
@@ -250,6 +250,34 @@ benchmarks! {
AutomationPrice::<T>::run_xcmp_task(destination, creator, fee, call, Weight::from_ref_time(100_000), Weight::from_ref_time(200_000), InstructionSequence::PayThroughSovereignAccount)
}

//remove_task {
// let creator : T::AccountId = account("caller", 0, SEED);
// let para_id: u32 = 1000;
// let call: Vec<u8> = vec![2, 4, 5];
// setup_asset::<T>(vec![creator.clone()]);
// let transfer_amount = T::Currency::minimum_balance().saturating_mul(ED_MULTIPLIER.into());
// T::Currency::deposit_creating(
// &creator,
// transfer_amount.clone().saturating_mul(DEPOSIT_MULTIPLIER.into()),
// );

// // Schedule 10,000 tasks. This is just an arbitrary number to simulate a big task registry.
// // Because we use a StorageMap and avoid dealing with a vector,
// // our task lookup will always be O(1) in time.
// let mut task_ids: Vec<TaskId> = vec![];
// for i in 1..100 {
// direct_task_schedule::<T>(creator.clone(), format!("{:?}", i).as_bytes().to_vec(), i, "gt".as_bytes().to_vec(), i, vec![100, 200, (i % 256) as u8]);
// task_ids.push(format!("{:?}", i).as_bytes().to_vec());
// }

// let task_id_to_cancel = "1".as_bytes().to_vec();

// let task =
//}: {
// AutomationPrice::<T>::remove_task()
//}
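
The remove_task benchmark above is left commented out in this commit. For orientation only, a completed version might look roughly like the sketch below; it assumes that remove_task(&Task<T>) is reachable from the benchmarking module and that direct_task_schedule keeps the signature used in the draft, so treat it as a starting point rather than part of the commit.

// Hypothetical completion of the disabled benchmark above (not in this commit).
remove_task {
    let creator: T::AccountId = account("caller", 0, SEED);
    setup_asset::<T>(vec![creator.clone()]);
    let transfer_amount = T::Currency::minimum_balance().saturating_mul(ED_MULTIPLIER.into());
    T::Currency::deposit_creating(
        &creator,
        transfer_amount.clone().saturating_mul(DEPOSIT_MULTIPLIER.into()),
    );

    // Register a batch of tasks so the removal runs against a non-trivial registry
    for i in 1..100 {
        direct_task_schedule::<T>(creator.clone(), format!("{:?}", i).as_bytes().to_vec(), i, "gt".as_bytes().to_vec(), i, vec![100, 200, (i % 256) as u8]);
    }

    let task_id: TaskId = "1".as_bytes().to_vec();
    let task = AutomationPrice::<T>::get_task(creator.clone(), task_id).expect("task was scheduled");
}: {
    AutomationPrice::<T>::remove_task(&task)
}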


emit_event {
let who: T::AccountId = account("call", 1, SEED);
let task_id: TaskId = vec![1,2,3];
59 changes: 42 additions & 17 deletions pallets/automation-price/src/lib.rs
@@ -289,10 +289,17 @@ pub mod pallet {
>;

// SortedTasksByExpiration is our store of tasks sorted by expiration time
#[pallet::type_value]
pub fn DefaultSortedTasksByExpiration<T: Config>() -> BTreeMap<u128, TaskIdList<T>> {
BTreeMap::<u128, TaskIdList<T>>::new()
}
#[pallet::storage]
#[pallet::getter(fn get_sorted_tasks_by_expiration)]
pub type SortedTasksByExpiration<T> =
StorageValue<_, BTreeMap<u128, TaskIdList<T>>, ValueQuery>;
pub type SortedTasksByExpiration<T> = StorageValue<
Value = BTreeMap<u128, TaskIdList<T>>,
QueryKind = ValueQuery,
OnEmpty = DefaultSortedTasksByExpiration<T>,
>;

// All active tasks, but organized by account
// In this storage, we are only interested in returning tasks belonging to an account; we also want to
Expand Down Expand Up @@ -367,6 +374,8 @@ pub mod pallet {
TaskRemoveFailure,
/// Task Not Found When canceling
TaskDoesNotExist,
/// Error when a task's expired_at is set earlier than the current block time
InvalidTaskExpiredAt,
/// Error when failed to update task expiration storage
TaskExpiredStorageFailedToUpdate,
/// Insufficient Balance
@@ -1188,7 +1197,12 @@ pub mod pallet {
}
}

// TODO: check atomic
// Handle task removal. There are a few places the task needs to be removed from:
// Tasks storage
// TaskQueue, if the task is already queued
// TaskStats: decrease task count
// AccountStats: decrease task count
// SortedTasksIndex
fn remove_task(task: &Task<T>) {
Tasks::<T>::remove(task.owner_id.clone(), task.task_id.clone());

@@ -1230,13 +1244,17 @@ pub mod pallet {

let now = current_block_time.unwrap() as u128;

let mut unused_weight = remaining_weight.saturating_sub(T::DbWeight::get().reads(1u64));
// At the end we will most likely need to write back the updated storage, so here we
// account for that write
let mut unused_weight = remaining_weight
.saturating_sub(T::DbWeight::get().reads(1u64))
.saturating_sub(T::DbWeight::get().writes(1u64));
let mut tasks_by_expiration = Self::get_sorted_tasks_by_expiration();

let mut expired_shards: Vec<u128> = vec![];
// Use Included(now) because if this task has not run by the end of this block, then
// it will certainly have expired by the next block
for (expired_time, task_ids) in
'outer: for (expired_time, task_ids) in
tasks_by_expiration.range_mut((Included(&0_u128), Included(&now)))
{
for (owner_id, task_id) in task_ids.iter() {
@@ -1253,10 +1271,14 @@ pub mod pallet {
// Now let remove the task from chain storage
if let Some(task) = Self::get_task(owner_id, task_id) {
Self::remove_task(&task);
expired_shards.push(expired_time.clone());
}
} else {
// If there is not enough weight left, break all the way out; we have
// already reserved one weight for the write that updates the storage
break 'outer
}
}
expired_shards.push(expired_time.clone());
}

for expired_time in expired_shards.iter() {
@@ -1267,22 +1289,13 @@ pub mod pallet {
unused_weight
}

// Task is write into a sorted storage, re-present bya BTreeMap so we can expired them
// Tasks are written into a sorted storage, represented by a BTreeMap, so we can find and expire them
pub fn put_task_to_expired_queue(task: &Task<T>) -> Result<bool, Error<T>> {
if !SortedTasksByExpiration::<T>::exists() {
let mut tasks_by_expiration = BTreeMap::<u128, TaskIdList<T>>::new();
tasks_by_expiration.insert(
task.expired_at.clone(),
vec![(task.owner_id.clone(), task.task_id.clone())],
);
SortedTasksByExpiration::<T>::put(tasks_by_expiration);
return Ok(true)
}

// First we get back the reference to the underlying storage and
// perform the relevant update to write the task into the right shard by expiration
// time, then the value is stored back to storage
let mut tasks_by_expiration = Self::get_sorted_tasks_by_expiration();

if let Some(task_shard) = tasks_by_expiration.get_mut(&task.expired_at) {
task_shard.push((task.owner_id.clone(), task.task_id.clone()));
} else {
@@ -1306,6 +1319,18 @@ pub mod pallet {
Err(Error::<T>::InvalidTaskId)?
}

let current_block_time = Self::get_current_block_time();
if current_block_time.is_err() {
// Cannot get the time; this is probably the first block
Err(Error::<T>::BlockTimeNotSet)?
}

let now = current_block_time.unwrap() as u128;

if task.expired_at <= now {
Err(Error::<T>::InvalidTaskExpiredAt)?
}

let total_task = Self::get_task_stat(StatType::TotalTasksOverall).map_or(0, |v| v);
let total_task_per_account =
Self::get_account_stat(&task.owner_id, StatType::TotalTasksPerAccount)
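
The heart of this commit is the new check in validate_and_schedule_task above: a task whose expired_at is not in the future is rejected with InvalidTaskExpiredAt. The snippet below is a minimal standalone sketch of that rule in plain Rust, not pallet code; the names ScheduleError and validate_expired_at are illustrative, and the snippet is only here to make the boundary cases easy to see.

// Standalone illustration of the new scheduling rule (not pallet code).
#[derive(Debug, PartialEq)]
enum ScheduleError {
    BlockTimeNotSet,
    InvalidTaskExpiredAt,
}

fn validate_expired_at(current_block_time: Option<u128>, expired_at: u128) -> Result<(), ScheduleError> {
    // Mirrors the pallet's intent: a missing block time (e.g. the very first block) is its own error
    let now = current_block_time.ok_or(ScheduleError::BlockTimeNotSet)?;
    // expired_at must be strictly in the future
    if expired_at <= now {
        return Err(ScheduleError::InvalidTaskExpiredAt);
    }
    Ok(())
}

fn main() {
    let now: u128 = 33_198_768_000; // seconds, i.e. START_BLOCK_TIME / 1_000
    assert_eq!(validate_expired_at(Some(now), now - 100), Err(ScheduleError::InvalidTaskExpiredAt));
    assert_eq!(validate_expired_at(Some(now), now), Err(ScheduleError::InvalidTaskExpiredAt));
    assert!(validate_expired_at(Some(now), now + 3600).is_ok());
    assert_eq!(validate_expired_at(None, now + 3600), Err(ScheduleError::BlockTimeNotSet));
}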
56 changes: 28 additions & 28 deletions pallets/automation-price/src/tests.rs
@@ -34,6 +34,7 @@ use xcm::latest::{prelude::*, Junction::Parachain, MultiLocation};
use crate::weights::WeightInfo;

pub const START_BLOCK_TIME: u64 = 33198768000 * 1_000;
pub const START_BLOCK_TIME_1HOUR_AFTER_IN_SECOND: u128 = 33198768000 + 3600;
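// Note: START_BLOCK_TIME is in milliseconds, while expired_at values are in seconds,
// so 33198768000 + 3600 is one hour after START_BLOCK_TIME / 1_000.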

struct XcmpActionParams {
destination: MultiLocation,
@@ -315,7 +316,7 @@ fn test_schedule_xcmp_task_ok() {
exchange1.to_vec(),
asset1.to_vec(),
asset2.to_vec(),
1005u128,
START_BLOCK_TIME_1HOUR_AFTER_IN_SECOND,
"gt".as_bytes().to_vec(),
vec!(100),
Box::new(destination.into()),
@@ -347,7 +348,7 @@ fn test_schedule_xcmp_task_ok() {
assert_eq!(task.chain, chain1.to_vec(), "created task has different chain id");
assert_eq!(task.asset_pair.0, asset1, "created task has wrong asset pair");

assert_eq!(1005u128, task.expired_at);
assert_eq!(START_BLOCK_TIME_1HOUR_AFTER_IN_SECOND, task.expired_at);

// Ensure task is inserted into the right SortedIndex

@@ -358,7 +359,7 @@ fn test_schedule_xcmp_task_ok() {
exchange1.to_vec(),
asset1.to_vec(),
asset2.to_vec(),
1005u128,
START_BLOCK_TIME_1HOUR_AFTER_IN_SECOND,
"gt".as_bytes().to_vec(),
vec!(100),
Box::new(destination.into()),
@@ -422,7 +423,7 @@ fn test_schedule_put_task_to_expiration_queue() {
exchange1.to_vec(),
asset1.to_vec(),
asset2.to_vec(),
1005u128,
START_BLOCK_TIME_1HOUR_AFTER_IN_SECOND,
"gt".as_bytes().to_vec(),
vec!(100),
Box::new(destination.into()),
@@ -440,7 +441,9 @@ fn test_schedule_put_task_to_expiration_queue() {

let task_expiration_map = AutomationPrice::get_sorted_tasks_by_expiration();
assert_eq!(
task_expiration_map.get(&1005u128).expect("missing task expiration shard"),
task_expiration_map
.get(&(START_BLOCK_TIME_1HOUR_AFTER_IN_SECOND))
.expect("missing task expiration shard"),
&vec![(creator, task_id.clone())]
);
})
@@ -463,7 +466,7 @@ fn test_schedule_put_task_to_expiration_queue_multi() {
exchange1.to_vec(),
asset1.to_vec(),
asset2.to_vec(),
1234u128,
START_BLOCK_TIME_1HOUR_AFTER_IN_SECOND,
"gt".as_bytes().to_vec(),
vec!(100),
Box::new(destination.into()),
@@ -485,7 +488,7 @@ fn test_schedule_put_task_to_expiration_queue_multi() {
exchange1.to_vec(),
asset1.to_vec(),
asset2.to_vec(),
5678u128,
START_BLOCK_TIME_1HOUR_AFTER_IN_SECOND + 3600,
"lt".as_bytes().to_vec(),
vec!(100),
Box::new(destination.into()),
@@ -503,11 +506,15 @@ fn test_schedule_put_task_to_expiration_queue_multi() {

let task_expiration_map = AutomationPrice::get_sorted_tasks_by_expiration();
assert_eq!(
task_expiration_map.get(&1234u128).expect("missing task expiration shard"),
task_expiration_map
.get(&START_BLOCK_TIME_1HOUR_AFTER_IN_SECOND)
.expect("missing task expiration shard"),
&vec![(creator1, task_id1.clone())]
);
assert_eq!(
task_expiration_map.get(&5678u128).expect("missing task expiration shard"),
task_expiration_map
.get(&(START_BLOCK_TIME_1HOUR_AFTER_IN_SECOND + 3600))
.expect("missing task expiration shard"),
&vec![(creator2, task_id2.clone())]
);
})
@@ -539,10 +546,7 @@ fn test_sweep_expired_task_works() {
chain: chain1.to_vec(),
exchange: exchange1.to_vec(),
asset_pair: (asset1.to_vec(), asset2.to_vec()),
expired_at: START_BLOCK_TIME
.checked_div(1000)
.map_or(10000000_u128, |v| v.into())
.saturating_add(i),
expired_at: START_BLOCK_TIME_1HOUR_AFTER_IN_SECOND - 1800,
trigger_function: "gt".as_bytes().to_vec(),
trigger_params: vec![2000],
action: Action::XCMP {
@@ -568,10 +572,7 @@ fn test_sweep_expired_task_works() {
chain: chain1.to_vec(),
exchange: exchange1.to_vec(),
asset_pair: (asset1.to_vec(), asset2.to_vec()),
expired_at: START_BLOCK_TIME
.checked_div(1000)
.map_or(10000000_u128, |v| v.into())
.saturating_add(7200),
expired_at: START_BLOCK_TIME_1HOUR_AFTER_IN_SECOND + 3600,
trigger_function: "gt".as_bytes().to_vec(),
trigger_params: vec![2000],
action: Action::XCMP {
@@ -618,7 +619,7 @@ fn test_schedule_return_error_when_reaching_max_tasks_overall_limit() {
exchange1.to_vec(),
asset1.to_vec(),
asset2.to_vec(),
1005u128,
START_BLOCK_TIME_1HOUR_AFTER_IN_SECOND + 3600,
"gt".as_bytes().to_vec(),
vec!(100),
Box::new(destination.into()),
@@ -655,7 +656,7 @@ fn test_schedule_return_error_when_reaching_max_account_tasks_limit() {
exchange1.to_vec(),
asset1.to_vec(),
asset2.to_vec(),
1005u128,
START_BLOCK_TIME_1HOUR_AFTER_IN_SECOND,
"gt".as_bytes().to_vec(),
vec!(100),
Box::new(destination.into()),
@@ -692,7 +693,7 @@ fn test_shift_tasks_movement_through_price_changes() {
exchange1.to_vec(),
asset1.to_vec(),
asset2.to_vec(),
1000u128,
START_BLOCK_TIME_1HOUR_AFTER_IN_SECOND,
"gt".as_bytes().to_vec(),
vec!(10100),
Box::new(destination.into()),
@@ -712,7 +713,7 @@ fn test_shift_tasks_movement_through_price_changes() {
exchange1.to_vec(),
asset2.to_vec(),
asset3.to_vec(),
3000u128,
START_BLOCK_TIME_1HOUR_AFTER_IN_SECOND,
"gt".as_bytes().to_vec(),
vec!(10900),
Box::new(destination.into()),
@@ -732,7 +733,7 @@ fn test_shift_tasks_movement_through_price_changes() {
exchange1.to_vec(),
asset1.to_vec(),
asset3.to_vec(),
6000u128,
START_BLOCK_TIME_1HOUR_AFTER_IN_SECOND + 6000,
"gt".as_bytes().to_vec(),
vec!(102000),
Box::new(destination.into()),
@@ -834,7 +835,7 @@ fn test_shift_tasks_movement_through_price_changes() {
exchange1.to_vec(),
asset2.to_vec(),
asset3.to_vec(),
3000u128,
START_BLOCK_TIME_1HOUR_AFTER_IN_SECOND,
"lt".as_bytes().to_vec(),
// price for this asset is 10 in our last update
vec!(20),
@@ -1068,10 +1069,7 @@ fn test_expired_task_not_run() {
chain: chain1.to_vec(),
exchange: exchange1.to_vec(),
asset_pair: (asset1.to_vec(), asset2.to_vec()),
expired_at: START_BLOCK_TIME
.checked_div(1000)
.map_or(10000000_u128, |v| v.into())
.saturating_sub(100),
expired_at: START_BLOCK_TIME_1HOUR_AFTER_IN_SECOND,
trigger_function: "gt".as_bytes().to_vec(),
trigger_params: vec![123],
action: Action::XCMP {
Expand All @@ -1088,6 +1086,8 @@ fn test_expired_task_not_run() {

AutomationPrice::validate_and_schedule_task(task.clone());

// Moving the clock to simulate the task expiration
Timestamp::set_timestamp(START_BLOCK_TIME.saturating_add(7200_000_u64).try_into().unwrap());
AutomationPrice::run_tasks(
vec![(task.owner_id.clone(), task.task_id.clone())],
100_000_000_000.into(),
@@ -1203,7 +1203,7 @@ fn test_cancel_task_works() {
chain: chain1.to_vec(),
exchange: exchange1.to_vec(),
asset_pair: (asset1.to_vec(), asset2.to_vec()),
expired_at: 123_u128,
expired_at: START_BLOCK_TIME_1HOUR_AFTER_IN_SECOND,
trigger_function: "gt".as_bytes().to_vec(),
trigger_params: vec![123],
action: Action::XCMP {
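
test_sweep_expired_task_works above exercises the sweep path over the expiration index. As a standalone illustration of the data structure behind it, the sketch below (plain Rust, not pallet code; sweep_expired, OwnerId and the sample values are made up for the example) shows the SortedTasksByExpiration idea: task ids are grouped into shards keyed by expiration time in a BTreeMap, a sweep only walks the range from 0 up to now, and whole shards are dropped once their tasks have been cleaned up.

// Standalone sketch of the expiration-shard sweep (not pallet code).
use std::collections::BTreeMap;
use std::ops::Bound::Included;

type OwnerId = u32;
type TaskId = Vec<u8>;

fn sweep_expired(shards: &mut BTreeMap<u128, Vec<(OwnerId, TaskId)>>, now: u128) {
    let mut expired_shards: Vec<u128> = vec![];
    for (expired_time, task_ids) in shards.range_mut((Included(&0_u128), Included(&now))) {
        for (owner, task_id) in task_ids.iter() {
            // In the pallet this is where Tasks, TaskQueue, TaskStats,
            // AccountStats and SortedTasksIndex entries are cleaned up.
            println!("removing task {:?} owned by {}", task_id, owner);
        }
        expired_shards.push(*expired_time);
    }
    // Drop the emptied shards so the map only keeps not-yet-expired entries
    for t in expired_shards {
        shards.remove(&t);
    }
}

fn main() {
    let mut shards: BTreeMap<u128, Vec<(OwnerId, TaskId)>> = BTreeMap::new();
    shards.entry(1_000).or_default().push((1, b"task-1".to_vec()));
    shards.entry(9_999).or_default().push((2, b"task-2".to_vec()));
    sweep_expired(&mut shards, 5_000);
    assert_eq!(shards.len(), 1); // only the future shard (9_999) remains
}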
