Skip to content

Commit

Permalink
Optimize & cleanup with clippy (#427)
Browse files Browse the repository at this point in the history
* Refactor: Optimize code.

* Refactor: Remove unwanted clone, return

* Refactor: Revert match macro.

* Add: More cleanup

---------

Co-authored-by: Chris Li <chrisli30@users.noreply.github.com>
  • Loading branch information
siddharthteli12 and chrisli30 authored Nov 28, 2023
1 parent b4903cb commit 12c9953
Show file tree
Hide file tree
Showing 18 changed files with 111 additions and 142 deletions.
4 changes: 2 additions & 2 deletions node/src/command.rs
Original file line number Diff line number Diff line change
Expand Up @@ -401,7 +401,7 @@ pub fn run() -> Result<()> {
runner.run_node_until_exit(|config| async move {
let hwbench = if !cli.no_hardware_benchmarks {
config.database.path().map(|database_path| {
let _ = std::fs::create_dir_all(&database_path);
let _ = std::fs::create_dir_all(database_path);
sc_sysinfo::gather_hwbench(Some(database_path))
})
} else {
Expand All @@ -411,7 +411,7 @@ pub fn run() -> Result<()> {
let chain_spec = &config.chain_spec;
let para_id = chain_spec::Extensions::try_get(&*config.chain_spec)
.map(|e| e.para_id)
.ok_or_else(|| "Could not find parachain ID in chain-spec.")?;
.ok_or("Could not find parachain ID in chain-spec.")?;

let polkadot_cli = RelayChainCli::new(
&config,
Expand Down
6 changes: 3 additions & 3 deletions node/src/service.rs
Original file line number Diff line number Diff line change
Expand Up @@ -203,7 +203,7 @@ where
let block_import = ParachainBlockImport::new(client.clone(), backend.clone());

let import_queue = build_import_queue(
block_import.clone(),
block_import,
client.clone(),
config,
telemetry.as_ref().map(|telemetry| telemetry.handle()),
Expand Down Expand Up @@ -498,8 +498,8 @@ where

cumulus_client_consensus_aura::import_queue::<AuraPair, _, _, _, _, _>(
cumulus_client_consensus_aura::ImportQueueParams {
block_import: block_import.clone(),
client: client.clone(),
block_import,
client,
create_inherent_data_providers: move |_, _| async move {
let timestamp = sp_timestamp::InherentDataProvider::from_system_time();

Expand Down
5 changes: 1 addition & 4 deletions pallets/automation-price/rpc/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -27,10 +27,7 @@ use sp_api::ProvideRuntimeApi;
use sp_blockchain::HeaderBackend;
use sp_core::Bytes;
use sp_rpc::number::NumberOrHex;
use sp_runtime::{
generic::BlockId,
traits::{Block as BlockT, MaybeDisplay},
};
use sp_runtime::traits::{Block as BlockT, MaybeDisplay};
use std::sync::Arc;

/// An RPC endpoint to provide information about tasks.
Expand Down
23 changes: 10 additions & 13 deletions pallets/automation-price/src/benchmarking.rs
Original file line number Diff line number Diff line change
Expand Up @@ -20,14 +20,11 @@
use super::*;
use frame_benchmarking::{account, benchmarks};
use frame_system::RawOrigin;
use pallet_timestamp::Pallet as Timestamp;

use polkadot_parachain::primitives::Sibling;
use sp_runtime::traits::{AccountIdConversion, Saturating};
use sp_std::cmp;
use xcm::{
latest::{prelude::*, MultiLocation},
VersionedMultiLocation,
};

use xcm::latest::{prelude::*, MultiLocation};

use crate::{
pallet::{Task, TaskId},
Expand Down Expand Up @@ -68,7 +65,7 @@ fn schedule_xcmp_task<T: Config>(
expired_at: u128,
) {
AutomationPrice::<T>::schedule_xcmp_task(
RawOrigin::Signed(owner.clone()).into(),
RawOrigin::Signed(owner).into(),
chain.to_vec(),
exchange.to_vec(),
asset_tur.to_vec(),
Expand All @@ -82,7 +79,7 @@ fn schedule_xcmp_task<T: Config>(
asset_location: MultiLocation::new(1, X1(Parachain(para_id))).into(),
amount: 0,
}),
call.clone(),
call,
Weight::from_parts(100_000, 0),
Weight::from_parts(200_000, 0),
);
Expand Down Expand Up @@ -123,7 +120,7 @@ fn direct_task_schedule<T: Config>(

let task: Task<T> = Task::<T> {
owner_id: creator,
task_id: task_id.clone(),
task_id,
chain: chain.to_vec(),
exchange: exchange.to_vec(),
asset_pair: (asset_tur.to_vec(), asset_usd.to_vec()),
Expand Down Expand Up @@ -198,7 +195,7 @@ benchmarks! {
let transfer_amount = T::Currency::minimum_balance().saturating_mul(ED_MULTIPLIER.into());
T::Currency::deposit_creating(
&sender,
transfer_amount.clone().saturating_mul(DEPOSIT_MULTIPLIER.into()),
transfer_amount.saturating_mul(DEPOSIT_MULTIPLIER.into()),
);

} : {
Expand All @@ -213,7 +210,7 @@ benchmarks! {
let transfer_amount = T::Currency::minimum_balance().saturating_mul(ED_MULTIPLIER.into());
T::Currency::deposit_creating(
&creator,
transfer_amount.clone().saturating_mul(DEPOSIT_MULTIPLIER.into()),
transfer_amount.saturating_mul(DEPOSIT_MULTIPLIER.into()),
);

// Schedule 10000 Task, This is just an arbitrary number to simulate a big task registry
Expand Down Expand Up @@ -261,7 +258,7 @@ benchmarks! {
let transfer_amount = T::Currency::minimum_balance().saturating_mul(ED_MULTIPLIER.into());
T::Currency::deposit_creating(
&creator,
transfer_amount.clone().saturating_mul(DEPOSIT_MULTIPLIER.into()),
transfer_amount.saturating_mul(DEPOSIT_MULTIPLIER.into()),
);

let para_id: u32 = 2000;
Expand All @@ -284,7 +281,7 @@ benchmarks! {
let task_id = format!("{:?}", i).as_bytes().to_vec();
let expired_at = i;
let trigger_function = "gt".as_bytes().to_vec();
let price_target: u128 = i.into();
let price_target: u128 = i;
let encoded_call = vec![100, 200, (i % 256) as u8];

task_ids.push(format!("{:?}", i).as_bytes().to_vec());
Expand Down
56 changes: 22 additions & 34 deletions pallets/automation-price/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -54,9 +54,7 @@ use cumulus_primitives_core::InteriorMultiLocation;

use cumulus_primitives_core::ParaId;
use frame_support::{
pallet_prelude::*,
traits::{Currency, ExistenceRequirement},
transactional,
pallet_prelude::*, traits::Currency, transactional,
weights::constants::WEIGHT_REF_TIME_PER_SECOND,
};
use frame_system::pallet_prelude::*;
Expand All @@ -67,13 +65,7 @@ use sp_runtime::{
traits::{CheckedConversion, Convert, SaturatedConversion, Saturating},
ArithmeticError, Perbill,
};
use sp_std::{
boxed::Box,
collections::btree_map::BTreeMap,
ops::Bound::{Excluded, Included},
vec,
vec::Vec,
};
use sp_std::{boxed::Box, collections::btree_map::BTreeMap, ops::Bound::Included, vec, vec::Vec};

pub use pallet_xcmp_handler::InstructionSequence;
use primitives::EnsureProxy;
Expand Down Expand Up @@ -634,7 +626,7 @@ pub mod pallet {
let exchange = exchanges[index].clone();
let asset1 = assets1[index].clone();
let asset2 = assets2[index].clone();
let round = rounds[index].clone();
let round = rounds[index];

let key = (&chain, &exchange, (&asset1, &asset2));

Expand All @@ -651,7 +643,7 @@ pub mod pallet {
// TODO: Eventually we will need to handle submitted_at and round properly when
// we had more than one oracle
// Currently not doing that check for the simplicity shake of interface
let this_round = match Self::get_asset_price_data(&key) {
let this_round = match Self::get_asset_price_data(key) {
Some(previous_price) => previous_price.round + 1,
None => round,
};
Expand All @@ -671,7 +663,7 @@ pub mod pallet {
});
}
}
Ok(().into())
Ok(())
}

/// Delete an asset. Delete may not happen immediately if there was task scheduled for
Expand Down Expand Up @@ -699,7 +691,7 @@ pub mod pallet {
ensure_root(origin)?;

let key = (&chain, &exchange, (&asset1, &asset2));
if let Some(asset_info) = Self::get_asset_registry_info(&key) {
if let Some(asset_info) = Self::get_asset_registry_info(key) {
AssetRegistry::<T>::remove(&key);
PriceRegistry::<T>::remove(&key);
Self::deposit_event(Event::AssetDeleted { chain, exchange, asset1, asset2 });
Expand Down Expand Up @@ -880,15 +872,11 @@ pub mod pallet {
impl<T: Config> Pallet<T> {
pub fn generate_task_id() -> TaskId {
let current_block_number =
match TryInto::<u64>::try_into(<frame_system::Pallet<T>>::block_number()).ok() {
Some(i) => i,
None => 0,
};
TryInto::<u64>::try_into(<frame_system::Pallet<T>>::block_number())
.ok()
.unwrap_or(0);

let tx_id = match <frame_system::Pallet<T>>::extrinsic_index() {
Some(i) => i,
None => 0,
};
let tx_id = <frame_system::Pallet<T>>::extrinsic_index().unwrap_or(0);

let evt_index = <frame_system::Pallet<T>>::event_count();

Expand All @@ -902,7 +890,7 @@ pub mod pallet {
let weight_left: Weight = max_weight;

// TODO: Look into asset that has price move instead
let ref mut task_to_process: TaskIdList<T> = Vec::new();
let task_to_process: &mut TaskIdList<T> = &mut Vec::new();

for key in SortedTasksIndex::<T>::iter_keys() {
let (chain, exchange, asset_pair, trigger_func) = key.clone();
Expand Down Expand Up @@ -943,7 +931,7 @@ pub mod pallet {
{
// Remove because we map this into task queue
tasks.remove(&price);
let ref mut t = &mut (task_ids.clone());
let t = &mut (&mut (task_ids.clone()));
task_to_process.append(t);
}

Expand All @@ -966,7 +954,7 @@ pub mod pallet {
};
}

return weight_left
weight_left
}

/// Trigger tasks for the block time.
Expand Down Expand Up @@ -995,7 +983,7 @@ pub mod pallet {
.saturating_sub(T::DbWeight::get().reads(1u64))
// For measuring the TaskQueue::<T>::put(tasks_left);
.saturating_sub(T::DbWeight::get().writes(1u64));
if task_queue.len() > 0 {
if !task_queue.is_empty() {
let (tasks_left, new_weight_left) = Self::run_tasks(task_queue, weight_left);
weight_left = new_weight_left;
TaskQueue::<T>::put(tasks_left);
Expand Down Expand Up @@ -1180,7 +1168,7 @@ pub mod pallet {
for (owner_id, task_id) in task_ids.iter() {
consumed_task_index.saturating_inc();

let action_weight = match Self::get_task(&owner_id, &task_id) {
let action_weight = match Self::get_task(owner_id, task_id) {
None => {
Self::deposit_event(Event::TaskNotFound {
owner_id: owner_id.clone(),
Expand Down Expand Up @@ -1286,7 +1274,7 @@ pub mod pallet {

// Remove it from SortedTasksIndex
let key = (&task.chain, &task.exchange, &task.asset_pair, &task.trigger_function);
if let Some(mut sorted_tasks_by_price) = Self::get_sorted_tasks_index(&key) {
if let Some(mut sorted_tasks_by_price) = Self::get_sorted_tasks_index(key) {
if let Some(tasks) = sorted_tasks_by_price.get_mut(&task.trigger_params[0]) {
if let Some(pos) = tasks.iter().position(|x| {
let (_, task_id) = x;
Expand Down Expand Up @@ -1387,7 +1375,7 @@ pub mod pallet {
task_id: task.task_id.clone(),
condition: TaskCondition::AlreadyExpired {
expired_at: task.expired_at,
now: now.into(),
now,
},
}),
);
Expand All @@ -1398,7 +1386,7 @@ pub mod pallet {
break 'outer
}
}
expired_shards.push(expired_time.clone());
expired_shards.push(*expired_time);
}

unused_weight
Expand All @@ -1415,13 +1403,13 @@ pub mod pallet {
task_shard.insert(task.task_id.clone(), task.owner_id.clone());
} else {
tasks_by_expiration.insert(
task.expired_at.clone(),
task.expired_at,
BTreeMap::from([(task.task_id.clone(), task.owner_id.clone())]),
);
}
SortedTasksByExpiration::<T>::put(tasks_by_expiration);

return Ok(true)
Ok(true)
}

/// With transaction will protect against a partial success where N of M execution times might be full,
Expand Down Expand Up @@ -1527,11 +1515,11 @@ pub mod pallet {
},
);

if let Err(_) = fee_result {
if fee_result.is_err() {
Err(Error::<T>::FeePaymentError)?
}

if let Err(_) = Self::track_expired_task(&task) {
if Self::track_expired_task(&task).is_err() {
Err(Error::<T>::TaskExpiredStorageFailedToUpdate)?
}

Expand Down
2 changes: 1 addition & 1 deletion pallets/automation-price/src/mock.rs
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@ use crate as pallet_automation_price;
use crate::TaskId;

use frame_support::{
assert_noop, assert_ok, construct_runtime, parameter_types,
assert_ok, construct_runtime, parameter_types,
traits::{ConstU32, Contains, Everything},
weights::Weight,
PalletId,
Expand Down
Loading

0 comments on commit 12c9953

Please sign in to comment.