diff --git a/CHANGELOG.md b/CHANGELOG.md
index 46c66be06b..3e37c6b50a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -10,6 +10,20 @@ Valid settings are:
 - `--log-viewer` flag with `dfx canister create`
 - `canisters[].initialization_values.log_visibility.allowed_viewers` in `dfx.json`
 
+### feat: batch upload assets
+
+The frontend canister sync now tries to batch multiple small content chunks into a single call using the `create_chunks` method added earlier.
+This should lead to significantly faster upload times for frontends with many small files.
+
+## Dependencies
+
+### Frontend canister
+
+Bumped `api_version` to `2` because the improved file sync relies on the previously added `create_chunks` method.
+
+- Module hash: 9e4485d4358dd910aebcc025843547d05604cf28c6dc7c2cc2f8c76d083112e8
+- https://github.com/dfinity/sdk/pull/3947
+
 # 0.24.1
 
 ### feat: More PocketIC flags supported
diff --git a/src/canisters/frontend/ic-asset/src/batch_upload/operations.rs b/src/canisters/frontend/ic-asset/src/batch_upload/operations.rs
index b3363b6e39..518fb58330 100644
--- a/src/canisters/frontend/ic-asset/src/batch_upload/operations.rs
+++ b/src/canisters/frontend/ic-asset/src/batch_upload/operations.rs
@@ -10,9 +10,12 @@ use crate::canister_api::types::batch_upload::v1::{BatchOperationKind, CommitBatchArguments};
 use candid::Nat;
 use std::collections::HashMap;
 
+use super::plumbing::ChunkUploader;
+
 pub(crate) const BATCH_UPLOAD_API_VERSION: u16 = 1;
 
-pub(crate) fn assemble_batch_operations(
+pub(crate) async fn assemble_batch_operations(
+    chunk_uploader: Option<&ChunkUploader<'_>>,
     project_assets: &HashMap<String, ProjectAsset>,
     canister_assets: HashMap<String, AssetDetails>,
     asset_deletion_reason: AssetDeletionReason,
@@ -30,13 +33,14 @@ pub(crate) fn assemble_batch_operations(
     );
     create_new_assets(&mut operations, project_assets, &canister_assets);
     unset_obsolete_encodings(&mut operations, project_assets, &canister_assets);
-    set_encodings(&mut operations, project_assets);
+    set_encodings(&mut operations, chunk_uploader, project_assets).await;
     update_properties(&mut operations, project_assets, &canister_asset_properties);
 
     operations
 }
 
-pub(crate) fn assemble_commit_batch_arguments(
+pub(crate) async fn assemble_commit_batch_arguments(
+    chunk_uploader: &ChunkUploader<'_>,
     project_assets: HashMap<String, ProjectAsset>,
     canister_assets: HashMap<String, AssetDetails>,
     asset_deletion_reason: AssetDeletionReason,
@@ -44,11 +48,13 @@ pub(crate) fn assemble_commit_batch_arguments(
     batch_id: Nat,
 ) -> CommitBatchArguments {
     let operations = assemble_batch_operations(
+        Some(chunk_uploader),
         &project_assets,
         canister_assets,
         asset_deletion_reason,
         canister_asset_properties,
-    );
+    )
+    .await;
     CommitBatchArguments {
         operations,
         batch_id,
@@ -153,8 +159,9 @@ pub(crate) fn unset_obsolete_encodings(
     }
 }
 
-pub(crate) fn set_encodings(
+pub(crate) async fn set_encodings(
     operations: &mut Vec<BatchOperationKind>,
+    chunk_uploader: Option<&ChunkUploader<'_>>,
     project_assets: &HashMap<String, ProjectAsset>,
 ) {
     for (key, project_asset) in project_assets {
@@ -162,12 +169,18 @@
             if v.already_in_place {
                 continue;
             }
-
+            let chunk_ids = if let Some(uploader) = chunk_uploader {
+                uploader
+                    .uploader_ids_to_canister_chunk_ids(&v.uploader_chunk_ids)
+                    .await
+            } else {
+                vec![]
+            };
             operations.push(BatchOperationKind::SetAssetContent(
                 SetAssetContentArguments {
                     key: key.clone(),
                     content_encoding: content_encoding.clone(),
-                    chunk_ids: v.chunk_ids.clone(),
+                    chunk_ids,
                     sha256: Some(v.sha256.clone()),
                 },
             ));
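For readers skimming the diff: the `operations.rs` hunks above introduce an indirection between locally assigned uploader chunk ids (`usize`) and canister-assigned chunk ids, resolved only when the commit arguments are assembled. A minimal, self-contained sketch of that mapping step (not the crate's code; `u128` stands in for `candid::Nat` so it runs with the standard library alone):

```rust
use std::collections::BTreeMap;

// Stand-in for candid::Nat so the sketch needs no dependencies.
type CanisterChunkId = u128;

/// Maps locally assigned uploader ids to canister-assigned chunk ids.
struct IdMapping(BTreeMap<usize, CanisterChunkId>);

impl IdMapping {
    /// Resolve uploader ids once every chunk has been uploaded,
    /// mirroring `uploader_ids_to_canister_chunk_ids` in the diff.
    fn resolve(&self, uploader_ids: &[usize]) -> Vec<CanisterChunkId> {
        uploader_ids
            .iter()
            .map(|id| *self.0.get(id).expect("all chunks must be uploaded first"))
            .collect()
    }
}

fn main() {
    let mut map = BTreeMap::new();
    // Chunks 0 and 1 were batched into one `create_chunks` call;
    // the canister assigned them ids 7 and 8.
    map.insert(0, 7);
    map.insert(1, 8);
    let mapping = IdMapping(map);
    assert_eq!(mapping.resolve(&[0, 1]), vec![7, 8]);
}
```

The `expect` mirrors the invariant in the real code: every queued chunk must have been uploaded (via `finalize_upload`) before ids are resolved.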
diff --git a/src/canisters/frontend/ic-asset/src/batch_upload/plumbing.rs b/src/canisters/frontend/ic-asset/src/batch_upload/plumbing.rs
index 661e1e0dd4..640b9d48e0 100644
--- a/src/canisters/frontend/ic-asset/src/batch_upload/plumbing.rs
+++ b/src/canisters/frontend/ic-asset/src/batch_upload/plumbing.rs
@@ -3,6 +3,7 @@ use crate::asset::content::Content;
 use crate::asset::content_encoder::ContentEncoder;
 use crate::batch_upload::semaphores::Semaphores;
 use crate::canister_api::methods::chunk::create_chunk;
+use crate::canister_api::methods::chunk::create_chunks;
 use crate::canister_api::types::asset::AssetDetails;
 use crate::error::CreateChunkError;
 use crate::error::CreateEncodingError;
@@ -14,10 +15,12 @@ use futures::TryFutureExt;
 use ic_utils::Canister;
 use mime::Mime;
 use slog::{debug, info, Logger};
+use std::collections::BTreeMap;
 use std::collections::HashMap;
 use std::path::PathBuf;
 use std::sync::atomic::{AtomicUsize, Ordering};
 use std::sync::Arc;
+use tokio::sync::Mutex;
 
 const CONTENT_ENCODING_IDENTITY: &str = "identity";
 
@@ -35,7 +38,7 @@ pub(crate) struct AssetDescriptor {
 }
 
 pub(crate) struct ProjectAssetEncoding {
-    pub(crate) chunk_ids: Vec<Nat>,
+    pub(crate) uploader_chunk_ids: Vec<usize>,
     pub(crate) sha256: Vec<u8>,
     pub(crate) already_in_place: bool,
 }
@@ -46,30 +49,68 @@ pub(crate) struct ProjectAsset {
     pub(crate) encodings: HashMap<String, ProjectAssetEncoding>,
 }
 
+type IdMapping = BTreeMap<usize, Nat>;
+type UploadQueue = Vec<(usize, Vec<u8>)>;
 pub(crate) struct ChunkUploader<'agent> {
     canister: Canister<'agent>,
     batch_id: Nat,
+    api_version: u16,
     chunks: Arc<AtomicUsize>,
     bytes: Arc<AtomicUsize>,
+    // maps uploader_chunk_id to canister_chunk_id
+    id_mapping: Arc<Mutex<IdMapping>>,
+    upload_queue: Arc<Mutex<UploadQueue>>,
 }
+
 impl<'agent> ChunkUploader<'agent> {
-    pub(crate) fn new(canister: Canister<'agent>, batch_id: Nat) -> Self {
+    pub(crate) fn new(canister: Canister<'agent>, api_version: u16, batch_id: Nat) -> Self {
         Self {
             canister,
             batch_id,
+            api_version,
             chunks: Arc::new(AtomicUsize::new(0)),
             bytes: Arc::new(AtomicUsize::new(0)),
+            id_mapping: Arc::new(Mutex::new(BTreeMap::new())),
+            upload_queue: Arc::new(Mutex::new(vec![])),
         }
     }
 
+    /// Returns an uploader_chunk_id, which is different from the chunk id on the asset canister.
+    /// uploader_chunk_id can be mapped to canister_chunk_id using `uploader_ids_to_canister_chunk_ids`
+    /// once `finalize_upload` has completed.
     pub(crate) async fn create_chunk(
         &self,
         contents: &[u8],
         semaphores: &Semaphores,
-    ) -> Result<Nat, CreateChunkError> {
-        self.chunks.fetch_add(1, Ordering::SeqCst);
+    ) -> Result<usize, CreateChunkError> {
+        let uploader_chunk_id = self.chunks.fetch_add(1, Ordering::SeqCst);
         self.bytes.fetch_add(contents.len(), Ordering::SeqCst);
-        create_chunk(&self.canister, &self.batch_id, contents, semaphores).await
+        if contents.len() == MAX_CHUNK_SIZE || self.api_version < 2 {
+            let canister_chunk_id =
+                create_chunk(&self.canister, &self.batch_id, contents, semaphores).await?;
+            let mut map = self.id_mapping.lock().await;
+            map.insert(uploader_chunk_id, canister_chunk_id);
+            Ok(uploader_chunk_id)
+        } else {
+            self.add_to_upload_queue(uploader_chunk_id, contents).await;
+            // Larger `max_retained_bytes` leads to batches that are filled closer to the max size.
+            // `4 * MAX_CHUNK_SIZE` leads to a pretty small memory footprint but still offers solid fill rates.
+            // Mini experiment:
+            // - Tested with: `for i in $(seq 1 50); do dd if=/dev/urandom of="src/hello_frontend/assets/file_$i.bin" bs=$(shuf -i 1-2000000 -n 1) count=1; done && dfx deploy hello_frontend`
+            // - Result: Roughly 15% of batches under 90% full.
+            //   With other byte ranges (e.g. `shuf -i 1-3000000 -n 1`) stats improve significantly.
+            self.upload_chunks(4 * MAX_CHUNK_SIZE, usize::MAX, semaphores)
+                .await?;
+            Ok(uploader_chunk_id)
+        }
+    }
+
+    pub(crate) async fn finalize_upload(
+        &self,
+        semaphores: &Semaphores,
+    ) -> Result<(), CreateChunkError> {
+        self.upload_chunks(0, 0, semaphores).await?;
+        Ok(())
     }
 
     pub(crate) fn bytes(&self) -> usize {
@@ -78,6 +119,80 @@ impl<'agent> ChunkUploader<'agent> {
     pub(crate) fn chunks(&self) -> usize {
         self.chunks.load(Ordering::SeqCst)
     }
+
+    /// Call only after `finalize_upload` has completed
+    pub(crate) async fn uploader_ids_to_canister_chunk_ids(
+        &self,
+        uploader_ids: &[usize],
+    ) -> Vec<Nat> {
+        let mapping = self.id_mapping.lock().await;
+        uploader_ids
+            .iter()
+            .map(|id| {
+                mapping
+                    .get(id)
+                    .expect("Chunk uploader did not upload all chunks. This is a bug.")
+                    .clone()
+            })
+            .collect()
+    }
+
+    async fn add_to_upload_queue(&self, uploader_chunk_id: usize, contents: &[u8]) {
+        let mut queue = self.upload_queue.lock().await;
+        queue.push((uploader_chunk_id, contents.into()));
+    }
+
+    /// Calls `create_chunks` with batches of chunks from `self.upload_queue` until at most `max_retained_bytes`
+    /// bytes and at most `max_retained_chunks` chunks remain in the upload queue. Larger values
+    /// will lead to better batch fill rates but also leave a larger memory footprint.
+    async fn upload_chunks(
+        &self,
+        max_retained_bytes: usize,
+        max_retained_chunks: usize,
+        semaphores: &Semaphores,
+    ) -> Result<(), CreateChunkError> {
+        let mut queue = self.upload_queue.lock().await;
+
+        let mut batches = vec![];
+        while queue
+            .iter()
+            .map(|(_, content)| content.len())
+            .sum::<usize>()
+            > max_retained_bytes
+            || queue.len() > max_retained_chunks
+        {
+            // Greedily fills the batch with the largest chunk that fits
+            queue.sort_unstable_by_key(|(_, content)| content.len());
+            let mut batch = vec![];
+            let mut batch_size = 0;
+            for (uploader_chunk_id, content) in std::mem::take(&mut *queue).into_iter().rev() {
+                if content.len() <= MAX_CHUNK_SIZE - batch_size {
+                    batch_size += content.len();
+                    batch.push((uploader_chunk_id, content));
+                } else {
+                    queue.push((uploader_chunk_id, content));
+                }
+            }
+            batches.push(batch);
+        }
+
+        try_join_all(batches.into_iter().map(|chunks| async move {
+            let (uploader_chunk_ids, chunks): (Vec<_>, Vec<_>) = chunks.into_iter().unzip();
+            let canister_chunk_ids =
+                create_chunks(&self.canister, &self.batch_id, chunks, semaphores).await?;
+            let mut map = self.id_mapping.lock().await;
+            for (uploader_id, canister_id) in uploader_chunk_ids
+                .into_iter()
+                .zip(canister_chunk_ids.into_iter())
+            {
+                map.insert(uploader_id, canister_id);
+            }
+            Ok(())
+        }))
+        .await?;
+
+        Ok(())
+    }
 }
 
 #[allow(clippy::too_many_arguments)]
@@ -110,7 +225,7 @@ async fn make_project_asset_encoding(
         false
     };
 
-    let chunk_ids = if already_in_place {
+    let uploader_chunk_ids = if already_in_place {
         info!(
             logger,
             "  {}{} ({} bytes) sha {} is already installed",
@@ -144,7 +259,7 @@
     };
 
     Ok(ProjectAssetEncoding {
-        chunk_ids,
+        uploader_chunk_ids,
         sha256,
         already_in_place,
     })
@@ -305,6 +420,13 @@ pub(crate) async fn make_project_assets(
         })
         .collect();
     let project_assets = try_join_all(project_asset_futures).await?;
+    if let Some(uploader) = chunk_upload_target {
+        uploader.finalize_upload(&semaphores).await.map_err(|err| {
+            CreateProjectAssetError::CreateEncodingError(CreateEncodingError::CreateChunkFailed(
+                err,
+            ))
+        })?;
+    }
 
     let mut hm = HashMap::new();
     for project_asset in project_assets {
@@ -321,7 +443,7 @@ async fn upload_content_chunks(
     content_encoding: &str,
     semaphores: &Semaphores,
     logger: &Logger,
-) -> Result<Vec<Nat>, CreateChunkError> {
+) -> Result<Vec<usize>, CreateChunkError> {
     if content.data.is_empty() {
         let empty = vec![];
         let chunk_id = chunk_uploader.create_chunk(&empty, semaphores).await?;
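The core of the new `upload_chunks` is its greedy bin-packing loop. Here is an illustrative, standalone re-derivation of just that loop (not the crate's code): it drains the queue completely, the way `finalize_upload` does via `upload_chunks(0, 0, ...)`, and uses a tiny `MAX_CHUNK_SIZE` so the assertions run instantly. It assumes every chunk is at most `MAX_CHUNK_SIZE`, which the real uploader guarantees by sending full-size chunks through `create_chunk` individually.

```rust
const MAX_CHUNK_SIZE: usize = 10; // tiny illustrative limit, not the crate's constant

/// Greedily packs queued (id, content) pairs into batches no larger than
/// MAX_CHUNK_SIZE, always trying the largest remaining chunk first,
/// mirroring the loop in `upload_chunks` above.
fn pack_batches(mut queue: Vec<(usize, Vec<u8>)>) -> Vec<Vec<(usize, Vec<u8>)>> {
    let mut batches = vec![];
    while !queue.is_empty() {
        // Sort ascending by size, then iterate in reverse (largest first).
        queue.sort_unstable_by_key(|(_, content)| content.len());
        let mut batch = vec![];
        let mut batch_size = 0;
        for (id, content) in std::mem::take(&mut queue).into_iter().rev() {
            if content.len() <= MAX_CHUNK_SIZE - batch_size {
                batch_size += content.len();
                batch.push((id, content));
            } else {
                queue.push((id, content)); // doesn't fit; retry in a later batch
            }
        }
        batches.push(batch);
    }
    batches
}

fn main() {
    let queue = vec![(0, vec![0; 6]), (1, vec![0; 5]), (2, vec![0; 4])];
    let batches = pack_batches(queue);
    // 6 + 4 fit together in one batch of exactly 10; 5 lands in a second batch.
    assert_eq!(batches.len(), 2);
}
```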
diff --git a/src/canisters/frontend/ic-asset/src/batch_upload/semaphores.rs b/src/canisters/frontend/ic-asset/src/batch_upload/semaphores.rs
index 1f86278586..6661184d10 100644
--- a/src/canisters/frontend/ic-asset/src/batch_upload/semaphores.rs
+++ b/src/canisters/frontend/ic-asset/src/batch_upload/semaphores.rs
@@ -6,30 +6,31 @@ const MAX_SIMULTANEOUS_LOADED_MB: usize = 50;
 // How many simultaneous chunks being created at once
 const MAX_SIMULTANEOUS_CREATE_CHUNK: usize = 50;
 
-// How many simultaneous Agent.call() to create_chunk
+// How many simultaneous Agent.call() to create_chunk(s)
 const MAX_SIMULTANEOUS_CREATE_CHUNK_CALLS: usize = 25;
 
-// How many simultaneous Agent.wait() on create_chunk result
+// How many simultaneous Agent.wait() on create_chunk(s) result
 const MAX_SIMULTANEOUS_CREATE_CHUNK_WAITS: usize = 25;
 
+#[derive(Debug)]
 pub(crate) struct Semaphores {
     // The "file" semaphore limits how much file data to load at once. A given loaded file's data
     // may be simultaneously encoded (gzip and so forth).
     pub file: SharedSemaphore,
 
-    // The create_chunk semaphore limits the number of chunks that can be in the process
-    // of being created at one time. Since each chunk creation can involve retries,
-    // this focuses those retries on a smaller number of chunks.
+    // The create_chunk semaphore limits the number of chunk creation calls
+    // that can be in progress at one time. Since each chunk creation can involve retries,
+    // this focuses those retries on a smaller number of calls.
     // Without this semaphore, every chunk would make its first attempt, before
     // any chunk made its second attempt.
     pub create_chunk: SharedSemaphore,
 
     // The create_chunk_call semaphore limits the number of simultaneous
-    // agent.call()s to create_chunk.
+    // agent.call()s to create_chunk(s).
     pub create_chunk_call: SharedSemaphore,
 
     // The create_chunk_wait semaphore limits the number of simultaneous
-    // agent.wait() calls for outstanding create_chunk requests.
+    // agent.wait() calls for outstanding create_chunk(s) requests.
     pub create_chunk_wait: SharedSemaphore,
 }
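For context on the throttling pattern these comments describe, a condensed sketch using `tokio::sync::Semaphore` directly (the crate's `SharedSemaphore` is its own wrapper type; this example assumes the `tokio` crate with default runtime features and is illustrative only): at most 25 simulated calls are in flight at once, matching `MAX_SIMULTANEOUS_CREATE_CHUNK_CALLS`.

```rust
use std::sync::Arc;
use tokio::sync::Semaphore;

#[tokio::main]
async fn main() {
    // Cap simultaneous "calls" at 25, as MAX_SIMULTANEOUS_CREATE_CHUNK_CALLS does.
    let create_chunk_call = Arc::new(Semaphore::new(25));
    let tasks: Vec<_> = (0..100)
        .map(|i| {
            let sem = create_chunk_call.clone();
            tokio::spawn(async move {
                // Acquire before the call; the permit is released when dropped.
                let _permit = sem.acquire_owned().await.expect("semaphore closed");
                // ... the agent.call() to create_chunk(s) would happen here ...
                i
            })
        })
        .collect();
    for t in tasks {
        t.await.expect("task panicked");
    }
}
```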
diff --git a/src/canisters/frontend/ic-asset/src/canister_api/methods/chunk.rs b/src/canisters/frontend/ic-asset/src/canister_api/methods/chunk.rs
index f63659c866..79c6ea1096 100644
--- a/src/canisters/frontend/ic-asset/src/canister_api/methods/chunk.rs
+++ b/src/canisters/frontend/ic-asset/src/canister_api/methods/chunk.rs
@@ -1,7 +1,9 @@
 use crate::batch_upload::retryable::retryable;
 use crate::batch_upload::semaphores::Semaphores;
 use crate::canister_api::methods::method_names::CREATE_CHUNK;
-use crate::canister_api::types::batch_upload::common::{CreateChunkRequest, CreateChunkResponse};
+use crate::canister_api::types::batch_upload::common::{
+    CreateChunkRequest, CreateChunkResponse, CreateChunksRequest, CreateChunksResponse,
+};
 use crate::error::CreateChunkError;
 use backoff::backoff::Backoff;
 use backoff::ExponentialBackoffBuilder;
@@ -10,6 +12,8 @@ use ic_agent::agent::CallResponse;
 use ic_utils::Canister;
 use std::time::Duration;
 
+use super::method_names::CREATE_CHUNKS;
+
 pub(crate) async fn create_chunk(
     canister: &Canister<'_>,
     batch_id: &Nat,
@@ -66,3 +70,60 @@ pub(crate) async fn create_chunk(
         }
     }
 }
+
+pub(crate) async fn create_chunks(
+    canister: &Canister<'_>,
+    batch_id: &Nat,
+    content: Vec<Vec<u8>>,
+    semaphores: &Semaphores,
+) -> Result<Vec<Nat>, CreateChunkError> {
+    let _chunk_releaser = semaphores.create_chunk.acquire(1).await;
+    let batch_id = batch_id.clone();
+    let args = CreateChunksRequest { batch_id, content };
+    let mut retry_policy = ExponentialBackoffBuilder::new()
+        .with_initial_interval(Duration::from_secs(1))
+        .with_max_interval(Duration::from_secs(16))
+        .with_multiplier(2.0)
+        .with_max_elapsed_time(Some(Duration::from_secs(300)))
+        .build();
+
+    loop {
+        let builder = canister.update(CREATE_CHUNKS);
+        let builder = builder.with_arg(&args);
+        let request_id_result = {
+            let _releaser = semaphores.create_chunk_call.acquire(1).await;
+            builder
+                .build()
+                .map(|result: (CreateChunksResponse,)| (result.0.chunk_ids,))
+                .call()
+                .await
+        };
+
+        let wait_result = match request_id_result {
+            Ok(resp) => match resp {
+                CallResponse::Response(r) => Ok(r),
+                CallResponse::Poll(id) => {
+                    let _releaser = semaphores.create_chunk_wait.acquire(1).await;
+                    canister
+                        .wait(&id)
+                        .await
+                        .and_then(|bytes| Ok((Decode!(&bytes, CreateChunksResponse)?.chunk_ids,)))
+                }
+            },
+            Err(agent_err) => Err(agent_err),
+        };
+
+        match wait_result {
+            Ok((chunk_ids,)) => {
+                return Ok(chunk_ids);
+            }
+            Err(agent_err) if !retryable(&agent_err) => {
+                return Err(CreateChunkError::CreateChunks(agent_err));
+            }
+            Err(agent_err) => match retry_policy.next_backoff() {
+                Some(duration) => tokio::time::sleep(duration).await,
+                None => return Err(CreateChunkError::CreateChunks(agent_err)),
+            },
+        }
+    }
+}
diff --git a/src/canisters/frontend/ic-asset/src/canister_api/methods/method_names.rs b/src/canisters/frontend/ic-asset/src/canister_api/methods/method_names.rs
index 5cfc36e910..d8fd4d83a2 100644
--- a/src/canisters/frontend/ic-asset/src/canister_api/methods/method_names.rs
+++ b/src/canisters/frontend/ic-asset/src/canister_api/methods/method_names.rs
@@ -3,6 +3,7 @@ pub(crate) const COMMIT_BATCH: &str = "commit_batch";
 pub(crate) const COMPUTE_EVIDENCE: &str = "compute_evidence";
 pub(crate) const CREATE_BATCH: &str = "create_batch";
 pub(crate) const CREATE_CHUNK: &str = "create_chunk";
+pub(crate) const CREATE_CHUNKS: &str = "create_chunks";
 pub(crate) const GET_ASSET_PROPERTIES: &str = "get_asset_properties";
 pub(crate) const LIST: &str = "list";
 pub(crate) const PROPOSE_COMMIT_BATCH: &str = "propose_commit_batch";
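`create_chunks` reuses the retry discipline of `create_chunk`. Stripped to its skeleton, the schedule looks like this; `send` is a hypothetical stand-in for one call attempt, and the builder values match those in the function above (this assumes the `backoff` crate, which the diff already depends on):

```rust
use backoff::backoff::Backoff;
use backoff::ExponentialBackoffBuilder;
use std::time::Duration;

// Hypothetical stand-in for one create_chunks attempt: fails twice, then succeeds.
fn send(attempt: u32) -> Result<Vec<u64>, &'static str> {
    if attempt < 2 { Err("transient") } else { Ok(vec![1, 2]) }
}

fn main() {
    // Same schedule as create_chunks above: 1s initial interval, 16s cap,
    // doubling each time, giving up after 300s total.
    let mut retry_policy = ExponentialBackoffBuilder::new()
        .with_initial_interval(Duration::from_secs(1))
        .with_max_interval(Duration::from_secs(16))
        .with_multiplier(2.0)
        .with_max_elapsed_time(Some(Duration::from_secs(300)))
        .build();

    let mut attempt = 0;
    let chunk_ids = loop {
        match send(attempt) {
            Ok(ids) => break ids,
            // Only transient errors reach next_backoff(); None means give up.
            Err(_transient) => match retry_policy.next_backoff() {
                Some(duration) => std::thread::sleep(duration),
                None => panic!("retries exhausted"),
            },
        }
        attempt += 1;
    };
    assert_eq!(chunk_ids, vec![1, 2]);
}
```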
diff --git a/src/canisters/frontend/ic-asset/src/canister_api/types/batch_upload/common.rs b/src/canisters/frontend/ic-asset/src/canister_api/types/batch_upload/common.rs
index 2c9b82468e..7e36d1d5ad 100644
--- a/src/canisters/frontend/ic-asset/src/canister_api/types/batch_upload/common.rs
+++ b/src/canisters/frontend/ic-asset/src/canister_api/types/batch_upload/common.rs
@@ -27,13 +27,31 @@ pub struct CreateChunkRequest<'a> {
     pub content: &'a [u8],
 }
 
-/// The responst to a CreateChunkRequest.
+/// The response to a CreateChunkRequest.
 #[derive(CandidType, Debug, Deserialize)]
 pub struct CreateChunkResponse {
     /// The ID of the created chunk.
     pub chunk_id: Nat,
 }
 
+/// Upload multiple chunks of data that are part of asset content.
+#[derive(CandidType, Debug, Deserialize)]
+pub struct CreateChunksRequest {
+    /// The batch with which to associate the created chunks.
+    /// The chunks will be deleted if the batch expires before being committed.
+    pub batch_id: Nat,
+
+    /// The data in these chunks.
+    pub content: Vec<Vec<u8>>,
+}
+
+/// The response to a CreateChunksRequest.
+#[derive(CandidType, Debug, Deserialize)]
+pub struct CreateChunksResponse {
+    /// The IDs of the created chunks.
+    pub chunk_ids: Vec<Nat>,
+}
+
 /// Create a new asset. Has no effect if the asset already exists and the content type matches.
 /// Traps if the asset already exists but with a different content type.
 #[derive(CandidType, Clone, Debug, PartialOrd, PartialEq, Eq, Ord)]
diff --git a/src/canisters/frontend/ic-asset/src/error/create_chunk.rs b/src/canisters/frontend/ic-asset/src/error/create_chunk.rs
index d982c0e453..539af922fa 100644
--- a/src/canisters/frontend/ic-asset/src/error/create_chunk.rs
+++ b/src/canisters/frontend/ic-asset/src/error/create_chunk.rs
@@ -8,6 +8,10 @@ pub enum CreateChunkError {
     #[error("Failed to create chunk: {0}")]
     CreateChunk(AgentError),
 
+    /// Failed in call to create_chunks, or in waiting for response.
+    #[error("Failed to create chunks: {0}")]
+    CreateChunks(AgentError),
+
     /// Failed to decode the create chunk response.
     #[error("Failed to decode create chunk response: {0}")]
     DecodeCreateChunkResponse(candid::Error),
diff --git a/src/canisters/frontend/ic-asset/src/evidence/mod.rs b/src/canisters/frontend/ic-asset/src/evidence/mod.rs
index 71b22b3a27..18ee8fc390 100644
--- a/src/canisters/frontend/ic-asset/src/evidence/mod.rs
+++ b/src/canisters/frontend/ic-asset/src/evidence/mod.rs
@@ -59,11 +59,13 @@ pub async fn compute_evidence(
         make_project_assets(None, asset_descriptors, &canister_assets, logger).await?;
 
     let mut operations = assemble_batch_operations(
+        None,
         &project_assets,
         canister_assets,
         Obsolete,
         canister_asset_properties,
-    );
+    )
+    .await;
     operations.sort();
 
     let mut sha = Sha256::new();
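The new request/response pair is plain Candid records, so their wire shape can be exercised in isolation. A sketch using the `candid` and `serde` crates, with local mirrors of the types above rather than imports from the crate:

```rust
use candid::{CandidType, Decode, Encode, Nat};
use serde::Deserialize;

// Local mirrors of the types added in common.rs above.
#[derive(CandidType, Deserialize)]
struct CreateChunksRequest {
    batch_id: Nat,
    content: Vec<Vec<u8>>,
}

#[derive(CandidType, Deserialize)]
struct CreateChunksResponse {
    chunk_ids: Vec<Nat>,
}

fn main() {
    let req = CreateChunksRequest {
        batch_id: Nat::from(1u8),
        content: vec![vec![0xde, 0xad], vec![0xbe, 0xef]],
    };
    // Candid-encode as the agent would for the create_chunks update call,
    // then decode it back and check the round trip.
    let bytes = Encode!(&req).expect("encoding failed");
    let decoded = Decode!(&bytes, CreateChunksRequest).expect("decoding failed");
    assert_eq!(decoded.content.len(), 2);
}
```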
diff --git a/src/canisters/frontend/ic-asset/src/sync.rs b/src/canisters/frontend/ic-asset/src/sync.rs
index c229416b3c..ca5be49869 100644
--- a/src/canisters/frontend/ic-asset/src/sync.rs
+++ b/src/canisters/frontend/ic-asset/src/sync.rs
@@ -45,6 +45,7 @@ const KNOWN_DIRECTORIES: [&str; 1] = [".well-known"];
 /// Sets the contents of the asset canister to the contents of a directory, including deleting old assets.
 pub async fn upload_content_and_assemble_sync_operations(
     canister: &Canister<'_>,
+    canister_api_version: u16,
     dirs: &[&Path],
     no_delete: bool,
     logger: &Logger,
@@ -74,7 +75,8 @@ pub async fn upload_content_and_assemble_sync_operations(
         "Staging contents of new and changed assets in batch {}:",
         batch_id
     );
-    let chunk_uploader = ChunkUploader::new(canister.clone(), batch_id.clone());
+    let chunk_uploader =
+        ChunkUploader::new(canister.clone(), canister_api_version, batch_id.clone());
 
     let project_assets = make_project_assets(
         Some(&chunk_uploader),
@@ -85,6 +87,7 @@ pub async fn upload_content_and_assemble_sync_operations(
     .await?;
 
     let commit_batch_args = batch_upload::operations::assemble_commit_batch_arguments(
+        &chunk_uploader,
         project_assets,
         canister_assets,
         match no_delete {
@@ -93,7 +96,8 @@ pub async fn upload_content_and_assemble_sync_operations(
         },
         canister_asset_properties,
         batch_id,
-    );
+    )
+    .await;
 
     // -v
     debug!(
@@ -121,9 +125,15 @@ pub async fn sync(
     no_delete: bool,
     logger: &Logger,
 ) -> Result<(), SyncError> {
-    let commit_batch_args =
-        upload_content_and_assemble_sync_operations(canister, dirs, no_delete, logger).await?;
     let canister_api_version = api_version(canister).await;
+    let commit_batch_args = upload_content_and_assemble_sync_operations(
+        canister,
+        canister_api_version,
+        dirs,
+        no_delete,
+        logger,
+    )
+    .await?;
     debug!(logger, "Canister API version: {canister_api_version}. ic-asset API version: {BATCH_UPLOAD_API_VERSION}");
     info!(logger, "Committing batch.");
     match canister_api_version {
@@ -196,7 +206,15 @@ pub async fn prepare_sync_for_proposal(
     dirs: &[&Path],
     logger: &Logger,
 ) -> Result<(Nat, ByteBuf), PrepareSyncForProposalError> {
-    let arg = upload_content_and_assemble_sync_operations(canister, dirs, false, logger).await?;
+    let canister_api_version = api_version(canister).await;
+    let arg = upload_content_and_assemble_sync_operations(
+        canister,
+        canister_api_version,
+        dirs,
+        false,
+        logger,
+    )
+    .await?;
     let arg = sort_batch_operations(arg);
     let batch_id = arg.batch_id.clone();
diff --git a/src/canisters/frontend/ic-asset/src/upload.rs b/src/canisters/frontend/ic-asset/src/upload.rs
index 7057a9f4b1..c987b49eb4 100644
--- a/src/canisters/frontend/ic-asset/src/upload.rs
+++ b/src/canisters/frontend/ic-asset/src/upload.rs
@@ -39,10 +39,12 @@ pub async fn upload(
     info!(logger, "Starting batch.");
     let batch_id = create_batch(canister).await.map_err(CreateBatchFailed)?;
 
+    let canister_api_version = api_version(canister).await;
     info!(logger, "Staging contents of new and changed assets:");
 
-    let chunk_upload_target = ChunkUploader::new(canister.clone(), batch_id.clone());
+    let chunk_upload_target =
+        ChunkUploader::new(canister.clone(), canister_api_version, batch_id.clone());
 
     let project_assets = make_project_assets(
         Some(&chunk_upload_target),
@@ -53,12 +55,14 @@ pub async fn upload(
     .await?;
 
     let commit_batch_args = batch_upload::operations::assemble_commit_batch_arguments(
+        &chunk_upload_target,
         project_assets,
         canister_assets,
         AssetDeletionReason::Incompatible,
         HashMap::new(),
         batch_id,
-    );
+    )
+    .await;
 
     let canister_api_version = api_version(canister).await;
     info!(logger, "Committing batch.");
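All the call sites above thread `canister_api_version` down to `ChunkUploader`, which decides per chunk whether batching is safe. The predicate reduces to a one-liner; this is an illustrative restatement of the branch in `create_chunk`, not library code:

```rust
/// Mirrors the branch in `ChunkUploader::create_chunk`: full-size chunks
/// and pre-v2 canisters take the single-chunk path; everything else is
/// queued for a batched `create_chunks` call.
fn use_batch_path(content_len: usize, api_version: u16, max_chunk_size: usize) -> bool {
    !(content_len == max_chunk_size || api_version < 2)
}

fn main() {
    const MAX: usize = 1_900_000; // illustrative limit, not the crate's actual constant
    assert!(!use_batch_path(MAX, 2, MAX)); // full chunk: upload directly
    assert!(!use_batch_path(100, 1, MAX)); // api_version 1: create_chunks unavailable
    assert!(use_batch_path(100, 2, MAX)); // small chunk on v2: queue and batch
}
```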
diff --git a/src/canisters/frontend/ic-certified-assets/src/lib.rs b/src/canisters/frontend/ic-certified-assets/src/lib.rs
index 5f926ace06..44847a1e1c 100644
--- a/src/canisters/frontend/ic-certified-assets/src/lib.rs
+++ b/src/canisters/frontend/ic-certified-assets/src/lib.rs
@@ -35,7 +35,7 @@ thread_local! {
 #[query]
 #[candid_method(query)]
 fn api_version() -> u16 {
-    1
+    2
 }
 
 #[update(guard = "is_manager_or_controller")]
diff --git a/src/distributed/assetstorage.wasm.gz b/src/distributed/assetstorage.wasm.gz
index 1b691cb7ca..082ae64e9b 100755
Binary files a/src/distributed/assetstorage.wasm.gz and b/src/distributed/assetstorage.wasm.gz differ