diff --git a/src/action.rs b/src/action.rs
index 480e24ae7..308c6ae38 100644
--- a/src/action.rs
+++ b/src/action.rs
@@ -1,6 +1,7 @@
 //! Action builder types.
 
 mod aggregate;
+mod bulk_write;
 mod client_options;
 mod count;
 mod create_collection;
@@ -31,8 +32,10 @@ mod watch;
 
 use std::{future::IntoFuture, marker::PhantomData, ops::Deref};
 
+use crate::bson::Document;
+
 pub use aggregate::Aggregate;
-use bson::Document;
+pub use bulk_write::BulkWrite;
 pub use client_options::ParseConnectionString;
 pub use count::{CountDocuments, EstimatedDocumentCount};
 pub use create_collection::CreateCollection;
diff --git a/src/action/bulk_write.rs b/src/action/bulk_write.rs
new file mode 100644
index 000000000..96996b3fd
--- /dev/null
+++ b/src/action/bulk_write.rs
@@ -0,0 +1,227 @@
+#![allow(missing_docs)]
+
+use std::collections::HashMap;
+
+use crate::{
+    bson::{Bson, Document},
+    error::{ClientBulkWriteError, Error, ErrorKind, Result},
+    operation::bulk_write::BulkWrite as BulkWriteOperation,
+    options::{BulkWriteOptions, WriteConcern, WriteModel},
+    results::BulkWriteResult,
+    Client,
+    ClientSession,
+};
+
+use super::{action_impl, option_setters};
+
+impl Client {
+    pub fn bulk_write(&self, models: impl IntoIterator<Item = WriteModel>) -> BulkWrite {
+        BulkWrite::new(self, models.into_iter().collect())
+    }
+}
+
+#[must_use]
+pub struct BulkWrite<'a> {
+    client: &'a Client,
+    models: Vec<WriteModel>,
+    options: Option<BulkWriteOptions>,
+    session: Option<&'a mut ClientSession>,
+}
+
+impl<'a> BulkWrite<'a> {
+    option_setters!(options: BulkWriteOptions;
+        ordered: bool,
+        bypass_document_validation: bool,
+        comment: Bson,
+        let_vars: Document,
+        verbose_results: bool,
+        write_concern: WriteConcern,
+    );
+
+    pub fn session(mut self, session: &'a mut ClientSession) -> BulkWrite<'a> {
+        self.session = Some(session);
+        self
+    }
+
+    fn new(client: &'a Client, models: Vec<WriteModel>) -> Self {
+        Self {
+            client,
+            models,
+            options: None,
+            session: None,
+        }
+    }
+
+    fn is_ordered(&self) -> bool {
+        self.options
+            .as_ref()
+            .and_then(|options| options.ordered)
+            .unwrap_or(true)
+    }
+}
+
+#[action_impl]
+impl<'a> Action for BulkWrite<'a> {
+    type Future = BulkWriteFuture;
+
+    async fn execute(mut self) -> Result<BulkWriteResult> {
+        #[cfg(feature = "in-use-encryption-unstable")]
+        if self.client.should_auto_encrypt().await {
+            use mongocrypt::error::{Error as EncryptionError, ErrorKind as EncryptionErrorKind};
+
+            let error = EncryptionError {
+                kind: EncryptionErrorKind::Client,
+                code: None,
+                message: Some(
+                    "bulkWrite does not currently support automatic encryption".to_string(),
+                ),
+            };
+            return Err(ErrorKind::Encryption(error).into());
+        }
+
+        resolve_write_concern_with_session!(
+            self.client,
+            self.options,
+            self.session.as_deref_mut()
+        )?;
+
+        let mut total_attempted = 0;
+        let mut execution_status = ExecutionStatus::None;
+
+        while total_attempted < self.models.len()
+            && execution_status.should_continue(self.is_ordered())
+        {
+            let mut operation = BulkWriteOperation::new(
+                self.client.clone(),
+                &self.models[total_attempted..],
+                total_attempted,
+                self.options.as_ref(),
+            )
+            .await;
+            let result = self
+                .client
+                .execute_operation::<BulkWriteOperation>(
+                    &mut operation,
+                    self.session.as_deref_mut(),
+                )
+                .await;
+            total_attempted += operation.n_attempted;
+
+            match result {
+                Ok(result) => {
+                    execution_status = execution_status.with_success(result);
+                }
+                Err(error) => {
+                    execution_status = execution_status.with_failure(error);
+                }
+            }
+        }
+
+        match execution_status {
+            ExecutionStatus::Success(bulk_write_result) => Ok(bulk_write_result),
+            ExecutionStatus::Error(error) => Err(error),
+            ExecutionStatus::None => Err(ErrorKind::InvalidArgument {
+                message: "bulk_write must be provided at least one write operation".into(),
+            }
+            .into()),
+        }
+    }
+}
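The end-to-end flow this file enables, as a minimal sketch (not part of the diff; the namespace and values are illustrative, and since the `WriteModel` variants are `#[non_exhaustive]`, out-of-crate construction depends on whatever constructors the final API exposes):

```rust
use mongodb::{bson::doc, options::WriteModel, Client, Namespace};

async fn demo(client: &Client) -> mongodb::error::Result<()> {
    let ns = Namespace {
        db: "db".to_string(),
        coll: "coll".to_string(),
    };
    let models = vec![
        WriteModel::InsertOne {
            namespace: ns.clone(),
            document: doc! { "x": 1 },
        },
        WriteModel::DeleteOne {
            namespace: ns,
            filter: doc! { "x": { "$lt": 0 } },
            collation: None,
            hint: None,
        },
    ];
    // Both models run in a single bulkWrite command; the setters come from `option_setters!`.
    let result = client.bulk_write(models).verbose_results(true).await?;
    println!("inserted: {}, deleted: {}", result.inserted_count, result.deleted_count);
    Ok(())
}
```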
+
+/// Represents the execution status of a bulk write. The status starts at `None`, indicating that
+/// no writes have been attempted yet, and transitions to either `Success` or `Error` as batches
+/// are executed. The contents of `Error` can be inspected to determine whether a bulk write can
+/// continue with further batches or should be terminated.
+enum ExecutionStatus {
+    Success(BulkWriteResult),
+    Error(Error),
+    None,
+}
+
+impl ExecutionStatus {
+    fn with_success(mut self, result: BulkWriteResult) -> Self {
+        match self {
+            // Merge two successful sets of results together.
+            Self::Success(ref mut current_result) => {
+                current_result.merge(result);
+                self
+            }
+            // Merge the results of the new batch into the existing bulk write error.
+            Self::Error(ref mut current_error) => {
+                let bulk_write_error = Self::get_current_bulk_write_error(current_error);
+                bulk_write_error.merge_partial_results(result);
+                self
+            }
+            Self::None => Self::Success(result),
+        }
+    }
+
+    fn with_failure(self, mut error: Error) -> Self {
+        match self {
+            // If the new error is a BulkWriteError, merge the successful results into the error's
+            // partial result. Otherwise, create a new BulkWriteError with the existing results
+            // and set its source as the error that just occurred.
+            Self::Success(current_result) => match *error.kind {
+                ErrorKind::ClientBulkWrite(ref mut bulk_write_error) => {
+                    bulk_write_error.merge_partial_results(current_result);
+                    Self::Error(error)
+                }
+                _ => {
+                    let bulk_write_error: Error =
+                        ErrorKind::ClientBulkWrite(ClientBulkWriteError {
+                            write_errors: HashMap::new(),
+                            write_concern_errors: Vec::new(),
+                            partial_result: Some(current_result),
+                        })
+                        .into();
+                    Self::Error(bulk_write_error.with_source(error))
+                }
+            },
+            // If the new error is a BulkWriteError, merge its contents with the existing error.
+            // Otherwise, set the new error as the existing error's source.
+            Self::Error(mut current_error) => match *error.kind {
+                ErrorKind::ClientBulkWrite(bulk_write_error) => {
+                    let current_bulk_write_error =
+                        Self::get_current_bulk_write_error(&mut current_error);
+                    current_bulk_write_error.merge(bulk_write_error);
+                    Self::Error(current_error)
+                }
+                _ => Self::Error(current_error.with_source(error)),
+            },
+            Self::None => Self::Error(error),
+        }
+    }
+
+    /// Gets a BulkWriteError from a given Error. This method should only be called when adding a
+    /// new result or error to the existing state, as it requires that the given Error's kind is
+    /// ClientBulkWrite.
+    fn get_current_bulk_write_error(error: &mut Error) -> &mut ClientBulkWriteError {
+        match *error.kind {
+            ErrorKind::ClientBulkWrite(ref mut bulk_write_error) => bulk_write_error,
+            _ => unreachable!(),
+        }
+    }
+
+    /// Whether further bulk write batches should be executed based on the current status of
+    /// execution.
+    fn should_continue(&self, ordered: bool) -> bool {
+        match self {
+            Self::Error(ref error) => {
+                match *error.kind {
+                    ErrorKind::ClientBulkWrite(ref bulk_write_error) => {
+                        // A top-level error is always fatal.
+                        let top_level_error_occurred = error.source.is_some();
+                        // A write error occurring during an ordered bulk write is fatal.
+                        let terminal_write_error_occurred =
+                            ordered && !bulk_write_error.write_errors.is_empty();
+
+                        !top_level_error_occurred && !terminal_write_error_occurred
+                    }
+                    // A top-level error is always fatal.
+                    _ => false,
+                }
+            }
+            _ => true,
+        }
+    }
+}
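How a caller might consume the resulting error, sketched under the same assumptions as above: an unordered bulk write keeps executing after individual write errors (per `should_continue`), so the final error carries both per-index failures and a partial result.

```rust
use mongodb::error::{ErrorKind, Result};

async fn run(client: &mongodb::Client, models: Vec<mongodb::options::WriteModel>) -> Result<()> {
    if let Err(error) = client.bulk_write(models).ordered(false).await {
        if let ErrorKind::ClientBulkWrite(ref bulk_error) = *error.kind {
            // Keys of `write_errors` are absolute indexes into the caller's models.
            for (index, write_error) in &bulk_error.write_errors {
                eprintln!("model {} failed: {}", index, write_error.message);
            }
            if let Some(ref partial) = bulk_error.partial_result {
                eprintln!("{} inserts still succeeded", partial.inserted_count);
            }
        }
        return Err(error);
    }
    Ok(())
}
```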
diff --git a/src/action/insert_many.rs b/src/action/insert_many.rs
index 0c2e349d1..a6c0040c3 100644
--- a/src/action/insert_many.rs
+++ b/src/action/insert_many.rs
@@ -103,10 +103,7 @@ impl<'a> Action for InsertMany<'a> {
             .as_ref()
             .and_then(|o| o.ordered)
             .unwrap_or(true);
-        #[cfg(feature = "in-use-encryption-unstable")]
-        let encrypted = self.coll.client().auto_encryption_opts().await.is_some();
-        #[cfg(not(feature = "in-use-encryption-unstable"))]
-        let encrypted = false;
+        let encrypted = self.coll.client().should_auto_encrypt().await;
 
         let mut cumulative_failure: Option<BulkWriteFailure> = None;
         let mut error_labels: HashSet<String> = Default::default();
diff --git a/src/action/insert_one.rs b/src/action/insert_one.rs
index edaebea11..f7d70afcc 100644
--- a/src/action/insert_one.rs
+++ b/src/action/insert_one.rs
@@ -87,18 +87,13 @@ impl<'a> Action for InsertOne<'a> {
     async fn execute(mut self) -> Result<InsertOneResult> {
         resolve_write_concern_with_session!(self.coll, self.options, self.session.as_ref())?;
 
-        #[cfg(feature = "in-use-encryption-unstable")]
-        let encrypted = self.coll.client().auto_encryption_opts().await.is_some();
-        #[cfg(not(feature = "in-use-encryption-unstable"))]
-        let encrypted = false;
-
         let doc = self.doc?;
 
         let insert = Op::new(
             self.coll.namespace(),
             vec![doc.deref()],
             self.options.map(InsertManyOptions::from_insert_one_options),
-            encrypted,
+            self.coll.client().should_auto_encrypt().await,
         );
         self.coll
             .client()
diff --git a/src/bson_util.rs b/src/bson_util.rs
index 185b43018..06c11f062 100644
--- a/src/bson_util.rs
+++ b/src/bson_util.rs
@@ -4,7 +4,16 @@ use std::{
 };
 
 use crate::{
-    bson::{Bson, Document, RawArrayBuf, RawBson, RawBsonRef, RawDocumentBuf},
+    bson::{
+        oid::ObjectId,
+        rawdoc,
+        Bson,
+        Document,
+        RawArrayBuf,
+        RawBson,
+        RawBsonRef,
+        RawDocumentBuf,
+    },
     checked::Checked,
     error::{ErrorKind, Result},
     runtime::SyncLittleEndianRead,
@@ -57,38 +66,51 @@ pub(crate) fn to_raw_bson_array(docs: &[Document]) -> Result<RawBson> {
     Ok(RawBson::Array(array))
 }
 
-#[cfg(test)]
-pub(crate) fn sort_document(document: &mut Document) {
-    let temp = std::mem::take(document);
-
-    let mut elements: Vec<_> = temp.into_iter().collect();
-    elements.sort_by(|e1, e2| e1.0.cmp(&e2.0));
-
-    document.extend(elements);
-}
-
 pub(crate) fn first_key(document: &Document) -> Option<&str> {
     document.keys().next().map(String::as_str)
 }
 
-pub(crate) fn replacement_raw_document_check(replacement: &RawDocumentBuf) -> Result<()> {
-    match replacement.iter().next().transpose()? {
-        Some((key, _)) if !key.starts_with('$') => Ok(()),
-        _ => Err(ErrorKind::InvalidArgument {
-            message: "replace document must have first key not starting with '$'".to_string(),
+pub(crate) fn update_document_check(update: &Document) -> Result<()> {
+    match first_key(update) {
+        Some(key) => {
+            if !key.starts_with('$') {
+                Err(ErrorKind::InvalidArgument {
+                    message: "update document must only contain update modifiers".to_string(),
+                }
+                .into())
+            } else {
+                Ok(())
+            }
+        }
+        None => Err(ErrorKind::InvalidArgument {
+            message: "update document must not be empty".to_string(),
         }
         .into()),
     }
 }
 
-pub(crate) fn update_document_check(update: &Document) -> Result<()> {
-    match first_key(update) {
-        Some(s) if s.starts_with('$') => Ok(()),
-        _ => Err(ErrorKind::InvalidArgument {
-            message: "update document must have first key starting with '$".to_string(),
+pub(crate) fn replacement_document_check(replacement: &Document) -> Result<()> {
+    if let Some(key) = first_key(replacement) {
+        if key.starts_with('$') {
+            return Err(ErrorKind::InvalidArgument {
+                message: "replacement document must not contain update modifiers".to_string(),
+            }
+            .into());
         }
-        .into()),
     }
+    Ok(())
+}
+
+pub(crate) fn replacement_raw_document_check(replacement: &RawDocumentBuf) -> Result<()> {
+    if let Some((key, _)) = replacement.iter().next().transpose()? {
+        if key.starts_with('$') {
+            return Err(ErrorKind::InvalidArgument {
+                message: "replacement document must not contain update modifiers".to_string(),
+            }
+            .into());
+        };
+    }
+    Ok(())
 }
 
 /// The size in bytes of the provided document's entry in a BSON array at the given index.
@@ -101,6 +123,14 @@ pub(crate) fn array_entry_size_bytes(index: usize, doc_len: usize) -> Result<usize> {
 }
 
+pub(crate) fn vec_to_raw_array_buf(docs: Vec<RawDocumentBuf>) -> RawArrayBuf {
+    let mut array = RawArrayBuf::new();
+    for doc in docs {
+        array.push(doc);
+    }
+    array
+}
+
 /// The number of digits in `n` in base 10.
 /// Useful for calculating the size of an array entry in BSON.
 fn num_decimal_digits(mut n: usize) -> usize {
@@ -139,6 +169,30 @@ pub(crate) fn extend_raw_document_buf(
     Ok(())
 }
 
+/// Returns the _id field of this document, prepending the field to the document if one is not
+/// already present.
+pub(crate) fn get_or_prepend_id_field(doc: &mut RawDocumentBuf) -> Result<Bson> {
+    match doc.get("_id")? {
+        Some(id) => Ok(id.try_into()?),
+        None => {
+            let id = ObjectId::new();
+            let mut new_bytes = rawdoc! { "_id": id }.into_bytes();
+
+            // Remove the trailing null byte (which will be replaced by the null byte in the given
+            // document) and append the document's elements
+            new_bytes.pop();
+            new_bytes.extend(&doc.as_bytes()[4..]);
+
+            let new_length: i32 = Checked::new(new_bytes.len()).try_into()?;
+            new_bytes[0..4].copy_from_slice(&new_length.to_le_bytes());
+
+            *doc = RawDocumentBuf::from_bytes(new_bytes)?;
+
+            Ok(id.into())
+        }
+    }
+}
+
 #[cfg(test)]
 mod test {
     use crate::bson_util::num_decimal_digits;
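A worked illustration of the byte surgery in `get_or_prepend_id_field` (this restates the function's own logic; the example document is arbitrary). A BSON document is a little-endian `i32` total length, the encoded elements, and a trailing `0x00` terminator, so prepending `_id` amounts to splicing byte ranges and patching the length prefix:

```rust
use mongodb::bson::{oid::ObjectId, rawdoc};

let doc = rawdoc! { "x": 1 };            // [len][ "x": 1 ][0x00]
let id = ObjectId::new();
let mut new_bytes = rawdoc! { "_id": id }.into_bytes();
new_bytes.pop();                          // drop the one-element doc's terminator...
new_bytes.extend(&doc.as_bytes()[4..]);   // ...and reuse the original's elements + terminator
let new_length = new_bytes.len() as i32;  // the production code uses Checked for this cast
new_bytes[0..4].copy_from_slice(&new_length.to_le_bytes());
// new_bytes now encodes { "_id": <ObjectId>, "x": 1 }
```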
diff --git a/src/client.rs b/src/client.rs
index 0fc310fb0..f3999d641 100644
--- a/src/client.rs
+++ b/src/client.rs
@@ -211,6 +211,26 @@ impl Client {
         ))
     }
 
+    /// Whether commands sent via this client should be auto-encrypted.
+    pub(crate) async fn should_auto_encrypt(&self) -> bool {
+        #[cfg(feature = "in-use-encryption-unstable")]
+        {
+            let csfle = self.inner.csfle.read().await;
+            match *csfle {
+                Some(ref csfle) => csfle
+                    .opts()
+                    .bypass_auto_encryption
+                    .map(|b| !b)
+                    .unwrap_or(true),
+                None => false,
+            }
+        }
+        #[cfg(not(feature = "in-use-encryption-unstable"))]
+        {
+            false
+        }
+    }
+
     #[cfg(all(test, feature = "in-use-encryption-unstable"))]
     pub(crate) async fn mongocryptd_spawned(&self) -> bool {
         self.inner
diff --git a/src/client/executor.rs b/src/client/executor.rs
index 1cbe9886e..f96e0604f 100644
--- a/src/client/executor.rs
+++ b/src/client/executor.rs
@@ -7,6 +7,7 @@ use once_cell::sync::Lazy;
 use serde::de::DeserializeOwned;
 
 use std::{
+    borrow::BorrowMut,
     collections::HashSet,
     sync::{atomic::Ordering, Arc},
     time::Instant,
@@ -52,6 +53,7 @@ use crate::{
         AbortTransaction,
         CommandErrorBody,
         CommitTransaction,
+        ExecutionContext,
         Operation,
         Retryability,
     },
@@ -90,17 +92,17 @@ impl Client {
     /// sessions and an explicit session is not provided.
     pub(crate) async fn execute_operation<T: Operation>(
         &self,
-        op: T,
+        mut op: impl BorrowMut<T>,
         session: impl Into<Option<&mut ClientSession>>,
     ) -> Result<T::O> {
-        self.execute_operation_with_details(op, session)
+        self.execute_operation_with_details(op.borrow_mut(), session)
             .await
             .map(|details| details.output)
     }
 
     async fn execute_operation_with_details<T: Operation>(
         &self,
-        op: T,
+        op: &mut T,
         session: impl Into<Option<&mut ClientSession>>,
     ) -> Result<ExecutionDetails<T>> {
         if self.inner.shutdown.executed.load(Ordering::SeqCst) {
@@ -144,12 +146,17 @@ impl Client {
     /// Execute the given operation, returning the cursor created by the operation.
     ///
     /// Server selection be will performed using the criteria specified on the operation, if any.
-    pub(crate) async fn execute_cursor_operation<Op, T>(&self, op: Op) -> Result<Cursor<T>>
+    pub(crate) async fn execute_cursor_operation<Op, T>(
+        &self,
+        mut op: impl BorrowMut<Op>,
+    ) -> Result<Cursor<T>>
     where
         Op: Operation<O = CursorSpecification>,
     {
         Box::pin(async {
-            let mut details = self.execute_operation_with_details(op, None).await?;
+            let mut details = self
+                .execute_operation_with_details(op.borrow_mut(), None)
+                .await?;
 
             let pinned = self.pin_connection_for_cursor(&details.output, &mut details.connection)?;
             Ok(Cursor::new(
@@ -164,14 +171,14 @@ impl Client {
 
     pub(crate) async fn execute_session_cursor_operation<Op, T>(
         &self,
-        op: Op,
+        mut op: impl BorrowMut<Op>,
         session: &mut ClientSession,
     ) -> Result<SessionCursor<T>>
     where
         Op: Operation<O = CursorSpecification>,
     {
         let mut details = self
-            .execute_operation_with_details(op, &mut *session)
+            .execute_operation_with_details(op.borrow_mut(), &mut *session)
             .await?;
 
         let pinned =
@@ -183,7 +190,7 @@ impl Client {
         self.inner.options.load_balanced.unwrap_or(false)
     }
 
-    fn pin_connection_for_cursor(
+    pub(crate) fn pin_connection_for_cursor(
         &self,
         spec: &CursorSpecification,
         conn: &mut Connection,
@@ -229,10 +236,10 @@ impl Client {
         let mut implicit_session = resume_data
             .as_mut()
             .and_then(|rd| rd.implicit_session.take());
-        let op = ChangeStreamAggregate::new(&args, resume_data)?;
+        let mut op = ChangeStreamAggregate::new(&args, resume_data)?;
 
         let mut details = self
-            .execute_operation_with_details(op, implicit_session.as_mut())
+            .execute_operation_with_details(&mut op, implicit_session.as_mut())
             .await?;
         if let Some(session) = implicit_session {
             details.implicit_session = Some(session);
@@ -264,10 +271,10 @@ impl Client {
             target,
             options,
         };
-        let op = ChangeStreamAggregate::new(&args, resume_data)?;
+        let mut op = ChangeStreamAggregate::new(&args, resume_data)?;
 
         let mut details = self
-            .execute_operation_with_details(op, &mut *session)
+            .execute_operation_with_details(&mut op, &mut *session)
             .await?;
         let (cursor_spec, cs_data) = details.output;
         let pinned =
@@ -284,7 +291,7 @@ impl Client {
     /// reauthenticating if reauthentication is required.
     async fn execute_operation_with_retry<T: Operation>(
         &self,
-        mut op: T,
+        op: &mut T,
         mut session: Option<&mut ClientSession>,
     ) -> Result<ExecutionDetails<T>> {
         // If the current transaction has been committed/aborted and it is not being
@@ -331,7 +338,7 @@ impl Client {
             };
             let server_addr = server.address.clone();
 
-            let mut conn = match get_connection(&session, &op, &server.pool).await {
+            let mut conn = match get_connection(&session, op, &server.pool).await {
                 Ok(c) => c,
                 Err(mut err) => {
                     retry.first_error()?;
@@ -341,7 +348,7 @@ impl Client {
                         err.add_label(RETRYABLE_WRITE_ERROR);
                     }
 
-                    let op_retry = match self.get_op_retryability(&op, &session) {
+                    let op_retry = match self.get_op_retryability(op, &session) {
                         Retryability::Read => err.is_read_retryable(),
                         Retryability::Write => err.is_write_retryable(),
                         _ => false,
@@ -372,7 +379,7 @@ impl Client {
                 session = implicit_session.as_mut();
             }
 
-            let retryability = self.get_retryability(&conn, &op, &session)?;
+            let retryability = self.get_retryability(&conn, op, &session)?;
             if retryability == Retryability::None {
                 retry.first_error()?;
             }
@@ -384,7 +391,7 @@ impl Client {
 
             let details = match self
                 .execute_operation_on_connection(
-                    &mut op,
+                    op,
                     &mut conn,
                     &mut session,
                     txn_number,
@@ -785,7 +792,12 @@ impl Client {
             }
         };
 
-        match op.handle_response(response, connection.stream_description()?) {
+        let context = ExecutionContext {
+            connection,
+            session: session.as_deref_mut(),
+        };
+
+        match op.handle_response(response, context).await {
             Ok(response) => Ok(response),
             Err(mut err) => {
                 err.add_labels_and_update_pin(
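Why the executor now takes `impl BorrowMut<T>` rather than `T` by value: one entry point can accept either an owned operation (the common one-shot case) or a `&mut` borrow, which the bulk write action needs so it can read `n_attempted` back out of the operation after each batch. A toy sketch of the pattern, independent of the driver's types:

```rust
use std::borrow::BorrowMut;

struct Op {
    attempts: u32,
}

fn execute<T: BorrowMut<Op>>(mut op: T) {
    op.borrow_mut().attempts += 1;
}

fn main() {
    // Owned: fine for one-shot callers.
    execute(Op { attempts: 0 });

    // Borrowed: the caller keeps the operation and can inspect updated state.
    let mut op = Op { attempts: 0 };
    execute(&mut op);
    assert_eq!(op.attempts, 1);
}
```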
diff --git a/src/client/options.rs b/src/client/options.rs
index 9c762fde3..a99577e36 100644
--- a/src/client/options.rs
+++ b/src/client/options.rs
@@ -1,6 +1,7 @@
 #[cfg(test)]
 mod test;
 
+mod bulk_write;
 mod parse;
 mod resolver_config;
 
@@ -44,6 +45,7 @@ use crate::{
     srv::{OriginalSrvInfo, SrvResolver},
 };
 
+pub use bulk_write::*;
 #[cfg(feature = "dns-resolver")]
 pub use resolver_config::ResolverConfig;
 #[cfg(not(feature = "dns-resolver"))]
diff --git a/src/client/options/bulk_write.rs b/src/client/options/bulk_write.rs
new file mode 100644
index 000000000..d8558d97f
--- /dev/null
+++ b/src/client/options/bulk_write.rs
@@ -0,0 +1,215 @@
+#![allow(missing_docs)]
+
+use serde::{ser::SerializeMap, Deserialize, Serialize};
+use serde_with::skip_serializing_none;
+
+use crate::{
+    bson::{rawdoc, Array, Bson, Document, RawDocumentBuf},
+    bson_util::{get_or_prepend_id_field, replacement_document_check, update_document_check},
+    error::Result,
+    options::{UpdateModifications, WriteConcern},
+    Namespace,
+};
+
+#[skip_serializing_none]
+#[derive(Clone, Debug, Default, Deserialize)]
+#[serde(rename_all = "camelCase")]
+#[non_exhaustive]
+pub struct BulkWriteOptions {
+    pub ordered: Option<bool>,
+    pub bypass_document_validation: Option<bool>,
+    pub comment: Option<Bson>,
+    #[serde(rename = "let")]
+    pub let_vars: Option<Document>,
+    pub verbose_results: Option<bool>,
+    pub write_concern: Option<WriteConcern>,
+}
+
+impl Serialize for BulkWriteOptions {
+    fn serialize<S>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error>
+    where
+        S: serde::Serializer,
+    {
+        let BulkWriteOptions {
+            ordered,
+            bypass_document_validation,
+            comment,
+            let_vars,
+            verbose_results,
+            write_concern,
+        } = self;
+
+        let mut map_serializer = serializer.serialize_map(None)?;
+
+        let ordered = ordered.unwrap_or(true);
+        map_serializer.serialize_entry("ordered", &ordered)?;
+
+        if let Some(bypass_document_validation) = bypass_document_validation {
+            map_serializer
+                .serialize_entry("bypassDocumentValidation", bypass_document_validation)?;
+        }
+
+        if let Some(ref comment) = comment {
+            map_serializer.serialize_entry("comment", comment)?;
+        }
+
+        if let Some(ref let_vars) = let_vars {
+            map_serializer.serialize_entry("let", let_vars)?;
+        }
+
+        let errors_only = verbose_results.map(|b| !b).unwrap_or(true);
+        map_serializer.serialize_entry("errorsOnly", &errors_only)?;
+
+        if let Some(ref write_concern) = write_concern {
+            map_serializer.serialize_entry("writeConcern", write_concern)?;
+        }
+
+        map_serializer.end()
+    }
+}
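What the hand-written `Serialize` impl produces, as a crate-internal sketch (the `bson::to_document` round trip and exact field order are assumptions): `verboseResults` is inverted into the wire-level `errorsOnly` field, and `ordered` is always emitted, defaulting to true.

```rust
let options = BulkWriteOptions {
    verbose_results: Some(true),
    ..Default::default()
};
let doc = bson::to_document(&options).unwrap();
assert_eq!(doc, bson::doc! { "ordered": true, "errorsOnly": false });
```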
map_serializer.serialize_entry("ordered", &ordered)?; + + if let Some(bypass_document_validation) = bypass_document_validation { + map_serializer + .serialize_entry("bypassDocumentValidation", bypass_document_validation)?; + } + + if let Some(ref comment) = comment { + map_serializer.serialize_entry("comment", comment)?; + } + + if let Some(ref let_vars) = let_vars { + map_serializer.serialize_entry("let", let_vars)?; + } + + let errors_only = verbose_results.map(|b| !b).unwrap_or(true); + map_serializer.serialize_entry("errorsOnly", &errors_only)?; + + if let Some(ref write_concern) = write_concern { + map_serializer.serialize_entry("writeConcern", write_concern)?; + } + + map_serializer.end() + } +} + +#[skip_serializing_none] +#[derive(Clone, Debug, Serialize)] +#[serde(untagged)] +#[non_exhaustive] +pub enum WriteModel { + #[non_exhaustive] + InsertOne { + #[serde(skip)] + namespace: Namespace, + document: Document, + }, + #[non_exhaustive] + #[serde(rename_all = "camelCase")] + UpdateOne { + #[serde(skip)] + namespace: Namespace, + filter: Document, + #[serde(rename = "updateMods")] + update: UpdateModifications, + array_filters: Option, + collation: Option, + hint: Option, + upsert: Option, + }, + #[non_exhaustive] + #[serde(rename_all = "camelCase")] + UpdateMany { + #[serde(skip)] + namespace: Namespace, + filter: Document, + #[serde(rename = "updateMods")] + update: UpdateModifications, + array_filters: Option, + collation: Option, + hint: Option, + upsert: Option, + }, + #[non_exhaustive] + #[serde(rename_all = "camelCase")] + ReplaceOne { + #[serde(skip)] + namespace: Namespace, + filter: Document, + #[serde(rename = "updateMods")] + replacement: Document, + collation: Option, + hint: Option, + upsert: Option, + }, + #[non_exhaustive] + DeleteOne { + #[serde(skip)] + namespace: Namespace, + filter: Document, + collation: Option, + hint: Option, + }, + #[non_exhaustive] + DeleteMany { + #[serde(skip)] + namespace: Namespace, + filter: Document, + collation: Option, + hint: Option, + }, +} + +pub(crate) enum OperationType { + Insert, + Update, + Delete, +} + +impl WriteModel { + pub(crate) fn namespace(&self) -> &Namespace { + match self { + Self::InsertOne { namespace, .. } => namespace, + Self::UpdateOne { namespace, .. } => namespace, + Self::UpdateMany { namespace, .. } => namespace, + Self::ReplaceOne { namespace, .. } => namespace, + Self::DeleteOne { namespace, .. } => namespace, + Self::DeleteMany { namespace, .. } => namespace, + } + } + + pub(crate) fn operation_type(&self) -> OperationType { + match self { + Self::InsertOne { .. } => OperationType::Insert, + Self::UpdateOne { .. } | Self::UpdateMany { .. } | Self::ReplaceOne { .. } => { + OperationType::Update + } + Self::DeleteOne { .. } | Self::DeleteMany { .. } => OperationType::Delete, + } + } + + /// Whether this operation should apply to all documents that match the filter. Returns None if + /// the operation does not use a filter. + pub(crate) fn multi(&self) -> Option { + match self { + Self::UpdateMany { .. } | Self::DeleteMany { .. } => Some(true), + Self::UpdateOne { .. } | Self::ReplaceOne { .. } | Self::DeleteOne { .. } => { + Some(false) + } + Self::InsertOne { .. } => None, + } + } + + pub(crate) fn operation_name(&self) -> &'static str { + match self.operation_type() { + OperationType::Insert => "insert", + OperationType::Update => "update", + OperationType::Delete => "delete", + } + } + + /// Returns the operation-specific fields that should be included in this model's entry in the + /// ops array. 
+
+    /// Returns the operation-specific fields that should be included in this model's entry in the
+    /// ops array. Also returns an inserted ID if this is an insert operation.
+    pub(crate) fn get_ops_document_contents(&self) -> Result<(RawDocumentBuf, Option<Bson>)> {
+        if let Self::UpdateOne { update, .. } | Self::UpdateMany { update, .. } = self {
+            if let UpdateModifications::Document(update_document) = update {
+                update_document_check(update_document)?;
+            }
+        } else if let Self::ReplaceOne { replacement, .. } = self {
+            replacement_document_check(replacement)?;
+        }
+
+        let (mut model_document, inserted_id) = match self {
+            Self::InsertOne { document, .. } => {
+                let mut insert_document = RawDocumentBuf::from_document(document)?;
+                let inserted_id = get_or_prepend_id_field(&mut insert_document)?;
+                (rawdoc! { "document": insert_document }, Some(inserted_id))
+            }
+            _ => {
+                let model_document = bson::to_raw_document_buf(&self)?;
+                (model_document, None)
+            }
+        };
+
+        if let Some(multi) = self.multi() {
+            model_document.append("multi", multi);
+        }
+
+        Ok((model_document, inserted_id))
+    }
+}
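Sketch of the `ops`-array entry these pieces combine to produce for an update model: serde renames `update` to `updateMods`, the operation name keys the namespace's index in `nsInfo` (assigned by the operation's `build`, later in this diff), and `multi` is appended from `WriteModel::multi`:

```rust
// For WriteModel::UpdateOne { filter: doc! { "x": 1 }, update: doc! { "$inc": { "x": 1 } }, .. }
// targeting the first (index 0) namespace, the entry is equivalent to:
doc! {
    "update": 0,
    "filter": { "x": 1 },
    "updateMods": { "$inc": { "x": 1 } },
    "multi": false,
}
```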
diff --git a/src/cmap/conn/command.rs b/src/cmap/conn/command.rs
index 933125156..05059731f 100644
--- a/src/cmap/conn/command.rs
+++ b/src/cmap/conn/command.rs
@@ -199,18 +199,6 @@ impl RawCommandResponse {
         })
     }
 
-    /// Initialize a response from a document.
-    #[cfg(test)]
-    pub(crate) fn with_document(doc: Document) -> Result<Self> {
-        Self::with_document_and_address(
-            ServerAddress::Tcp {
-                host: "localhost".to_string(),
-                port: None,
-            },
-            doc,
-        )
-    }
-
     pub(crate) fn new(source: ServerAddress, message: Message) -> Self {
         Self::new_raw(source, message.document_payload)
     }
diff --git a/src/cmap/conn/stream_description.rs b/src/cmap/conn/stream_description.rs
index b24164e26..405806ab9 100644
--- a/src/cmap/conn/stream_description.rs
+++ b/src/cmap/conn/stream_description.rs
@@ -78,29 +78,4 @@ impl StreamDescription {
             && self.logical_session_timeout.is_some()
             && self.max_wire_version.map_or(false, |version| version >= 6)
     }
-
-    /// Gets a description of a stream for a 4.2 connection.
-    #[cfg(test)]
-    pub(crate) fn new_testing() -> Self {
-        Self::with_wire_version(8)
-    }
-
-    /// Gets a description of a stream for a connection to a server with the provided
-    /// maxWireVersion.
-    #[cfg(test)]
-    pub(crate) fn with_wire_version(max_wire_version: i32) -> Self {
-        Self {
-            server_address: Default::default(),
-            initial_server_type: Default::default(),
-            max_wire_version: Some(max_wire_version),
-            min_wire_version: Some(max_wire_version),
-            sasl_supported_mechs: Default::default(),
-            logical_session_timeout: Some(Duration::from_secs(30 * 60)),
-            max_bson_object_size: 16 * 1024 * 1024,
-            max_write_batch_size: 100_000,
-            hello_ok: false,
-            max_message_size_bytes: 48_000_000,
-            service_id: None,
-        }
-    }
 }
diff --git a/src/cmap/test/integration.rs b/src/cmap/test/integration.rs
index e5a3a4ed8..97fe4c022 100644
--- a/src/cmap/test/integration.rs
+++ b/src/cmap/test/integration.rs
@@ -18,10 +18,10 @@ use crate::{
     test::{
         get_client_options,
         log_uncaptured,
-        util::event_buffer::EventBuffer,
-        FailCommandOptions,
-        FailPoint,
-        FailPointMode,
+        util::{
+            event_buffer::EventBuffer,
+            fail_point::{FailPoint, FailPointMode},
+        },
         TestClient,
     },
 };
@@ -189,13 +189,12 @@ async fn connection_error_during_establishment() {
         return;
     }
 
-    let options = FailCommandOptions::builder().error_code(1234).build();
-    let failpoint = FailPoint::fail_command(
+    let fail_point = FailPoint::fail_command(
         &[LEGACY_HELLO_COMMAND_NAME, "hello"],
         FailPointMode::Times(10),
-        Some(options),
-    );
-    let _fp_guard = client.enable_failpoint(failpoint, None).await.unwrap();
+    )
+    .error_code(1234);
+    let _guard = client.enable_fail_point(fail_point).await.unwrap();
 
     let buffer = EventBuffer::<Event>::new();
     #[allow(deprecated)]
@@ -245,9 +244,9 @@ async fn connection_error_during_operation() {
         return;
     }
 
-    let options = FailCommandOptions::builder().close_connection(true).build();
-    let failpoint = FailPoint::fail_command(&["ping"], FailPointMode::Times(10), Some(options));
-    let _fp_guard = client.enable_failpoint(failpoint, None).await.unwrap();
+    let fail_point =
+        FailPoint::fail_command(&["ping"], FailPointMode::Times(10)).close_connection(true);
+    let _guard = client.enable_fail_point(fail_point).await.unwrap();
 
     #[allow(deprecated)]
     let mut subscriber = buffer.subscribe();
diff --git a/src/coll.rs b/src/coll.rs
index ef34cb2fb..9b3a0ff43 100644
--- a/src/coll.rs
+++ b/src/coll.rs
@@ -212,7 +212,7 @@ where
 }
 
 /// A struct modeling the canonical name for a collection in MongoDB.
-#[derive(Debug, Clone, PartialEq, Eq)]
+#[derive(Debug, Clone, PartialEq, Eq, Hash)]
 pub struct Namespace {
     /// The name of the database associated with this namespace.
     pub db: String,
@@ -230,14 +230,6 @@ impl Namespace {
         }
     }
 
-    #[cfg(test)]
-    pub(crate) fn empty() -> Self {
-        Self {
-            db: String::new(),
-            coll: String::new(),
-        }
-    }
-
    pub(crate) fn from_str(s: &str) -> Option<Self> {
         let mut parts = s.split('.');
diff --git a/src/cursor/common.rs b/src/cursor/common.rs
index 30d05ca0a..4c8bc3b5a 100644
--- a/src/cursor/common.rs
+++ b/src/cursor/common.rs
@@ -443,21 +443,6 @@ impl CursorSpecification {
     pub(crate) fn id(&self) -> i64 {
         self.info.id
     }
-
-    #[cfg(test)]
-    pub(crate) fn address(&self) -> &ServerAddress {
-        &self.info.address
-    }
-
-    #[cfg(test)]
-    pub(crate) fn batch_size(&self) -> Option<u32> {
-        self.info.batch_size
-    }
-
-    #[cfg(test)]
-    pub(crate) fn max_time(&self) -> Option<Duration> {
-        self.info.max_time
-    }
 }
 
 /// Static information about a cursor.
diff --git a/src/error.rs b/src/error.rs
index 93fa130c4..16ae6333f 100644
--- a/src/error.rs
+++ b/src/error.rs
@@ -1,5 +1,7 @@
 //! Contains the `Error` and `Result` types that `mongodb` uses.
 
+mod bulk_write;
+
 use std::{
     any::Any,
     collections::{HashMap, HashSet},
@@ -7,14 +9,16 @@ use std::{
     sync::Arc,
 };
 
+use serde::{Deserialize, Serialize};
+use thiserror::Error;
+
 use crate::{
-    bson::Document,
+    bson::{Bson, Document},
     options::ServerAddress,
     sdam::{ServerType, TopologyVersion},
 };
-use bson::Bson;
-use serde::{Deserialize, Serialize};
-use thiserror::Error;
+
+pub use bulk_write::BulkWriteError as ClientBulkWriteError;
 
 const RECOVERING_CODES: [i32; 5] = [11600, 11602, 13436, 189, 91];
 const NOTWRITABLEPRIMARY_CODES: [i32; 3] = [10107, 13435, 10058];
@@ -258,7 +262,13 @@ impl Error {
 
     /// Whether this error contains the specified label.
     pub fn contains_label<T: AsRef<str>>(&self, label: T) -> bool {
-        self.labels().contains(label.as_ref())
+        let label = label.as_ref();
+        self.labels().contains(label)
+            || self
+                .source
+                .as_ref()
+                .map(|source| source.contains_label(label))
+                .unwrap_or(false)
     }
 
     /// Adds the given label to this error.
@@ -309,7 +319,7 @@ impl Error {
     }
 
     /// Gets the code from this error.
-    #[allow(unused)]
+    #[cfg(test)]
     pub(crate) fn code(&self) -> Option<i32> {
         match self.kind.as_ref() {
             ErrorKind::Command(command_error) => Some(command_error.code),
@@ -464,6 +474,10 @@ impl Error {
     /// sensitive commands. Currently, the only field besides those that we expose is the
     /// error message.
     pub(crate) fn redact(&mut self) {
+        if let Some(source) = self.source.as_deref_mut() {
+            source.redact();
+        }
+
         // This is intentionally written without a catch-all branch so that if new error
         // kinds are added we remember to reason about whether they need to be redacted.
         match *self.kind {
@@ -477,6 +491,14 @@ impl Error {
                     wce.redact();
                 }
             }
+            ErrorKind::ClientBulkWrite(ref mut client_bulk_write_error) => {
+                for write_concern_error in client_bulk_write_error.write_concern_errors.iter_mut() {
+                    write_concern_error.redact();
+                }
+                for (_, write_error) in client_bulk_write_error.write_errors.iter_mut() {
+                    write_error.redact();
+                }
+            }
             ErrorKind::Command(ref mut command_error) => {
                 command_error.redact();
             }
@@ -594,6 +616,9 @@ pub enum ErrorKind {
     #[error("An error occurred when trying to execute a write operation: {0:?}")]
     BulkWrite(BulkWriteFailure),
 
+    #[error("An error occurred when executing Client::bulk_write: {0:?}")]
+    ClientBulkWrite(ClientBulkWriteError),
+
     /// The server returned an error to an attempted operation.
     #[error("Command failed: {0}")]
     // note that if this Display impl changes, COMMAND_ERROR_REGEX in the unified runner matching
@@ -743,7 +768,7 @@ pub struct WriteConcernError {
     pub code_name: String,
 
     /// A description of the error that occurred.
-    #[serde(rename = "errmsg", default = "String::new")]
+    #[serde(alias = "errmsg", default = "String::new")]
     pub message: String,
 
     /// A document identifying the write concern setting related to the error.
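The `contains_label` change in context, as a crate-internal sketch (it relies on the `pub(crate)` helpers shown above): a label attached to a wrapped source error is now visible from the top-level error, which matters because bulk write failures wrap their originating errors via `with_source`.

```rust
let mut inner: Error = ErrorKind::Internal { message: "inner".into() }.into();
inner.add_label("RetryableWriteError");
let outer: Error = ErrorKind::Internal { message: "outer".into() }.into();
let outer = outer.with_source(inner);
assert!(outer.contains_label("RetryableWriteError")); // previously false
```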
@@ -895,6 +920,7 @@ impl WriteFailure {
         }
     }
 
+    #[cfg(test)]
     pub(crate) fn code(&self) -> i32 {
         match self {
             Self::WriteConcernError(e) => e.code,
diff --git a/src/error/bulk_write.rs b/src/error/bulk_write.rs
new file mode 100644
index 000000000..71b491ee4
--- /dev/null
+++ b/src/error/bulk_write.rs
@@ -0,0 +1,34 @@
+#![allow(missing_docs)]
+
+use std::collections::HashMap;
+
+use crate::{
+    error::{WriteConcernError, WriteError},
+    results::BulkWriteResult,
+};
+
+#[derive(Clone, Debug, Default)]
+#[non_exhaustive]
+pub struct BulkWriteError {
+    pub write_concern_errors: Vec<WriteConcernError>,
+    pub write_errors: HashMap<usize, WriteError>,
+    pub partial_result: Option<BulkWriteResult>,
+}
+
+impl BulkWriteError {
+    pub(crate) fn merge(&mut self, other: BulkWriteError) {
+        self.write_concern_errors.extend(other.write_concern_errors);
+        self.write_errors.extend(other.write_errors);
+        if let Some(other_partial_result) = other.partial_result {
+            self.merge_partial_results(other_partial_result);
+        }
+    }
+
+    pub(crate) fn merge_partial_results(&mut self, other_partial_result: BulkWriteResult) {
+        if let Some(ref mut partial_result) = self.partial_result {
+            partial_result.merge(other_partial_result);
+        } else {
+            self.partial_result = Some(other_partial_result);
+        }
+    }
+}
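Sketch of how per-batch errors combine (`error_a` and `error_b` stand in for `WriteError` values): because keys of `write_errors` are already absolute indexes into the caller's models, `merge` can extend the maps directly without re-offsetting.

```rust
let mut first = BulkWriteError::default();
first.write_errors.insert(0, error_a); // from batch 1
let mut second = BulkWriteError::default();
second.write_errors.insert(5, error_b); // from batch 2, already offset
first.merge(second);
assert_eq!(first.write_errors.len(), 2);
```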
diff --git a/src/hello.rs b/src/hello.rs
index a54ddbb32..21ba761d6 100644
--- a/src/hello.rs
+++ b/src/hello.rs
@@ -136,8 +136,8 @@ pub(crate) struct HelloCommandResponse {
     /// Whether the server is an arbiter.
     pub arbiter_only: Option<bool>,
 
-    #[serde(rename = "isreplicaset")]
+    #[serde(rename = "isreplicaset")]
     /// Whether the server is a replica set.
     pub is_replica_set: Option<bool>,
 
diff --git a/src/operation.rs b/src/operation.rs
index 3cf389bf4..3ffc7ee7a 100644
--- a/src/operation.rs
+++ b/src/operation.rs
@@ -1,5 +1,6 @@
 mod abort_transaction;
 pub(crate) mod aggregate;
+pub(crate) mod bulk_write;
 mod commit_transaction;
 pub(crate) mod count;
 pub(crate) mod count_documents;
@@ -23,19 +24,23 @@ pub(crate) mod run_cursor_command;
 mod search_index;
 mod update;
 
-#[cfg(test)]
-mod test;
-
 use std::{collections::VecDeque, fmt::Debug, ops::Deref};
 
 use bson::{RawBsonRef, RawDocument, RawDocumentBuf, Timestamp};
+use futures_util::FutureExt;
 use serde::{de::DeserializeOwned, Deserialize, Serialize};
 
 use crate::{
     bson::{self, Bson, Document},
     bson_util::{self, extend_raw_document_buf},
     client::{ClusterTime, HELLO_COMMAND_NAMES, REDACTED_COMMANDS},
-    cmap::{conn::PinnedConnectionHandle, Command, RawCommandResponse, StreamDescription},
+    cmap::{
+        conn::PinnedConnectionHandle,
+        Command,
+        Connection,
+        RawCommandResponse,
+        StreamDescription,
+    },
     error::{
         BulkWriteError,
         BulkWriteFailure,
@@ -48,6 +53,8 @@ use crate::{
     },
     options::WriteConcern,
     selection_criteria::SelectionCriteria,
+    BoxFuture,
+    ClientSession,
     Namespace,
 };
 
@@ -72,8 +79,22 @@ const SERVER_4_4_0_WIRE_VERSION: i32 = 9;
 // The maximum number of bytes that may be included in a write payload when auto-encryption is
 // enabled.
 const MAX_ENCRYPTED_WRITE_SIZE: usize = 2_097_152;
-// The amount of overhead bytes to account for when building a document sequence.
-const COMMAND_OVERHEAD_SIZE: usize = 16_000;
+// The amount of message overhead (OP_MSG bytes and command-agnostic fields) to account for when
+// building a multi-write operation using document sequences.
+const OP_MSG_OVERHEAD_BYTES: usize = 1_000;
+
+/// Context about the execution of the operation.
+pub(crate) struct ExecutionContext<'a> {
+    pub(crate) connection: &'a mut Connection,
+    pub(crate) session: Option<&'a mut ClientSession>,
+}
+
+#[derive(Debug, PartialEq, Clone, Copy)]
+pub(crate) enum Retryability {
+    Write,
+    Read,
+    None,
+}
 
 /// A trait modeling the behavior of a server side operation.
 ///
@@ -98,11 +119,11 @@ pub(crate) trait Operation {
     fn extract_at_cluster_time(&self, _response: &RawDocument) -> Result<Option<Timestamp>>;
 
     /// Interprets the server response to the command.
-    fn handle_response(
-        &self,
+    fn handle_response<'a>(
+        &'a self,
         response: RawCommandResponse,
-        description: &StreamDescription,
-    ) -> Result<Self::O>;
+        context: ExecutionContext<'a>,
+    ) -> BoxFuture<'a, Result<Self::O>>;
 
     /// Interpret an error encountered while sending the built command to the server, potentially
     /// recovering.
@@ -118,7 +139,7 @@ pub(crate) trait Operation {
     fn write_concern(&self) -> Option<&WriteConcern>;
 
     /// Returns whether or not this command supports the `readConcern` field.
-    fn supports_read_concern(&self, _description: &StreamDescription) -> bool;
+    fn supports_read_concern(&self, description: &StreamDescription) -> bool;
 
     /// Whether this operation supports sessions or not.
     fn supports_sessions(&self) -> bool;
@@ -134,6 +155,152 @@ pub(crate) trait Operation {
     fn name(&self) -> &str;
 }
 
+// A mirror of the `Operation` trait, with default behavior where appropriate. Should only be
+// implemented by operation types that do not delegate to other operations.
+pub(crate) trait OperationWithDefaults: Send + Sync {
+    /// The output type of this operation.
+    type O;
+
+    /// The format of the command body constructed in `build`.
+    type Command: CommandBody;
+
+    /// The name of the server side command associated with this operation.
+    const NAME: &'static str;
+
+    /// Returns the command that should be sent to the server as part of this operation.
+    /// The operation may store some additional state that is required for handling the response.
+    fn build(&mut self, description: &StreamDescription) -> Result<Command<Self::Command>>;
+
+    /// Parse the response for the atClusterTime field.
+    /// Depending on the operation, this may be found in different locations.
+    fn extract_at_cluster_time(&self, _response: &RawDocument) -> Result<Option<Timestamp>> {
+        Ok(None)
+    }
+
+    /// Interprets the server response to the command.
+    fn handle_response<'a>(
+        &'a self,
+        _response: RawCommandResponse,
+        _context: ExecutionContext<'a>,
+    ) -> Result<Self::O> {
+        Err(ErrorKind::Internal {
+            message: format!("operation handling not implemented for {}", Self::NAME),
+        }
+        .into())
+    }
+
+    /// Interprets the server response to the command. This method should only be implemented when
+    /// async code is required to handle the response.
+    fn handle_response_async<'a>(
+        &'a self,
+        response: RawCommandResponse,
+        context: ExecutionContext<'a>,
+    ) -> BoxFuture<'a, Result<Self::O>> {
+        async move { self.handle_response(response, context) }.boxed()
+    }
+
+    /// Interpret an error encountered while sending the built command to the server, potentially
+    /// recovering.
+    fn handle_error(&self, error: Error) -> Result<Self::O> {
+        Err(error)
+    }
+
+    /// Criteria to use for selecting the server that this operation will be executed on.
+    fn selection_criteria(&self) -> Option<&SelectionCriteria> {
+        None
+    }
+
+    /// Whether or not this operation will request acknowledgment from the server.
+    fn is_acknowledged(&self) -> bool {
+        self.write_concern()
+            .map(WriteConcern::is_acknowledged)
+            .unwrap_or(true)
+    }
+
+    /// The write concern to use for this operation, if any.
+    fn write_concern(&self) -> Option<&WriteConcern> {
+        None
+    }
+
+    /// Returns whether or not this command supports the `readConcern` field.
+    fn supports_read_concern(&self, _description: &StreamDescription) -> bool {
+        false
+    }
+
+    /// Whether this operation supports sessions or not.
+    fn supports_sessions(&self) -> bool {
+        true
+    }
+
+    /// The level of retryability the operation supports.
+    fn retryability(&self) -> Retryability {
+        Retryability::None
+    }
+
+    /// Updates this operation as needed for a retry.
+    fn update_for_retry(&mut self) {}
+
+    fn pinned_connection(&self) -> Option<&PinnedConnectionHandle> {
+        None
+    }
+
+    fn name(&self) -> &str {
+        Self::NAME
+    }
+}
+
+impl<T: OperationWithDefaults> Operation for T
+where
+    T: Send + Sync,
+{
+    type O = T::O;
+    type Command = T::Command;
+    const NAME: &'static str = T::NAME;
+    fn build(&mut self, description: &StreamDescription) -> Result<Command<Self::Command>> {
+        self.build(description)
+    }
+    fn extract_at_cluster_time(&self, response: &RawDocument) -> Result<Option<Timestamp>> {
+        self.extract_at_cluster_time(response)
+    }
+    fn handle_response<'a>(
+        &'a self,
+        response: RawCommandResponse,
+        context: ExecutionContext<'a>,
+    ) -> BoxFuture<'a, Result<Self::O>> {
+        self.handle_response_async(response, context)
+    }
+    fn handle_error(&self, error: Error) -> Result<Self::O> {
+        self.handle_error(error)
+    }
+    fn selection_criteria(&self) -> Option<&SelectionCriteria> {
+        self.selection_criteria()
+    }
+    fn is_acknowledged(&self) -> bool {
+        self.is_acknowledged()
+    }
+    fn write_concern(&self) -> Option<&WriteConcern> {
+        self.write_concern()
+    }
+    fn supports_read_concern(&self, description: &StreamDescription) -> bool {
+        self.supports_read_concern(description)
+    }
+    fn supports_sessions(&self) -> bool {
+        self.supports_sessions()
+    }
+    fn retryability(&self) -> Retryability {
+        self.retryability()
+    }
+    fn update_for_retry(&mut self) {
+        self.update_for_retry()
+    }
+    fn pinned_connection(&self) -> Option<&PinnedConnectionHandle> {
+        self.pinned_connection()
+    }
+    fn name(&self) -> &str {
+        self.name()
+    }
+}
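What a leaf operation looks like against the new trait, as an illustrative sketch (`Ping` is hypothetical, and it assumes `Document` satisfies `CommandBody` here): only the sync `handle_response` hook is written, and the blanket impl above adapts it to the async `Operation` trait by boxing the default `handle_response_async`.

```rust
struct Ping;

impl OperationWithDefaults for Ping {
    type O = ();
    type Command = Document;
    const NAME: &'static str = "ping";

    fn build(&mut self, _description: &StreamDescription) -> Result<Command<Self::Command>> {
        Ok(Command::new(Self::NAME, "admin", doc! { Self::NAME: 1 }))
    }

    fn handle_response<'a>(
        &'a self,
        _response: RawCommandResponse,
        _context: ExecutionContext<'a>,
    ) -> Result<Self::O> {
        Ok(())
    }
}
```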
+
 pub(crate) trait CommandBody: Serialize {
     fn should_redact(&self) -> bool {
         false
@@ -242,7 +409,9 @@ pub(crate) fn append_options_to_raw_document(
 }
 
 #[derive(Deserialize, Debug)]
-pub(crate) struct EmptyBody {}
+pub(crate) struct SingleWriteBody {
+    n: u64,
+}
 
 /// Body of a write response that could possibly have a write concern error but not write errors.
 #[derive(Debug, Deserialize, Default, Clone)]
@@ -267,12 +436,10 @@ impl WriteConcernOnlyBody {
 }
 
 #[derive(Deserialize, Debug)]
-pub(crate) struct WriteResponseBody<T = EmptyBody> {
+pub(crate) struct WriteResponseBody<T = SingleWriteBody> {
     #[serde(flatten)]
     body: T,
 
-    n: u64,
-
     #[serde(rename = "writeErrors")]
     write_errors: Option<Vec<BulkWriteError>>,
 
@@ -367,13 +534,6 @@ where
     }
 }
 
-#[derive(Debug, PartialEq, Clone, Copy)]
-pub(crate) enum Retryability {
-    Write,
-    Read,
-    None,
-}
-
 macro_rules! remove_empty_write_concern {
     ($opts:expr) => {
         if let Some(ref mut options) = $opts {
@@ -387,131 +547,3 @@ macro_rules! remove_empty_write_concern {
 }
 
 pub(crate) use remove_empty_write_concern;
-
-// A mirror of the `Operation` trait, with default behavior where appropriate. Should only be
-// implemented by operation types that do not delegate to other operations.
-pub(crate) trait OperationWithDefaults {
-    /// The output type of this operation.
-    type O;
-
-    /// The format of the command body constructed in `build`.
-    type Command: CommandBody;
-
-    /// The name of the server side command associated with this operation.
-    const NAME: &'static str;
-
-    /// Returns the command that should be sent to the server as part of this operation.
-    /// The operation may store some additional state that is required for handling the response.
-    fn build(&mut self, description: &StreamDescription) -> Result<Command<Self::Command>>;
-
-    /// Parse the response for the atClusterTime field.
-    /// Depending on the operation, this may be found in different locations.
-    fn extract_at_cluster_time(&self, _response: &RawDocument) -> Result<Option<Timestamp>> {
-        Ok(None)
-    }
-
-    /// Interprets the server response to the command.
-    fn handle_response(
-        &self,
-        response: RawCommandResponse,
-        description: &StreamDescription,
-    ) -> Result<Self::O>;
-
-    /// Interpret an error encountered while sending the built command to the server, potentially
-    /// recovering.
-    fn handle_error(&self, error: Error) -> Result<Self::O> {
-        Err(error)
-    }
-
-    /// Criteria to use for selecting the server that this operation will be executed on.
-    fn selection_criteria(&self) -> Option<&SelectionCriteria> {
-        None
-    }
-
-    /// Whether or not this operation will request acknowledgment from the server.
-    fn is_acknowledged(&self) -> bool {
-        self.write_concern()
-            .map(WriteConcern::is_acknowledged)
-            .unwrap_or(true)
-    }
-
-    /// The write concern to use for this operation, if any.
-    fn write_concern(&self) -> Option<&WriteConcern> {
-        None
-    }
-
-    /// Returns whether or not this command supports the `readConcern` field.
-    fn supports_read_concern(&self, _description: &StreamDescription) -> bool {
-        false
-    }
-
-    /// Whether this operation supports sessions or not.
-    fn supports_sessions(&self) -> bool {
-        true
-    }
-
-    /// The level of retryability the operation supports.
-    fn retryability(&self) -> Retryability {
-        Retryability::None
-    }
-
-    /// Updates this operation as needed for a retry.
-    fn update_for_retry(&mut self) {}
-
-    fn pinned_connection(&self) -> Option<&PinnedConnectionHandle> {
-        None
-    }
-
-    fn name(&self) -> &str {
-        Self::NAME
-    }
-}
-
-impl<T: OperationWithDefaults> Operation for T {
-    type O = T::O;
-    type Command = T::Command;
-    const NAME: &'static str = T::NAME;
-    fn build(&mut self, description: &StreamDescription) -> Result<Command<Self::Command>> {
-        self.build(description)
-    }
-    fn extract_at_cluster_time(&self, response: &RawDocument) -> Result<Option<Timestamp>> {
-        self.extract_at_cluster_time(response)
-    }
-    fn handle_response(
-        &self,
-        response: RawCommandResponse,
-        description: &StreamDescription,
-    ) -> Result<Self::O> {
-        self.handle_response(response, description)
-    }
-    fn handle_error(&self, error: Error) -> Result<Self::O> {
-        self.handle_error(error)
-    }
-    fn selection_criteria(&self) -> Option<&SelectionCriteria> {
-        self.selection_criteria()
-    }
-    fn is_acknowledged(&self) -> bool {
-        self.is_acknowledged()
-    }
-    fn write_concern(&self) -> Option<&WriteConcern> {
-        self.write_concern()
-    }
-    fn supports_read_concern(&self, description: &StreamDescription) -> bool {
-        self.supports_read_concern(description)
-    }
-    fn supports_sessions(&self) -> bool {
-        self.supports_sessions()
-    }
-    fn retryability(&self) -> Retryability {
-        self.retryability()
-    }
-    fn update_for_retry(&mut self) {
-        self.update_for_retry()
-    }
-    fn pinned_connection(&self) -> Option<&PinnedConnectionHandle> {
-        self.pinned_connection()
-    }
-    fn name(&self) -> &str {
-        self.name()
-    }
-}
diff --git a/src/operation/abort_transaction.rs b/src/operation/abort_transaction.rs
index 7a08861dd..ef669e26e 100644
--- a/src/operation/abort_transaction.rs
+++ b/src/operation/abort_transaction.rs
@@ -1,7 +1,5 @@
-use bson::Document;
-
 use crate::{
-    bson::doc,
+    bson::{doc, Document},
     client::session::TransactionPin,
     cmap::{conn::PinnedConnectionHandle, Command, RawCommandResponse, StreamDescription},
     error::Result,
@@ -10,7 +8,7 @@ use crate::{
     selection_criteria::SelectionCriteria,
 };
 
-use super::{OperationWithDefaults, WriteConcernOnlyBody};
+use super::{ExecutionContext, OperationWithDefaults, WriteConcernOnlyBody};
 
 pub(crate) struct AbortTransaction {
     write_concern: Option<WriteConcern>,
@@ -49,10 +47,10 @@ impl OperationWithDefaults for AbortTransaction {
         ))
     }
 
-    fn handle_response(
-        &self,
+    fn handle_response<'a>(
+        &'a self,
         response: RawCommandResponse,
-        _description: &StreamDescription,
+        _context: ExecutionContext<'a>,
     ) -> Result<Self::O> {
         let response: WriteConcernOnlyBody = response.body()?;
         response.validate()
diff --git a/src/operation/aggregate.rs b/src/operation/aggregate.rs
index 3234f2e1e..ac777aca1 100644
--- a/src/operation/aggregate.rs
+++ b/src/operation/aggregate.rs
@@ -1,8 +1,5 @@
 pub(crate) mod change_stream;
 
-#[cfg(test)]
-mod test;
-
 use crate::{
     bson::{doc, Bson, Document},
     bson_util,
@@ -16,6 +13,7 @@ use crate::{
 
 use super::{
     CursorBody,
+    ExecutionContext,
     OperationWithDefaults,
     WriteConcernOnlyBody,
     SERVER_4_2_0_WIRE_VERSION,
@@ -30,11 +28,6 @@ pub(crate) struct Aggregate {
 }
 
 impl Aggregate {
-    #[cfg(test)]
-    fn empty() -> Self {
-        Self::new(Namespace::empty(), Vec::new(), None)
-    }
-
     pub(crate) fn new(
         target: impl Into<AggregateTarget>,
         pipeline: impl IntoIterator<Item = Document>,
@@ -87,10 +80,10 @@ impl OperationWithDefaults for Aggregate {
         CursorBody::extract_at_cluster_time(response)
     }
 
-    fn handle_response(
-        &self,
+    fn handle_response<'a>(
+        &'a self,
         response: RawCommandResponse,
-        description: &StreamDescription,
+        context: ExecutionContext<'a>,
     ) -> Result<Self::O> {
         let cursor_response: CursorBody = response.body()?;
 
@@ -99,6 +92,8 @@ impl OperationWithDefaults for Aggregate {
             wc_error_info.validate()?;
         };
 
+        let description = context.connection.stream_description()?;
+
         // The comment should only be propagated to getMore calls on 4.4+.
         let comment = if description.max_wire_version.unwrap_or(0) < SERVER_4_4_0_WIRE_VERSION {
             None
diff --git a/src/operation/aggregate/change_stream.rs b/src/operation/aggregate/change_stream.rs
index 50a108cc3..e821d7ab2 100644
--- a/src/operation/aggregate/change_stream.rs
+++ b/src/operation/aggregate/change_stream.rs
@@ -4,7 +4,7 @@ use crate::{
     cmap::{Command, RawCommandResponse, StreamDescription},
     cursor::CursorSpecification,
     error::Result,
-    operation::{append_options, OperationWithDefaults, Retryability},
+    operation::{append_options, ExecutionContext, OperationWithDefaults, Retryability},
     options::{ChangeStreamOptions, SelectionCriteria, WriteConcern},
 };
 
@@ -82,16 +82,21 @@ impl OperationWithDefaults for ChangeStreamAggregate {
         self.inner.extract_at_cluster_time(response)
     }
 
-    fn handle_response(
-        &self,
+    fn handle_response<'a>(
+        &'a self,
         response: RawCommandResponse,
-        description: &StreamDescription,
+        mut context: ExecutionContext<'a>,
     ) -> Result<Self::O> {
         let op_time = response
             .raw_body()
             .get("operationTime")?
             .and_then(bson::RawBsonRef::as_timestamp);
-        let spec = self.inner.handle_response(response, description)?;
+
+        let inner_context = ExecutionContext {
+            connection: context.connection,
+            session: context.session.as_deref_mut(),
+        };
+        let spec = self.inner.handle_response(response, inner_context)?;
 
         let mut data = ChangeStreamData {
             resume_token: ResumeToken::initial(self.args.options.as_ref(), &spec),
@@ -102,6 +107,8 @@ impl OperationWithDefaults for ChangeStreamAggregate {
                 && o.resume_after.is_none()
                 && o.start_after.is_none()
         };
+
+        let description = context.connection.stream_description()?;
         if self.args.options.as_ref().map_or(true, has_no_time)
             && description.max_wire_version.map_or(false, |v| v >= 7)
             && spec.initial_buffer.is_empty()
diff --git a/src/operation/aggregate/test.rs b/src/operation/aggregate/test.rs
deleted file mode 100644
index 67551a5e5..000000000
--- a/src/operation/aggregate/test.rs
+++ /dev/null
@@ -1,96 +0,0 @@
-use std::time::Duration;
-
-use crate::{
-    bson::doc,
-    error::{ErrorKind, WriteFailure},
-    operation::{
-        aggregate::Aggregate,
-        test::{self, handle_response_test},
-    },
-    options::AggregateOptions,
-    Namespace,
-};
-
-#[test]
-fn op_selection_criteria() {
-    test::op_selection_criteria(|selection_criteria| {
-        let options = AggregateOptions {
-            selection_criteria,
-            ..Default::default()
-        };
-        Aggregate::new("".to_string(), Vec::new(), Some(options))
-    });
-}
-
-#[test]
-fn handle_max_await_time() {
-    let response = doc! {
-        "ok": 1,
-        "cursor": {
-            "id": 123,
-            "ns": "a.b",
-            "firstBatch": []
-        }
-    };
-
-    let aggregate = Aggregate::empty();
-    let spec = handle_response_test(&aggregate, response.clone()).unwrap();
-    assert!(spec.max_time().is_none());
-
-    let max_await = Duration::from_millis(123);
-    let options = AggregateOptions::builder()
-        .max_await_time(max_await)
-        .build();
-    let aggregate = Aggregate::new(Namespace::empty(), Vec::new(), Some(options));
-    let spec = handle_response_test(&aggregate, response).unwrap();
-    assert_eq!(spec.max_time(), Some(max_await));
-}
-
-#[test]
-fn handle_write_concern_error() {
-    let response = doc! {
{ - "ok": 1.0, - "cursor": { - "id": 0, - "ns": "test.test", - "firstBatch": [], - }, - "writeConcernError": { - "code": 64, - "codeName": "WriteConcernFailed", - "errmsg": "Waiting for replication timed out", - "errInfo": { - "wtimeout": true - } - } - }; - - let aggregate = Aggregate::new( - Namespace::empty(), - vec![doc! { "$merge": { "into": "a" } }], - None, - ); - - let error = handle_response_test(&aggregate, response).unwrap_err(); - match *error.kind { - ErrorKind::Write(WriteFailure::WriteConcernError(_)) => {} - ref e => panic!("should have gotten WriteConcernError, got {:?} instead", e), - } -} - -#[test] -fn handle_invalid_response() { - let aggregate = Aggregate::empty(); - - let garbled = doc! { "asdfasf": "ASdfasdf" }; - handle_response_test(&aggregate, garbled).unwrap_err(); - - let missing_cursor_field = doc! { - "ok": 1.0, - "cursor": { - "ns": "test.test", - "firstBatch": [], - } - }; - handle_response_test(&aggregate, missing_cursor_field).unwrap_err(); -} diff --git a/src/operation/bulk_write.rs b/src/operation/bulk_write.rs new file mode 100644 index 000000000..18433fe9f --- /dev/null +++ b/src/operation/bulk_write.rs @@ -0,0 +1,339 @@ +mod server_responses; + +use std::collections::HashMap; + +use futures_core::TryStream; +use futures_util::{FutureExt, TryStreamExt}; + +use crate::{ + bson::{rawdoc, Bson, RawDocumentBuf}, + bson_util::{self, extend_raw_document_buf}, + checked::Checked, + cmap::{Command, RawCommandResponse, StreamDescription}, + cursor::CursorSpecification, + error::{ClientBulkWriteError, Error, ErrorKind, Result}, + operation::OperationWithDefaults, + options::{BulkWriteOptions, OperationType, WriteModel}, + results::{BulkWriteResult, DeleteResult, InsertOneResult, UpdateResult}, + BoxFuture, + Client, + Cursor, + Namespace, + SessionCursor, +}; + +use super::{ExecutionContext, Retryability, WriteResponseBody, OP_MSG_OVERHEAD_BYTES}; + +use server_responses::*; + +pub(crate) struct BulkWrite<'a> { + client: Client, + models: &'a [WriteModel], + offset: usize, + options: Option<&'a BulkWriteOptions>, + /// The _ids of the inserted documents. This value is populated in `build`. + inserted_ids: HashMap, + /// The number of writes that were sent to the server. This value is populated in `build`. + pub(crate) n_attempted: usize, +} + +impl<'a> BulkWrite<'a> { + pub(crate) async fn new( + client: Client, + models: &'a [WriteModel], + offset: usize, + options: Option<&'a BulkWriteOptions>, + ) -> BulkWrite<'a> { + Self { + client, + models, + offset, + options, + n_attempted: 0, + inserted_ids: HashMap::new(), + } + } + + fn is_verbose(&self) -> bool { + self.options + .as_ref() + .and_then(|o| o.verbose_results) + .unwrap_or(false) + } + + async fn iterate_results_cursor( + &self, + mut stream: impl TryStream + Unpin, + error: &mut ClientBulkWriteError, + ) -> Result<()> { + let result = &mut error.partial_result; + + while let Some(response) = stream.try_next().await? 
+
+    async fn iterate_results_cursor(
+        &self,
+        mut stream: impl TryStream<Ok = SingleOperationResponse, Error = Error> + Unpin,
+        error: &mut ClientBulkWriteError,
+    ) -> Result<()> {
+        let result = &mut error.partial_result;
+
+        while let Some(response) = stream.try_next().await? {
+            let index = response.index + self.offset;
+            match response.result {
+                SingleOperationResult::Success {
+                    n,
+                    n_modified,
+                    upserted,
+                } => {
+                    let model = self.get_model(response.index)?;
+                    match model.operation_type() {
+                        OperationType::Insert => {
+                            let inserted_id = self.get_inserted_id(index)?;
+                            let insert_result = InsertOneResult { inserted_id };
+                            result
+                                .get_or_insert_with(|| BulkWriteResult::new(self.is_verbose()))
+                                .add_insert_result(index, insert_result);
+                        }
+                        OperationType::Update => {
+                            let modified_count =
+                                n_modified.ok_or_else(|| ErrorKind::InvalidResponse {
+                                    message: "nModified value not returned for update bulkWrite \
+                                              operation"
+                                        .into(),
+                                })?;
+                            let update_result = UpdateResult {
+                                matched_count: n,
+                                modified_count,
+                                upserted_id: upserted.map(|upserted| upserted.id),
+                            };
+                            result
+                                .get_or_insert_with(|| BulkWriteResult::new(self.is_verbose()))
+                                .add_update_result(index, update_result);
+                        }
+                        OperationType::Delete => {
+                            let delete_result = DeleteResult { deleted_count: n };
+                            result
+                                .get_or_insert_with(|| BulkWriteResult::new(self.is_verbose()))
+                                .add_delete_result(index, delete_result);
+                        }
+                    }
+                }
+                SingleOperationResult::Error(write_error) => {
+                    error.write_errors.insert(index, write_error);
+                }
+            }
+        }
+
+        Ok(())
+    }
+
+    fn get_model(&self, index: usize) -> Result<&WriteModel> {
+        self.models.get(index).ok_or_else(|| {
+            ErrorKind::InvalidResponse {
+                message: format!("invalid operation index returned from bulkWrite: {}", index),
+            }
+            .into()
+        })
+    }
+
+    fn get_inserted_id(&self, index: usize) -> Result<Bson> {
+        match self.inserted_ids.get(&index) {
+            Some(inserted_id) => Ok(inserted_id.clone()),
+            None => Err(ErrorKind::InvalidResponse {
+                message: format!("invalid index returned for insert operation: {}", index),
+            }
+            .into()),
+        }
+    }
+}
+
+/// A helper struct for tracking namespace information.
+struct NamespaceInfo<'a> {
+    namespaces: Vec<RawDocumentBuf>,
+    // Cache the namespaces and their indexes to avoid traversing the namespaces array each time a
+    // namespace is looked up or added.
+    cache: HashMap<&'a Namespace, usize>,
+}
+
+impl<'a> NamespaceInfo<'a> {
+    fn new() -> Self {
+        Self {
+            namespaces: Vec::new(),
+            cache: HashMap::new(),
+        }
+    }
+
+    /// Gets the index for the given namespace in the nsInfo list, adding it to the list if it is
+    /// not already present.
+    fn get_index(&mut self, namespace: &'a Namespace) -> (usize, usize) {
+        match self.cache.get(namespace) {
+            Some(index) => (*index, 0),
+            None => {
+                let namespace_doc = rawdoc! { "ns": namespace.to_string() };
+                let length_added = namespace_doc.as_bytes().len();
+                self.namespaces.push(namespace_doc);
+                let next_index = self.cache.len();
+                self.cache.insert(namespace, next_index);
+                (next_index, length_added)
+            }
+        }
+    }
+}
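The deduplication `get_index` provides, sketched from inside this module: repeated namespaces reuse the same `nsInfo` index and contribute zero additional bytes to the running size total, which feeds the batch-splitting check in `build` below.

```rust
let ns = Namespace {
    db: "db".to_string(),
    coll: "coll".to_string(),
};
let mut info = NamespaceInfo::new();
let (first_index, first_size) = info.get_index(&ns);
let (second_index, second_size) = info.get_index(&ns);
assert_eq!((first_index, second_index), (0, 0));
assert!(first_size > 0 && second_size == 0);
```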
+        let mut command_body = rawdoc! { Self::NAME: 1 };
+        let options = match self.options {
+            Some(options) => bson::to_raw_document_buf(options),
+            None => bson::to_raw_document_buf(&BulkWriteOptions::default()),
+        }?;
+        bson_util::extend_raw_document_buf(&mut command_body, options)?;
+
+        let max_document_sequences_size: usize = (Checked::new(max_message_size)
+            - OP_MSG_OVERHEAD_BYTES
+            - command_body.as_bytes().len())
+        .try_into()?;
+
+        let mut namespace_info = NamespaceInfo::new();
+        let mut ops = Vec::new();
+        let mut current_size = Checked::new(0);
+        for (i, model) in self.models.iter().take(max_operations).enumerate() {
+            let (namespace_index, namespace_size) = namespace_info.get_index(model.namespace());
+
+            let operation_namespace_index: i32 = Checked::new(namespace_index).try_into()?;
+            let mut operation = rawdoc! { model.operation_name(): operation_namespace_index };
+            let (model_doc, inserted_id) = model.get_ops_document_contents()?;
+            extend_raw_document_buf(&mut operation, model_doc)?;
+
+            let operation_size = operation.as_bytes().len();
+
+            current_size += namespace_size + operation_size;
+            if current_size.get()? > max_document_sequences_size {
+                // Remove the namespace doc from the list if one was added for this operation.
+                if namespace_size > 0 {
+                    let last_index = namespace_info.namespaces.len() - 1;
+                    namespace_info.namespaces.remove(last_index);
+                }
+                break;
+            }
+
+            if let Some(inserted_id) = inserted_id {
+                self.inserted_ids.insert(i, inserted_id);
+            }
+            ops.push(operation);
+        }
+
+        if ops.is_empty() {
+            return Err(ErrorKind::InvalidArgument {
+                message: format!(
+                    "operation at index {} exceeds the maximum message size ({} bytes)",
+                    self.offset, max_message_size
+                ),
+            }
+            .into());
+        }
+
+        self.n_attempted = ops.len();
+
+        let mut command = Command::new(Self::NAME, "admin", command_body);
+        command.add_document_sequence("nsInfo", namespace_info.namespaces);
+        command.add_document_sequence("ops", ops);
+        Ok(command)
+    }
+
+    fn handle_response_async<'b>(
+        &'b self,
+        response: RawCommandResponse,
+        context: ExecutionContext<'b>,
+    ) -> BoxFuture<'b, Result<Self::O>> {
+        async move {
+            let response: WriteResponseBody<Response> = response.body()?;
+
+            let mut bulk_write_error = ClientBulkWriteError::default();
+
+            // A partial result with summary info should only be created if one or more
+            // operations were successful.
+            let n_errors: usize = Checked::new(response.summary.n_errors).try_into()?;
+            if n_errors < self.n_attempted {
+                bulk_write_error
+                    .partial_result
+                    .get_or_insert_with(|| BulkWriteResult::new(self.is_verbose()))
+                    .populate_summary_info(&response.summary);
+            }
+
+            if let Some(write_concern_error) = response.write_concern_error {
+                bulk_write_error
+                    .write_concern_errors
+                    .push(write_concern_error);
+            }
+
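+            // The per-operation results are returned as a command cursor; build a
+            // specification so they can be iterated like any other cursor, pinning
+            // the connection where required (e.g. in load-balanced mode).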
+            let specification = CursorSpecification::new(
+                response.body.cursor,
+                context
+                    .connection
+                    .stream_description()?
+                    .server_address
+                    .clone(),
+                None,
+                None,
+                self.options.and_then(|options| options.comment.clone()),
+            );
+            let pinned_connection = self
+                .client
+                .pin_connection_for_cursor(&specification, context.connection)?;
+            let iteration_result = match context.session {
+                Some(session) => {
+                    let mut session_cursor =
+                        SessionCursor::new(self.client.clone(), specification, pinned_connection);
+                    self.iterate_results_cursor(
+                        session_cursor.stream(session),
+                        &mut bulk_write_error,
+                    )
+                    .await
+                }
+                None => {
+                    let cursor =
+                        Cursor::new(self.client.clone(), specification, None, pinned_connection);
+                    self.iterate_results_cursor(cursor, &mut bulk_write_error)
+                        .await
+                }
+            };
+
+            match iteration_result {
+                Ok(()) => {
+                    if bulk_write_error.write_errors.is_empty()
+                        && bulk_write_error.write_concern_errors.is_empty()
+                    {
+                        Ok(bulk_write_error
+                            .partial_result
+                            .unwrap_or_else(|| BulkWriteResult::new(self.is_verbose())))
+                    } else {
+                        let error = Error::new(
+                            ErrorKind::ClientBulkWrite(bulk_write_error),
+                            response.labels,
+                        );
+                        Err(error)
+                    }
+                }
+                Err(error) => {
+                    let error = Error::new(
+                        ErrorKind::ClientBulkWrite(bulk_write_error),
+                        response.labels,
+                    )
+                    .with_source(error);
+                    Err(error)
+                }
+            }
+        }
+        .boxed()
+    }
+
+    fn retryability(&self) -> Retryability {
+        if self.models.iter().any(|model| model.multi() == Some(true)) {
+            Retryability::None
+        } else {
+            Retryability::Write
+        }
+    }
+}
diff --git a/src/operation/bulk_write/server_responses.rs b/src/operation/bulk_write/server_responses.rs
new file mode 100644
index 000000000..15c70b3ab
--- /dev/null
+++ b/src/operation/bulk_write/server_responses.rs
@@ -0,0 +1,64 @@
+use serde::Deserialize;
+
+use crate::{bson::Bson, error::WriteError, operation::CursorInfo, results::BulkWriteResult};
+
+/// The top-level response to the bulkWrite command.
+#[derive(Debug, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub(super) struct Response {
+    pub(super) cursor: CursorInfo,
+    #[serde(flatten)]
+    pub(super) summary: SummaryInfo,
+}
+
+/// The summary information contained within the top-level response to the bulkWrite command.
+#[derive(Debug, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub(super) struct SummaryInfo {
+    pub(super) n_errors: i64,
+    pub(super) n_inserted: i64,
+    pub(super) n_matched: i64,
+    pub(super) n_modified: i64,
+    pub(super) n_upserted: i64,
+    pub(super) n_deleted: i64,
+}
+
+impl BulkWriteResult {
+    pub(super) fn populate_summary_info(&mut self, summary_info: &SummaryInfo) {
+        self.inserted_count += summary_info.n_inserted;
+        self.upserted_count += summary_info.n_upserted;
+        self.matched_count += summary_info.n_matched;
+        self.modified_count += summary_info.n_modified;
+        self.deleted_count += summary_info.n_deleted;
+    }
+}
+
+/// The structure of the response for a single operation within the results cursor.
+#[derive(Debug, Deserialize)]
+pub(super) struct SingleOperationResponse {
+    #[serde(rename = "idx")]
+    pub(super) index: usize,
+    #[serde(flatten)]
+    pub(super) result: SingleOperationResult,
+}
+
+/// The structure of the non-index fields for a single operation within the results cursor.
+#[derive(Debug, Deserialize)]
+#[serde(untagged)]
+pub(super) enum SingleOperationResult {
+    // This variant must be listed first for proper deserialization.
+    Error(WriteError),
+    #[serde(rename_all = "camelCase")]
+    Success {
+        n: u64,
+        n_modified: Option<u64>,
+        upserted: Option<UpsertedId>,
+    },
+}
+
+/// The structure of the inserted ID for an upserted document.
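+///
+/// A successful upsert entry in the cursor is assumed to look roughly like
+/// `{ "idx": 0, "n": 1, "nModified": 0, "upserted": { "_id": <id> } }`; only
+/// the `_id` value is captured here.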
+#[derive(Debug, Deserialize)] +pub(super) struct UpsertedId { + #[serde(rename = "_id")] + pub(super) id: Bson, +} diff --git a/src/operation/commit_transaction.rs b/src/operation/commit_transaction.rs index 3c3d455d6..8de7cd5b4 100644 --- a/src/operation/commit_transaction.rs +++ b/src/operation/commit_transaction.rs @@ -9,7 +9,7 @@ use crate::{ options::{Acknowledgment, TransactionOptions, WriteConcern}, }; -use super::WriteConcernOnlyBody; +use super::{ExecutionContext, WriteConcernOnlyBody}; pub(crate) struct CommitTransaction { options: Option, @@ -42,10 +42,10 @@ impl OperationWithDefaults for CommitTransaction { )) } - fn handle_response( - &self, + fn handle_response<'a>( + &'a self, response: RawCommandResponse, - _description: &StreamDescription, + _context: ExecutionContext<'a>, ) -> Result { let response: WriteConcernOnlyBody = response.body()?; response.validate() diff --git a/src/operation/count.rs b/src/operation/count.rs index 7091b1991..daf35753b 100644 --- a/src/operation/count.rs +++ b/src/operation/count.rs @@ -1,8 +1,7 @@ -use bson::Document; use serde::Deserialize; use crate::{ - bson::doc, + bson::{doc, Document}, cmap::{Command, RawCommandResponse, StreamDescription}, coll::{options::EstimatedDocumentCountOptions, Namespace}, error::{Error, Result}, @@ -10,6 +9,8 @@ use crate::{ selection_criteria::SelectionCriteria, }; +use super::ExecutionContext; + pub(crate) struct Count { ns: Namespace, options: Option, @@ -42,10 +43,10 @@ impl OperationWithDefaults for Count { )) } - fn handle_response( - &self, + fn handle_response<'a>( + &'a self, response: RawCommandResponse, - _description: &StreamDescription, + _context: ExecutionContext<'a>, ) -> Result { let response_body: ResponseBody = response.body()?; Ok(response_body.n) diff --git a/src/operation/count_documents.rs b/src/operation/count_documents.rs index db7160394..b61503f27 100644 --- a/src/operation/count_documents.rs +++ b/src/operation/count_documents.rs @@ -2,8 +2,8 @@ use std::convert::TryInto; use serde::Deserialize; -use super::{OperationWithDefaults, Retryability, SingleCursorResult}; use crate::{ + bson::{doc, Document, RawDocument}, cmap::{Command, RawCommandResponse, StreamDescription}, error::{Error, ErrorKind, Result}, operation::aggregate::Aggregate, @@ -11,7 +11,8 @@ use crate::{ selection_criteria::SelectionCriteria, Namespace, }; -use bson::{doc, Document, RawDocument}; + +use super::{ExecutionContext, OperationWithDefaults, Retryability, SingleCursorResult}; pub(crate) struct CountDocuments { aggregate: Aggregate, @@ -87,10 +88,10 @@ impl OperationWithDefaults for CountDocuments { self.aggregate.extract_at_cluster_time(response) } - fn handle_response( - &self, + fn handle_response<'a>( + &'a self, response: RawCommandResponse, - _description: &StreamDescription, + _context: ExecutionContext<'a>, ) -> Result { let response: SingleCursorResult = response.body()?; Ok(response.0.map(|r| r.n).unwrap_or(0)) diff --git a/src/operation/create.rs b/src/operation/create.rs index 533d96776..e26b73925 100644 --- a/src/operation/create.rs +++ b/src/operation/create.rs @@ -1,7 +1,5 @@ -use bson::Document; - use crate::{ - bson::doc, + bson::{doc, Document}, cmap::{Command, RawCommandResponse, StreamDescription}, error::Result, operation::{ @@ -14,6 +12,8 @@ use crate::{ Namespace, }; +use super::ExecutionContext; + #[derive(Debug)] pub(crate) struct Create { ns: Namespace, @@ -47,10 +47,10 @@ impl OperationWithDefaults for Create { )) } - fn handle_response( - &self, + fn handle_response<'a>( + &'a self, 
response: RawCommandResponse, - _description: &StreamDescription, + _context: ExecutionContext<'a>, ) -> Result { let response: WriteConcernOnlyBody = response.body()?; response.validate() diff --git a/src/operation/create_indexes.rs b/src/operation/create_indexes.rs index 247e6cb40..3135cc37a 100644 --- a/src/operation/create_indexes.rs +++ b/src/operation/create_indexes.rs @@ -1,6 +1,3 @@ -#[cfg(test)] -mod test; - use crate::{ bson::{doc, Document}, cmap::{Command, RawCommandResponse, StreamDescription}, @@ -12,7 +9,7 @@ use crate::{ Namespace, }; -use super::WriteConcernOnlyBody; +use super::{ExecutionContext, WriteConcernOnlyBody}; #[derive(Debug)] pub(crate) struct CreateIndexes { @@ -33,18 +30,6 @@ impl CreateIndexes { options, } } - - #[cfg(test)] - pub(crate) fn with_indexes(indexes: Vec) -> Self { - Self { - ns: Namespace { - db: String::new(), - coll: String::new(), - }, - indexes, - options: None, - } - } } impl OperationWithDefaults for CreateIndexes { @@ -85,10 +70,10 @@ impl OperationWithDefaults for CreateIndexes { )) } - fn handle_response( - &self, + fn handle_response<'a>( + &'a self, response: RawCommandResponse, - _description: &StreamDescription, + _context: ExecutionContext<'a>, ) -> Result { let response: WriteConcernOnlyBody = response.body()?; response.validate()?; diff --git a/src/operation/create_indexes/test.rs b/src/operation/create_indexes/test.rs deleted file mode 100644 index 59d1ab6d2..000000000 --- a/src/operation/create_indexes/test.rs +++ /dev/null @@ -1,85 +0,0 @@ -use std::time::Duration; - -use crate::{ - bson::doc, - cmap::StreamDescription, - coll::{ - options::{CommitQuorum, CreateIndexOptions}, - Namespace, - }, - concern::WriteConcern, - index::{options::IndexOptions, IndexModel}, - operation::{test::handle_response_test, CreateIndexes, Operation}, - results::CreateIndexesResult, -}; - -#[test] -fn build() { - let ns = Namespace { - db: "test_db".to_string(), - coll: "test_coll".to_string(), - }; - - let index_options = IndexOptions::builder() - .name(Some("foo".to_string())) - .build(); - let index_model = IndexModel::builder() - .keys(doc! { "x": 1 }) - .options(Some(index_options)) - .build(); - let create_options = CreateIndexOptions::builder() - .commit_quorum(Some(CommitQuorum::Majority)) - .max_time(Some(Duration::from_millis(42))) - .write_concern(Some(WriteConcern::builder().journal(Some(true)).build())) - .build(); - let mut create_indexes = CreateIndexes::new(ns, vec![index_model], Some(create_options)); - - let cmd = create_indexes - .build(&StreamDescription::with_wire_version(10)) - .expect("CreateIndexes command failed to build when it should have succeeded."); - - assert_eq!( - cmd.body, - doc! { - "createIndexes": "test_coll", - "indexes": [{ - "key": { "x": 1 }, - "name": "foo" - }], - "commitQuorum": "majority", - "maxTimeMS": 42, - "writeConcern": { "j": true }, - } - ) -} - -#[test] -fn handle_success() { - let a = IndexModel::builder() - .keys(doc! { "a": 1 }) - .options(Some( - IndexOptions::builder().name(Some("a".to_string())).build(), - )) - .build(); - let b = IndexModel::builder() - .keys(doc! { "b": 1 }) - .options(Some( - IndexOptions::builder().name(Some("b".to_string())).build(), - )) - .build(); - let op = CreateIndexes::with_indexes(vec![a, b]); - - let response = doc! 
{ - "ok": 1, - "createdCollectionAutomatically": false, - "numIndexesBefore": 1, - "numIndexesAfter": 3, - "commitQuorum": "votingMembers", - }; - - let expected_values = CreateIndexesResult { - index_names: vec!["a".to_string(), "b".to_string()], - }; - let actual_values = handle_response_test(&op, response).unwrap(); - assert_eq!(actual_values, expected_values); -} diff --git a/src/operation/delete.rs b/src/operation/delete.rs index 7458011d4..52b181405 100644 --- a/src/operation/delete.rs +++ b/src/operation/delete.rs @@ -1,6 +1,3 @@ -#[cfg(test)] -mod test; - use crate::{ bson::{doc, Document}, cmap::{Command, RawCommandResponse, StreamDescription}, @@ -18,6 +15,8 @@ use crate::{ results::DeleteResult, }; +use super::ExecutionContext; + #[derive(Debug)] pub(crate) struct Delete { ns: Namespace, @@ -29,19 +28,6 @@ pub(crate) struct Delete { } impl Delete { - #[cfg(test)] - fn empty() -> Self { - Self::new( - Namespace { - db: String::new(), - coll: String::new(), - }, - Document::new(), - None, - None, - ) - } - pub(crate) fn new( ns: Namespace, filter: Document, @@ -95,10 +81,10 @@ impl OperationWithDefaults for Delete { )) } - fn handle_response( - &self, + fn handle_response<'a>( + &'a self, response: RawCommandResponse, - _description: &StreamDescription, + _context: ExecutionContext<'a>, ) -> Result { let response: WriteResponseBody = response.body()?; response.validate().map_err(convert_bulk_errors)?; diff --git a/src/operation/delete/test.rs b/src/operation/delete/test.rs deleted file mode 100644 index 50f9427cf..000000000 --- a/src/operation/delete/test.rs +++ /dev/null @@ -1,196 +0,0 @@ -use pretty_assertions::assert_eq; - -use crate::{ - bson::doc, - bson_util, - cmap::StreamDescription, - concern::{Acknowledgment, WriteConcern}, - error::{ErrorKind, WriteConcernError, WriteError, WriteFailure}, - operation::{test::handle_response_test, Delete, Operation}, - options::DeleteOptions, - Namespace, -}; - -#[test] -fn build_many() { - let ns = Namespace { - db: "test_db".to_string(), - coll: "test_coll".to_string(), - }; - let filter = doc! { "x": { "$gt": 1 } }; - - let wc = WriteConcern { - w: Some(Acknowledgment::Majority), - ..Default::default() - }; - let options = DeleteOptions::builder().write_concern(wc).build(); - - let mut op = Delete::new(ns, filter.clone(), None, Some(options)); - - let description = StreamDescription::new_testing(); - let mut cmd = op.build(&description).unwrap(); - - assert_eq!(cmd.name.as_str(), "delete"); - assert_eq!(cmd.target_db.as_str(), "test_db"); - - let mut expected_body = doc! { - "delete": "test_coll", - "deletes": [ - { - "q": filter, - "limit": 0, - } - ], - "writeConcern": { - "w": "majority" - }, - "ordered": true, - }; - - bson_util::sort_document(&mut cmd.body); - bson_util::sort_document(&mut expected_body); - - assert_eq!(cmd.body, expected_body); -} - -#[test] -fn build_one() { - let ns = Namespace { - db: "test_db".to_string(), - coll: "test_coll".to_string(), - }; - let filter = doc! { "x": { "$gt": 1 } }; - - let wc = WriteConcern { - w: Some(Acknowledgment::Majority), - ..Default::default() - }; - let options = DeleteOptions::builder().write_concern(wc).build(); - - let mut op = Delete::new(ns, filter.clone(), Some(1), Some(options)); - - let description = StreamDescription::new_testing(); - let mut cmd = op.build(&description).unwrap(); - - assert_eq!(cmd.name.as_str(), "delete"); - assert_eq!(cmd.target_db.as_str(), "test_db"); - - let mut expected_body = doc! 
{ - "delete": "test_coll", - "deletes": [ - { - "q": filter, - "limit": 1, - } - ], - "writeConcern": { - "w": "majority" - }, - "ordered": true, - }; - - bson_util::sort_document(&mut cmd.body); - bson_util::sort_document(&mut expected_body); - - assert_eq!(cmd.body, expected_body); -} - -#[test] -fn handle_success() { - let op = Delete::empty(); - - let delete_result = handle_response_test( - &op, - doc! { - "ok": 1.0, - "n": 3 - }, - ) - .expect("should succeed"); - assert_eq!(delete_result.deleted_count, 3); -} - -#[test] -fn handle_invalid_response() { - let op = Delete::empty(); - handle_response_test( - &op, - doc! { - "ok": 1.0, - "asffasdf": 123123 - }, - ) - .expect_err("should fail"); -} - -#[test] -fn handle_write_failure() { - let op = Delete::empty(); - - let write_error_response = doc! { - "ok": 1.0, - "n": 0, - "writeErrors": [ - { - "index": 0, - "code": 1234, - "errmsg": "my error string" - } - ] - }; - let write_error = handle_response_test(&op, write_error_response).unwrap_err(); - match *write_error.kind { - ErrorKind::Write(WriteFailure::WriteError(ref error)) => { - let expected_err = WriteError { - code: 1234, - code_name: None, - message: "my error string".to_string(), - details: None, - }; - assert_eq!(error, &expected_err); - } - ref e => panic!("expected write error, got {:?}", e), - }; -} - -#[test] -fn handle_write_concern_failure() { - let op = Delete::empty(); - - let wc_error_response = doc! { - "ok": 1.0, - "n": 0, - "writeConcernError": { - "code": 456, - "codeName": "wcError", - "errmsg": "some message", - "errInfo": { - "writeConcern": { - "w": 2, - "wtimeout": 0, - "provenance": "clientSupplied" - } - } - } - }; - - let wc_error = handle_response_test(&op, wc_error_response) - .expect_err("should fail with write concern error"); - match *wc_error.kind { - ErrorKind::Write(WriteFailure::WriteConcernError(ref wc_error)) => { - let expected_wc_err = WriteConcernError { - code: 456, - code_name: "wcError".to_string(), - message: "some message".to_string(), - details: Some(doc! 
{ "writeConcern": { - "w": 2, - "wtimeout": 0, - "provenance": "clientSupplied" - } }), - labels: vec![], - }; - assert_eq!(wc_error, &expected_wc_err); - } - ref e => panic!("expected write concern error, got {:?}", e), - } -} diff --git a/src/operation/distinct.rs b/src/operation/distinct.rs index 37bb37bae..8888cecf0 100644 --- a/src/operation/distinct.rs +++ b/src/operation/distinct.rs @@ -1,8 +1,7 @@ -use bson::RawBsonRef; use serde::Deserialize; use crate::{ - bson::{doc, Bson, Document}, + bson::{doc, Bson, Document, RawBsonRef}, cmap::{Command, RawCommandResponse, StreamDescription}, coll::{options::DistinctOptions, Namespace}, error::Result, @@ -10,6 +9,8 @@ use crate::{ selection_criteria::SelectionCriteria, }; +use super::ExecutionContext; + pub(crate) struct Distinct { ns: Namespace, field_name: String, @@ -68,10 +69,10 @@ impl OperationWithDefaults for Distinct { .and_then(RawBsonRef::as_timestamp)) } - fn handle_response( - &self, + fn handle_response<'a>( + &'a self, response: RawCommandResponse, - _description: &StreamDescription, + _context: ExecutionContext<'a>, ) -> Result { let response: Response = response.body()?; Ok(response.values) diff --git a/src/operation/drop_collection.rs b/src/operation/drop_collection.rs index abd85f593..40c5dac91 100644 --- a/src/operation/drop_collection.rs +++ b/src/operation/drop_collection.rs @@ -1,7 +1,5 @@ -use bson::Document; - use crate::{ - bson::doc, + bson::{doc, Document}, cmap::{Command, RawCommandResponse, StreamDescription}, error::{Error, Result}, operation::{ @@ -14,6 +12,8 @@ use crate::{ Namespace, }; +use super::ExecutionContext; + #[derive(Debug)] pub(crate) struct DropCollection { ns: Namespace, @@ -47,10 +47,10 @@ impl OperationWithDefaults for DropCollection { )) } - fn handle_response( - &self, + fn handle_response<'a>( + &'a self, response: RawCommandResponse, - _description: &StreamDescription, + _context: ExecutionContext<'a>, ) -> Result { let response: WriteConcernOnlyBody = response.body()?; response.validate() diff --git a/src/operation/drop_database.rs b/src/operation/drop_database.rs index cb1712e8c..d88761094 100644 --- a/src/operation/drop_database.rs +++ b/src/operation/drop_database.rs @@ -1,7 +1,5 @@ -use bson::Document; - use crate::{ - bson::doc, + bson::{doc, Document}, cmap::{Command, RawCommandResponse, StreamDescription}, db::options::DropDatabaseOptions, error::Result, @@ -14,6 +12,8 @@ use crate::{ options::WriteConcern, }; +use super::ExecutionContext; + #[derive(Debug)] pub(crate) struct DropDatabase { target_db: String, @@ -47,10 +47,10 @@ impl OperationWithDefaults for DropDatabase { )) } - fn handle_response( - &self, + fn handle_response<'a>( + &'a self, response: RawCommandResponse, - _description: &StreamDescription, + _context: ExecutionContext<'a>, ) -> Result { let response: WriteConcernOnlyBody = response.body()?; response.validate() diff --git a/src/operation/drop_indexes.rs b/src/operation/drop_indexes.rs index dd7c9fbd5..e2442bbee 100644 --- a/src/operation/drop_indexes.rs +++ b/src/operation/drop_indexes.rs @@ -1,6 +1,3 @@ -#[cfg(test)] -mod test; - use crate::{ bson::{doc, Document}, cmap::{Command, RawCommandResponse, StreamDescription}, @@ -10,6 +7,8 @@ use crate::{ Namespace, }; +use super::ExecutionContext; + pub(crate) struct DropIndexes { ns: Namespace, name: String, @@ -20,18 +19,6 @@ impl DropIndexes { pub(crate) fn new(ns: Namespace, name: String, options: Option) -> Self { Self { ns, name, options } } - - #[cfg(test)] - pub(crate) fn empty() -> Self { - Self { - ns: 
Namespace { - db: String::new(), - coll: String::new(), - }, - name: String::new(), - options: None, - } - } } impl OperationWithDefaults for DropIndexes { @@ -55,10 +42,10 @@ impl OperationWithDefaults for DropIndexes { )) } - fn handle_response( - &self, + fn handle_response<'a>( + &'a self, _response: RawCommandResponse, - _description: &StreamDescription, + _context: ExecutionContext<'a>, ) -> Result { Ok(()) } diff --git a/src/operation/drop_indexes/test.rs b/src/operation/drop_indexes/test.rs deleted file mode 100644 index 3a6b1fd13..000000000 --- a/src/operation/drop_indexes/test.rs +++ /dev/null @@ -1,43 +0,0 @@ -use std::time::Duration; - -use crate::{ - bson::doc, - cmap::StreamDescription, - coll::{options::DropIndexOptions, Namespace}, - concern::WriteConcern, - operation::{test::handle_response_test, DropIndexes, Operation}, -}; - -#[test] -fn build() { - let ns = Namespace { - db: "test_db".to_string(), - coll: "test_coll".to_string(), - }; - - let options = DropIndexOptions::builder() - .max_time(Some(Duration::from_secs(1))) - .write_concern(Some(WriteConcern::builder().journal(Some(true)).build())) - .build(); - - let mut drop_index = DropIndexes::new(ns, "foo".to_string(), Some(options)); - let cmd = drop_index - .build(&StreamDescription::new_testing()) - .expect("DropIndex command failed to build when it should have succeeded."); - assert_eq!( - cmd.body, - doc! { - "dropIndexes": "test_coll", - "index": "foo", - "maxTimeMS": 1000, - "writeConcern": { "j": true }, - } - ) -} - -#[test] -fn handle_success() { - let op = DropIndexes::empty(); - let response = doc! { "ok": 1 }; - handle_response_test(&op, response).unwrap(); -} diff --git a/src/operation/find.rs b/src/operation/find.rs index d4a34aa42..e7e077606 100644 --- a/src/operation/find.rs +++ b/src/operation/find.rs @@ -14,6 +14,8 @@ use crate::{ Namespace, }; +use super::ExecutionContext; + #[derive(Debug)] pub(crate) struct Find { ns: Namespace, @@ -89,13 +91,15 @@ impl OperationWithDefaults for Find { CursorBody::extract_at_cluster_time(response) } - fn handle_response( - &self, + fn handle_response<'a>( + &'a self, response: RawCommandResponse, - description: &StreamDescription, + context: ExecutionContext<'a>, ) -> Result { let response: CursorBody = response.body()?; + let description = context.connection.stream_description()?; + // The comment should only be propagated to getMore calls on 4.4+. 
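+        // (`SERVER_4_4_0_WIRE_VERSION` encodes wire version 9, the version
+        // introduced by MongoDB 4.4.)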
let comment = if description.max_wire_version.unwrap_or(0) < SERVER_4_4_0_WIRE_VERSION { None diff --git a/src/operation/find_and_modify.rs b/src/operation/find_and_modify.rs index 45c029e0b..3c76813b1 100644 --- a/src/operation/find_and_modify.rs +++ b/src/operation/find_and_modify.rs @@ -2,12 +2,11 @@ pub(crate) mod options; use std::{fmt::Debug, marker::PhantomData}; -use bson::{from_slice, RawBson}; use serde::{de::DeserializeOwned, Deserialize}; use self::options::FindAndModifyOptions; use crate::{ - bson::{doc, rawdoc, Document, RawDocumentBuf}, + bson::{doc, from_slice, rawdoc, Document, RawBson, RawDocumentBuf}, bson_util, cmap::{Command, RawCommandResponse, StreamDescription}, coll::{options::UpdateModifications, Namespace}, @@ -22,7 +21,7 @@ use crate::{ options::WriteConcern, }; -use super::UpdateOrReplace; +use super::{ExecutionContext, UpdateOrReplace}; pub(crate) struct FindAndModify { ns: Namespace, @@ -96,10 +95,10 @@ impl OperationWithDefaults for FindAndModify { )) } - fn handle_response( - &self, + fn handle_response<'a>( + &'a self, response: RawCommandResponse, - _description: &StreamDescription, + _context: ExecutionContext<'a>, ) -> Result { #[derive(Debug, Deserialize)] pub(crate) struct Response { diff --git a/src/operation/get_more.rs b/src/operation/get_more.rs index e486bbb49..aa69f3412 100644 --- a/src/operation/get_more.rs +++ b/src/operation/get_more.rs @@ -1,13 +1,9 @@ -#[cfg(test)] -mod test; - use std::{collections::VecDeque, time::Duration}; -use bson::{Document, RawDocumentBuf}; use serde::Deserialize; use crate::{ - bson::{doc, Bson}, + bson::{doc, Bson, Document, RawDocumentBuf}, change_stream::event::ResumeToken, cmap::{conn::PinnedConnectionHandle, Command, RawCommandResponse, StreamDescription}, cursor::CursorInformation, @@ -18,6 +14,8 @@ use crate::{ Namespace, }; +use super::ExecutionContext; + #[derive(Debug)] pub(crate) struct GetMore<'conn> { ns: Namespace, @@ -87,10 +85,10 @@ impl<'conn> OperationWithDefaults for GetMore<'conn> { )) } - fn handle_response( - &self, + fn handle_response<'a>( + &'a self, response: RawCommandResponse, - _description: &StreamDescription, + _context: ExecutionContext<'a>, ) -> Result { let response: GetMoreResponseBody = response.body()?; diff --git a/src/operation/get_more/test.rs b/src/operation/get_more/test.rs deleted file mode 100644 index d93b2a746..000000000 --- a/src/operation/get_more/test.rs +++ /dev/null @@ -1,47 +0,0 @@ -use crate::{ - cursor::CursorInformation, - operation::{GetMore, Operation}, - options::ServerAddress, - sdam::{ServerDescription, ServerInfo, ServerType}, - Namespace, -}; - -#[test] -fn op_selection_criteria() { - let address = ServerAddress::Tcp { - host: "myhost.com".to_string(), - port: Some(1234), - }; - - let info = CursorInformation { - ns: Namespace::empty(), - address: address.clone(), - id: 123, - batch_size: None, - max_time: None, - comment: None, - }; - let get_more = GetMore::new(info, None); - let server_description = ServerDescription { - address, - server_type: ServerType::Unknown, - reply: Ok(None), - last_update_time: None, - average_round_trip_time: None, - }; - let server_info = ServerInfo::new_borrowed(&server_description); - - let predicate = get_more - .selection_criteria() - .expect("should not be none") - .as_predicate() - .expect("should be predicate"); - assert!(predicate(&server_info)); - - let server_description = ServerDescription { - address: ServerAddress::default(), - ..server_description - }; - let server_info = 
ServerInfo::new_borrowed(&server_description); - assert!(!predicate(&server_info)); -} diff --git a/src/operation/insert.rs b/src/operation/insert.rs index e2b7ca282..702485a38 100644 --- a/src/operation/insert.rs +++ b/src/operation/insert.rs @@ -1,10 +1,13 @@ -use std::{collections::HashMap, convert::TryInto}; - -use bson::{oid::ObjectId, Bson, RawArrayBuf, RawDocument, RawDocumentBuf}; +use std::collections::HashMap; use crate::{ - bson::rawdoc, - bson_util, + bson::{rawdoc, Bson, RawDocument, RawDocumentBuf}, + bson_util::{ + array_entry_size_bytes, + extend_raw_document_buf, + get_or_prepend_id_field, + vec_to_raw_array_buf, + }, checked::Checked, cmap::{Command, RawCommandResponse, StreamDescription}, error::{BulkWriteFailure, Error, ErrorKind, Result}, @@ -14,7 +17,7 @@ use crate::{ Namespace, }; -use super::{COMMAND_OVERHEAD_SIZE, MAX_ENCRYPTED_WRITE_SIZE}; +use super::{ExecutionContext, MAX_ENCRYPTED_WRITE_SIZE, OP_MSG_OVERHEAD_BYTES}; #[derive(Debug)] pub(crate) struct Insert<'a> { @@ -54,46 +57,28 @@ impl<'a> OperationWithDefaults for Insert<'a> { const NAME: &'static str = "insert"; fn build(&mut self, description: &StreamDescription) -> Result> { + let max_doc_size: usize = Checked::new(description.max_bson_object_size).try_into()?; + let max_message_size: usize = + Checked::new(description.max_message_size_bytes).try_into()?; + let max_operations: usize = Checked::new(description.max_write_batch_size).try_into()?; + + let mut command_body = rawdoc! { Self::NAME: self.ns.coll.clone() }; + let options = bson::to_raw_document_buf(&self.options)?; + extend_raw_document_buf(&mut command_body, options)?; + + let max_document_sequence_size: usize = (Checked::new(max_message_size) + - OP_MSG_OVERHEAD_BYTES + - command_body.as_bytes().len()) + .try_into()?; + let mut docs = Vec::new(); - let mut size = 0; - - let max_doc_size = Checked::::try_from(description.max_bson_object_size)?; - let max_doc_sequence_size = - Checked::::try_from(description.max_message_size_bytes)? - COMMAND_OVERHEAD_SIZE; - - for (i, &d) in self - .documents - .iter() - .take(Checked::new(description.max_write_batch_size).try_into()?) - .enumerate() - { - let mut doc = d.to_owned(); - let id = match doc.get("_id")? { - Some(b) => b.try_into()?, - None => { - let mut new_doc = RawDocumentBuf::new(); - let oid = ObjectId::new(); - new_doc.append("_id", oid); - - let mut new_bytes = new_doc.into_bytes(); - new_bytes.pop(); // remove trailing null byte - - let mut bytes = doc.into_bytes(); - let oid_slice = &new_bytes[4..]; - // insert oid at beginning of document - bytes.splice(4..4, oid_slice.iter().cloned()); - - // overwrite old length - let new_length = Checked::new(bytes.len()).try_into::()?.to_le_bytes(); - bytes[0..4].copy_from_slice(&new_length); - doc = RawDocumentBuf::from_bytes(bytes)?; - - Bson::ObjectId(oid) - } - }; + let mut current_size = Checked::new(0); + for (i, document) in self.documents.iter().take(max_operations).enumerate() { + let mut document = bson::to_raw_document_buf(document)?; + let id = get_or_prepend_id_field(&mut document)?; - let doc_size = doc.as_bytes().len(); - if doc_size > max_doc_size.get()? { + let doc_size = document.as_bytes().len(); + if doc_size > max_doc_size { return Err(ErrorKind::InvalidArgument { message: format!( "insert document must be within {} bytes, but document provided is {} \ @@ -107,18 +92,21 @@ impl<'a> OperationWithDefaults for Insert<'a> { // From the spec: Drivers MUST not reduce the size limits for a single write before // automatic encryption. I.e. 
if a single document has size larger than 2MiB (but less // than `maxBsonObjectSize`) proceed with automatic encryption. - if self.encrypted && i != 0 { - let doc_entry_size = bson_util::array_entry_size_bytes(i, doc.as_bytes().len())?; - if (Checked::new(size) + doc_entry_size).get()? >= MAX_ENCRYPTED_WRITE_SIZE { + if self.encrypted { + let doc_entry_size = array_entry_size_bytes(i, document.as_bytes().len())?; + current_size += doc_entry_size; + if i != 0 && current_size.get()? >= MAX_ENCRYPTED_WRITE_SIZE { + break; + } + } else { + current_size += doc_size; + if current_size.get()? > max_document_sequence_size { break; } - } else if (Checked::new(size) + doc_size).get()? > max_doc_sequence_size.get()? { - break; } self.inserted_ids.push(id); - docs.push(doc); - size += doc_size; + docs.push(document); } let mut body = rawdoc! { @@ -126,15 +114,11 @@ impl<'a> OperationWithDefaults for Insert<'a> { }; let options_doc = bson::to_raw_document_buf(&self.options)?; - bson_util::extend_raw_document_buf(&mut body, options_doc)?; + extend_raw_document_buf(&mut body, options_doc)?; if self.encrypted { // Auto-encryption does not support document sequences - let mut raw_array = RawArrayBuf::new(); - for doc in docs { - raw_array.push(doc); - } - body.append("documents", raw_array); + body.append("documents", vec_to_raw_array_buf(docs)); Ok(Command::new(Self::NAME, &self.ns.db, body)) } else { let mut command = Command::new(Self::NAME, &self.ns.db, body); @@ -143,12 +127,12 @@ impl<'a> OperationWithDefaults for Insert<'a> { } } - fn handle_response( - &self, - raw_response: RawCommandResponse, - _description: &StreamDescription, + fn handle_response<'b>( + &'b self, + response: RawCommandResponse, + _context: ExecutionContext<'b>, ) -> Result { - let response: WriteResponseBody = raw_response.body_utf8_lossy()?; + let response: WriteResponseBody = response.body_utf8_lossy()?; let response_n = Checked::::try_from(response.n)?; let mut map = HashMap::new(); diff --git a/src/operation/list_collections.rs b/src/operation/list_collections.rs index e8cfa163d..f2c3328c0 100644 --- a/src/operation/list_collections.rs +++ b/src/operation/list_collections.rs @@ -7,6 +7,8 @@ use crate::{ options::{ListCollectionsOptions, ReadPreference, SelectionCriteria}, }; +use super::ExecutionContext; + #[derive(Debug)] pub(crate) struct ListCollections { db: String, @@ -52,15 +54,19 @@ impl OperationWithDefaults for ListCollections { Ok(Command::new(Self::NAME.to_string(), self.db.clone(), body)) } - fn handle_response( - &self, - raw_response: RawCommandResponse, - description: &StreamDescription, + fn handle_response<'a>( + &'a self, + response: RawCommandResponse, + context: ExecutionContext<'a>, ) -> Result { - let response: CursorBody = raw_response.body()?; + let response: CursorBody = response.body()?; Ok(CursorSpecification::new( response.cursor, - description.server_address.clone(), + context + .connection + .stream_description()? 
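+                // The stream description should always be present on an established
+                // connection, so this lookup is not expected to fail in practice.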
+ .server_address + .clone(), self.options.as_ref().and_then(|opts| opts.batch_size), None, None, diff --git a/src/operation/list_databases.rs b/src/operation/list_databases.rs index 1a82a7cf8..21d43af19 100644 --- a/src/operation/list_databases.rs +++ b/src/operation/list_databases.rs @@ -1,8 +1,7 @@ -use bson::RawDocumentBuf; use serde::Deserialize; use crate::{ - bson::{doc, Document}, + bson::{doc, Document, RawDocumentBuf}, cmap::{Command, RawCommandResponse, StreamDescription}, db::options::ListDatabasesOptions, error::Result, @@ -10,6 +9,8 @@ use crate::{ selection_criteria::{ReadPreference, SelectionCriteria}, }; +use super::ExecutionContext; + #[derive(Debug)] pub(crate) struct ListDatabases { name_only: bool, @@ -43,12 +44,12 @@ impl OperationWithDefaults for ListDatabases { )) } - fn handle_response( - &self, - raw_response: RawCommandResponse, - _description: &StreamDescription, + fn handle_response<'a>( + &'a self, + response: RawCommandResponse, + _context: ExecutionContext<'a>, ) -> Result { - let response: Response = raw_response.body()?; + let response: Response = response.body()?; Ok(response.databases) } diff --git a/src/operation/list_indexes.rs b/src/operation/list_indexes.rs index b8986816a..f365a77fd 100644 --- a/src/operation/list_indexes.rs +++ b/src/operation/list_indexes.rs @@ -9,10 +9,7 @@ use crate::{ Namespace, }; -use super::{CursorBody, Retryability}; - -#[cfg(test)] -mod test; +use super::{CursorBody, ExecutionContext, Retryability}; pub(crate) struct ListIndexes { ns: Namespace, @@ -23,17 +20,6 @@ impl ListIndexes { pub(crate) fn new(ns: Namespace, options: Option) -> Self { ListIndexes { ns, options } } - - #[cfg(test)] - pub(crate) fn empty() -> Self { - Self { - ns: Namespace { - db: String::new(), - coll: String::new(), - }, - options: None, - } - } } impl OperationWithDefaults for ListIndexes { @@ -58,15 +44,19 @@ impl OperationWithDefaults for ListIndexes { )) } - fn handle_response( - &self, - raw_response: RawCommandResponse, - description: &StreamDescription, + fn handle_response<'a>( + &'a self, + response: RawCommandResponse, + context: ExecutionContext<'a>, ) -> Result { - let response: CursorBody = raw_response.body()?; + let response: CursorBody = response.body()?; Ok(CursorSpecification::new( response.cursor, - description.server_address.clone(), + context + .connection + .stream_description()? + .server_address + .clone(), self.options.as_ref().and_then(|o| o.batch_size), self.options.as_ref().and_then(|o| o.max_time), None, diff --git a/src/operation/list_indexes/test.rs b/src/operation/list_indexes/test.rs deleted file mode 100644 index 317327313..000000000 --- a/src/operation/list_indexes/test.rs +++ /dev/null @@ -1,90 +0,0 @@ -use std::time::Duration; - -use crate::{ - bson::doc, - client::options::ServerAddress, - cmap::StreamDescription, - operation::{test::handle_response_test, ListIndexes, Operation}, - options::{IndexOptions, IndexVersion, ListIndexesOptions, TextIndexVersion}, - IndexModel, - Namespace, -}; - -#[test] -fn build() { - let ns = Namespace { - db: "test_db".to_string(), - coll: "test_coll".to_string(), - }; - - let list_options = ListIndexesOptions::builder() - .max_time(Some(Duration::from_millis(42))) - .batch_size(Some(4)) - .build(); - let mut list_indexes = ListIndexes::new(ns, Some(list_options)); - - let cmd = list_indexes - .build(&StreamDescription::new_testing()) - .expect("ListIndexes command failed to build when it should have succeeded."); - - assert_eq!( - cmd.body, - doc! 
{ - "listIndexes": "test_coll", - "maxTimeMS": 42, - "cursor": doc! { - "batchSize": 4, - }, - } - ); -} - -#[test] -fn handle_success() { - let op = ListIndexes::empty(); - - let first_batch = vec![ - IndexModel::builder() - .keys(doc! {"x": 1}) - .options(Some( - IndexOptions::builder() - .version(Some(IndexVersion::V1)) - .name(Some("foo".to_string())) - .sparse(Some(false)) - .build(), - )) - .build(), - IndexModel::builder() - .keys(doc! {"y": 1, "z": -1}) - .options(Some( - IndexOptions::builder() - .version(Some(IndexVersion::V1)) - .name(Some("x_1_z_-1".to_string())) - .text_index_version(Some(TextIndexVersion::V3)) - .default_language(Some("spanish".to_string())) - .build(), - )) - .build(), - ]; - - let response = doc! { - "cursor": { - "id": 123, - "ns": "test_db.test_coll", - "firstBatch": bson::to_bson(&first_batch).unwrap(), - }, - "ok": 1, - }; - - let cursor_spec = handle_response_test(&op, response).unwrap(); - - assert_eq!(cursor_spec.id(), 123); - assert_eq!(cursor_spec.address(), &ServerAddress::default()); - assert_eq!(cursor_spec.batch_size(), None); - assert_eq!(cursor_spec.max_time(), None); - - assert_eq!( - bson::to_bson(&cursor_spec.initial_buffer).unwrap(), - bson::to_bson(&first_batch).unwrap(), - ); -} diff --git a/src/operation/raw_output.rs b/src/operation/raw_output.rs index 254ea316b..15f6791a5 100644 --- a/src/operation/raw_output.rs +++ b/src/operation/raw_output.rs @@ -1,9 +1,12 @@ +use futures_util::FutureExt; + use crate::{ cmap::{Command, RawCommandResponse, StreamDescription}, error::Result, + BoxFuture, }; -use super::Operation; +use super::{ExecutionContext, Operation}; /// Forwards all implementation to the wrapped `Operation`, but returns the response unparsed and /// unvalidated as a `RawCommandResponse`. @@ -26,12 +29,12 @@ impl Operation for RawOutput { self.0.extract_at_cluster_time(response) } - fn handle_response( - &self, + fn handle_response<'a>( + &'a self, response: RawCommandResponse, - _description: &StreamDescription, - ) -> Result { - Ok(response) + _context: ExecutionContext<'a>, + ) -> BoxFuture<'a, Result> { + async move { Ok(response) }.boxed() } fn handle_error(&self, error: crate::error::Error) -> Result { diff --git a/src/operation/run_command.rs b/src/operation/run_command.rs index 427f00214..b6e92a240 100644 --- a/src/operation/run_command.rs +++ b/src/operation/run_command.rs @@ -1,19 +1,15 @@ -#[cfg(test)] -mod test; - use std::convert::TryInto; -use bson::{RawBsonRef, RawDocumentBuf}; - -use super::{CursorBody, OperationWithDefaults}; use crate::{ - bson::Document, + bson::{Document, RawBsonRef, RawDocumentBuf}, client::SESSIONS_UNSUPPORTED_COMMANDS, cmap::{conn::PinnedConnectionHandle, Command, RawCommandResponse, StreamDescription}, error::{ErrorKind, Result}, selection_criteria::SelectionCriteria, }; +use super::{CursorBody, ExecutionContext, OperationWithDefaults}; + #[derive(Debug, Clone)] pub(crate) struct RunCommand<'conn> { db: String, @@ -94,10 +90,10 @@ impl<'conn> OperationWithDefaults for RunCommand<'conn> { } } - fn handle_response( - &self, + fn handle_response<'a>( + &'a self, response: RawCommandResponse, - _description: &StreamDescription, + _context: ExecutionContext<'a>, ) -> Result { Ok(response.into_raw_document_buf().try_into()?) 
} diff --git a/src/operation/run_command/test.rs b/src/operation/run_command/test.rs deleted file mode 100644 index 467ccf5e1..000000000 --- a/src/operation/run_command/test.rs +++ /dev/null @@ -1,24 +0,0 @@ -use bson::Timestamp; - -use super::RunCommand; -use crate::{bson::doc, operation::test::handle_response_test}; - -#[test] -fn handle_success() { - let op = RunCommand::new("foo".into(), doc! { "hello": 1 }, None, None).unwrap(); - - let doc = doc! { - "ok": 1, - "some": "field", - "other": true, - "$clusterTime": { - "clusterTime": Timestamp { - time: 123, - increment: 345, - }, - "signature": {} - } - }; - let result_doc = handle_response_test(&op, doc.clone()).unwrap(); - assert_eq!(result_doc, doc); -} diff --git a/src/operation/run_cursor_command.rs b/src/operation/run_cursor_command.rs index 756b8c471..575930a9e 100644 --- a/src/operation/run_cursor_command.rs +++ b/src/operation/run_cursor_command.rs @@ -1,6 +1,7 @@ -use bson::RawDocumentBuf; +use futures_util::FutureExt; use crate::{ + bson::RawDocumentBuf, cmap::{conn::PinnedConnectionHandle, Command, RawCommandResponse, StreamDescription}, concern::WriteConcern, cursor::CursorSpecification, @@ -8,8 +9,11 @@ use crate::{ operation::{run_command::RunCommand, CursorBody, Operation}, options::RunCursorCommandOptions, selection_criteria::SelectionCriteria, + BoxFuture, }; +use super::ExecutionContext; + #[derive(Debug, Clone)] pub(crate) struct RunCursorCommand<'conn> { run_command: RunCommand<'conn>, @@ -85,24 +89,31 @@ impl<'conn> Operation for RunCursorCommand<'conn> { self.run_command.name() } - fn handle_response( - &self, + fn handle_response<'a>( + &'a self, response: RawCommandResponse, - description: &StreamDescription, - ) -> Result { - let cursor_response: CursorBody = response.body()?; - - let comment = match &self.options { - Some(options) => options.comment.clone(), - None => None, - }; - - Ok(CursorSpecification::new( - cursor_response.cursor, - description.server_address.clone(), - self.options.as_ref().and_then(|opts| opts.batch_size), - self.options.as_ref().and_then(|opts| opts.max_time), - comment, - )) + context: ExecutionContext<'a>, + ) -> BoxFuture<'a, Result> { + async move { + let cursor_response: CursorBody = response.body()?; + + let comment = match &self.options { + Some(options) => options.comment.clone(), + None => None, + }; + + Ok(CursorSpecification::new( + cursor_response.cursor, + context + .connection + .stream_description()? 
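+                    // As in the other cursor-producing operations in this change,
+                    // the server address now comes from the live connection's stream
+                    // description rather than a cached `StreamDescription` argument.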
+ .server_address + .clone(), + self.options.as_ref().and_then(|opts| opts.batch_size), + self.options.as_ref().and_then(|opts| opts.max_time), + comment, + )) + } + .boxed() } } diff --git a/src/operation/search_index.rs b/src/operation/search_index.rs index 986d1395b..422fcd241 100644 --- a/src/operation/search_index.rs +++ b/src/operation/search_index.rs @@ -1,9 +1,14 @@ -use bson::{doc, Document}; use serde::Deserialize; -use crate::{cmap::Command, error::Result, Namespace, SearchIndexModel}; +use crate::{ + bson::{doc, Document}, + cmap::{Command, RawCommandResponse}, + error::Result, + Namespace, + SearchIndexModel, +}; -use super::OperationWithDefaults; +use super::{ExecutionContext, OperationWithDefaults}; #[derive(Debug)] pub(crate) struct CreateSearchIndexes { @@ -33,10 +38,10 @@ impl OperationWithDefaults for CreateSearchIndexes { )) } - fn handle_response( - &self, - response: crate::cmap::RawCommandResponse, - _description: &crate::cmap::StreamDescription, + fn handle_response<'a>( + &'a self, + response: RawCommandResponse, + _context: ExecutionContext<'a>, ) -> Result { #[derive(Debug, Deserialize)] #[serde(rename_all = "camelCase")] @@ -105,11 +110,11 @@ impl OperationWithDefaults for UpdateSearchIndex { )) } - fn handle_response( - &self, - _response: crate::cmap::RawCommandResponse, - _description: &crate::cmap::StreamDescription, - ) -> crate::error::Result { + fn handle_response<'a>( + &'a self, + _response: RawCommandResponse, + _context: ExecutionContext<'a>, + ) -> Result { Ok(()) } @@ -153,10 +158,10 @@ impl OperationWithDefaults for DropSearchIndex { )) } - fn handle_response( - &self, - _response: crate::cmap::RawCommandResponse, - _description: &crate::cmap::StreamDescription, + fn handle_response<'a>( + &'a self, + _response: RawCommandResponse, + _context: ExecutionContext<'a>, ) -> Result { Ok(()) } diff --git a/src/operation/test.rs b/src/operation/test.rs deleted file mode 100644 index 3dacc95b9..000000000 --- a/src/operation/test.rs +++ /dev/null @@ -1,149 +0,0 @@ -use bson::{doc, Document, Timestamp}; -use serde::Deserialize; - -use crate::{ - client::ClusterTime, - cmap::{RawCommandResponse, StreamDescription}, - error::{Result, TRANSIENT_TRANSACTION_ERROR}, - operation::{CommandErrorBody, CommandResponse, Operation}, - options::{ReadPreference, SelectionCriteria}, -}; - -pub(crate) fn handle_response_test(op: &T, response_doc: Document) -> Result { - let raw = RawCommandResponse::with_document(response_doc).unwrap(); - op.handle_response(raw, &StreamDescription::new_testing()) -} - -pub(crate) fn op_selection_criteria(constructor: F) -where - T: Operation, - F: Fn(Option) -> T, -{ - let op = constructor(None); - assert_eq!(op.selection_criteria(), None); - - let read_pref: SelectionCriteria = ReadPreference::Secondary { - options: Default::default(), - } - .into(); - - let op = constructor(Some(read_pref.clone())); - assert_eq!(op.selection_criteria(), Some(&read_pref)); -} - -#[test] -fn response_success() { - let cluster_timestamp = Timestamp { - time: 123, - increment: 345, - }; - let doc = doc! { - "ok": 1, - "some": "field", - "other": true, - "$clusterTime": { - "clusterTime": cluster_timestamp, - "signature": {} - } - }; - let raw = RawCommandResponse::with_document(doc.clone()).unwrap(); - let response: CommandResponse = raw.body().unwrap(); - - assert!(response.is_success()); - assert_eq!( - response.cluster_time(), - Some(&ClusterTime { - cluster_time: cluster_timestamp, - signature: doc! {}, - }) - ); - assert_eq!(response.body, doc! 
{ "some": "field", "other": true }); - - #[derive(Deserialize, Debug, PartialEq)] - struct Body { - some: String, - #[serde(rename = "other")] - o: bool, - #[serde(default)] - default: Option, - } - - let raw = RawCommandResponse::with_document(doc).unwrap(); - let response: CommandResponse = raw.body().unwrap(); - - assert!(response.is_success()); - assert_eq!( - response.cluster_time(), - Some(&ClusterTime { - cluster_time: cluster_timestamp, - signature: doc! {}, - }) - ); - assert_eq!( - response.body, - Body { - some: "field".to_string(), - o: true, - default: None, - } - ); -} - -#[test] -fn response_failure() { - let cluster_timestamp = Timestamp { - time: 123, - increment: 345, - }; - let doc = doc! { - "ok": 0, - "code": 123, - "codeName": "name", - "errmsg": "some message", - "errorLabels": [TRANSIENT_TRANSACTION_ERROR], - "$clusterTime": { - "clusterTime": cluster_timestamp, - "signature": {} - } - }; - let raw = RawCommandResponse::with_document(doc.clone()).unwrap(); - let response: CommandResponse = raw.body().unwrap(); - - assert!(!response.is_success()); - assert_eq!( - response.cluster_time(), - Some(&ClusterTime { - cluster_time: cluster_timestamp, - signature: doc! {}, - }) - ); - assert_eq!( - response.body, - doc! { - "code": 123, - "codeName": "name", - "errmsg": "some message", - "errorLabels": [TRANSIENT_TRANSACTION_ERROR], - } - ); - - let raw = RawCommandResponse::with_document(doc).unwrap(); - let response: CommandResponse = raw.body().unwrap(); - - assert!(!response.is_success()); - assert_eq!( - response.cluster_time(), - Some(&ClusterTime { - cluster_time: cluster_timestamp, - signature: doc! {}, - }) - ); - let command_error = response.body; - assert_eq!(command_error.command_error.code, 123); - assert_eq!(command_error.command_error.code_name, "name"); - assert_eq!(command_error.command_error.message, "some message"); - assert_eq!( - command_error.error_labels, - Some(vec![TRANSIENT_TRANSACTION_ERROR.to_string()]) - ); -} diff --git a/src/operation/update.rs b/src/operation/update.rs index a09609c73..7df20ad3a 100644 --- a/src/operation/update.rs +++ b/src/operation/update.rs @@ -1,6 +1,3 @@ -#[cfg(test)] -mod test; - use serde::Deserialize; use crate::{ @@ -14,6 +11,8 @@ use crate::{ Namespace, }; +use super::ExecutionContext; + #[derive(Clone, Debug)] pub(crate) enum UpdateOrReplace { UpdateModifications(UpdateModifications), @@ -59,17 +58,6 @@ pub(crate) struct Update { } impl Update { - #[cfg(test)] - fn empty() -> Self { - Self::with_update( - Namespace::new("db", "coll"), - doc! {}, - UpdateModifications::Document(doc! 
{}), - false, - None, - ) - } - pub(crate) fn with_update( ns: Namespace, filter: Document, @@ -171,12 +159,12 @@ impl OperationWithDefaults for Update { )) } - fn handle_response( - &self, - raw_response: RawCommandResponse, - _description: &StreamDescription, + fn handle_response<'a>( + &'a self, + response: RawCommandResponse, + _context: ExecutionContext<'a>, ) -> Result { - let response: WriteResponseBody = raw_response.body_utf8_lossy()?; + let response: WriteResponseBody = response.body_utf8_lossy()?; response.validate().map_err(convert_bulk_errors)?; let modified_count = response.n_modified; @@ -187,7 +175,11 @@ impl OperationWithDefaults for Update { .and_then(|doc| doc.get("_id")) .cloned(); - let matched_count = if upserted_id.is_some() { 0 } else { response.n }; + let matched_count = if upserted_id.is_some() { + 0 + } else { + response.body.n + }; Ok(UpdateResult { matched_count, @@ -213,6 +205,7 @@ impl OperationWithDefaults for Update { #[derive(Deserialize)] pub(crate) struct UpdateBody { + n: u64, #[serde(rename = "nModified")] n_modified: u64, upserted: Option>, diff --git a/src/operation/update/test.rs b/src/operation/update/test.rs deleted file mode 100644 index a4e4c4cad..000000000 --- a/src/operation/update/test.rs +++ /dev/null @@ -1,116 +0,0 @@ -use pretty_assertions::assert_eq; - -use crate::{ - bson::{doc, Bson}, - error::{ErrorKind, WriteConcernError, WriteError, WriteFailure}, - operation::{test::handle_response_test, Update}, -}; - -#[test] -fn handle_success() { - let op = Update::empty(); - - let ok_response = doc! { - "ok": 1.0, - "n": 3, - "nModified": 1, - "upserted": [ - { "index": 0, "_id": 1 } - ] - }; - - let update_result = handle_response_test(&op, ok_response).unwrap(); - assert_eq!(update_result.matched_count, 0); - assert_eq!(update_result.modified_count, 1); - assert_eq!(update_result.upserted_id, Some(Bson::Int32(1))); -} - -#[test] -fn handle_success_no_upsert() { - let op = Update::empty(); - - let ok_response = doc! { - "ok": 1.0, - "n": 5, - "nModified": 2 - }; - - let update_result = handle_response_test(&op, ok_response).unwrap(); - assert_eq!(update_result.matched_count, 5); - assert_eq!(update_result.modified_count, 2); - assert_eq!(update_result.upserted_id, None); -} - -#[test] -fn handle_write_failure() { - let op = Update::empty(); - - let write_error_response = doc! { - "ok": 1.0, - "n": 12, - "nModified": 0, - "writeErrors": [ - { - "index": 0, - "code": 1234, - "errmsg": "my error string" - } - ] - }; - - let write_error = handle_response_test(&op, write_error_response).unwrap_err(); - match *write_error.kind { - ErrorKind::Write(WriteFailure::WriteError(ref error)) => { - let expected_err = WriteError { - code: 1234, - code_name: None, - message: "my error string".to_string(), - details: None, - }; - assert_eq!(error, &expected_err); - } - ref e => panic!("expected write error, got {:?}", e), - }; -} - -#[test] -fn handle_write_concern_failure() { - let op = Update::empty(); - - let wc_error_response = doc! 
{ - "ok": 1.0, - "n": 0, - "nModified": 0, - "writeConcernError": { - "code": 456, - "codeName": "wcError", - "errmsg": "some message", - "errInfo": { - "writeConcern": { - "w": 2, - "wtimeout": 0, - "provenance": "clientSupplied" - } - } - } - }; - - let wc_error = handle_response_test(&op, wc_error_response).unwrap_err(); - match *wc_error.kind { - ErrorKind::Write(WriteFailure::WriteConcernError(ref wc_error)) => { - let expected_wc_err = WriteConcernError { - code: 456, - code_name: "wcError".to_string(), - message: "some message".to_string(), - details: Some(doc! { "writeConcern": { - "w": 2, - "wtimeout": 0, - "provenance": "clientSupplied" - } }), - labels: vec![], - }; - assert_eq!(wc_error, &expected_wc_err); - } - ref e => panic!("expected write concern error, got {:?}", e), - } -} diff --git a/src/results.rs b/src/results.rs index f15e5c852..4e8a82e84 100644 --- a/src/results.rs +++ b/src/results.rs @@ -1,21 +1,25 @@ //! Contains the types of results returned by CRUD operations. +mod bulk_write; + use std::collections::{HashMap, VecDeque}; +use serde::{Deserialize, Serialize}; +use serde_with::skip_serializing_none; + use crate::{ - bson::{serde_helpers, Bson, Document}, + bson::{serde_helpers, Binary, Bson, Document, RawDocumentBuf}, change_stream::event::ResumeToken, db::options::CreateCollectionOptions, serde_util, Namespace, }; -use bson::{Binary, RawDocumentBuf}; -use serde::{Deserialize, Serialize}; +pub use bulk_write::*; /// The result of a [`Collection::insert_one`](../struct.Collection.html#method.insert_one) /// operation. -#[derive(Debug, Serialize)] +#[derive(Clone, Debug, Serialize)] #[serde(rename_all = "camelCase")] #[non_exhaustive] pub struct InsertOneResult { @@ -51,7 +55,8 @@ impl InsertManyResult { /// The result of a [`Collection::update_one`](../struct.Collection.html#method.update_one) or /// [`Collection::update_many`](../struct.Collection.html#method.update_many) operation. -#[derive(Debug, Serialize)] +#[skip_serializing_none] +#[derive(Clone, Debug, Serialize)] #[serde(rename_all = "camelCase")] #[non_exhaustive] pub struct UpdateResult { @@ -69,7 +74,7 @@ pub struct UpdateResult { /// The result of a [`Collection::delete_one`](../struct.Collection.html#method.delete_one) or /// [`Collection::delete_many`](../struct.Collection.html#method.delete_many) operation. 
-#[derive(Debug, Serialize)]
+#[derive(Clone, Debug, Serialize)]
 #[serde(rename_all = "camelCase")]
 #[non_exhaustive]
 pub struct DeleteResult {
diff --git a/src/results/bulk_write.rs b/src/results/bulk_write.rs
new file mode 100644
index 000000000..aaa794261
--- /dev/null
+++ b/src/results/bulk_write.rs
@@ -0,0 +1,85 @@
+#![allow(missing_docs)]
+
+use std::{collections::HashMap, fmt::Debug};
+
+use serde::Serialize;
+use serde_with::skip_serializing_none;
+
+use crate::{
+    results::{DeleteResult, InsertOneResult, UpdateResult},
+    serde_util::serialize_indexed_map,
+};
+
+#[skip_serializing_none]
+#[derive(Clone, Debug, Serialize)]
+#[serde(rename_all = "camelCase")]
+#[non_exhaustive]
+pub struct BulkWriteResult {
+    pub inserted_count: i64,
+    pub upserted_count: i64,
+    pub matched_count: i64,
+    pub modified_count: i64,
+    pub deleted_count: i64,
+    #[serde(serialize_with = "serialize_indexed_map")]
+    pub insert_results: Option<HashMap<usize, InsertOneResult>>,
+    #[serde(serialize_with = "serialize_indexed_map")]
+    pub update_results: Option<HashMap<usize, UpdateResult>>,
+    #[serde(serialize_with = "serialize_indexed_map")]
+    pub delete_results: Option<HashMap<usize, DeleteResult>>,
+}
+
+impl BulkWriteResult {
+    pub(crate) fn new(verbose: bool) -> Self {
+        Self {
+            inserted_count: 0,
+            upserted_count: 0,
+            matched_count: 0,
+            modified_count: 0,
+            deleted_count: 0,
+            insert_results: verbose.then(HashMap::new),
+            update_results: verbose.then(HashMap::new),
+            delete_results: verbose.then(HashMap::new),
+        }
+    }
+
+    pub(crate) fn add_insert_result(&mut self, index: usize, insert_result: InsertOneResult) {
+        self.insert_results
+            .get_or_insert_with(Default::default)
+            .insert(index, insert_result);
+    }
+
+    pub(crate) fn add_update_result(&mut self, index: usize, update_result: UpdateResult) {
+        self.update_results
+            .get_or_insert_with(Default::default)
+            .insert(index, update_result);
+    }
+
+    pub(crate) fn add_delete_result(&mut self, index: usize, delete_result: DeleteResult) {
+        self.delete_results
+            .get_or_insert_with(Default::default)
+            .insert(index, delete_result);
+    }
+
+    pub(crate) fn merge(&mut self, other: Self) {
+        self.inserted_count += other.inserted_count;
+        self.upserted_count += other.upserted_count;
+        self.matched_count += other.matched_count;
+        self.modified_count += other.modified_count;
+        self.deleted_count += other.deleted_count;
+        if let Some(insert_results) = other.insert_results {
+            self.insert_results
+                .get_or_insert_with(Default::default)
+                .extend(insert_results);
+        }
+        if let Some(update_results) = other.update_results {
+            self.update_results
+                .get_or_insert_with(Default::default)
+                .extend(update_results);
+        }
+        if let Some(delete_results) = other.delete_results {
+            self.delete_results
+                .get_or_insert_with(Default::default)
+                .extend(delete_results);
+        }
+    }
+}
diff --git a/src/sdam/description/topology/server_selection/test/in_window.rs b/src/sdam/description/topology/server_selection/test/in_window.rs
index f884a80fc..b1506805a 100644
--- a/src/sdam/description/topology/server_selection/test/in_window.rs
+++ b/src/sdam/description/topology/server_selection/test/in_window.rs
@@ -17,11 +17,9 @@ use crate::{
         get_client_options,
         log_uncaptured,
         run_spec_test,
+        util::fail_point::{FailPoint, FailPointMode},
         Event,
         EventClient,
-        FailCommandOptions,
-        FailPoint,
-        FailPointMode,
         TestClient,
     },
     Client,
@@ -254,22 +252,18 @@ async fn load_balancing_test() {
     }
 
     // enable a failpoint on one of the mongoses to slow it down
-    let options = FailCommandOptions::builder()
-        .block_connection(Duration::from_millis(500))
-        .build();
-    let failpoint =
diff --git a/src/sdam/description/topology/server_selection/test/in_window.rs b/src/sdam/description/topology/server_selection/test/in_window.rs
index f884a80fc..b1506805a 100644
--- a/src/sdam/description/topology/server_selection/test/in_window.rs
+++ b/src/sdam/description/topology/server_selection/test/in_window.rs
@@ -17,11 +17,9 @@ use crate::{
         get_client_options,
         log_uncaptured,
         run_spec_test,
+        util::fail_point::{FailPoint, FailPointMode},
         Event,
         EventClient,
-        FailCommandOptions,
-        FailPoint,
-        FailPointMode,
         TestClient,
     },
     Client,
@@ -254,22 +252,18 @@ async fn load_balancing_test() {
     }
 
     // enable a failpoint on one of the mongoses to slow it down
-    let options = FailCommandOptions::builder()
-        .block_connection(Duration::from_millis(500))
-        .build();
-    let failpoint = FailPoint::fail_command(&["find"], FailPointMode::AlwaysOn, options);
     let slow_host = get_client_options().await.hosts[0].clone();
-    let criteria = SelectionCriteria::Predicate(Arc::new(move |si| si.address() == &slow_host));
-    let fp_guard = setup_client
-        .enable_failpoint(failpoint, criteria)
-        .await
-        .expect("enabling failpoint should succeed");
+    let slow_host_criteria =
+        SelectionCriteria::Predicate(Arc::new(move |si| si.address() == &slow_host));
+    let fail_point = FailPoint::fail_command(&["find"], FailPointMode::AlwaysOn)
+        .block_connection(Duration::from_millis(500))
+        .selection_criteria(slow_host_criteria);
+    let guard = setup_client.enable_fail_point(fail_point).await.unwrap();
 
     // verify that the lesser picked server (slower one) was picked less than 25% of the time.
     do_test(&client, 0.05, 0.25, 10).await;
 
     // disable failpoint and rerun, should be back to even split
-    drop(fp_guard);
+    drop(guard);
 
     do_test(&client, 0.40, 0.50, 100).await;
 }
diff --git a/src/sdam/description/topology/test/sdam.rs b/src/sdam/description/topology/test/sdam.rs
index d1664ee4f..d15601823 100644
--- a/src/sdam/description/topology/test/sdam.rs
+++ b/src/sdam/description/topology/test/sdam.rs
@@ -28,11 +28,11 @@ use crate::{
         get_client_options,
         log_uncaptured,
         run_spec_test,
-        util::event_buffer::EventBuffer,
+        util::{
+            event_buffer::EventBuffer,
+            fail_point::{FailPoint, FailPointMode},
+        },
         Event,
-        FailCommandOptions,
-        FailPoint,
-        FailPointMode,
         TestClient,
     },
 };
@@ -674,19 +674,13 @@ async fn heartbeat_events() {
     options.heartbeat_freq = None;
     let fp_client = TestClient::with_options(Some(options)).await;
 
-    let fp_options = FailCommandOptions::builder()
-        .error_code(1234)
-        .app_name("heartbeat_events".to_string())
-        .build();
-    let failpoint = FailPoint::fail_command(
+    let fail_point = FailPoint::fail_command(
         &[LEGACY_HELLO_COMMAND_NAME, "hello"],
         FailPointMode::AlwaysOn,
-        fp_options,
-    );
-    let _fp_guard = fp_client
-        .enable_failpoint(failpoint, None)
-        .await
-        .expect("enabling failpoint should succeed");
+    )
+    .app_name("heartbeat_events")
+    .error_code(1234);
+    let _guard = fp_client.enable_fail_point(fail_point).await.unwrap();
 
     subscriber
         .wait_for_event(Duration::from_millis(500), |event| {
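Note: the refactor repeated across these test diffs replaces the old `FailCommandOptions` builder plus `enable_failpoint(failpoint, criteria)` with setter methods directly on `FailPoint` and a `Client::enable_fail_point` helper whose returned guard disables the fail point when dropped. A hedged sketch of the new test-internal API as the updated tests use it; the command name, error code, and app name here are placeholders:

    // Test-internal API from this patch; values are illustrative only.
    let fail_point = FailPoint::fail_command(&["insert"], FailPointMode::Times(1))
        .error_code(1234)
        .app_name("my_test");
    let guard = client.enable_fail_point(fail_point).await.unwrap();
    // ...run the operations that should hit the fail point...
    drop(guard); // disable explicitly, or let the guard fall out of scope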
diff --git a/src/sdam/test.rs b/src/sdam/test.rs
index 8f0c8f1e7..c37e2ba3b 100644
--- a/src/sdam/test.rs
+++ b/src/sdam/test.rs
@@ -16,11 +16,11 @@ use crate::{
     test::{
         get_client_options,
         log_uncaptured,
-        util::event_buffer::EventBuffer,
+        util::{
+            event_buffer::EventBuffer,
+            fail_point::{FailPoint, FailPointMode},
+        },
         Event,
-        FailCommandOptions,
-        FailPoint,
-        FailPointMode,
         TestClient,
     },
     Client,
@@ -46,20 +46,17 @@ async fn min_heartbeat_frequency() {
         return;
     }
 
-    let fp_options = FailCommandOptions::builder()
-        .app_name("SDAMMinHeartbeatFrequencyTest".to_string())
-        .error_code(1234)
-        .build();
-    let failpoint = FailPoint::fail_command(
-        &[LEGACY_HELLO_COMMAND_NAME, "hello"],
-        FailPointMode::Times(5),
-        fp_options,
-    );
-
-    let _fp_guard = setup_client
-        .enable_failpoint(failpoint, None)
+    let _guard = setup_client
+        .enable_fail_point(
+            FailPoint::fail_command(
+                &[LEGACY_HELLO_COMMAND_NAME, "hello"],
+                FailPointMode::Times(5),
+            )
+            .app_name("SDAMMinHeartbeatFrequencyTest")
+            .error_code(1234),
+        )
         .await
-        .expect("enabling failpoint should succeed");
+        .unwrap();
 
     let mut options = setup_client_options;
     options.app_name = Some("SDAMMinHeartbeatFrequencyTest".to_string());
@@ -132,20 +129,17 @@
         .await
         .expect("should see server heartbeat succeeded event");
 
-    let fp_options = FailCommandOptions::builder()
-        .app_name("SDAMPoolManagementTest".to_string())
-        .error_code(1234)
-        .build();
-    let failpoint = FailPoint::fail_command(
-        &[LEGACY_HELLO_COMMAND_NAME, "hello"],
-        FailPointMode::Times(4),
-        fp_options,
-    );
-
-    let _fp_guard = client
-        .enable_failpoint(failpoint, None)
+    let _guard = client
+        .enable_fail_point(
+            FailPoint::fail_command(
+                &[LEGACY_HELLO_COMMAND_NAME, "hello"],
+                FailPointMode::Times(4),
+            )
+            .app_name("SDAMPoolManagementTest")
+            .error_code(1234),
+        )
         .await
-        .expect("enabling failpoint should succeed");
+        .unwrap();
 
     // Since there is no deterministic ordering, simply collect all the events and check for their
     // presence.
diff --git a/src/selection_criteria.rs b/src/selection_criteria.rs
index b207826b2..968c89a31 100644
--- a/src/selection_criteria.rs
+++ b/src/selection_criteria.rs
@@ -53,14 +53,6 @@ impl SelectionCriteria {
         }
     }
 
-    #[cfg(test)]
-    pub(crate) fn as_predicate(&self) -> Option<&Predicate> {
-        match self {
-            Self::Predicate(ref p) => Some(p),
-            _ => None,
-        }
-    }
-
     pub(crate) fn from_address(address: ServerAddress) -> Self {
         SelectionCriteria::Predicate(Arc::new(move |server| server.address() == &address))
     }
diff --git a/src/serde_util.rs b/src/serde_util.rs
index 785370298..3286b74ba 100644
--- a/src/serde_util.rs
+++ b/src/serde_util.rs
@@ -1,4 +1,7 @@
-use std::time::Duration;
+use std::{
+    collections::{BTreeMap, HashMap},
+    time::Duration,
+};
 
 use serde::{Deserialize, Deserializer, Serialize, Serializer};
 
@@ -184,3 +187,39 @@ where
     }
     Ok(Some(vec))
 }
+
+pub(crate) fn serialize_indexed_map<S: Serializer, T: Serialize>(
+    map: &Option<HashMap<usize, T>>,
+    serializer: S,
+) -> std::result::Result<S::Ok, S::Error> {
+    if let Some(map) = map {
+        let string_map: BTreeMap<_, _> = map
+            .iter()
+            .map(|(index, result)| (index.to_string(), result))
+            .collect();
+        string_map.serialize(serializer)
+    } else {
+        serializer.serialize_none()
+    }
+}
+
+#[cfg(test)]
+pub(crate) fn deserialize_indexed_map<'de, D, T>(
+    deserializer: D,
+) -> std::result::Result<Option<HashMap<usize, T>>, D::Error>
+where
+    D: Deserializer<'de>,
+    T: serde::de::DeserializeOwned,
+{
+    use std::str::FromStr;
+
+    let string_map: HashMap<String, T> = HashMap::deserialize(deserializer)?;
+    Ok(Some(string_map.into_iter().try_fold(
+        HashMap::new(),
+        |mut map, (index, t)| {
+            let index = usize::from_str(&index).map_err(serde::de::Error::custom)?;
+            map.insert(index, t);
+            Ok(map)
+        },
+    )?))
+}
diff --git a/src/test.rs b/src/test.rs
index a0dea5362..461a52371 100644
--- a/src/test.rs
+++ b/src/test.rs
@@ -6,6 +6,7 @@ mod atlas_connectivity;
 mod atlas_planned_maintenance_testing;
 #[cfg(feature = "aws-auth")]
 mod auth_aws;
+mod bulk_write;
 mod change_stream;
 mod client;
 mod coll;
@@ -38,9 +39,6 @@ pub(crate) use self::{
     file_level_log,
     log_uncaptured,
     Event,
-    FailCommandOptions,
-    FailPoint,
-    FailPointMode,
     MatchErrExt,
     Matchable,
     TestClient,
diff --git a/src/test/bulk_write.rs b/src/test/bulk_write.rs
new file mode 100644
index 000000000..a93320cbb
--- /dev/null
+++ b/src/test/bulk_write.rs
@@ -0,0 +1,565 @@
+use crate::{
+    bson::{doc, Document},
+    error::{ClientBulkWriteError, ErrorKind},
+    options::WriteModel,
+    test::{
+        get_client_options,
+        log_uncaptured,
+        spec::unified_runner::run_unified_tests,
+        util::fail_point::{FailPoint, FailPointMode},
+    },
+    Client,
+    Namespace,
+};
+
+use super::TestClient;
+
+#[tokio::test(flavor = "multi_thread")]
+async fn run_unified() {
+    run_unified_tests(&["crud", "unified", "new-bulk-write"])
+        // TODO RUST-1405: unskip this test
.skip_files(&["client-bulkWrite-errorResponse.json"]) + .await; +} + +// CRUD prose test 3 +#[tokio::test] +async fn max_write_batch_size_batching() { + let client = Client::test_builder().monitor_events().build().await; + + if client.server_version_lt(8, 0) { + log_uncaptured("skipping max_write_batch_size_batching: bulkWrite requires 8.0+"); + return; + } + + let max_write_batch_size = client.server_info.max_write_batch_size.unwrap() as usize; + + let model = WriteModel::InsertOne { + namespace: Namespace::new("db", "coll"), + document: doc! { "a": "b" }, + }; + let models = vec![model; max_write_batch_size + 1]; + + let result = client.bulk_write(models).await.unwrap(); + assert_eq!(result.inserted_count as usize, max_write_batch_size + 1); + + let mut command_started_events = client + .events + .get_command_started_events(&["bulkWrite"]) + .into_iter(); + + let first_event = command_started_events + .next() + .expect("no first event observed"); + let first_len = first_event.command.get_array("ops").unwrap().len(); + assert_eq!(first_len, max_write_batch_size); + + let second_event = command_started_events + .next() + .expect("no second event observed"); + let second_len = second_event.command.get_array("ops").unwrap().len(); + assert_eq!(second_len, 1); +} + +// CRUD prose test 4 +#[tokio::test] +async fn max_message_size_bytes_batching() { + let client = Client::test_builder().monitor_events().build().await; + + if client.server_version_lt(8, 0) { + log_uncaptured("skipping max_message_size_bytes_batching: bulkWrite requires 8.0+"); + return; + } + + let max_bson_object_size = client.server_info.max_bson_object_size as usize; + let max_message_size_bytes = client.server_info.max_message_size_bytes as usize; + + let document = doc! { "a": "b".repeat(max_bson_object_size - 500) }; + let model = WriteModel::InsertOne { + namespace: Namespace::new("db", "coll"), + document, + }; + let num_models = max_message_size_bytes / max_bson_object_size + 1; + let models = vec![model; num_models]; + + let result = client.bulk_write(models).await.unwrap(); + assert_eq!(result.inserted_count as usize, num_models); + + let mut command_started_events = client + .events + .get_command_started_events(&["bulkWrite"]) + .into_iter(); + + let first_event = command_started_events + .next() + .expect("no first event observed"); + let first_len = first_event.command.get_array("ops").unwrap().len(); + assert_eq!(first_len, num_models - 1); + + let second_event = command_started_events + .next() + .expect("no second event observed"); + let second_len = second_event.command.get_array("ops").unwrap().len(); + assert_eq!(second_len, 1); +} + +// CRUD prose test 5 +#[tokio::test(flavor = "multi_thread")] +async fn write_concern_error_batches() { + let mut options = get_client_options().await.clone(); + options.retry_writes = Some(false); + if TestClient::new().await.is_sharded() { + options.hosts.drain(1..); + } + let client = Client::test_builder() + .options(options) + .monitor_events() + .build() + .await; + + if client.server_version_lt(8, 0) { + log_uncaptured("skipping write_concern_error_batches: bulkWrite requires 8.0+"); + return; + } + + let max_write_batch_size = client.server_info.max_write_batch_size.unwrap() as usize; + + let fail_point = FailPoint::fail_command(&["bulkWrite"], FailPointMode::Times(2)) + .write_concern_error(doc! 
{ "code": 91, "errmsg": "Replication is being shut down" }); + let _guard = client.enable_fail_point(fail_point).await.unwrap(); + + let models = vec![ + WriteModel::InsertOne { + namespace: Namespace::new("db", "coll"), + document: doc! { "a": "b" } + }; + max_write_batch_size + 1 + ]; + let error = client.bulk_write(models).ordered(false).await.unwrap_err(); + + let ErrorKind::ClientBulkWrite(bulk_write_error) = *error.kind else { + panic!("Expected bulk write error, got {:?}", error); + }; + + assert_eq!(bulk_write_error.write_concern_errors.len(), 2); + + let partial_result = bulk_write_error.partial_result.unwrap(); + assert_eq!( + partial_result.inserted_count as usize, + max_write_batch_size + 1 + ); + + let command_started_events = client.events.get_command_started_events(&["bulkWrite"]); + assert_eq!(command_started_events.len(), 2); +} + +// CRUD prose test 6 +#[tokio::test] +async fn write_error_batches() { + let mut client = Client::test_builder().monitor_events().build().await; + + if client.server_version_lt(8, 0) { + log_uncaptured("skipping write_error_batches: bulkWrite requires 8.0+"); + return; + } + + let max_write_batch_size = client.server_info.max_write_batch_size.unwrap() as usize; + + let document = doc! { "_id": 1 }; + let collection = client.database("db").collection("coll"); + collection.drop().await.unwrap(); + collection.insert_one(document.clone()).await.unwrap(); + + let models = vec![ + WriteModel::InsertOne { + namespace: collection.namespace(), + document, + }; + max_write_batch_size + 1 + ]; + + let error = client + .bulk_write(models.clone()) + .ordered(false) + .await + .unwrap_err(); + + let ErrorKind::ClientBulkWrite(bulk_write_error) = *error.kind else { + panic!("Expected bulk write error, got {:?}", error); + }; + + assert_eq!( + bulk_write_error.write_errors.len(), + max_write_batch_size + 1 + ); + + let command_started_events = client.events.get_command_started_events(&["bulkWrite"]); + assert_eq!(command_started_events.len(), 2); + + client.events.clear_cached_events(); + + let error = client.bulk_write(models).ordered(true).await.unwrap_err(); + + let ErrorKind::ClientBulkWrite(bulk_write_error) = *error.kind else { + panic!("Expected bulk write error, got {:?}", error); + }; + + assert_eq!(bulk_write_error.write_errors.len(), 1); + + let command_started_events = client.events.get_command_started_events(&["bulkWrite"]); + assert_eq!(command_started_events.len(), 1); +} + +// CRUD prose test 7 +#[tokio::test] +async fn successful_cursor_iteration() { + let client = Client::test_builder().monitor_events().build().await; + + if client.server_version_lt(8, 0) { + log_uncaptured("skipping successful_cursor_iteration: bulkWrite requires 8.0+"); + return; + } + + let max_bson_object_size = client.server_info.max_bson_object_size as usize; + + let collection = client.database("db").collection::("coll"); + collection.drop().await.unwrap(); + + let models = vec![ + WriteModel::UpdateOne { + namespace: collection.namespace(), + filter: doc! { "_id": "a".repeat(max_bson_object_size / 2) }, + update: doc! { "$set": { "x": 1 } }.into(), + array_filters: None, + collation: None, + hint: None, + upsert: Some(true), + }, + WriteModel::UpdateOne { + namespace: collection.namespace(), + filter: doc! { "_id": "b".repeat(max_bson_object_size / 2) }, + update: doc! 
{ "$set": { "x": 1 } }.into(), + array_filters: None, + collation: None, + hint: None, + upsert: Some(true), + }, + ]; + + let result = client + .bulk_write(models) + .verbose_results(true) + .await + .unwrap(); + assert_eq!(result.upserted_count, 2); + assert_eq!(result.update_results.unwrap().len(), 2); + + let command_started_events = client.events.get_command_started_events(&["getMore"]); + assert_eq!(command_started_events.len(), 1); +} + +// CRUD prose test 8 +#[tokio::test] +async fn cursor_iteration_in_a_transaction() { + let client = Client::test_builder().monitor_events().build().await; + + if client.server_version_lt(8, 0) || client.is_standalone() { + log_uncaptured( + "skipping cursor_iteration_in_a_transaction: bulkWrite requires 8.0+, transactions \ + require a non-standalone topology", + ); + return; + } + + let max_bson_object_size = client.server_info.max_bson_object_size as usize; + + let collection = client.database("db").collection::("coll"); + collection.drop().await.unwrap(); + + let mut session = client.start_session().await.unwrap(); + session.start_transaction().await.unwrap(); + + let models = vec![ + WriteModel::UpdateOne { + namespace: collection.namespace(), + filter: doc! { "_id": "a".repeat(max_bson_object_size / 2) }, + update: doc! { "$set": { "x": 1 } }.into(), + array_filters: None, + collation: None, + hint: None, + upsert: Some(true), + }, + WriteModel::UpdateOne { + namespace: collection.namespace(), + filter: doc! { "_id": "b".repeat(max_bson_object_size / 2) }, + update: doc! { "$set": { "x": 1 } }.into(), + array_filters: None, + collation: None, + hint: None, + upsert: Some(true), + }, + ]; + + let result = client + .bulk_write(models) + .verbose_results(true) + .session(&mut session) + .await + .unwrap(); + assert_eq!(result.upserted_count, 2); + assert_eq!(result.update_results.unwrap().len(), 2); + + let command_started_events = client.events.get_command_started_events(&["getMore"]); + assert_eq!(command_started_events.len(), 1); +} + +// CRUD prose test 9 +#[tokio::test(flavor = "multi_thread")] +async fn failed_cursor_iteration() { + let mut options = get_client_options().await.clone(); + if TestClient::new().await.is_sharded() { + options.hosts.drain(1..); + } + let client = Client::test_builder() + .options(options) + .monitor_events() + .build() + .await; + + if client.server_version_lt(8, 0) { + log_uncaptured("skipping failed_cursor_iteration: bulkWrite requires 8.0+"); + return; + } + + let max_bson_object_size = client.server_info.max_bson_object_size as usize; + + let fail_point = FailPoint::fail_command(&["getMore"], FailPointMode::Times(1)).error_code(8); + let _guard = client.enable_fail_point(fail_point).await.unwrap(); + + let collection = client.database("db").collection::("coll"); + collection.drop().await.unwrap(); + + let models = vec![ + WriteModel::UpdateOne { + namespace: collection.namespace(), + filter: doc! { "_id": "a".repeat(max_bson_object_size / 2) }, + update: doc! { "$set": { "x": 1 } }.into(), + array_filters: None, + collation: None, + hint: None, + upsert: Some(true), + }, + WriteModel::UpdateOne { + namespace: collection.namespace(), + filter: doc! { "_id": "b".repeat(max_bson_object_size / 2) }, + update: doc! 
{ "$set": { "x": 1 } }.into(), + array_filters: None, + collation: None, + hint: None, + upsert: Some(true), + }, + ]; + + let error = client + .bulk_write(models) + .verbose_results(true) + .await + .unwrap_err(); + + let Some(ref source) = error.source else { + panic!("Expected error to contain source"); + }; + assert_eq!(source.code(), Some(8)); + + let ErrorKind::ClientBulkWrite(ClientBulkWriteError { + partial_result: Some(partial_result), + .. + }) = *error.kind + else { + panic!( + "Expected bulk write error with partial result, got {:?}", + error + ); + }; + assert_eq!(partial_result.upserted_count, 2); + assert_eq!(partial_result.update_results.unwrap().len(), 1); + + let get_more_events = client.events.get_command_started_events(&["getMore"]); + assert_eq!(get_more_events.len(), 1); + + let kill_cursors_events = client.events.get_command_started_events(&["killCursors"]); + assert_eq!(kill_cursors_events.len(), 1); +} + +// CRUD prose test 10 not implemented. The driver does not support unacknowledged writes. + +// CRUD prose test 11 +#[tokio::test] +async fn namespace_batch_splitting() { + let first_namespace = Namespace::new("db", "coll"); + + let mut client = Client::test_builder().monitor_events().build().await; + if client.server_version_lt(8, 0) { + log_uncaptured("skipping namespace_batch_splitting: bulkWrite requires 8.0+"); + return; + } + + let max_message_size_bytes = client.server_info.max_message_size_bytes as usize; + let max_bson_object_size = client.server_info.max_bson_object_size as usize; + + let ops_bytes = max_message_size_bytes - 1122; + let num_models = ops_bytes / max_bson_object_size; + + let model = WriteModel::InsertOne { + namespace: first_namespace.clone(), + document: doc! { "a": "b".repeat(max_bson_object_size - 57) }, + }; + let mut models = vec![model; num_models]; + + let remainder_bytes = ops_bytes % max_bson_object_size; + if remainder_bytes >= 217 { + models.push(WriteModel::InsertOne { + namespace: first_namespace.clone(), + document: doc! { "a": "b".repeat(remainder_bytes - 57) }, + }); + } + + // Case 1: no batch-splitting required + + let mut first_models = models.clone(); + first_models.push(WriteModel::InsertOne { + namespace: first_namespace.clone(), + document: doc! { "a": "b" }, + }); + let num_models = first_models.len(); + + let result = client.bulk_write(first_models).await.unwrap(); + assert_eq!(result.inserted_count as usize, num_models); + + let command_started_events = client.events.get_command_started_events(&["bulkWrite"]); + assert_eq!(command_started_events.len(), 1); + + let event = &command_started_events[0]; + + let ops = event.command.get_array("ops").unwrap(); + assert_eq!(ops.len(), num_models); + + let ns_info = event.command.get_array("nsInfo").unwrap(); + assert_eq!(ns_info.len(), 1); + let namespace = ns_info[0].as_document().unwrap().get_str("ns").unwrap(); + assert_eq!(namespace, &first_namespace.to_string()); + + // Case 2: batch-splitting required + + client.events.clear_cached_events(); + + let second_namespace = Namespace::new("db", "c".repeat(200)); + + let mut second_models = models.clone(); + second_models.push(WriteModel::InsertOne { + namespace: second_namespace.clone(), + document: doc! 
{ "a": "b" }, + }); + let num_models = second_models.len(); + + let result = client.bulk_write(second_models).await.unwrap(); + assert_eq!(result.inserted_count as usize, num_models); + + let command_started_events = client.events.get_command_started_events(&["bulkWrite"]); + assert_eq!(command_started_events.len(), 2); + + let first_event = &command_started_events[0]; + + let first_ops = first_event.command.get_array("ops").unwrap(); + assert_eq!(first_ops.len(), num_models - 1); + + let first_ns_info = first_event.command.get_array("nsInfo").unwrap(); + assert_eq!(first_ns_info.len(), 1); + let actual_first_namespace = first_ns_info[0] + .as_document() + .unwrap() + .get_str("ns") + .unwrap(); + assert_eq!(actual_first_namespace, &first_namespace.to_string()); + + let second_event = &command_started_events[1]; + + let second_ops = second_event.command.get_array("ops").unwrap(); + assert_eq!(second_ops.len(), 1); + + let second_ns_info = second_event.command.get_array("nsInfo").unwrap(); + assert_eq!(second_ns_info.len(), 1); + let actual_second_namespace = second_ns_info[0] + .as_document() + .unwrap() + .get_str("ns") + .unwrap(); + assert_eq!(actual_second_namespace, &second_namespace.to_string()); +} + +// CRUD prose test 12 +#[tokio::test] +async fn too_large_client_error() { + let client = Client::test_builder().monitor_events().build().await; + let max_message_size_bytes = client.server_info.max_message_size_bytes as usize; + + if client.server_version_lt(8, 0) { + log_uncaptured("skipping too_large_client_error: bulkWrite requires 8.0+"); + return; + } + + // Case 1: document too large + let model = WriteModel::InsertOne { + namespace: Namespace::new("db", "coll"), + document: doc! { "a": "b".repeat(max_message_size_bytes) }, + }; + + let error = client.bulk_write(vec![model]).await.unwrap_err(); + assert!(!error.is_server_error()); + + // Case 2: namespace too large + let model = WriteModel::InsertOne { + namespace: Namespace::new("db", "c".repeat(max_message_size_bytes)), + document: doc! { "a": "b" }, + }; + + let error = client.bulk_write(vec![model]).await.unwrap_err(); + assert!(!error.is_server_error()); +} + +// CRUD prose test 13 +#[cfg(feature = "in-use-encryption-unstable")] +#[tokio::test] +async fn encryption_error() { + use crate::{ + client::csfle::options::{AutoEncryptionOptions, KmsProviders}, + mongocrypt::ctx::KmsProvider, + }; + + let kms_providers = KmsProviders::new(vec![( + KmsProvider::Aws, + doc! { "accessKeyId": "foo", "secretAccessKey": "bar" }, + None, + )]) + .unwrap(); + let encrypted_options = AutoEncryptionOptions::new(Namespace::new("db", "coll"), kms_providers); + let encrypted_client = Client::test_builder() + .encrypted_options(encrypted_options) + .build() + .await; + + let model = WriteModel::InsertOne { + namespace: Namespace::new("db", "coll"), + document: doc! 
{ "a": "b" }, + }; + let error = encrypted_client.bulk_write(vec![model]).await.unwrap_err(); + + let ErrorKind::Encryption(encryption_error) = *error.kind else { + panic!("expected encryption error, got {:?}", error); + }; + + assert_eq!( + encryption_error.message, + Some("bulkWrite does not currently support automatic encryption".to_string()) + ); +} diff --git a/src/test/change_stream.rs b/src/test/change_stream.rs index 4efcdbbed..19bea9e30 100644 --- a/src/test/change_stream.rs +++ b/src/test/change_stream.rs @@ -12,7 +12,7 @@ use crate::{ db::options::ChangeStreamPreAndPostImages, event::command::{CommandEvent, CommandStartedEvent, CommandSucceededEvent}, options::{Acknowledgment, WriteConcern}, - test::{FailCommandOptions, FailPoint, FailPointMode}, + test::util::fail_point::{FailPoint, FailPointMode}, Client, Collection, }; @@ -172,13 +172,8 @@ async fn resumes_on_error() -> Result<()> { }) if key == doc! { "_id": 1 } )); - let _guard = FailPoint::fail_command( - &["getMore"], - FailPointMode::Times(1), - FailCommandOptions::builder().error_code(43).build(), - ) - .enable(&client, None) - .await?; + let fail_point = FailPoint::fail_command(&["getMore"], FailPointMode::Times(1)).error_code(43); + let _guard = client.enable_fail_point(fail_point).await?; coll.insert_one(doc! { "_id": 2 }).await?; assert!(matches!(stream.next().await.transpose()?, @@ -205,13 +200,9 @@ async fn does_not_resume_aggregate() -> Result<()> { None => return Ok(()), }; - let _guard = FailPoint::fail_command( - &["aggregate"], - FailPointMode::Times(1), - FailCommandOptions::builder().error_code(43).build(), - ) - .enable(&client, None) - .await?; + let fail_point = + FailPoint::fail_command(&["aggregate"], FailPointMode::Times(1)).error_code(43); + let _guard = client.enable_fail_point(fail_point).await?; assert!(coll.watch().await.is_err()); @@ -277,13 +268,9 @@ async fn resume_kill_cursor_error_suppressed() -> Result<()> { }) if key == doc! { "_id": 1 } )); - let _guard = FailPoint::fail_command( - &["getMore", "killCursors"], - FailPointMode::Times(1), - FailCommandOptions::builder().error_code(43).build(), - ) - .enable(&client, None) - .await?; + let fail_point = FailPoint::fail_command(&["getMore", "killCursors"], FailPointMode::Times(1)) + .error_code(43); + let _guard = client.enable_fail_point(fail_point).await?; coll.insert_one(doc! { "_id": 2 }).await?; assert!(matches!(stream.next().await.transpose()?, @@ -322,13 +309,8 @@ async fn resume_start_at_operation_time() -> Result<()> { return Ok(()); } - let _guard = FailPoint::fail_command( - &["getMore"], - FailPointMode::Times(1), - FailCommandOptions::builder().error_code(43).build(), - ) - .enable(&client, None) - .await?; + let fail_point = FailPoint::fail_command(&["getMore"], FailPointMode::Times(1)).error_code(43); + let _guard = client.enable_fail_point(fail_point).await?; coll.insert_one(doc! { "_id": 2 }).await?; stream.next().await.transpose()?; @@ -538,13 +520,8 @@ async fn resume_uses_start_after() -> Result<()> { // Create an event, and synthesize a resumable error when calling `getMore` for that event. coll.insert_one(doc! 
{}).await?; - let _guard = FailPoint::fail_command( - &["getMore"], - FailPointMode::Times(1), - FailCommandOptions::builder().error_code(43).build(), - ) - .enable(&client, None) - .await?; + let fail_point = FailPoint::fail_command(&["getMore"], FailPointMode::Times(1)).error_code(43); + let _guard = client.enable_fail_point(fail_point).await?; stream.next().await.transpose()?; let commands = client.events.get_command_started_events(&["aggregate"]); @@ -598,13 +575,8 @@ async fn resume_uses_resume_after() -> Result<()> { // Create an event, and synthesize a resumable error when calling `getMore` for that event. coll.insert_one(doc! {}).await?; - let _guard = FailPoint::fail_command( - &["getMore"], - FailPointMode::Times(1), - FailCommandOptions::builder().error_code(43).build(), - ) - .enable(&client, None) - .await?; + let fail_point = FailPoint::fail_command(&["getMore"], FailPointMode::Times(1)).error_code(43); + let _guard = client.enable_fail_point(fail_point).await?; stream.next().await.transpose()?; let commands = client.events.get_command_started_events(&["aggregate"]); diff --git a/src/test/client.rs b/src/test/client.rs index 932814641..28734d5ac 100644 --- a/src/test/client.rs +++ b/src/test/client.rs @@ -14,11 +14,12 @@ use crate::{ test::{ get_client_options, log_uncaptured, - util::{event_buffer::EventBuffer, TestClient}, + util::{ + event_buffer::EventBuffer, + fail_point::{FailPoint, FailPointMode}, + TestClient, + }, Event, - FailCommandOptions, - FailPoint, - FailPointMode, SERVER_API, }, Client, @@ -707,14 +708,10 @@ async fn retry_commit_txn_check_out() { .await .unwrap(); - // enable a fail point that clears the connection pools so that - // commitTransaction will create a new connection during check out. - let fp = FailPoint::fail_command( - &["ping"], - FailPointMode::Times(1), - FailCommandOptions::builder().error_code(11600).build(), - ); - let _guard = setup_client.enable_failpoint(fp, None).await.unwrap(); + // Enable a fail point that clears the connection pools so that commitTransaction will create a + // new connection during checkout. + let fail_point = FailPoint::fail_command(&["ping"], FailPointMode::Times(1)).error_code(11600); + let _guard = setup_client.enable_fail_point(fail_point).await.unwrap(); #[allow(deprecated)] let mut subscriber = buffer.subscribe(); @@ -758,17 +755,13 @@ async fn retry_commit_txn_check_out() { .await .expect("should see mark available event"); - // enable a failpoint on the handshake to cause check_out - // to fail with a retryable error - let fp = FailPoint::fail_command( + let fail_point = FailPoint::fail_command( &[LEGACY_HELLO_COMMAND_NAME, "hello"], FailPointMode::Times(1), - FailCommandOptions::builder() - .error_code(11600) - .app_name("retry_commit_txn_check_out".to_string()) - .build(), - ); - let _guard2 = setup_client.enable_failpoint(fp, None).await.unwrap(); + ) + .error_code(11600) + .app_name("retry_commit_txn_check_out"); + let _guard2 = setup_client.enable_fail_point(fail_point).await.unwrap(); // finally, attempt the commit. 
// this should succeed due to retry diff --git a/src/test/coll.rs b/src/test/coll.rs index 23c1350ca..83c029e15 100644 --- a/src/test/coll.rs +++ b/src/test/coll.rs @@ -1,14 +1,20 @@ use std::{fmt::Debug, time::Duration}; -use crate::{event::command::CommandEvent, test::Event, Client, Namespace}; -use bson::{rawdoc, serde_helpers::HumanReadable, RawDocumentBuf}; use futures::stream::{StreamExt, TryStreamExt}; use once_cell::sync::Lazy; use semver::VersionReq; use serde::{de::DeserializeOwned, Deserialize, Serialize}; use crate::{ - bson::{doc, to_document, Bson, Document}, + bson::{ + doc, + rawdoc, + serde_helpers::HumanReadable, + to_document, + Bson, + Document, + RawDocumentBuf, + }, error::{ErrorKind, Result, WriteFailure}, options::{ Acknowledgment, @@ -26,9 +32,11 @@ use crate::{ }, results::DeleteResult, test::{get_client_options, log_uncaptured, util::TestClient, EventClient}, + Client, Collection, Cursor, IndexModel, + Namespace, }; #[tokio::test] @@ -1249,19 +1257,11 @@ async fn insert_many_document_sequences() { ]; collection.insert_many(docs).await.unwrap(); - let event = subscriber - .filter_map_event(Duration::from_millis(500), |e| match e { - Event::Command(command_event) => match command_event { - CommandEvent::Started(started) if started.command_name.as_str() == "insert" => { - Some(started) - } - _ => None, - }, - _ => None, - }) + let (started, _) = subscriber + .wait_for_successful_command_execution(Duration::from_millis(500), "insert") .await - .expect("did not observe command started event for insert"); - let insert_documents = event.command.get_array("documents").unwrap(); + .expect("did not observe successful command events for insert"); + let insert_documents = started.command.get_array("documents").unwrap(); assert_eq!(insert_documents.len(), 2); // Build up a list of documents that exceeds max_message_size @@ -1277,34 +1277,18 @@ async fn insert_many_document_sequences() { let total_docs = docs.len(); collection.insert_many(docs).await.unwrap(); - let first_event = subscriber - .filter_map_event(Duration::from_millis(500), |e| match e { - Event::Command(command_event) => match command_event { - CommandEvent::Started(started) if started.command_name.as_str() == "insert" => { - Some(started) - } - _ => None, - }, - _ => None, - }) + let (first_started, _) = subscriber + .wait_for_successful_command_execution(Duration::from_millis(500), "insert") .await - .expect("did not observe command started event for insert"); - let first_batch_len = first_event.command.get_array("documents").unwrap().len(); + .expect("did not observe successful command events for insert"); + let first_batch_len = first_started.command.get_array("documents").unwrap().len(); assert!(first_batch_len < total_docs); - let second_event = subscriber - .filter_map_event(Duration::from_millis(500), |e| match e { - Event::Command(command_event) => match command_event { - CommandEvent::Started(started) if started.command_name.as_str() == "insert" => { - Some(started) - } - _ => None, - }, - _ => None, - }) + let (second_started, _) = subscriber + .wait_for_successful_command_execution(Duration::from_millis(500), "insert") .await - .expect("did not observe command started event for insert"); - let second_batch_len = second_event.command.get_array("documents").unwrap().len(); + .expect("did not observe successful command events for insert"); + let second_batch_len = second_started.command.get_array("documents").unwrap().len(); assert_eq!(first_batch_len + second_batch_len, total_docs); } diff --git 
a/src/test/csfle.rs b/src/test/csfle.rs index 505354528..7a54da56a 100644 --- a/src/test/csfle.rs +++ b/src/test/csfle.rs @@ -44,22 +44,20 @@ use crate::{ WriteConcern, }, runtime, - test::{util::event_buffer::EventBuffer, Event}, + test::{ + util::{ + event_buffer::EventBuffer, + fail_point::{FailPoint, FailPointMode}, + }, + Event, + }, Client, Collection, IndexModel, Namespace, }; -use super::{ - get_client_options, - log_uncaptured, - EventClient, - FailCommandOptions, - FailPoint, - FailPointMode, - TestClient, -}; +use super::{get_client_options, log_uncaptured, EventClient, TestClient}; type Result = anyhow::Result; @@ -2414,12 +2412,9 @@ async fn decryption_events_command_error() -> Result<()> { None => return Ok(()), }; - let fp = FailPoint::fail_command( - &["aggregate"], - FailPointMode::Times(1), - FailCommandOptions::builder().error_code(123).build(), - ); - let _guard = fp.enable(&td.setup_client, None).await?; + let fail_point = + FailPoint::fail_command(&["aggregate"], FailPointMode::Times(1)).error_code(123); + let _guard = td.setup_client.enable_fail_point(fail_point).await.unwrap(); let err = td .decryption_events .aggregate(vec![doc! { "$count": "total" }]) @@ -2443,15 +2438,10 @@ async fn decryption_events_network_error() -> Result<()> { None => return Ok(()), }; - let fp = FailPoint::fail_command( - &["aggregate"], - FailPointMode::Times(1), - FailCommandOptions::builder() - .error_code(123) - .close_connection(true) - .build(), - ); - let _guard = fp.enable(&td.setup_client, None).await?; + let fail_point = FailPoint::fail_command(&["aggregate"], FailPointMode::Times(1)) + .error_code(123) + .close_connection(true); + let _guard = td.setup_client.enable_fail_point(fail_point).await.unwrap(); let err = td .decryption_events .aggregate(vec![doc! { "$count": "total" }]) diff --git a/src/test/spec.rs b/src/test/spec.rs index e17776ba1..8c88778a5 100644 --- a/src/test/spec.rs +++ b/src/test/spec.rs @@ -86,14 +86,16 @@ pub(crate) fn deserialize_spec_tests( .unwrap_or_else(|e| panic!("Failed to open file at {:?}: {}", &path, e)); // Use BSON as an intermediary to deserialize extended JSON properly. 
-    let test_bson: Bson = serde_json::from_reader(file).unwrap_or_else(|e| {
+    let deserializer = &mut serde_json::Deserializer::from_reader(file);
+    let test_bson: Bson = serde_path_to_error::deserialize(deserializer).unwrap_or_else(|e| {
         panic!(
             "Failed to deserialize test JSON to BSON in {:?}: {}",
             &path, e
         )
     });
-    let de = bson::Deserializer::new(test_bson);
-    let test: T = serde_path_to_error::deserialize(de).unwrap_or_else(|e| {
+
+    let deserializer = bson::Deserializer::new(test_bson);
+    let test: T = serde_path_to_error::deserialize(deserializer).unwrap_or_else(|e| {
         panic!(
             "Failed to deserialize test BSON to {} in {:?}: {}",
             type_name::<T>(),
diff --git a/src/test/spec/gridfs.rs b/src/test/spec/gridfs.rs
index 53332c95d..5fd5403d0 100644
--- a/src/test/spec/gridfs.rs
+++ b/src/test/spec/gridfs.rs
@@ -11,9 +11,7 @@ use crate::{
     test::{
         get_client_options,
         spec::unified_runner::run_unified_tests,
-        FailCommandOptions,
-        FailPoint,
-        FailPointMode,
+        util::fail_point::{FailPoint, FailPointMode},
         TestClient,
     },
 };
@@ -232,14 +230,8 @@ async fn upload_stream_errors() {
         .await
         .unwrap();
 
-    let _fp_guard = FailPoint::fail_command(
-        &["insert"],
-        FailPointMode::Times(1),
-        FailCommandOptions::builder().error_code(1234).build(),
-    )
-    .enable(&client, None)
-    .await
-    .unwrap();
+    let fail_point = FailPoint::fail_command(&["insert"], FailPointMode::Times(1)).error_code(1234);
+    let _guard = client.enable_fail_point(fail_point).await.unwrap();
 
     let error = get_mongo_error(upload_stream.write_all(&[11]).await);
     assert_eq!(error.sdam_code(), Some(1234));
@@ -255,14 +247,8 @@
 
     upload_stream.write_all(&[11]).await.unwrap();
 
-    let _fp_guard = FailPoint::fail_command(
-        &["insert"],
-        FailPointMode::Times(1),
-        FailCommandOptions::builder().error_code(1234).build(),
-    )
-    .enable(&client, None)
-    .await
-    .unwrap();
+    let fail_point = FailPoint::fail_command(&["insert"], FailPointMode::Times(1)).error_code(1234);
+    let _guard = client.enable_fail_point(fail_point).await.unwrap();
 
     let error = get_mongo_error(upload_stream.close().await);
     assert_eq!(error.sdam_code(), Some(1234));
diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-clientErrors.json b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-clientErrors.json
new file mode 100644
index 000000000..64b1988ff
--- /dev/null
+++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-clientErrors.json
@@ -0,0 +1,350 @@
+{
+  "description": "client bulkWrite retryable writes with client errors",
+  "schemaVersion": "1.20",
+  "runOnRequirements": [
+    {
+      "minServerVersion": "8.0",
+      "topologies": [
+        "replicaset",
+        "sharded",
+        "load-balanced"
+      ]
+    }
+  ],
+  "createEntities": [
+    {
+      "client": {
+        "id": "client0",
+        "observeEvents": [
+          "commandStartedEvent"
+        ],
+        "useMultipleMongoses": false
+      }
+    },
+    {
+      "database": {
+        "id": "database0",
+        "client": "client0",
+        "databaseName": "retryable-writes-tests"
+      }
+    },
+    {
+      "collection": {
+        "id": "collection0",
+        "database": "database0",
+        "collectionName": "coll0"
+      }
+    }
+  ],
+  "initialData": [
+    {
+      "collectionName": "coll0",
+      "databaseName": "retryable-writes-tests",
+      "documents": [
+        {
+          "_id": 1,
+          "x": 11
+        },
+        {
+          "_id": 2,
+          "x": 22
+        },
+        {
+          "_id": 3,
+          "x": 33
+        }
+      ]
+    }
+  ],
+  "_yamlAnchors": {
+    "namespace": "retryable-writes-tests.coll0"
+  },
+  "tests": [
+    {
+      "description": "client bulkWrite with one network error succeeds after retry",
+      "operations": [
+        {
+          "object": "testRunner",
+          "name":
"failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "closeConnection": true + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "retryable-writes-tests.coll0", + "document": { + "_id": 4, + "x": 44 + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 4 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "lsid": { + "$$exists": true + }, + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "lsid": { + "$$exists": true + }, + "txnNumber": { + "$$exists": true + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + }, + { + "description": "client bulkWrite with two network errors fails after retry", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "closeConnection": true + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "retryable-writes-tests.coll0", + "document": { + "_id": 4, + "x": 44 + } + } + } + ], + "verboseResults": true + }, + "expectError": { + "isClientError": true, + "errorLabelsContain": [ + "RetryableWriteError" + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "lsid": { + "$$exists": true + }, + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "lsid": { + "$$exists": true + }, + "txnNumber": { + "$$exists": true + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 
1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-clientErrors.yml b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-clientErrors.yml new file mode 100644 index 000000000..d1b08ec37 --- /dev/null +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-clientErrors.yml @@ -0,0 +1,172 @@ +description: "client bulkWrite retryable writes with client errors" +schemaVersion: "1.20" +runOnRequirements: + - minServerVersion: "8.0" + topologies: + - replicaset + - sharded + - load-balanced + +createEntities: + - client: + id: &client0 client0 + observeEvents: [ commandStartedEvent ] + useMultipleMongoses: false + - database: + id: &database0 database0 + client: *client0 + databaseName: &database0Name retryable-writes-tests + - collection: + id: &collection0 collection0 + database: *database0 + collectionName: &collection0Name coll0 + +initialData: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + +_yamlAnchors: + namespace: &namespace "retryable-writes-tests.coll0" + +tests: + - description: "client bulkWrite with one network error succeeds after retry" + operations: + - object: testRunner + name: failPoint + arguments: + client: *client0 + failPoint: + configureFailPoint: failCommand + mode: + times: 1 + data: + failCommands: [ bulkWrite ] + closeConnection: true + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 4, x: 44 } + verboseResults: true + expectResult: + insertedCount: 1 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 0 + insertResults: + 0: + insertedId: 4 + updateResults: {} + deleteResults: {} + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - insert: 0 + document: { _id: 4, x: 44 } + nsInfo: + - ns: *namespace + # An implicit session is included with the transaction number: + lsid: { "$$exists": true } + txnNumber: { "$$exists": true } + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - insert: 0 + document: { _id: 4, x: 44 } + nsInfo: + - ns: *namespace + # An implicit session is included with the transaction number: + lsid: { "$$exists": true } + txnNumber: { "$$exists": true } + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + - { _id: 4, x: 44 } + - description: "client bulkWrite with two network errors fails after retry" + operations: + - object: testRunner + name: failPoint + arguments: + client: *client0 + failPoint: + configureFailPoint: failCommand + mode: + times: 2 + data: + failCommands: [ bulkWrite ] + closeConnection: true + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 4, x: 44 } + verboseResults: true + expectError: + isClientError: true + errorLabelsContain: ["RetryableWriteError"] # Error label added by driver. 
+ expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - insert: 0 + document: { _id: 4, x: 44 } + nsInfo: + - ns: *namespace + # An implicit session is included with the transaction number: + lsid: { "$$exists": true } + txnNumber: { "$$exists": true } + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - insert: 0 + document: { _id: 4, x: 44 } + nsInfo: + - ns: *namespace + # An implicit session is included with the transaction number: + lsid: { "$$exists": true } + txnNumber: { "$$exists": true } + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-delete-options.json b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-delete-options.json new file mode 100644 index 000000000..5bdf2b124 --- /dev/null +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-delete-options.json @@ -0,0 +1,267 @@ +{ + "description": "client bulkWrite delete options", + "schemaVersion": "1.1", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0", + "collation": { + "locale": "simple" + }, + "hint": "_id_" + }, + "tests": [ + { + "description": "client bulk write delete with collation", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "collation": { + "locale": "simple" + } + } + }, + { + "deleteMany": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": { + "$gt": 1 + } + }, + "collation": { + "locale": "simple" + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 3, + "insertResults": {}, + "updateResults": {}, + "deleteResults": { + "0": { + "deletedCount": 1 + }, + "1": { + "deletedCount": 2 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "delete": 0, + "filter": { + "_id": 1 + }, + "collation": { + "locale": "simple" + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": { + "$gt": 1 + } + }, + "collation": { + "locale": "simple" + }, + "multi": true + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [] + } + ] + }, + { + "description": "client bulk write delete 
with hint", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "hint": "_id_" + } + }, + { + "deleteMany": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_" + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 3, + "insertResults": {}, + "updateResults": {}, + "deleteResults": { + "0": { + "deletedCount": 1 + }, + "1": { + "deletedCount": 2 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "delete": 0, + "filter": { + "_id": 1 + }, + "hint": "_id_", + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": { + "$gt": 1 + } + }, + "hint": "_id_", + "multi": true + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [] + } + ] + } + ] +} diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-delete-options.yml b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-delete-options.yml new file mode 100644 index 000000000..db8b9f46d --- /dev/null +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-delete-options.yml @@ -0,0 +1,136 @@ +description: "client bulkWrite delete options" +schemaVersion: "1.1" +runOnRequirements: + - minServerVersion: "8.0" + +createEntities: + - client: + id: &client0 client0 + observeEvents: [ commandStartedEvent ] + - database: + id: &database0 database0 + client: *client0 + databaseName: &database0Name crud-tests + - collection: + id: &collection0 collection0 + database: *database0 + collectionName: &collection0Name coll0 + +initialData: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + +_yamlAnchors: + namespace: &namespace "crud-tests.coll0" + collation: &collation { "locale": "simple" } + hint: &hint _id_ + +tests: + - description: "client bulk write delete with collation" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - deleteOne: + namespace: *namespace + filter: { _id: 1 } + collation: *collation + - deleteMany: + namespace: *namespace + filter: { _id: { $gt: 1 } } + collation: *collation + verboseResults: true + expectResult: + insertedCount: 0 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 3 + insertResults: {} + updateResults: {} + deleteResults: + 0: + deletedCount: 1 + 1: + deletedCount: 2 + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - delete: 0 + filter: { _id: 1 } + collation: *collation + multi: false + - delete: 0 + filter: { _id: { $gt: 1 } } + collation: *collation + multi: true + nsInfo: + - ns: *namespace + outcome: + - databaseName: *database0Name + collectionName: *collection0Name + documents: [] + - description: "client bulk write delete with hint" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - deleteOne: + namespace: *namespace + filter: { _id: 1 } + hint: *hint + - 
deleteMany: + namespace: *namespace + filter: { _id: { $gt: 1 } } + hint: *hint + verboseResults: true + expectResult: + insertedCount: 0 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 3 + insertResults: {} + updateResults: {} + deleteResults: + 0: + deletedCount: 1 + 1: + deletedCount: 2 + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - delete: 0 + filter: { _id: 1 } + hint: *hint + multi: false + - delete: 0 + filter: { _id: { $gt: 1 } } + hint: *hint + multi: true + outcome: + - databaseName: *database0Name + collectionName: *collection0Name + documents: [] diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-errorResponse.json b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-errorResponse.json new file mode 100644 index 000000000..edf2339d8 --- /dev/null +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-errorResponse.json @@ -0,0 +1,68 @@ +{ + "description": "client bulkWrite errorResponse", + "schemaVersion": "1.12", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "useMultipleMongoses": false + } + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0" + }, + "tests": [ + { + "description": "client bulkWrite operations support errorResponse assertions", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorCode": 8 + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1 + } + } + } + ] + }, + "expectError": { + "errorCode": 8, + "errorResponse": { + "code": 8 + } + } + } + ] + } + ] +} diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-errorResponse.yml b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-errorResponse.yml new file mode 100644 index 000000000..45e53171e --- /dev/null +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-errorResponse.yml @@ -0,0 +1,37 @@ +description: "client bulkWrite errorResponse" +schemaVersion: "1.12" +runOnRequirements: + - minServerVersion: "8.0" + +createEntities: + - client: + id: &client0 client0 + useMultipleMongoses: false # Avoid setting fail points with multiple mongoses + +_yamlAnchors: + namespace: &namespace "crud-tests.coll0" + +tests: + - description: "client bulkWrite operations support errorResponse assertions" + operations: + - name: failPoint + object: testRunner + arguments: + client: *client0 + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: [ bulkWrite ] + errorCode: &errorCode 8 # UnknownError + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 1 } + expectError: + errorCode: *errorCode + errorResponse: + code: *errorCode diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-errors.json b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-errors.json new file mode 100644 index 000000000..0c3849973 --- /dev/null +++ 
b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-errors.json @@ -0,0 +1,454 @@ +{ + "description": "client bulkWrite errors", + "schemaVersion": "1.20", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ], + "uriOptions": { + "retryWrites": false + }, + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0", + "writeConcernErrorCode": 91, + "writeConcernErrorMessage": "Replication is being shut down", + "undefinedVarCode": 17276 + }, + "tests": [ + { + "description": "an individual operation fails during an ordered bulkWrite", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + } + } + }, + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id2" + ] + } + } + } + }, + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 3 + } + } + } + ], + "verboseResults": true + }, + "expectError": { + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 1, + "insertResults": {}, + "updateResults": {}, + "deleteResults": { + "0": { + "deletedCount": 1 + } + } + }, + "writeErrors": { + "1": { + "code": 17276 + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "an individual operation fails during an unordered bulkWrite", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + } + } + }, + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id2" + ] + } + } + } + }, + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 3 + } + } + } + ], + "verboseResults": true, + "ordered": false + }, + "expectError": { + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 2, + "insertResults": {}, + "updateResults": {}, + "deleteResults": { + "0": { + "deletedCount": 1 + }, + "2": { + "deletedCount": 1 + } + } + }, + "writeErrors": { + "1": { + "code": 17276 + } + } + } + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 2, + "x": 22 + } + ] + } + ] + }, + { + "description": "detailed results are omitted from error when verboseResults is false", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + } + } + }, + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "$expr": { + "$eq": [ + "$_id", + 
"$$id2" + ] + } + } + } + }, + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 3 + } + } + } + ], + "verboseResults": false + }, + "expectError": { + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 1, + "insertResults": { + "$$unsetOrMatches": {} + }, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } + }, + "writeErrors": { + "1": { + "code": 17276 + } + } + } + } + ] + }, + { + "description": "a top-level failure occurs during a bulkWrite", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorCode": 8 + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "x": 1 + } + } + } + ], + "verboseResults": true + }, + "expectError": { + "errorCode": 8 + } + } + ] + }, + { + "description": "a bulk write with only errors does not report a partial result", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id2" + ] + } + } + } + } + ], + "verboseResults": true + }, + "expectError": { + "expectResult": { + "$$unsetOrMatches": {} + }, + "writeErrors": { + "0": { + "code": 17276 + } + } + } + } + ] + }, + { + "description": "a write concern error occurs during a bulkWrite", + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 10 + } + } + } + ], + "verboseResults": true + }, + "expectError": { + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 10 + } + }, + "updateResults": {}, + "deleteResults": {} + }, + "writeConcernErrors": [ + { + "code": 91, + "message": "Replication is being shut down" + } + ] + } + } + ] + }, + { + "description": "an empty list of write models is a client-side error", + "operations": [ + { + "name": "clientBulkWrite", + "object": "client0", + "arguments": { + "models": [], + "verboseResults": true + }, + "expectError": { + "isClientError": true + } + } + ] + } + ] +} diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-errors.yml b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-errors.yml new file mode 100644 index 000000000..6d5012229 --- /dev/null +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-errors.yml @@ -0,0 +1,240 @@ +description: "client bulkWrite errors" +schemaVersion: "1.20" +runOnRequirements: + - minServerVersion: "8.0" + +createEntities: + - client: + id: &client0 client0 + observeEvents: [ commandStartedEvent ] + uriOptions: + retryWrites: false + useMultipleMongoses: false # Target a 
single mongos with failpoint + - database: + id: &database0 database0 + client: *client0 + databaseName: &database0Name crud-tests + - collection: + id: &collection0 collection0 + database: *database0 + collectionName: &collection0Name coll0 + +initialData: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + +_yamlAnchors: + namespace: &namespace "crud-tests.coll0" + writeConcernErrorCode: &writeConcernErrorCode 91 + writeConcernErrorMessage: &writeConcernErrorMessage "Replication is being shut down" + undefinedVarCode: &undefinedVarCode 17276 # Use of an undefined variable + +tests: + - description: "an individual operation fails during an ordered bulkWrite" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - deleteOne: + namespace: *namespace + filter: { _id: 1 } + - deleteOne: + namespace: *namespace + filter: + $expr: + $eq: [ "$_id", "$$id2" ] # Attempt to access a nonexistent let var + - deleteOne: + namespace: *namespace + filter: { _id: 3 } + verboseResults: true + expectError: + expectResult: + insertedCount: 0 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 1 + insertResults: {} + updateResults: {} + deleteResults: + 0: + deletedCount: 1 + writeErrors: + 1: + code: *undefinedVarCode + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + - description: "an individual operation fails during an unordered bulkWrite" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - deleteOne: + namespace: *namespace + filter: { _id: 1 } + - deleteOne: + namespace: *namespace + filter: + $expr: + $eq: [ "$_id", "$$id2" ] # Attempt to access a nonexistent let var + - deleteOne: + namespace: *namespace + filter: { _id: 3 } + verboseResults: true + ordered: false + expectError: + expectResult: + insertedCount: 0 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 2 + insertResults: {} + updateResults: {} + deleteResults: + 0: + deletedCount: 1 + 2: + deletedCount: 1 + writeErrors: + 1: + code: *undefinedVarCode + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 2, x: 22 } + - description: "detailed results are omitted from error when verboseResults is false" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - deleteOne: + namespace: *namespace + filter: { _id: 1 } + - deleteOne: + namespace: *namespace + filter: + $expr: + $eq: [ "$_id", "$$id2" ] # Attempt to access a nonexistent let var + - deleteOne: + namespace: *namespace + filter: { _id: 3 } + verboseResults: false + expectError: + expectResult: + insertedCount: 0 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 1 + insertResults: + $$unsetOrMatches: {} + updateResults: + $$unsetOrMatches: {} + deleteResults: + $$unsetOrMatches: {} + writeErrors: + 1: + code: *undefinedVarCode + - description: "a top-level failure occurs during a bulkWrite" + operations: + - object: testRunner + name: failPoint + arguments: + client: *client0 + failPoint: + configureFailPoint: failCommand + mode: + times: 1 + data: + failCommands: + - bulkWrite + errorCode: 8 # UnknownError + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { x: 1 } + verboseResults: true + expectError: + errorCode: 8 + - description: 
"a bulk write with only errors does not report a partial result" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - deleteOne: + namespace: *namespace + filter: + $expr: + $eq: [ "$_id", "$$id2" ] # Attempt to access a nonexistent let var + verboseResults: true + expectError: + expectResult: + $$unsetOrMatches: {} # Empty or nonexistent result when no successful writes occurred + writeErrors: + 0: + code: *undefinedVarCode + - description: "a write concern error occurs during a bulkWrite" + operations: + - name: failPoint + object: testRunner + arguments: + client: *client0 + failPoint: + configureFailPoint: failCommand + mode: + times: 1 + data: + failCommands: + - bulkWrite + writeConcernError: + code: *writeConcernErrorCode + errmsg: *writeConcernErrorMessage + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 10 } + verboseResults: true + expectError: + expectResult: + insertedCount: 1 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 0 + insertResults: + 0: + insertedId: 10 + updateResults: {} + deleteResults: {} + writeConcernErrors: + - code: *writeConcernErrorCode + message: *writeConcernErrorMessage + - description: "an empty list of write models is a client-side error" + operations: + - name: clientBulkWrite + object: *client0 + arguments: + models: [] + verboseResults: true + expectError: + isClientError: true diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-mixed-namespaces.json b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-mixed-namespaces.json new file mode 100644 index 000000000..f90755dc8 --- /dev/null +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-mixed-namespaces.json @@ -0,0 +1,314 @@ +{ + "description": "client bulkWrite with mixed namespaces", + "schemaVersion": "1.1", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "db0" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + }, + { + "collection": { + "id": "collection1", + "database": "database0", + "collectionName": "coll1" + } + }, + { + "database": { + "id": "database1", + "client": "client0", + "databaseName": "db1" + } + }, + { + "collection": { + "id": "collection2", + "database": "database1", + "collectionName": "coll2" + } + } + ], + "initialData": [ + { + "databaseName": "db0", + "collectionName": "coll0", + "documents": [] + }, + { + "databaseName": "db0", + "collectionName": "coll1", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + }, + { + "databaseName": "db1", + "collectionName": "coll2", + "documents": [ + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ], + "_yamlAnchors": { + "db0Coll0Namespace": "db0.coll0", + "db0Coll1Namespace": "db0.coll1", + "db1Coll2Namespace": "db1.coll2" + }, + "tests": [ + { + "description": "client bulkWrite with mixed namespaces", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "db0.coll0", + "document": { + "_id": 1 + } + } + }, + { + "insertOne": { + "namespace": "db0.coll0", + "document": { + "_id": 2 + } + } + }, + { + "updateOne": { + "namespace": 
"db0.coll1", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "deleteOne": { + "namespace": "db1.coll2", + "filter": { + "_id": 3 + } + } + }, + { + "deleteOne": { + "namespace": "db0.coll1", + "filter": { + "_id": 2 + } + } + }, + { + "replaceOne": { + "namespace": "db1.coll2", + "filter": { + "_id": 4 + }, + "replacement": { + "x": 45 + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 2, + "upsertedCount": 0, + "matchedCount": 2, + "modifiedCount": 2, + "deletedCount": 2, + "insertResults": { + "0": { + "insertedId": 1 + }, + "1": { + "insertedId": 2 + } + }, + "updateResults": { + "2": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + }, + "5": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + } + }, + "deleteResults": { + "3": { + "deletedCount": 1 + }, + "4": { + "deletedCount": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "command": { + "bulkWrite": 1, + "ops": [ + { + "insert": 0, + "document": { + "_id": 1 + } + }, + { + "insert": 0, + "document": { + "_id": 2 + } + }, + { + "update": 1, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "delete": 2, + "filter": { + "_id": 3 + }, + "multi": false + }, + { + "delete": 1, + "filter": { + "_id": 2 + }, + "multi": false + }, + { + "update": 2, + "filter": { + "_id": 4 + }, + "updateMods": { + "x": 45 + }, + "multi": false + } + ], + "nsInfo": [ + { + "ns": "db0.coll0" + }, + { + "ns": "db0.coll1" + }, + { + "ns": "db1.coll2" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "db0", + "collectionName": "coll0", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + }, + { + "databaseName": "db0", + "collectionName": "coll1", + "documents": [ + { + "_id": 1, + "x": 12 + } + ] + }, + { + "databaseName": "db1", + "collectionName": "coll2", + "documents": [ + { + "_id": 4, + "x": 45 + } + ] + } + ] + } + ] +} diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-mixed-namespaces.yml b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-mixed-namespaces.yml new file mode 100644 index 000000000..4e4cb01e1 --- /dev/null +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-mixed-namespaces.yml @@ -0,0 +1,146 @@ +description: "client bulkWrite with mixed namespaces" +schemaVersion: "1.1" +runOnRequirements: + - minServerVersion: "8.0" + +createEntities: + - client: + id: &client0 client0 + observeEvents: [ commandStartedEvent ] + - database: + id: &database0 database0 + client: *client0 + databaseName: &database0Name db0 + - collection: + id: &collection0 collection0 + database: *database0 + collectionName: &collection0Name coll0 + - collection: + id: &collection1 collection1 + database: *database0 + collectionName: &collection1Name coll1 + - database: + id: &database1 database1 + client: *client0 + databaseName: &database1Name db1 + - collection: + id: &collection2 collection2 + database: *database1 + collectionName: &collection2Name coll2 + +initialData: + - databaseName: *database0Name + collectionName: *collection0Name + documents: [] + - databaseName: *database0Name + collectionName: *collection1Name + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - databaseName: *database1Name + collectionName: *collection2Name + documents: + - { _id: 3, x: 33 } + - { _id: 4, x: 44 } + 
+_yamlAnchors: + db0Coll0Namespace: &db0Coll0Namespace "db0.coll0" + db0Coll1Namespace: &db0Coll1Namespace "db0.coll1" + db1Coll2Namespace: &db1Coll2Namespace "db1.coll2" + +tests: + - description: "client bulkWrite with mixed namespaces" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *db0Coll0Namespace + document: { _id: 1 } + - insertOne: + namespace: *db0Coll0Namespace + document: { _id: 2 } + - updateOne: + namespace: *db0Coll1Namespace + filter: { _id: 1 } + update: { $inc: { x: 1 } } + - deleteOne: + namespace: *db1Coll2Namespace + filter: { _id: 3 } + - deleteOne: + namespace: *db0Coll1Namespace + filter: { _id: 2 } + - replaceOne: + namespace: *db1Coll2Namespace + filter: { _id: 4 } + replacement: { x: 45 } + verboseResults: true + expectResult: + insertedCount: 2 + upsertedCount: 0 + matchedCount: 2 + modifiedCount: 2 + deletedCount: 2 + insertResults: + 0: + insertedId: 1 + 1: + insertedId: 2 + updateResults: + 2: + matchedCount: 1 + modifiedCount: 1 + upsertedId: { $$exists: false } + 5: + matchedCount: 1 + modifiedCount: 1 + upsertedId: { $$exists: false } + deleteResults: + 3: + deletedCount: 1 + 4: + deletedCount: 1 + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + command: + bulkWrite: 1 + ops: + - insert: 0 + document: { _id: 1 } + - insert: 0 + document: { _id: 2 } + - update: 1 + filter: { _id: 1 } + updateMods: { $inc: { x: 1 } } + multi: false + - delete: 2 + filter: { _id: 3 } + multi: false + - delete: 1 + filter: { _id: 2 } + multi: false + - update: 2 + filter: { _id: 4 } + updateMods: { x: 45 } + multi: false + nsInfo: + - ns: *db0Coll0Namespace + - ns: *db0Coll1Namespace + - ns: *db1Coll2Namespace + outcome: + - databaseName: *database0Name + collectionName: *collection0Name + documents: + - { _id: 1 } + - { _id: 2 } + - databaseName: *database0Name + collectionName: *collection1Name + documents: + - { _id: 1, x: 12 } + - databaseName: *database1Name + collectionName: *collection2Name + documents: + - { _id: 4, x: 45 } diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-options.json b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-options.json new file mode 100644 index 000000000..a1e6af3bf --- /dev/null +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-options.json @@ -0,0 +1,715 @@ +{ + "description": "client bulkWrite top-level options", + "schemaVersion": "1.1", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "client": { + "id": "writeConcernClient", + "uriOptions": { + "w": 1 + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0", + "comment": { + "bulk": "write" + }, + "let": { + "id1": 1, + "id2": 2 + }, + "writeConcern": { + "w": "majority" + } + }, + "tests": [ + { + "description": "client bulkWrite comment", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + 
"insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 3, + "x": 33 + } + } + } + ], + "comment": { + "bulk": "write" + }, + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 3 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "comment": { + "bulk": "write" + }, + "ops": [ + { + "insert": 0, + "document": { + "_id": 3, + "x": 33 + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "client bulkWrite bypassDocumentValidation", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 3, + "x": 33 + } + } + } + ], + "bypassDocumentValidation": true, + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 3 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "bypassDocumentValidation": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 3, + "x": 33 + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "client bulkWrite let", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id1" + ] + } + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id2" + ] + } + } + } + } + ], + "let": { + "id1": 1, + "id2": 2 + }, + "verboseResults": true + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 1, + "modifiedCount": 1, + "deletedCount": 1, + "insertResults": {}, + "updateResults": { + "0": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + } + }, + "deleteResults": { + "1": { + "deletedCount": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "let": { + "id1": 1, + "id2": 2 + }, + "ops": [ + { + "update": 0, + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id1" + ] + } + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, 
+ "multi": false + }, + { + "delete": 0, + "filter": { + "$expr": { + "$eq": [ + "$_id", + "$$id2" + ] + } + }, + "multi": false + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [ + { + "_id": 1, + "x": 12 + } + ] + } + ] + }, + { + "description": "client bulkWrite bypassDocumentValidation: false is sent", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 3, + "x": 33 + } + } + } + ], + "bypassDocumentValidation": false, + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 3 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "bypassDocumentValidation": false, + "ops": [ + { + "insert": 0, + "document": { + "_id": 3, + "x": 33 + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "client bulkWrite writeConcern", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 3, + "x": 33 + } + } + } + ], + "writeConcern": { + "w": "majority" + }, + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 3 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "writeConcern": { + "w": "majority" + }, + "ops": [ + { + "insert": 0, + "document": { + "_id": 3, + "x": 33 + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ] + }, + { + "description": "client bulkWrite inherits writeConcern from client", + "operations": [ + { + "object": "writeConcernClient", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 3, + "x": 33 + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 3 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "writeConcernClient", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "writeConcern": { + "w": 1 + }, + "ops": [ + { + "insert": 0, + "document": { + "_id": 3, + "x": 33 + } + } + ], + "nsInfo": [ + { + 
"ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ] + }, + { + "description": "client bulkWrite writeConcern option overrides client writeConcern", + "operations": [ + { + "object": "writeConcernClient", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 3, + "x": 33 + } + } + } + ], + "writeConcern": { + "w": "majority" + }, + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 3 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "writeConcernClient", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "writeConcern": { + "w": "majority" + }, + "ops": [ + { + "insert": 0, + "document": { + "_id": 3, + "x": 33 + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ] + } + ] +} diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-options.yml b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-options.yml new file mode 100644 index 000000000..fdcf78879 --- /dev/null +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-options.yml @@ -0,0 +1,350 @@ +description: "client bulkWrite top-level options" +schemaVersion: "1.1" +runOnRequirements: + - minServerVersion: "8.0" + +createEntities: + - client: + id: &client0 client0 + observeEvents: [ commandStartedEvent ] + - client: + id: &writeConcernClient writeConcernClient + uriOptions: + &clientWriteConcern { w: 1 } + observeEvents: [ commandStartedEvent ] + - database: + id: &database0 database0 + client: *client0 + databaseName: &database0Name crud-tests + - collection: + id: &collection0 collection0 + database: *database0 + collectionName: &collection0Name coll0 + +initialData: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + +_yamlAnchors: + namespace: &namespace "crud-tests.coll0" + comment: &comment { bulk: "write" } + let: &let { id1: 1, id2: 2 } + writeConcern: &majorityWriteConcern { w: "majority" } + +tests: + - description: "client bulkWrite comment" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 3, x: 33 } + comment: *comment + verboseResults: true + expectResult: + insertedCount: 1 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 0 + insertResults: + 0: + insertedId: 3 + updateResults: {} + deleteResults: {} + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + comment: *comment + ops: + - insert: 0 + document: { _id: 3, x: 33 } + nsInfo: + - ns: *namespace + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + - description: "client bulkWrite bypassDocumentValidation" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 3, x: 33 } + bypassDocumentValidation: true + verboseResults: true + expectResult: + insertedCount: 1 + 
upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 0 + insertResults: + 0: + insertedId: 3 + updateResults: {} + deleteResults: {} + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + bypassDocumentValidation: true + ops: + - insert: 0 + document: { _id: 3, x: 33 } + nsInfo: + - ns: *namespace + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + - description: "client bulkWrite let" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - updateOne: + namespace: *namespace + filter: + $expr: + $eq: [ "$_id", "$$id1" ] + update: + $inc: { x: 1 } + - deleteOne: + namespace: *namespace + filter: + $expr: + $eq: [ "$_id", "$$id2" ] + let: *let + verboseResults: true + expectResult: + insertedCount: 0 + upsertedCount: 0 + matchedCount: 1 + modifiedCount: 1 + deletedCount: 1 + insertResults: {} + updateResults: + 0: + matchedCount: 1 + modifiedCount: 1 + upsertedId: { $$exists: false } + deleteResults: + 1: + deletedCount: 1 + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + let: *let + ops: + - update: 0 + filter: + $expr: + $eq: [ "$_id", "$$id1" ] + updateMods: { $inc: { x: 1 } } + multi: false + - delete: 0 + filter: + $expr: + $eq: [ "$_id", "$$id2" ] + multi: false + nsInfo: + - ns: *namespace + outcome: + - databaseName: *database0Name + collectionName: *collection0Name + documents: + - { _id: 1, x: 12 } + - description: "client bulkWrite bypassDocumentValidation: false is sent" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 3, x: 33 } + bypassDocumentValidation: false + verboseResults: true + expectResult: + insertedCount: 1 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 0 + insertResults: + 0: + insertedId: 3 + updateResults: {} + deleteResults: {} + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + bypassDocumentValidation: false + ops: + - insert: 0 + document: { _id: 3, x: 33 } + nsInfo: + - ns: *namespace + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + - description: "client bulkWrite writeConcern" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 3, x: 33 } + writeConcern: *majorityWriteConcern + verboseResults: true + expectResult: + insertedCount: 1 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 0 + insertResults: + 0: + insertedId: 3 + updateResults: {} + deleteResults: {} + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + writeConcern: *majorityWriteConcern + ops: + - insert: 0 + document: { _id: 3, x: 33 } + nsInfo: + - ns: *namespace + - description: "client bulkWrite inherits writeConcern from client" + operations: + - object: *writeConcernClient + name: clientBulkWrite 
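+        # No writeConcern argument is passed here; the expected command event below
+        # asserts that the w:1 setting from writeConcernClient's URI is applied.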
+ arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 3, x: 33 } + verboseResults: true + expectResult: + insertedCount: 1 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 0 + insertResults: + 0: + insertedId: 3 + updateResults: {} + deleteResults: {} + expectEvents: + - client: *writeConcernClient + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + writeConcern: { w: 1 } + ops: + - insert: 0 + document: { _id: 3, x: 33 } + nsInfo: + - ns: *namespace + - description: "client bulkWrite writeConcern option overrides client writeConcern" + operations: + - object: *writeConcernClient + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 3, x: 33 } + writeConcern: *majorityWriteConcern + verboseResults: true + expectResult: + insertedCount: 1 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 0 + insertResults: + 0: + insertedId: 3 + updateResults: {} + deleteResults: {} + expectEvents: + - client: *writeConcernClient + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + writeConcern: *majorityWriteConcern + ops: + - insert: 0 + document: { _id: 3, x: 33 } + nsInfo: + - ns: *namespace diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-ordered.json b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-ordered.json new file mode 100644 index 000000000..a55d6619b --- /dev/null +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-ordered.json @@ -0,0 +1,290 @@ +{ + "description": "client bulkWrite with ordered option", + "schemaVersion": "1.1", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [] + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0" + }, + "tests": [ + { + "description": "client bulkWrite with ordered: false", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1, + "x": 11 + } + } + } + ], + "verboseResults": true, + "ordered": false + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 1 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": false, + "ops": [ + { + "insert": 0, + "document": { + "_id": 1, + "x": 11 + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + }, + { + "description": "client bulkWrite with 
ordered: true", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1, + "x": 11 + } + } + } + ], + "verboseResults": true, + "ordered": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 1 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 1, + "x": 11 + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + }, + { + "description": "client bulkWrite defaults to ordered: true", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 1, + "x": 11 + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 1 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 1, + "x": 11 + } + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + } + ] + } + ] + } + ] +} diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-ordered.yml b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-ordered.yml new file mode 100644 index 000000000..dc56dcb86 --- /dev/null +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-ordered.yml @@ -0,0 +1,152 @@ +description: "client bulkWrite with ordered option" +schemaVersion: "1.1" +runOnRequirements: + - minServerVersion: "8.0" + +createEntities: + - client: + id: &client0 client0 + observeEvents: [ commandStartedEvent ] + - database: + id: &database0 database0 + client: *client0 + databaseName: &database0Name crud-tests + - collection: + id: &collection0 collection0 + database: *database0 + collectionName: &collection0Name coll0 + +initialData: + - collectionName: *collection0Name + databaseName: *database0Name + documents: [] + +_yamlAnchors: + namespace: &namespace "crud-tests.coll0" + +tests: + - description: "client bulkWrite with ordered: false" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 1, x: 11 } + verboseResults: true + ordered: false + expectResult: + insertedCount: 1 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 0 + insertResults: + 0: + insertedId: 1 + updateResults: {} + deleteResults: {} + expectEvents: + - client: *client0 + events: + - 
commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: false + ops: + - insert: 0 + document: { _id: 1, x: 11 } + nsInfo: + - ns: *namespace + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } + - description: "client bulkWrite with ordered: true" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 1, x: 11 } + verboseResults: true + ordered: true + expectResult: + insertedCount: 1 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 0 + insertResults: + 0: + insertedId: 1 + updateResults: {} + deleteResults: {} + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - insert: 0 + document: { _id: 1, x: 11 } + nsInfo: + - ns: *namespace + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } + - description: "client bulkWrite defaults to ordered: true" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 1, x: 11 } + verboseResults: true + expectResult: + insertedCount: 1 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 0 + insertResults: + 0: + insertedId: 1 + updateResults: {} + deleteResults: {} + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - insert: 0 + document: { _id: 1, x: 11 } + nsInfo: + - ns: *namespace + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-results.json b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-results.json new file mode 100644 index 000000000..97a9e50b2 --- /dev/null +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-results.json @@ -0,0 +1,832 @@ +{ + "description": "client bulkWrite results", + "schemaVersion": "1.1", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 5, + "x": 55 + }, + { + "_id": 6, + "x": 66 + }, + { + "_id": 7, + "x": 77 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0" + }, + "tests": [ + { + "description": "client bulkWrite with verboseResults: true returns detailed results", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 8, + "x": 88 + } + } + }, + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + 
"updateMany": { + "namespace": "crud-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "update": { + "$inc": { + "x": 2 + } + } + } + }, + { + "replaceOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 4 + }, + "replacement": { + "x": 44 + }, + "upsert": true + } + }, + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 5 + } + } + }, + { + "deleteMany": { + "namespace": "crud-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 5 + } + }, + { + "_id": { + "$lte": 7 + } + } + ] + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 1, + "matchedCount": 3, + "modifiedCount": 3, + "deletedCount": 3, + "insertResults": { + "0": { + "insertedId": 8 + } + }, + "updateResults": { + "1": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + }, + "2": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedId": { + "$$exists": false + } + }, + "3": { + "matchedCount": 1, + "modifiedCount": 0, + "upsertedId": 4 + } + }, + "deleteResults": { + "4": { + "deletedCount": 1 + }, + "5": { + "deletedCount": 2 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 8, + "x": 88 + } + }, + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "update": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "updateMods": { + "$inc": { + "x": 2 + } + }, + "multi": true + }, + { + "update": 0, + "filter": { + "_id": 4 + }, + "updateMods": { + "x": 44 + }, + "upsert": true, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": 5 + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 5 + } + }, + { + "_id": { + "$lte": 7 + } + } + ] + }, + "multi": true + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 24 + }, + { + "_id": 3, + "x": 35 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 8, + "x": 88 + } + ] + } + ] + }, + { + "description": "client bulkWrite with verboseResults: false omits detailed results", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 8, + "x": 88 + } + } + }, + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "updateMany": { + "namespace": "crud-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "update": { + "$inc": { + "x": 2 + } + } + } + }, + { + "replaceOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 4 + }, + "replacement": { + "x": 44 + }, + "upsert": true + } + }, + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 5 + } + } + }, + { + "deleteMany": { + "namespace": "crud-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 5 + } + 
}, + { + "_id": { + "$lte": 7 + } + } + ] + } + } + } + ], + "verboseResults": false + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 1, + "matchedCount": 3, + "modifiedCount": 3, + "deletedCount": 3, + "insertResults": { + "$$unsetOrMatches": {} + }, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": true, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 8, + "x": 88 + } + }, + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "update": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "updateMods": { + "$inc": { + "x": 2 + } + }, + "multi": true + }, + { + "update": 0, + "filter": { + "_id": 4 + }, + "updateMods": { + "x": 44 + }, + "upsert": true, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": 5 + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 5 + } + }, + { + "_id": { + "$lte": 7 + } + } + ] + }, + "multi": true + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 24 + }, + { + "_id": 3, + "x": 35 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 8, + "x": 88 + } + ] + } + ] + }, + { + "description": "client bulkWrite defaults to verboseResults: false", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "crud-tests.coll0", + "document": { + "_id": 8, + "x": 88 + } + } + }, + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "updateMany": { + "namespace": "crud-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "update": { + "$inc": { + "x": 2 + } + } + } + }, + { + "replaceOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 4 + }, + "replacement": { + "x": 44 + }, + "upsert": true + } + }, + { + "deleteOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 5 + } + } + }, + { + "deleteMany": { + "namespace": "crud-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 5 + } + }, + { + "_id": { + "$lte": 7 + } + } + ] + } + } + } + ] + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 1, + "matchedCount": 3, + "modifiedCount": 3, + "deletedCount": 3, + "insertResults": { + "$$unsetOrMatches": {} + }, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": true, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 8, + "x": 88 + } + }, + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "update": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + 
"$lte": 3 + } + } + ] + }, + "updateMods": { + "$inc": { + "x": 2 + } + }, + "multi": true + }, + { + "update": 0, + "filter": { + "_id": 4 + }, + "updateMods": { + "x": 44 + }, + "upsert": true, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": 5 + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 5 + } + }, + { + "_id": { + "$lte": 7 + } + } + ] + }, + "multi": true + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 24 + }, + { + "_id": 3, + "x": 35 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 8, + "x": 88 + } + ] + } + ] + } + ] +} diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-results.yml b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-results.yml new file mode 100644 index 000000000..eb001bbb4 --- /dev/null +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-results.yml @@ -0,0 +1,311 @@ +description: "client bulkWrite results" +schemaVersion: "1.1" +runOnRequirements: + - minServerVersion: "8.0" + +createEntities: + - client: + id: &client0 client0 + observeEvents: [ commandStartedEvent ] + - database: + id: &database0 database0 + client: *client0 + databaseName: &database0Name crud-tests + - collection: + id: &collection0 collection0 + database: *database0 + collectionName: &collection0Name coll0 + +initialData: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + - { _id: 5, x: 55 } + - { _id: 6, x: 66 } + - { _id: 7, x: 77 } + +_yamlAnchors: + namespace: &namespace "crud-tests.coll0" + +tests: + - description: "client bulkWrite with verboseResults: true returns detailed results" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 8, x: 88 } + - updateOne: + namespace: *namespace + filter: { _id: 1 } + update: { $inc: { x: 1 } } + - updateMany: + namespace: *namespace + filter: + $and: [ { _id: { $gt: 1 } }, { _id: { $lte: 3 } } ] + update: { $inc: { x: 2 } } + - replaceOne: + namespace: *namespace + filter: { _id: 4 } + replacement: { x: 44 } + upsert: true + - deleteOne: + namespace: *namespace + filter: { _id: 5 } + - deleteMany: + namespace: *namespace + filter: + $and: [ { _id: { $gt: 5 } }, { _id: { $lte: 7 } } ] + verboseResults: true + expectResult: + insertedCount: 1 + upsertedCount: 1 + matchedCount: 3 + modifiedCount: 3 + deletedCount: 3 + insertResults: + 0: + insertedId: 8 + updateResults: + 1: + matchedCount: 1 + modifiedCount: 1 + upsertedId: { $$exists: false } + 2: + matchedCount: 2 + modifiedCount: 2 + upsertedId: { $$exists: false } + 3: + matchedCount: 1 + modifiedCount: 0 + upsertedId: 4 + deleteResults: + 4: + deletedCount: 1 + 5: + deletedCount: 2 + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - insert: 0 + document: { _id: 8, x: 88 } + - update: 0 + filter: { _id: 1 } + updateMods: { $inc: { x: 1 } } + multi: false + - update: 0 + filter: + $and: [ { _id: { $gt: 1 } }, { _id: { $lte: 3 } } ] + updateMods: { $inc: { x: 2 } } + multi: true + - update: 0 + filter: { _id: 4 } + updateMods: { x: 44 } + upsert: true + multi: false + - 
delete: 0 + filter: { _id: 5 } + multi: false + - delete: 0 + filter: + $and: [ { _id: { $gt: 5 } }, { _id: { $lte: 7 } } ] + multi: true + nsInfo: + - ns: *namespace + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 12 } + - { _id: 2, x: 24 } + - { _id: 3, x: 35 } + - { _id: 4, x: 44 } + - { _id: 8, x: 88 } + - description: "client bulkWrite with verboseResults: false omits detailed results" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 8, x: 88 } + - updateOne: + namespace: *namespace + filter: { _id: 1 } + update: { $inc: { x: 1 } } + - updateMany: + namespace: *namespace + filter: + $and: [ { _id: { $gt: 1 } }, { _id: { $lte: 3 } } ] + update: { $inc: { x: 2 } } + - replaceOne: + namespace: *namespace + filter: { _id: 4 } + replacement: { x: 44 } + upsert: true + - deleteOne: + namespace: *namespace + filter: { _id: 5 } + - deleteMany: + namespace: *namespace + filter: + $and: [ { _id: { $gt: 5 } }, { _id: { $lte: 7 } } ] + verboseResults: false + expectResult: + insertedCount: 1 + upsertedCount: 1 + matchedCount: 3 + modifiedCount: 3 + deletedCount: 3 + insertResults: + $$unsetOrMatches: {} + updateResults: + $$unsetOrMatches: {} + deleteResults: + $$unsetOrMatches: {} + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: true + ordered: true + ops: + - insert: 0 + document: { _id: 8, x: 88 } + - update: 0 + filter: { _id: 1 } + updateMods: { $inc: { x: 1 } } + multi: false + - update: 0 + filter: + $and: [ { _id: { $gt: 1 } }, { _id: { $lte: 3 } } ] + updateMods: { $inc: { x: 2 } } + multi: true + - update: 0 + filter: { _id: 4 } + updateMods: { x: 44 } + upsert: true + multi: false + - delete: 0 + filter: { _id: 5 } + multi: false + - delete: 0 + filter: + $and: [ { _id: { $gt: 5 } }, { _id: { $lte: 7 } } ] + multi: true + nsInfo: + - ns: *namespace + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 12 } + - { _id: 2, x: 24 } + - { _id: 3, x: 35 } + - { _id: 4, x: 44 } + - { _id: 8, x: 88 } + - description: "client bulkWrite defaults to verboseResults: false" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 8, x: 88 } + - updateOne: + namespace: *namespace + filter: { _id: 1 } + update: { $inc: { x: 1 } } + - updateMany: + namespace: *namespace + filter: + $and: [ { _id: { $gt: 1 } }, { _id: { $lte: 3 } } ] + update: { $inc: { x: 2 } } + - replaceOne: + namespace: *namespace + filter: { _id: 4 } + replacement: { x: 44 } + upsert: true + - deleteOne: + namespace: *namespace + filter: { _id: 5 } + - deleteMany: + namespace: *namespace + filter: + $and: [ { _id: { $gt: 5 } }, { _id: { $lte: 7 } } ] + expectResult: + insertedCount: 1 + upsertedCount: 1 + matchedCount: 3 + modifiedCount: 3 + deletedCount: 3 + insertResults: + $$unsetOrMatches: {} + updateResults: + $$unsetOrMatches: {} + deleteResults: + $$unsetOrMatches: {} + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: true + ordered: true + ops: + - insert: 0 + document: { _id: 8, x: 88 } + - update: 0 + filter: { _id: 1 } + updateMods: { $inc: { x: 1 } } + multi: false + - update: 0 + filter: + $and: [ { _id: { $gt: 1 } }, { 
_id: { $lte: 3 } } ] + updateMods: { $inc: { x: 2 } } + multi: true + - update: 0 + filter: { _id: 4 } + updateMods: { x: 44 } + upsert: true + multi: false + - delete: 0 + filter: { _id: 5 } + multi: false + - delete: 0 + filter: + $and: [ { _id: { $gt: 5 } }, { _id: { $lte: 7 } } ] + multi: true + nsInfo: + - ns: *namespace + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 12 } + - { _id: 2, x: 24 } + - { _id: 3, x: 35 } + - { _id: 4, x: 44 } + - { _id: 8, x: 88 } diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-serverErrors.json b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-serverErrors.json new file mode 100644 index 000000000..fd517adb5 --- /dev/null +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-serverErrors.json @@ -0,0 +1,778 @@ +{ + "description": "client bulkWrite retryable writes", + "schemaVersion": "1.20", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ], + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "retryable-writes-tests.coll0" + }, + "tests": [ + { + "description": "client bulkWrite with no multi: true operations succeeds after retryable top-level error", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "retryable-writes-tests.coll0", + "document": { + "_id": 4, + "x": 44 + } + } + }, + { + "updateOne": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "replaceOne": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 2 + }, + "replacement": { + "x": 222 + } + } + }, + { + "deleteOne": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 3 + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 2, + "modifiedCount": 2, + "deletedCount": 1, + "insertResults": { + "0": { + "insertedId": 4 + } + }, + "updateResults": { + "1": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + }, + "2": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + } + }, + "deleteResults": { + "3": { + "deletedCount": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + 
"ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + }, + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "update": 0, + "filter": { + "_id": 2 + }, + "updateMods": { + "x": 222 + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": 3 + }, + "multi": false + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "lsid": { + "$$exists": true + }, + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + }, + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "update": 0, + "filter": { + "_id": 2 + }, + "updateMods": { + "x": 222 + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": 3 + }, + "multi": false + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "lsid": { + "$$exists": true + }, + "txnNumber": { + "$$exists": true + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 222 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + }, + { + "description": "client bulkWrite with multi: true operations fails after retryable top-level error", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateMany": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "deleteMany": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 3 + } + } + } + ] + }, + "expectError": { + "errorCode": 189, + "errorLabelsContain": [ + "RetryableWriteError" + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": true, + "ordered": true, + "ops": [ + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": true + }, + { + "delete": 0, + "filter": { + "_id": 3 + }, + "multi": true + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ] + } + } + } + ] + } + ] + }, + { + "description": "client bulkWrite with no multi: true operations succeeds after retryable writeConcernError", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": 
"retryable-writes-tests.coll0", + "document": { + "_id": 4, + "x": 44 + } + } + }, + { + "updateOne": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "replaceOne": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 2 + }, + "replacement": { + "x": 222 + } + } + }, + { + "deleteOne": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 3 + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 2, + "modifiedCount": 2, + "deletedCount": 1, + "insertResults": { + "0": { + "insertedId": 4 + } + }, + "updateResults": { + "1": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + }, + "2": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + } + }, + "deleteResults": { + "3": { + "deletedCount": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + }, + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "update": 0, + "filter": { + "_id": 2 + }, + "updateMods": { + "x": 222 + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": 3 + }, + "multi": false + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "lsid": { + "$$exists": true + }, + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + }, + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "update": 0, + "filter": { + "_id": 2 + }, + "updateMods": { + "x": 222 + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": 3 + }, + "multi": false + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "lsid": { + "$$exists": true + }, + "txnNumber": { + "$$exists": true + } + } + } + } + ] + } + ] + }, + { + "description": "client bulkWrite with multi: true operations fails after retryable writeConcernError", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateMany": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "deleteMany": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 3 + } + } + } + ] + }, + "expectError": { + "writeConcernErrors": [ + { + "code": 91, + "message": "Replication is being shut down" + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + 
"commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": true, + "ordered": true, + "ops": [ + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": true + }, + { + "delete": 0, + "filter": { + "_id": 3 + }, + "multi": true + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ] + } + } + } + ] + } + ] + } + ] +} diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-serverErrors.yml b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-serverErrors.yml new file mode 100644 index 000000000..e5022870c --- /dev/null +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-serverErrors.yml @@ -0,0 +1,367 @@ +description: "client bulkWrite retryable writes" +schemaVersion: "1.20" +runOnRequirements: + - minServerVersion: "8.0" + topologies: + - replicaset + - sharded + - load-balanced + +createEntities: + - client: + id: &client0 client0 + observeEvents: [ commandStartedEvent ] + useMultipleMongoses: false + - database: + id: &database0 database0 + client: *client0 + databaseName: &database0Name retryable-writes-tests + - collection: + id: &collection0 collection0 + database: *database0 + collectionName: &collection0Name coll0 + +initialData: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + +_yamlAnchors: + namespace: &namespace "retryable-writes-tests.coll0" + +tests: + - description: "client bulkWrite with no multi: true operations succeeds after retryable top-level error" + operations: + - object: testRunner + name: failPoint + arguments: + client: *client0 + failPoint: + configureFailPoint: failCommand + mode: + times: 1 + data: + failCommands: [ bulkWrite ] + errorCode: 189 # PrimarySteppedDown + errorLabels: [ RetryableWriteError ] + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 4, x: 44 } + - updateOne: + namespace: *namespace + filter: { _id: 1 } + update: + $inc: { x: 1 } + - replaceOne: + namespace: *namespace + filter: { _id: 2 } + replacement: { x: 222 } + - deleteOne: + namespace: *namespace + filter: { _id: 3 } + verboseResults: true + expectResult: + insertedCount: 1 + upsertedCount: 0 + matchedCount: 2 + modifiedCount: 2 + deletedCount: 1 + insertResults: + 0: + insertedId: 4 + updateResults: + 1: + matchedCount: 1 + modifiedCount: 1 + upsertedId: { $$exists: false } + 2: + matchedCount: 1 + modifiedCount: 1 + upsertedId: { $$exists: false } + deleteResults: + 3: + deletedCount: 1 + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - insert: 0 + document: { _id: 4, x: 44 } + - update: 0 + filter: { _id: 1 } + updateMods: + $inc: { x: 1 } + multi: false + - update: 0 + filter: { _id: 2 } + updateMods: { x: 222 } + multi: false + - delete: 0 + filter: { _id: 3 } + multi: false + nsInfo: + - ns: *namespace + lsid: { $$exists: true } + txnNumber: { $$exists: true } + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - insert: 0 + document: { _id: 4, x: 44 } + - update: 0 + filter: { _id: 1 } + updateMods: + $inc: { x: 1 } + multi: false + - update: 0 + filter: { _id: 2 } + updateMods: { x: 222 } + multi: false + - delete: 
0 + filter: { _id: 3 } + multi: false + nsInfo: + - ns: *namespace + lsid: { $$exists: true } + txnNumber: { $$exists: true } + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 12 } + - { _id: 2, x: 222 } + - { _id: 4, x: 44 } + - description: "client bulkWrite with multi: true operations fails after retryable top-level error" + operations: + - object: testRunner + name: failPoint + arguments: + client: *client0 + failPoint: + configureFailPoint: failCommand + mode: + times: 1 + data: + failCommands: [ bulkWrite ] + errorCode: 189 # PrimarySteppedDown + errorLabels: [ RetryableWriteError ] + - object: *client0 + name: clientBulkWrite + arguments: + models: + - updateMany: + namespace: *namespace + filter: { _id: 1 } + update: + $inc: { x: 1 } + - deleteMany: + namespace: *namespace + filter: { _id: 3 } + expectError: + errorCode: 189 + errorLabelsContain: [ RetryableWriteError ] + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: true + ordered: true + ops: + - update: 0 + filter: { _id: 1 } + updateMods: + $inc: { x: 1 } + multi: true + - delete: 0 + filter: { _id: 3 } + multi: true + nsInfo: + - ns: *namespace + - description: "client bulkWrite with no multi: true operations succeeds after retryable writeConcernError" + operations: + - object: testRunner + name: failPoint + arguments: + client: *client0 + failPoint: + configureFailPoint: failCommand + mode: + times: 1 + data: + failCommands: [ bulkWrite ] + errorLabels: [ RetryableWriteError ] + writeConcernError: + code: 91 + errmsg: "Replication is being shut down" + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 4, x: 44 } + - updateOne: + namespace: *namespace + filter: { _id: 1 } + update: + $inc: { x: 1 } + - replaceOne: + namespace: *namespace + filter: { _id: 2 } + replacement: { x: 222 } + - deleteOne: + namespace: *namespace + filter: { _id: 3 } + verboseResults: true + expectResult: + insertedCount: 1 + upsertedCount: 0 + matchedCount: 2 + modifiedCount: 2 + deletedCount: 1 + insertResults: + 0: + insertedId: 4 + updateResults: + 1: + matchedCount: 1 + modifiedCount: 1 + upsertedId: { $$exists: false } + 2: + matchedCount: 1 + modifiedCount: 1 + upsertedId: { $$exists: false } + deleteResults: + 3: + deletedCount: 1 + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - insert: 0 + document: { _id: 4, x: 44 } + - update: 0 + filter: { _id: 1 } + updateMods: + $inc: { x: 1 } + multi: false + - update: 0 + filter: { _id: 2 } + updateMods: { x: 222 } + multi: false + - delete: 0 + filter: { _id: 3 } + multi: false + nsInfo: + - ns: *namespace + lsid: { $$exists: true } + txnNumber: { $$exists: true } + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - insert: 0 + document: { _id: 4, x: 44 } + - update: 0 + filter: { _id: 1 } + updateMods: + $inc: { x: 1 } + multi: false + - update: 0 + filter: { _id: 2 } + updateMods: { x: 222 } + multi: false + - delete: 0 + filter: { _id: 3 } + multi: false + nsInfo: + - ns: *namespace + lsid: { $$exists: true } + txnNumber: { $$exists: true } + - description: "client bulkWrite with multi: true operations fails after retryable 
writeConcernError" + operations: + - object: testRunner + name: failPoint + arguments: + client: *client0 + failPoint: + configureFailPoint: failCommand + mode: + times: 1 + data: + failCommands: [ bulkWrite ] + errorLabels: [ RetryableWriteError ] + writeConcernError: + code: 91 + errmsg: "Replication is being shut down" + - object: *client0 + name: clientBulkWrite + arguments: + models: + - updateMany: + namespace: *namespace + filter: { _id: 1 } + update: + $inc: { x: 1 } + - deleteMany: + namespace: *namespace + filter: { _id: 3 } + expectError: + writeConcernErrors: + - code: 91 + message: "Replication is being shut down" + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: true + ordered: true + ops: + - update: 0 + filter: { _id: 1 } + updateMods: + $inc: { x: 1 } + multi: true + - delete: 0 + filter: { _id: 3 } + multi: true + nsInfo: + - ns: *namespace diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-update-options.json b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-update-options.json new file mode 100644 index 000000000..93a2774e5 --- /dev/null +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-update-options.json @@ -0,0 +1,948 @@ +{ + "description": "client bulkWrite update options", + "schemaVersion": "1.1", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "array": [ + 1, + 2, + 3 + ] + }, + { + "_id": 2, + "array": [ + 1, + 2, + 3 + ] + }, + { + "_id": 3, + "array": [ + 1, + 2, + 3 + ] + }, + { + "_id": 4, + "array": [ + 1, + 2, + 3 + ] + } + ] + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0", + "collation": { + "locale": "simple" + }, + "hint": "_id_" + }, + "tests": [ + { + "description": "client bulkWrite update with arrayFilters", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "array.$[i]": 4 + } + }, + "arrayFilters": [ + { + "i": { + "$gte": 2 + } + } + ] + } + }, + { + "updateMany": { + "namespace": "crud-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "update": { + "$set": { + "array.$[i]": 5 + } + }, + "arrayFilters": [ + { + "i": { + "$gte": 2 + } + } + ] + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 3, + "modifiedCount": 3, + "deletedCount": 0, + "insertResults": {}, + "updateResults": { + "0": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + }, + "1": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedId": { + "$$exists": false + } + } + }, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + 
"errorsOnly": false, + "ordered": true, + "ops": [ + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$set": { + "array.$[i]": 4 + } + }, + "arrayFilters": [ + { + "i": { + "$gte": 2 + } + } + ], + "multi": false + }, + { + "update": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "updateMods": { + "$set": { + "array.$[i]": 5 + } + }, + "arrayFilters": [ + { + "i": { + "$gte": 2 + } + } + ], + "multi": true + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [ + { + "_id": 1, + "array": [ + 1, + 4, + 4 + ] + }, + { + "_id": 2, + "array": [ + 1, + 5, + 5 + ] + }, + { + "_id": 3, + "array": [ + 1, + 5, + 5 + ] + }, + { + "_id": 4, + "array": [ + 1, + 2, + 3 + ] + } + ] + } + ] + }, + { + "description": "client bulkWrite update with collation", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "array": [ + 1, + 2, + 4 + ] + } + }, + "collation": { + "locale": "simple" + } + } + }, + { + "updateMany": { + "namespace": "crud-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "update": { + "$set": { + "array": [ + 1, + 2, + 5 + ] + } + }, + "collation": { + "locale": "simple" + } + } + }, + { + "replaceOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 4 + }, + "replacement": { + "array": [ + 1, + 2, + 6 + ] + }, + "collation": { + "locale": "simple" + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 4, + "modifiedCount": 4, + "deletedCount": 0, + "insertResults": {}, + "updateResults": { + "0": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + }, + "1": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedId": { + "$$exists": false + } + }, + "2": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + } + }, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$set": { + "array": [ + 1, + 2, + 4 + ] + } + }, + "collation": { + "locale": "simple" + }, + "multi": false + }, + { + "update": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "updateMods": { + "$set": { + "array": [ + 1, + 2, + 5 + ] + } + }, + "collation": { + "locale": "simple" + }, + "multi": true + }, + { + "update": 0, + "filter": { + "_id": 4 + }, + "updateMods": { + "array": [ + 1, + 2, + 6 + ] + }, + "collation": { + "locale": "simple" + }, + "multi": false + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [ + { + "_id": 1, + "array": [ + 1, + 2, + 4 + ] + }, + { + "_id": 2, + "array": [ + 1, + 2, + 5 + ] + }, + { + "_id": 3, + "array": [ + 1, + 2, + 5 + ] + }, + { + "_id": 4, + "array": [ + 1, + 2, + 6 + ] + } + ] + } + ] + }, + { + "description": 
"client bulkWrite update with hint", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$set": { + "array": [ + 1, + 2, + 4 + ] + } + }, + "hint": "_id_" + } + }, + { + "updateMany": { + "namespace": "crud-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "update": { + "$set": { + "array": [ + 1, + 2, + 5 + ] + } + }, + "hint": "_id_" + } + }, + { + "replaceOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 4 + }, + "replacement": { + "array": [ + 1, + 2, + 6 + ] + }, + "hint": "_id_" + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 4, + "modifiedCount": 4, + "deletedCount": 0, + "insertResults": {}, + "updateResults": { + "0": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + }, + "1": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedId": { + "$$exists": false + } + }, + "2": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + } + }, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$set": { + "array": [ + 1, + 2, + 4 + ] + } + }, + "hint": "_id_", + "multi": false + }, + { + "update": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "updateMods": { + "$set": { + "array": [ + 1, + 2, + 5 + ] + } + }, + "hint": "_id_", + "multi": true + }, + { + "update": 0, + "filter": { + "_id": 4 + }, + "updateMods": { + "array": [ + 1, + 2, + 6 + ] + }, + "hint": "_id_", + "multi": false + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [ + { + "_id": 1, + "array": [ + 1, + 2, + 4 + ] + }, + { + "_id": 2, + "array": [ + 1, + 2, + 5 + ] + }, + { + "_id": 3, + "array": [ + 1, + 2, + 5 + ] + }, + { + "_id": 4, + "array": [ + 1, + 2, + 6 + ] + } + ] + } + ] + }, + { + "description": "client bulkWrite update with upsert", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 5 + }, + "update": { + "$set": { + "array": [ + 1, + 2, + 4 + ] + } + }, + "upsert": true + } + }, + { + "replaceOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 6 + }, + "replacement": { + "array": [ + 1, + 2, + 6 + ] + }, + "upsert": true + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 2, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": {}, + "updateResults": { + "0": { + "matchedCount": 1, + "modifiedCount": 0, + "upsertedId": 5 + }, + "1": { + "matchedCount": 1, + "modifiedCount": 0, + "upsertedId": 6 + } + }, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + 
"errorsOnly": false, + "ordered": true, + "ops": [ + { + "update": 0, + "filter": { + "_id": 5 + }, + "updateMods": { + "$set": { + "array": [ + 1, + 2, + 4 + ] + } + }, + "upsert": true, + "multi": false + }, + { + "update": 0, + "filter": { + "_id": 6 + }, + "updateMods": { + "array": [ + 1, + 2, + 6 + ] + }, + "upsert": true, + "multi": false + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [ + { + "_id": 1, + "array": [ + 1, + 2, + 3 + ] + }, + { + "_id": 2, + "array": [ + 1, + 2, + 3 + ] + }, + { + "_id": 3, + "array": [ + 1, + 2, + 3 + ] + }, + { + "_id": 4, + "array": [ + 1, + 2, + 3 + ] + }, + { + "_id": 5, + "array": [ + 1, + 2, + 4 + ] + }, + { + "_id": 6, + "array": [ + 1, + 2, + 6 + ] + } + ] + } + ] + } + ] +} diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-update-options.yml b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-update-options.yml new file mode 100644 index 000000000..fe188a490 --- /dev/null +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-update-options.yml @@ -0,0 +1,337 @@ +description: "client bulkWrite update options" +schemaVersion: "1.1" +runOnRequirements: + - minServerVersion: "8.0" + +createEntities: + - client: + id: &client0 client0 + observeEvents: [ commandStartedEvent ] + - database: + id: &database0 database0 + client: *client0 + databaseName: &database0Name crud-tests + - collection: + id: &collection0 collection0 + database: *database0 + collectionName: &collection0Name coll0 + +initialData: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, array: [ 1, 2, 3 ] } + - { _id: 2, array: [ 1, 2, 3 ] } + - { _id: 3, array: [ 1, 2, 3 ] } + - { _id: 4, array: [ 1, 2, 3 ] } + +_yamlAnchors: + namespace: &namespace "crud-tests.coll0" + collation: &collation { "locale": "simple" } + hint: &hint _id_ + +tests: + - description: "client bulkWrite update with arrayFilters" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - updateOne: + namespace: *namespace + filter: { _id: 1 } + update: + $set: + array.$[i]: 4 + arrayFilters: [ i: { $gte: 2 } ] + - updateMany: + namespace: *namespace + filter: + $and: [ { _id: { $gt: 1 } }, { _id: { $lte: 3 } } ] + update: + $set: + array.$[i]: 5 + arrayFilters: [ i: { $gte: 2 } ] + verboseResults: true + expectResult: + insertedCount: 0 + upsertedCount: 0 + matchedCount: 3 + modifiedCount: 3 + deletedCount: 0 + insertResults: {} + updateResults: + 0: + matchedCount: 1 + modifiedCount: 1 + upsertedId: { $$exists: false } + 1: + matchedCount: 2 + modifiedCount: 2 + upsertedId: { $$exists: false } + deleteResults: {} + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - update: 0 + filter: { _id: 1 } + updateMods: + $set: + array.$[i]: 4 + arrayFilters: [ i: { $gte: 2 } ] + multi: false + - update: 0 + filter: + $and: [ { _id: { $gt: 1 } }, { _id: { $lte: 3 } } ] + updateMods: + $set: + array.$[i]: 5 + arrayFilters: [ i: { $gte: 2 } ] + multi: true + nsInfo: + - ns: *namespace + outcome: + - databaseName: *database0Name + collectionName: *collection0Name + documents: + - { _id: 1, array: [ 1, 4, 4 ] } + - { _id: 2, array: [ 1, 5, 5 ] } + - { _id: 3, array: [ 1, 5, 5 ] } + - { _id: 4, array: [ 1, 2, 3 ] } + - description: 
"client bulkWrite update with collation" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - updateOne: + namespace: *namespace + filter: { _id: 1 } + update: { $set: { array: [ 1, 2, 4 ] } } + collation: *collation + - updateMany: + namespace: *namespace + filter: + $and: [ { _id: { $gt: 1 } }, { _id: { $lte: 3 } } ] + update: { $set: { array: [ 1, 2, 5 ] } } + collation: *collation + - replaceOne: + namespace: *namespace + filter: { _id: 4 } + replacement: { array: [ 1, 2, 6 ] } + collation: *collation + verboseResults: true + expectResult: + insertedCount: 0 + upsertedCount: 0 + matchedCount: 4 + modifiedCount: 4 + deletedCount: 0 + insertResults: {} + updateResults: + 0: + matchedCount: 1 + modifiedCount: 1 + upsertedId: { $$exists: false } + 1: + matchedCount: 2 + modifiedCount: 2 + upsertedId: { $$exists: false } + 2: + matchedCount: 1 + modifiedCount: 1 + upsertedId: { $$exists: false } + deleteResults: {} + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - update: 0 + filter: { _id: 1 } + updateMods: { $set: { array: [ 1, 2, 4 ] } } + collation: *collation + multi: false + - update: 0 + filter: + $and: [ { _id: { $gt: 1 } }, { _id: { $lte: 3 } } ] + updateMods: { $set: { array: [ 1, 2, 5 ] } } + collation: *collation + multi: true + - update: 0 + filter: { _id: 4 } + updateMods: { array: [ 1, 2, 6 ] } + collation: *collation + multi: false + nsInfo: + - ns: *namespace + outcome: + - databaseName: *database0Name + collectionName: *collection0Name + documents: + - { _id: 1, array: [ 1, 2, 4 ] } + - { _id: 2, array: [ 1, 2, 5 ] } + - { _id: 3, array: [ 1, 2, 5 ] } + - { _id: 4, array: [ 1, 2, 6 ] } + - description: "client bulkWrite update with hint" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - updateOne: + namespace: *namespace + filter: { _id: 1 } + update: { $set: { array: [ 1, 2, 4 ] } } + hint: *hint + - updateMany: + namespace: *namespace + filter: + $and: [ { _id: { $gt: 1 } }, { _id: { $lte: 3 } } ] + update: { $set: { array: [ 1, 2, 5 ] } } + hint: *hint + - replaceOne: + namespace: *namespace + filter: { _id: 4 } + replacement: { array: [ 1, 2, 6 ] } + hint: *hint + verboseResults: true + expectResult: + insertedCount: 0 + upsertedCount: 0 + matchedCount: 4 + modifiedCount: 4 + deletedCount: 0 + insertResults: {} + updateResults: + 0: + matchedCount: 1 + modifiedCount: 1 + upsertedId: { $$exists: false } + 1: + matchedCount: 2 + modifiedCount: 2 + upsertedId: { $$exists: false } + 2: + matchedCount: 1 + modifiedCount: 1 + upsertedId: { $$exists: false } + deleteResults: {} + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - update: 0 + filter: { _id: 1 } + updateMods: { $set: { array: [ 1, 2, 4 ] } } + hint: *hint + multi: false + - update: 0 + filter: + $and: [ { _id: { $gt: 1 } }, { _id: { $lte: 3 } } ] + updateMods: { $set: { array: [ 1, 2, 5 ] } } + hint: *hint + multi: true + - update: 0 + filter: { _id: 4 } + updateMods: { array: [ 1, 2, 6 ] } + hint: *hint + multi: false + nsInfo: + - ns: *namespace + outcome: + - databaseName: *database0Name + collectionName: *collection0Name + documents: + - { _id: 1, array: [ 1, 2, 4 ] } + - { _id: 2, array: [ 1, 2, 5 ] } + - { _id: 3, array: [ 1, 2, 5 ] } + - { _id: 4, array: [ 1, 
2, 6 ] } + - description: "client bulkWrite update with upsert" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - updateOne: + namespace: *namespace + filter: { _id: 5 } + update: { $set: { array: [ 1, 2, 4 ] } } + upsert: true + - replaceOne: + namespace: *namespace + filter: { _id: 6 } + replacement: { array: [ 1, 2, 6 ] } + upsert: true + verboseResults: true + expectResult: + insertedCount: 0 + upsertedCount: 2 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 0 + insertResults: {} + updateResults: + 0: + matchedCount: 1 + modifiedCount: 0 + upsertedId: 5 + 1: + matchedCount: 1 + modifiedCount: 0 + upsertedId: 6 + deleteResults: {} + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - update: 0 + filter: { _id: 5 } + updateMods: { $set: { array: [ 1, 2, 4 ] } } + upsert: true + multi: false + - update: 0 + filter: { _id: 6 } + updateMods: { array: [ 1, 2, 6 ] } + upsert: true + multi: false + nsInfo: + - ns: *namespace + outcome: + - databaseName: *database0Name + collectionName: *collection0Name + documents: + - { _id: 1, array: [ 1, 2, 3 ] } + - { _id: 2, array: [ 1, 2, 3 ] } + - { _id: 3, array: [ 1, 2, 3 ] } + - { _id: 4, array: [ 1, 2, 3 ] } + - { _id: 5, array: [ 1, 2, 4 ] } + - { _id: 6, array: [ 1, 2, 6 ] } diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-update-pipeline.json b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-update-pipeline.json new file mode 100644 index 000000000..57b6c9c1b --- /dev/null +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-update-pipeline.json @@ -0,0 +1,257 @@ +{ + "description": "client bulkWrite update pipeline", + "schemaVersion": "1.1", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 1 + }, + { + "_id": 2, + "x": 2 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0" + }, + "tests": [ + { + "description": "client bulkWrite updateOne with pipeline", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "update": [ + { + "$addFields": { + "foo": 1 + } + } + ] + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 1, + "modifiedCount": 1, + "deletedCount": 0, + "insertResults": {}, + "updateResults": { + "0": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + } + }, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": [ + { + "$addFields": { + "foo": 1 + } + } + ], + "multi": false + } + ], + "nsInfo": [ 
+ { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [ + { + "_id": 1, + "x": 1, + "foo": 1 + }, + { + "_id": 2, + "x": 2 + } + ] + } + ] + }, + { + "description": "client bulkWrite updateMany with pipeline", + "operations": [ + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateMany": { + "namespace": "crud-tests.coll0", + "filter": {}, + "update": [ + { + "$addFields": { + "foo": 1 + } + } + ] + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 0, + "upsertedCount": 0, + "matchedCount": 2, + "modifiedCount": 2, + "deletedCount": 0, + "insertResults": {}, + "updateResults": { + "0": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedId": { + "$$exists": false + } + } + }, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "update": 0, + "filter": {}, + "updateMods": [ + { + "$addFields": { + "foo": 1 + } + } + ], + "multi": true + } + ], + "nsInfo": [ + { + "ns": "crud-tests.coll0" + } + ] + } + } + } + ] + } + ], + "outcome": [ + { + "databaseName": "crud-tests", + "collectionName": "coll0", + "documents": [ + { + "_id": 1, + "x": 1, + "foo": 1 + }, + { + "_id": 2, + "x": 2, + "foo": 1 + } + ] + } + ] + } + ] +} diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-update-pipeline.yml b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-update-pipeline.yml new file mode 100644 index 000000000..fe0e29a50 --- /dev/null +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-update-pipeline.yml @@ -0,0 +1,132 @@ +description: "client bulkWrite update pipeline" +schemaVersion: "1.1" +runOnRequirements: + - minServerVersion: "8.0" + +createEntities: + - client: + id: &client0 client0 + observeEvents: [ commandStartedEvent ] + - database: + id: &database0 database0 + client: *client0 + databaseName: &database0Name crud-tests + - collection: + id: &collection0 collection0 + database: *database0 + collectionName: &collection0Name coll0 + +initialData: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - {_id: 1, x: 1} + - {_id: 2, x: 2} + +_yamlAnchors: + namespace: &namespace "crud-tests.coll0" + +tests: + - description: "client bulkWrite updateOne with pipeline" + operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - updateOne: + namespace: *namespace + filter: { _id: 1 } + update: + - $addFields: + foo: 1 + verboseResults: true + expectResult: + insertedCount: 0 + upsertedCount: 0 + matchedCount: 1 + modifiedCount: 1 + deletedCount: 0 + insertResults: {} + updateResults: + 0: + matchedCount: 1 + modifiedCount: 1 + upsertedId: { "$$exists": false } + deleteResults: {} + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - update: 0 + filter: { _id: 1 } + updateMods: + - $addFields: + foo: 1 + multi: false + nsInfo: + - ns: *namespace + outcome: + - databaseName: *database0Name + collectionName: *collection0Name + documents: + - {_id: 1, x: 1, foo: 1} + - {_id: 2, x: 2 } + + - description: "client bulkWrite updateMany with pipeline" + 
operations: + - object: *client0 + name: clientBulkWrite + arguments: + models: + - updateMany: + namespace: *namespace + filter: {} + update: + - $addFields: + foo: 1 + verboseResults: true + expectResult: + insertedCount: 0 + upsertedCount: 0 + matchedCount: 2 + modifiedCount: 2 + deletedCount: 0 + insertResults: {} + updateResults: + 0: + matchedCount: 2 + modifiedCount: 2 + upsertedId: { "$$exists": false } + deleteResults: {} + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - update: 0 + filter: { } + updateMods: + - $addFields: + foo: 1 + multi: true + nsInfo: + - ns: *namespace + outcome: + - databaseName: *database0Name + collectionName: *collection0Name + documents: + - {_id: 1, x: 1, foo: 1} + - {_id: 2, x: 2, foo: 1} diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-update-validation.json b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-update-validation.json new file mode 100644 index 000000000..1ac3e8d04 --- /dev/null +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-update-validation.json @@ -0,0 +1,216 @@ +{ + "description": "client-bulkWrite-update-validation", + "schemaVersion": "1.0", + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "crud-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "crud-tests.coll0" + }, + "tests": [ + { + "description": "client bulkWrite replaceOne prohibits atomic modifiers", + "operations": [ + { + "name": "clientBulkWrite", + "object": "client0", + "arguments": { + "models": [ + { + "replaceOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "replacement": { + "$set": { + "x": 22 + } + } + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "client bulkWrite updateOne requires atomic modifiers", + "operations": [ + { + "name": "clientBulkWrite", + "object": "client0", + "arguments": { + "models": [ + { + "updateOne": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "x": 22 + } + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + }, + { + "description": "client bulkWrite updateMany requires atomic modifiers", + "operations": [ + { + "name": "clientBulkWrite", + "object": "client0", + "arguments": { + "models": [ + { + "updateMany": { + "namespace": "crud-tests.coll0", + "filter": { + "_id": { + "$gt": 1 + } + }, + "update": { + 
"x": 44 + } + } + } + ] + }, + "expectError": { + "isClientError": true + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "crud-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-update-validation.yml b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-update-validation.yml new file mode 100644 index 000000000..f597e0762 --- /dev/null +++ b/src/test/spec/json/crud/unified/new-bulk-write/client-bulkWrite-update-validation.yml @@ -0,0 +1,79 @@ +description: "client-bulkWrite-update-validation" + +schemaVersion: "1.0" + +createEntities: + - client: + id: &client0 client0 + observeEvents: [ commandStartedEvent ] + - database: + id: &database0 database0 + client: *client0 + databaseName: &database0Name crud-tests + - collection: + id: &collection0 collection0 + database: *database0 + collectionName: &collection0Name coll0 + +initialData: &initialData + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + +_yamlAnchors: + namespace: &namespace "crud-tests.coll0" + +tests: + - description: "client bulkWrite replaceOne prohibits atomic modifiers" + operations: + - name: clientBulkWrite + object: *client0 + arguments: + models: + - replaceOne: + namespace: *namespace + filter: { _id: 1 } + replacement: { $set: { x: 22 } } + expectError: + isClientError: true + expectEvents: + - client: *client0 + events: [] + outcome: *initialData + + - description: "client bulkWrite updateOne requires atomic modifiers" + operations: + - name: clientBulkWrite + object: *client0 + arguments: + models: + - updateOne: + namespace: *namespace + filter: { _id: 1 } + update: { x: 22 } + expectError: + isClientError: true + expectEvents: + - client: *client0 + events: [] + outcome: *initialData + + - description: "client bulkWrite updateMany requires atomic modifiers" + operations: + - name: clientBulkWrite + object: *client0 + arguments: + models: + - updateMany: + namespace: *namespace + filter: { _id: { $gt: 1 } } + update: { x: 44 } + expectError: + isClientError: true + expectEvents: + - client: *client0 + events: [] + outcome: *initialData diff --git a/src/test/spec/json/retryable-writes/unified/client-bulkWrite-clientErrors.json b/src/test/spec/json/retryable-writes/unified/client-bulkWrite-clientErrors.json new file mode 100644 index 000000000..64b1988ff --- /dev/null +++ b/src/test/spec/json/retryable-writes/unified/client-bulkWrite-clientErrors.json @@ -0,0 +1,350 @@ +{ + "description": "client bulkWrite retryable writes with client errors", + "schemaVersion": "1.20", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ], + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + 
{ + "_id": 3, + "x": 33 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "retryable-writes-tests.coll0" + }, + "tests": [ + { + "description": "client bulkWrite with one network error succeeds after retry", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "closeConnection": true + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "retryable-writes-tests.coll0", + "document": { + "_id": 4, + "x": 44 + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "0": { + "insertedId": 4 + } + }, + "updateResults": {}, + "deleteResults": {} + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "lsid": { + "$$exists": true + }, + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "lsid": { + "$$exists": true + }, + "txnNumber": { + "$$exists": true + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + }, + { + "description": "client bulkWrite with two network errors fails after retry", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "closeConnection": true + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "retryable-writes-tests.coll0", + "document": { + "_id": 4, + "x": 44 + } + } + } + ], + "verboseResults": true + }, + "expectError": { + "isClientError": true, + "errorLabelsContain": [ + "RetryableWriteError" + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "lsid": { + "$$exists": true + }, + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + } + ], + "nsInfo": [ + { + 
"ns": "retryable-writes-tests.coll0" + } + ], + "lsid": { + "$$exists": true + }, + "txnNumber": { + "$$exists": true + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ] + } + ] +} diff --git a/src/test/spec/json/retryable-writes/unified/client-bulkWrite-clientErrors.yml b/src/test/spec/json/retryable-writes/unified/client-bulkWrite-clientErrors.yml new file mode 100644 index 000000000..d1b08ec37 --- /dev/null +++ b/src/test/spec/json/retryable-writes/unified/client-bulkWrite-clientErrors.yml @@ -0,0 +1,172 @@ +description: "client bulkWrite retryable writes with client errors" +schemaVersion: "1.20" +runOnRequirements: + - minServerVersion: "8.0" + topologies: + - replicaset + - sharded + - load-balanced + +createEntities: + - client: + id: &client0 client0 + observeEvents: [ commandStartedEvent ] + useMultipleMongoses: false + - database: + id: &database0 database0 + client: *client0 + databaseName: &database0Name retryable-writes-tests + - collection: + id: &collection0 collection0 + database: *database0 + collectionName: &collection0Name coll0 + +initialData: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + +_yamlAnchors: + namespace: &namespace "retryable-writes-tests.coll0" + +tests: + - description: "client bulkWrite with one network error succeeds after retry" + operations: + - object: testRunner + name: failPoint + arguments: + client: *client0 + failPoint: + configureFailPoint: failCommand + mode: + times: 1 + data: + failCommands: [ bulkWrite ] + closeConnection: true + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 4, x: 44 } + verboseResults: true + expectResult: + insertedCount: 1 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 0 + insertResults: + 0: + insertedId: 4 + updateResults: {} + deleteResults: {} + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - insert: 0 + document: { _id: 4, x: 44 } + nsInfo: + - ns: *namespace + # An implicit session is included with the transaction number: + lsid: { "$$exists": true } + txnNumber: { "$$exists": true } + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - insert: 0 + document: { _id: 4, x: 44 } + nsInfo: + - ns: *namespace + # An implicit session is included with the transaction number: + lsid: { "$$exists": true } + txnNumber: { "$$exists": true } + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + - { _id: 4, x: 44 } + - description: "client bulkWrite with two network errors fails after retry" + operations: + - object: testRunner + name: failPoint + arguments: + client: *client0 + failPoint: + configureFailPoint: failCommand + mode: + times: 2 + data: + failCommands: [ bulkWrite ] + closeConnection: true + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 4, x: 44 } + verboseResults: true + expectError: + isClientError: true + 
errorLabelsContain: ["RetryableWriteError"] # Error label added by driver. + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - insert: 0 + document: { _id: 4, x: 44 } + nsInfo: + - ns: *namespace + # An implicit session is included with the transaction number: + lsid: { "$$exists": true } + txnNumber: { "$$exists": true } + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - insert: 0 + document: { _id: 4, x: 44 } + nsInfo: + - ns: *namespace + # An implicit session is included with the transaction number: + lsid: { "$$exists": true } + txnNumber: { "$$exists": true } + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } diff --git a/src/test/spec/json/retryable-writes/unified/client-bulkWrite-serverErrors.json b/src/test/spec/json/retryable-writes/unified/client-bulkWrite-serverErrors.json new file mode 100644 index 000000000..f9812241f --- /dev/null +++ b/src/test/spec/json/retryable-writes/unified/client-bulkWrite-serverErrors.json @@ -0,0 +1,872 @@ +{ + "description": "client bulkWrite retryable writes", + "schemaVersion": "1.20", + "runOnRequirements": [ + { + "minServerVersion": "8.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ], + "useMultipleMongoses": false + } + }, + { + "client": { + "id": "clientRetryWritesFalse", + "uriOptions": { + "retryWrites": false + }, + "observeEvents": [ + "commandStartedEvent" + ], + "useMultipleMongoses": false + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "retryable-writes-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + } + ], + "initialData": [ + { + "collectionName": "coll0", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + } + ] + } + ], + "_yamlAnchors": { + "namespace": "retryable-writes-tests.coll0" + }, + "tests": [ + { + "description": "client bulkWrite with no multi: true operations succeeds after retryable top-level error", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "retryable-writes-tests.coll0", + "document": { + "_id": 4, + "x": 44 + } + } + }, + { + "updateOne": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "replaceOne": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 2 + }, + "replacement": { + "x": 222 + } + } + }, + { + "deleteOne": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 3 + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 
2, + "modifiedCount": 2, + "deletedCount": 1, + "insertResults": { + "0": { + "insertedId": 4 + } + }, + "updateResults": { + "1": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + }, + "2": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + } + }, + "deleteResults": { + "3": { + "deletedCount": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + }, + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "update": 0, + "filter": { + "_id": 2 + }, + "updateMods": { + "x": 222 + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": 3 + }, + "multi": false + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "lsid": { + "$$exists": true + }, + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + }, + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "update": 0, + "filter": { + "_id": 2 + }, + "updateMods": { + "x": 222 + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": 3 + }, + "multi": false + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "lsid": { + "$$exists": true + }, + "txnNumber": { + "$$exists": true + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "retryable-writes-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 222 + }, + { + "_id": 4, + "x": 44 + } + ] + } + ] + }, + { + "description": "client bulkWrite with multi: true operations fails after retryable top-level error", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateMany": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "deleteMany": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 3 + } + } + } + ] + }, + "expectError": { + "errorCode": 189, + "errorLabelsContain": [ + "RetryableWriteError" + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": true, + "ordered": true, + "ops": [ + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": true + }, + { + "delete": 0, + "filter": { + "_id": 3 + }, + "multi": true + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ] + } + } + } + ] + } + ] + }, + { + "description": "client bulkWrite with no multi: true 
operations succeeds after retryable writeConcernError", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "retryable-writes-tests.coll0", + "document": { + "_id": 4, + "x": 44 + } + } + }, + { + "updateOne": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "replaceOne": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 2 + }, + "replacement": { + "x": 222 + } + } + }, + { + "deleteOne": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 3 + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 2, + "modifiedCount": 2, + "deletedCount": 1, + "insertResults": { + "0": { + "insertedId": 4 + } + }, + "updateResults": { + "1": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + }, + "2": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + } + }, + "deleteResults": { + "3": { + "deletedCount": 1 + } + } + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + }, + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "update": 0, + "filter": { + "_id": 2 + }, + "updateMods": { + "x": 222 + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": 3 + }, + "multi": false + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "lsid": { + "$$exists": true + }, + "txnNumber": { + "$$exists": true + } + } + } + }, + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + }, + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "update": 0, + "filter": { + "_id": 2 + }, + "updateMods": { + "x": 222 + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": 3 + }, + "multi": false + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ], + "lsid": { + "$$exists": true + }, + "txnNumber": { + "$$exists": true + } + } + } + } + ] + } + ] + }, + { + "description": "client bulkWrite with multi: true operations fails after retryable writeConcernError", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "client0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorLabels": [ + "RetryableWriteError" + ], + "writeConcernError": { + "code": 91, + "errmsg": "Replication is being shut down" + } + } + } + } + }, + { + 
"object": "client0", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "updateMany": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "deleteMany": { + "namespace": "retryable-writes-tests.coll0", + "filter": { + "_id": 3 + } + } + } + ] + }, + "expectError": { + "writeConcernErrors": [ + { + "code": 91, + "message": "Replication is being shut down" + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": true, + "ordered": true, + "ops": [ + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { + "$inc": { + "x": 1 + } + }, + "multi": true + }, + { + "delete": 0, + "filter": { + "_id": 3 + }, + "multi": true + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ] + } + } + } + ] + } + ] + }, + { + "description": "client bulkWrite with retryWrites: false does not retry", + "operations": [ + { + "object": "testRunner", + "name": "failPoint", + "arguments": { + "client": "clientRetryWritesFalse", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorCode": 189, + "errorLabels": [ + "RetryableWriteError" + ] + } + } + } + }, + { + "object": "clientRetryWritesFalse", + "name": "clientBulkWrite", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "retryable-writes-tests.coll0", + "document": { + "_id": 4, + "x": 44 + } + } + } + ] + }, + "expectError": { + "errorCode": 189, + "errorLabelsContain": [ + "RetryableWriteError" + ] + } + } + ], + "expectEvents": [ + { + "client": "clientRetryWritesFalse", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "bulkWrite": 1, + "errorsOnly": true, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 4, + "x": 44 + } + } + ], + "nsInfo": [ + { + "ns": "retryable-writes-tests.coll0" + } + ] + } + } + } + ] + } + ] + } + ] +} diff --git a/src/test/spec/json/retryable-writes/unified/client-bulkWrite-serverErrors.yml b/src/test/spec/json/retryable-writes/unified/client-bulkWrite-serverErrors.yml new file mode 100644 index 000000000..d77e491a9 --- /dev/null +++ b/src/test/spec/json/retryable-writes/unified/client-bulkWrite-serverErrors.yml @@ -0,0 +1,412 @@ +description: "client bulkWrite retryable writes" +schemaVersion: "1.20" +runOnRequirements: + - minServerVersion: "8.0" + topologies: + - replicaset + - sharded + - load-balanced + +createEntities: + - client: + id: &client0 client0 + observeEvents: [ commandStartedEvent ] + useMultipleMongoses: false + - client: + id: &clientRetryWritesFalse clientRetryWritesFalse + uriOptions: + retryWrites: false + observeEvents: [ commandStartedEvent ] + useMultipleMongoses: false + - database: + id: &database0 database0 + client: *client0 + databaseName: &database0Name retryable-writes-tests + - collection: + id: &collection0 collection0 + database: *database0 + collectionName: &collection0Name coll0 + +initialData: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + +_yamlAnchors: + namespace: &namespace "retryable-writes-tests.coll0" + +tests: + - description: "client bulkWrite with no multi: true operations succeeds after retryable 
top-level error" + operations: + - object: testRunner + name: failPoint + arguments: + client: *client0 + failPoint: + configureFailPoint: failCommand + mode: + times: 1 + data: + failCommands: [ bulkWrite ] + errorCode: 189 # PrimarySteppedDown + errorLabels: [ RetryableWriteError ] + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 4, x: 44 } + - updateOne: + namespace: *namespace + filter: { _id: 1 } + update: + $inc: { x: 1 } + - replaceOne: + namespace: *namespace + filter: { _id: 2 } + replacement: { x: 222 } + - deleteOne: + namespace: *namespace + filter: { _id: 3 } + verboseResults: true + expectResult: + insertedCount: 1 + upsertedCount: 0 + matchedCount: 2 + modifiedCount: 2 + deletedCount: 1 + insertResults: + 0: + insertedId: 4 + updateResults: + 1: + matchedCount: 1 + modifiedCount: 1 + upsertedId: { $$exists: false } + 2: + matchedCount: 1 + modifiedCount: 1 + upsertedId: { $$exists: false } + deleteResults: + 3: + deletedCount: 1 + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - insert: 0 + document: { _id: 4, x: 44 } + - update: 0 + filter: { _id: 1 } + updateMods: + $inc: { x: 1 } + multi: false + - update: 0 + filter: { _id: 2 } + updateMods: { x: 222 } + multi: false + - delete: 0 + filter: { _id: 3 } + multi: false + nsInfo: + - ns: *namespace + lsid: { $$exists: true } + txnNumber: { $$exists: true } + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - insert: 0 + document: { _id: 4, x: 44 } + - update: 0 + filter: { _id: 1 } + updateMods: + $inc: { x: 1 } + multi: false + - update: 0 + filter: { _id: 2 } + updateMods: { x: 222 } + multi: false + - delete: 0 + filter: { _id: 3 } + multi: false + nsInfo: + - ns: *namespace + lsid: { $$exists: true } + txnNumber: { $$exists: true } + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 12 } + - { _id: 2, x: 222 } + - { _id: 4, x: 44 } + - description: "client bulkWrite with multi: true operations fails after retryable top-level error" + operations: + - object: testRunner + name: failPoint + arguments: + client: *client0 + failPoint: + configureFailPoint: failCommand + mode: + times: 1 + data: + failCommands: [ bulkWrite ] + errorCode: 189 # PrimarySteppedDown + errorLabels: [ RetryableWriteError ] + - object: *client0 + name: clientBulkWrite + arguments: + models: + - updateMany: + namespace: *namespace + filter: { _id: 1 } + update: + $inc: { x: 1 } + - deleteMany: + namespace: *namespace + filter: { _id: 3 } + expectError: + errorCode: 189 + errorLabelsContain: [ RetryableWriteError ] + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: true + ordered: true + ops: + - update: 0 + filter: { _id: 1 } + updateMods: + $inc: { x: 1 } + multi: true + - delete: 0 + filter: { _id: 3 } + multi: true + nsInfo: + - ns: *namespace + - description: "client bulkWrite with no multi: true operations succeeds after retryable writeConcernError" + operations: + - object: testRunner + name: failPoint + arguments: + client: *client0 + failPoint: + configureFailPoint: failCommand + mode: + times: 1 + data: + failCommands: [ bulkWrite ] + errorLabels: [ RetryableWriteError ] + 
writeConcernError: + code: 91 + errmsg: "Replication is being shut down" + - object: *client0 + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 4, x: 44 } + - updateOne: + namespace: *namespace + filter: { _id: 1 } + update: + $inc: { x: 1 } + - replaceOne: + namespace: *namespace + filter: { _id: 2 } + replacement: { x: 222 } + - deleteOne: + namespace: *namespace + filter: { _id: 3 } + verboseResults: true + expectResult: + insertedCount: 1 + upsertedCount: 0 + matchedCount: 2 + modifiedCount: 2 + deletedCount: 1 + insertResults: + 0: + insertedId: 4 + updateResults: + 1: + matchedCount: 1 + modifiedCount: 1 + upsertedId: { $$exists: false } + 2: + matchedCount: 1 + modifiedCount: 1 + upsertedId: { $$exists: false } + deleteResults: + 3: + deletedCount: 1 + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - insert: 0 + document: { _id: 4, x: 44 } + - update: 0 + filter: { _id: 1 } + updateMods: + $inc: { x: 1 } + multi: false + - update: 0 + filter: { _id: 2 } + updateMods: { x: 222 } + multi: false + - delete: 0 + filter: { _id: 3 } + multi: false + nsInfo: + - ns: *namespace + lsid: { $$exists: true } + txnNumber: { $$exists: true } + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - insert: 0 + document: { _id: 4, x: 44 } + - update: 0 + filter: { _id: 1 } + updateMods: + $inc: { x: 1 } + multi: false + - update: 0 + filter: { _id: 2 } + updateMods: { x: 222 } + multi: false + - delete: 0 + filter: { _id: 3 } + multi: false + nsInfo: + - ns: *namespace + lsid: { $$exists: true } + txnNumber: { $$exists: true } + - description: "client bulkWrite with multi: true operations fails after retryable writeConcernError" + operations: + - object: testRunner + name: failPoint + arguments: + client: *client0 + failPoint: + configureFailPoint: failCommand + mode: + times: 1 + data: + failCommands: [ bulkWrite ] + errorLabels: [ RetryableWriteError ] + writeConcernError: + code: 91 + errmsg: "Replication is being shut down" + - object: *client0 + name: clientBulkWrite + arguments: + models: + - updateMany: + namespace: *namespace + filter: { _id: 1 } + update: + $inc: { x: 1 } + - deleteMany: + namespace: *namespace + filter: { _id: 3 } + expectError: + writeConcernErrors: + - code: 91 + message: "Replication is being shut down" + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: true + ordered: true + ops: + - update: 0 + filter: { _id: 1 } + updateMods: + $inc: { x: 1 } + multi: true + - delete: 0 + filter: { _id: 3 } + multi: true + nsInfo: + - ns: *namespace + - description: "client bulkWrite with retryWrites: false does not retry" + operations: + - object: testRunner + name: failPoint + arguments: + client: *clientRetryWritesFalse + failPoint: + configureFailPoint: failCommand + mode: + times: 1 + data: + failCommands: [ bulkWrite ] + errorCode: 189 # PrimarySteppedDown + errorLabels: [ RetryableWriteError ] + - object: *clientRetryWritesFalse + name: clientBulkWrite + arguments: + models: + - insertOne: + namespace: *namespace + document: { _id: 4, x: 44 } + expectError: + errorCode: 189 + errorLabelsContain: [ RetryableWriteError ] + expectEvents: + - client: *clientRetryWritesFalse + events: + - 
commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + bulkWrite: 1 + errorsOnly: true + ordered: true + ops: + - insert: 0 + document: { _id: 4, x: 44 } + nsInfo: + - ns: *namespace diff --git a/src/test/spec/json/retryable-writes/unified/handshakeError.json b/src/test/spec/json/retryable-writes/unified/handshakeError.json index df37bd723..3c4646375 100644 --- a/src/test/spec/json/retryable-writes/unified/handshakeError.json +++ b/src/test/spec/json/retryable-writes/unified/handshakeError.json @@ -53,6 +53,222 @@ } ], "tests": [ + { + "description": "client.clientBulkWrite succeeds after retryable handshake network error", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "clientBulkWrite", + "object": "client", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "retryable-writes-handshake-tests.coll", + "document": { + "_id": 8, + "x": 88 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": "bulkWrite" + } + }, + { + "commandSucceededEvent": { + "commandName": "bulkWrite" + } + } + ] + } + ] + }, + { + "description": "client.clientBulkWrite succeeds after retryable handshake server error (ShutdownInProgress)", + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ], + "operations": [ + { + "name": "failPoint", + "object": "testRunner", + "arguments": { + "client": "client", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 2 + }, + "data": { + "failCommands": [ + "ping", + "saslContinue" + ], + "closeConnection": true + } + } + } + }, + { + "name": "runCommand", + "object": "database", + "arguments": { + "commandName": "ping", + "command": { + "ping": 1 + } + }, + "expectError": { + "isError": true + } + }, + { + "name": "clientBulkWrite", + "object": "client", + "arguments": { + "models": [ + { + "insertOne": { + "namespace": "retryable-writes-handshake-tests.coll", + "document": { + "_id": 8, + "x": 88 + } + } + } + ] + } + } + ], + "expectEvents": [ + { + "client": "client", + "eventType": "cmap", + "events": [ + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + }, + { + "connectionCheckOutStartedEvent": {} + } + ] + }, + { + "client": "client", + "events": [ + { + "commandStartedEvent": { + "command": { + "ping": 1 + }, + "databaseName": "retryable-writes-handshake-tests" + } + }, + { + "commandFailedEvent": { + "commandName": "ping" + } + }, + { + "commandStartedEvent": { + "commandName": 
"bulkWrite" + } + }, + { + "commandSucceededEvent": { + "commandName": "bulkWrite" + } + } + ] + } + ] + }, { "description": "collection.insertOne succeeds after retryable handshake network error", "operations": [ diff --git a/src/test/spec/json/retryable-writes/unified/handshakeError.yml b/src/test/spec/json/retryable-writes/unified/handshakeError.yml index 9b2774bc7..131bbf2e5 100644 --- a/src/test/spec/json/retryable-writes/unified/handshakeError.yml +++ b/src/test/spec/json/retryable-writes/unified/handshakeError.yml @@ -50,6 +50,96 @@ tests: # - Triggers failpoint (second time). # - Tests whether operation successfully retries the handshake and succeeds. + - description: "client.clientBulkWrite succeeds after retryable handshake network error" + runOnRequirements: + - minServerVersion: "8.0" # `bulkWrite` added to server 8.0 + operations: + - name: failPoint + object: testRunner + arguments: + client: *client + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: [ping, saslContinue] + closeConnection: true + - name: runCommand + object: *database + arguments: { commandName: ping, command: { ping: 1 } } + expectError: { isError: true } + - name: clientBulkWrite + object: *client + arguments: + models: + - insertOne: + namespace: retryable-writes-handshake-tests.coll + document: { _id: 8, x: 88 } + expectEvents: + - client: *client + eventType: cmap + events: + - { connectionCheckOutStartedEvent: {} } + - { connectionCheckOutStartedEvent: {} } + - { connectionCheckOutStartedEvent: {} } + - { connectionCheckOutStartedEvent: {} } + - client: *client + events: + - commandStartedEvent: + command: { ping: 1 } + databaseName: *databaseName + - commandFailedEvent: + commandName: ping + - commandStartedEvent: + commandName: bulkWrite + - commandSucceededEvent: + commandName: bulkWrite + + - description: "client.clientBulkWrite succeeds after retryable handshake server error (ShutdownInProgress)" + runOnRequirements: + - minServerVersion: "8.0" # `bulkWrite` added to server 8.0 + operations: + - name: failPoint + object: testRunner + arguments: + client: *client + failPoint: + configureFailPoint: failCommand + mode: { times: 2 } + data: + failCommands: [ping, saslContinue] + closeConnection: true + - name: runCommand + object: *database + arguments: { commandName: ping, command: { ping: 1 } } + expectError: { isError: true } + - name: clientBulkWrite + object: *client + arguments: + models: + - insertOne: + namespace: retryable-writes-handshake-tests.coll + document: { _id: 8, x: 88 } + expectEvents: + - client: *client + eventType: cmap + events: + - { connectionCheckOutStartedEvent: {} } + - { connectionCheckOutStartedEvent: {} } + - { connectionCheckOutStartedEvent: {} } + - { connectionCheckOutStartedEvent: {} } + - client: *client + events: + - commandStartedEvent: + command: { ping: 1 } + databaseName: *databaseName + - commandFailedEvent: + commandName: ping + - commandStartedEvent: + commandName: bulkWrite + - commandSucceededEvent: + commandName: bulkWrite + - description: "collection.insertOne succeeds after retryable handshake network error" operations: - name: failPoint diff --git a/src/test/spec/json/transactions/unified/client-bulkWrite.json b/src/test/spec/json/transactions/unified/client-bulkWrite.json new file mode 100644 index 000000000..f8f1d9716 --- /dev/null +++ b/src/test/spec/json/transactions/unified/client-bulkWrite.json @@ -0,0 +1,592 @@ +{ + "description": "client bulkWrite transactions", + "schemaVersion": "1.3", + 
"runOnRequirements": [ + { + "minServerVersion": "8.0", + "topologies": [ + "replicaset", + "sharded", + "load-balanced" + ] + } + ], + "createEntities": [ + { + "client": { + "id": "client0", + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "database": { + "id": "database0", + "client": "client0", + "databaseName": "transaction-tests" + } + }, + { + "collection": { + "id": "collection0", + "database": "database0", + "collectionName": "coll0" + } + }, + { + "session": { + "id": "session0", + "client": "client0" + } + }, + { + "client": { + "id": "client_with_wmajority", + "uriOptions": { + "w": "majority" + }, + "observeEvents": [ + "commandStartedEvent" + ] + } + }, + { + "session": { + "id": "session_with_wmajority", + "client": "client_with_wmajority" + } + } + ], + "_yamlAnchors": { + "namespace": "transaction-tests.coll0" + }, + "initialData": [ + { + "databaseName": "transaction-tests", + "collectionName": "coll0", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 5, + "x": 55 + }, + { + "_id": 6, + "x": 66 + }, + { + "_id": 7, + "x": 77 + } + ] + } + ], + "tests": [ + { + "description": "client bulkWrite in a transaction", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "session": "session0", + "models": [ + { + "insertOne": { + "namespace": "transaction-tests.coll0", + "document": { + "_id": 8, + "x": 88 + } + } + }, + { + "updateOne": { + "namespace": "transaction-tests.coll0", + "filter": { + "_id": 1 + }, + "update": { + "$inc": { + "x": 1 + } + } + } + }, + { + "updateMany": { + "namespace": "transaction-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "update": { + "$inc": { + "x": 2 + } + } + } + }, + { + "replaceOne": { + "namespace": "transaction-tests.coll0", + "filter": { + "_id": 4 + }, + "replacement": { + "x": 44 + }, + "upsert": true + } + }, + { + "deleteOne": { + "namespace": "transaction-tests.coll0", + "filter": { + "_id": 5 + } + } + }, + { + "deleteMany": { + "namespace": "transaction-tests.coll0", + "filter": { + "$and": [ + { + "_id": { + "$gt": 5 + } + }, + { + "_id": { + "$lte": 7 + } + } + ] + } + } + } + ], + "verboseResults": true + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 1, + "matchedCount": 3, + "modifiedCount": 3, + "deletedCount": 3, + "insertResults": { + "0": { + "insertedId": 8 + } + }, + "updateResults": { + "1": { + "matchedCount": 1, + "modifiedCount": 1, + "upsertedId": { + "$$exists": false + } + }, + "2": { + "matchedCount": 2, + "modifiedCount": 2, + "upsertedId": { + "$$exists": false + } + }, + "3": { + "matchedCount": 1, + "modifiedCount": 0, + "upsertedId": 4 + } + }, + "deleteResults": { + "4": { + "deletedCount": 1 + }, + "5": { + "deletedCount": 2 + } + } + } + }, + { + "object": "session0", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client0", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + }, + "bulkWrite": 1, + "errorsOnly": false, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 8, + "x": 88 + } + }, + { + "update": 0, + "filter": { + "_id": 1 + }, + "updateMods": { 
+ "$inc": { + "x": 1 + } + }, + "multi": false + }, + { + "update": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 1 + } + }, + { + "_id": { + "$lte": 3 + } + } + ] + }, + "updateMods": { + "$inc": { + "x": 2 + } + }, + "multi": true + }, + { + "update": 0, + "filter": { + "_id": 4 + }, + "updateMods": { + "x": 44 + }, + "upsert": true, + "multi": false + }, + { + "delete": 0, + "filter": { + "_id": 5 + }, + "multi": false + }, + { + "delete": 0, + "filter": { + "$and": [ + { + "_id": { + "$gt": 5 + } + }, + { + "_id": { + "$lte": 7 + } + } + ] + }, + "multi": true + } + ], + "nsInfo": [ + { + "ns": "transaction-tests.coll0" + } + ] + } + } + }, + { + "commandStartedEvent": { + "commandName": "commitTransaction", + "databaseName": "admin", + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session0" + }, + "txnNumber": 1, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "$$exists": false + } + } + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1, + "x": 12 + }, + { + "_id": 2, + "x": 24 + }, + { + "_id": 3, + "x": 35 + }, + { + "_id": 4, + "x": 44 + }, + { + "_id": 8, + "x": 88 + } + ] + } + ] + }, + { + "description": "client writeConcern ignored for client bulkWrite in transaction", + "operations": [ + { + "object": "session_with_wmajority", + "name": "startTransaction", + "arguments": { + "writeConcern": { + "w": 1 + } + } + }, + { + "object": "client_with_wmajority", + "name": "clientBulkWrite", + "arguments": { + "session": "session_with_wmajority", + "models": [ + { + "insertOne": { + "namespace": "transaction-tests.coll0", + "document": { + "_id": 8, + "x": 88 + } + } + } + ] + }, + "expectResult": { + "insertedCount": 1, + "upsertedCount": 0, + "matchedCount": 0, + "modifiedCount": 0, + "deletedCount": 0, + "insertResults": { + "$$unsetOrMatches": {} + }, + "updateResults": { + "$$unsetOrMatches": {} + }, + "deleteResults": { + "$$unsetOrMatches": {} + } + } + }, + { + "object": "session_with_wmajority", + "name": "commitTransaction" + } + ], + "expectEvents": [ + { + "client": "client_with_wmajority", + "events": [ + { + "commandStartedEvent": { + "commandName": "bulkWrite", + "databaseName": "admin", + "command": { + "lsid": { + "$$sessionLsid": "session_with_wmajority" + }, + "txnNumber": 1, + "startTransaction": true, + "autocommit": false, + "writeConcern": { + "$$exists": false + }, + "bulkWrite": 1, + "errorsOnly": true, + "ordered": true, + "ops": [ + { + "insert": 0, + "document": { + "_id": 8, + "x": 88 + } + } + ], + "nsInfo": [ + { + "ns": "transaction-tests.coll0" + } + ] + } + } + }, + { + "commandStartedEvent": { + "command": { + "commitTransaction": 1, + "lsid": { + "$$sessionLsid": "session_with_wmajority" + }, + "txnNumber": { + "$numberLong": "1" + }, + "startTransaction": { + "$$exists": false + }, + "autocommit": false, + "writeConcern": { + "w": 1 + } + }, + "commandName": "commitTransaction", + "databaseName": "admin" + } + } + ] + } + ], + "outcome": [ + { + "collectionName": "coll0", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1, + "x": 11 + }, + { + "_id": 2, + "x": 22 + }, + { + "_id": 3, + "x": 33 + }, + { + "_id": 5, + "x": 55 + }, + { + "_id": 6, + "x": 66 + }, + { + "_id": 7, + "x": 77 + }, + { + "_id": 8, + "x": 88 + } + ] + } + ] + }, + { + "description": "client bulkWrite with writeConcern in a transaction causes a transaction error", + "operations": [ + { + 
"object": "session0", + "name": "startTransaction" + }, + { + "object": "client0", + "name": "clientBulkWrite", + "arguments": { + "session": "session0", + "writeConcern": { + "w": 1 + }, + "models": [ + { + "insertOne": { + "namespace": "transaction-tests.coll0", + "document": { + "_id": 8, + "x": 88 + } + } + } + ] + }, + "expectError": { + "isClientError": true, + "errorContains": "Cannot set write concern after starting a transaction" + } + } + ] + } + ] +} diff --git a/src/test/spec/json/transactions/unified/client-bulkWrite.yml b/src/test/spec/json/transactions/unified/client-bulkWrite.yml new file mode 100644 index 000000000..eda2babbe --- /dev/null +++ b/src/test/spec/json/transactions/unified/client-bulkWrite.yml @@ -0,0 +1,262 @@ +description: "client bulkWrite transactions" +schemaVersion: "1.3" +runOnRequirements: + - minServerVersion: "8.0" + topologies: + - replicaset + - sharded + - load-balanced + +createEntities: + - client: + id: &client0 client0 + observeEvents: [ commandStartedEvent ] + - database: + id: &database0 database0 + client: *client0 + databaseName: &database0Name transaction-tests + - collection: + id: &collection0 collection0 + database: *database0 + collectionName: &collection0Name coll0 + - session: + id: &session0 session0 + client: *client0 + - client: + id: &client_with_wmajority client_with_wmajority + uriOptions: + w: majority + observeEvents: + - commandStartedEvent + - session: + id: &session_with_wmajority session_with_wmajority + client: *client_with_wmajority + +_yamlAnchors: + namespace: &namespace "transaction-tests.coll0" + +initialData: + - databaseName: *database0Name + collectionName: *collection0Name + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + - { _id: 5, x: 55 } + - { _id: 6, x: 66 } + - { _id: 7, x: 77 } + +tests: + - description: "client bulkWrite in a transaction" + operations: + - object: *session0 + name: startTransaction + - object: *client0 + name: clientBulkWrite + arguments: + session: *session0 + models: + - insertOne: + namespace: *namespace + document: { _id: 8, x: 88 } + - updateOne: + namespace: *namespace + filter: { _id: 1 } + update: { $inc: { x: 1 } } + - updateMany: + namespace: *namespace + filter: + $and: [ { _id: { $gt: 1 } }, { _id: { $lte: 3 } } ] + update: { $inc: { x: 2 } } + - replaceOne: + namespace: *namespace + filter: { _id: 4 } + replacement: { x: 44 } + upsert: true + - deleteOne: + namespace: *namespace + filter: { _id: 5 } + - deleteMany: + namespace: *namespace + filter: + $and: [ { _id: { $gt: 5 } }, { _id: { $lte: 7 } } ] + verboseResults: true + expectResult: + insertedCount: 1 + upsertedCount: 1 + matchedCount: 3 + modifiedCount: 3 + deletedCount: 3 + insertResults: + 0: + insertedId: 8 + updateResults: + 1: + matchedCount: 1 + modifiedCount: 1 + upsertedId: { $$exists: false } + 2: + matchedCount: 2 + modifiedCount: 2 + upsertedId: { $$exists: false } + 3: + matchedCount: 1 + modifiedCount: 0 + upsertedId: 4 + deleteResults: + 4: + deletedCount: 1 + 5: + deletedCount: 2 + - object: *session0 + name: commitTransaction + expectEvents: + - client: *client0 + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + lsid: { $$sessionLsid: *session0 } + txnNumber: 1 + startTransaction: true + autocommit: false + writeConcern: { $$exists: false } + bulkWrite: 1 + errorsOnly: false + ordered: true + ops: + - insert: 0 + document: { _id: 8, x: 88 } + - update: 0 + filter: { _id: 1 } + updateMods: { $inc: { x: 1 } } + multi: false + - 
update: 0 + filter: + $and: [ { _id: { $gt: 1 } }, { _id: { $lte: 3 } } ] + updateMods: { $inc: { x: 2 } } + multi: true + - update: 0 + filter: { _id: 4 } + updateMods: { x: 44 } + upsert: true + multi: false + - delete: 0 + filter: { _id: 5 } + multi: false + - delete: 0 + filter: + $and: [ { _id: { $gt: 5 } }, { _id: { $lte: 7 } } ] + multi: true + nsInfo: + - ns: *namespace + - commandStartedEvent: + commandName: commitTransaction + databaseName: admin + command: + commitTransaction: 1 + lsid: { $$sessionLsid: *session0 } + txnNumber: 1 + startTransaction: { $$exists: false } + autocommit: false + writeConcern: { $$exists: false } + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 12 } + - { _id: 2, x: 24 } + - { _id: 3, x: 35 } + - { _id: 4, x: 44 } + - { _id: 8, x: 88 } + - description: 'client writeConcern ignored for client bulkWrite in transaction' + operations: + - object: *session_with_wmajority + name: startTransaction + arguments: + writeConcern: + w: 1 + - object: *client_with_wmajority + name: clientBulkWrite + arguments: + session: *session_with_wmajority + models: + - insertOne: + namespace: *namespace + document: { _id: 8, x: 88 } + expectResult: + insertedCount: 1 + upsertedCount: 0 + matchedCount: 0 + modifiedCount: 0 + deletedCount: 0 + insertResults: + $$unsetOrMatches: {} + updateResults: + $$unsetOrMatches: {} + deleteResults: + $$unsetOrMatches: {} + - object: *session_with_wmajority + name: commitTransaction + expectEvents: + - + client: *client_with_wmajority + events: + - commandStartedEvent: + commandName: bulkWrite + databaseName: admin + command: + lsid: { $$sessionLsid: *session_with_wmajority } + txnNumber: 1 + startTransaction: true + autocommit: false + writeConcern: { $$exists: false } + bulkWrite: 1 + errorsOnly: true + ordered: true + ops: + - insert: 0 + document: { _id: 8, x: 88 } + nsInfo: + - ns: *namespace + - + commandStartedEvent: + command: + commitTransaction: 1 + lsid: { $$sessionLsid: *session_with_wmajority } + txnNumber: { $numberLong: '1' } + startTransaction: { $$exists: false } + autocommit: false + writeConcern: + w: 1 + commandName: commitTransaction + databaseName: admin + outcome: + - collectionName: *collection0Name + databaseName: *database0Name + documents: + - { _id: 1, x: 11 } + - { _id: 2, x: 22 } + - { _id: 3, x: 33 } + - { _id: 5, x: 55 } + - { _id: 6, x: 66 } + - { _id: 7, x: 77 } + - { _id: 8, x: 88 } + - description: "client bulkWrite with writeConcern in a transaction causes a transaction error" + operations: + - object: *session0 + name: startTransaction + - object: *client0 + name: clientBulkWrite + arguments: + session: *session0 + writeConcern: + w: 1 + models: + - insertOne: + namespace: *namespace + document: { _id: 8, x: 88 } + expectError: + isClientError: true + errorContains: "Cannot set write concern after starting a transaction" diff --git a/src/test/spec/json/transactions/unified/mongos-pin-auto-tests.py b/src/test/spec/json/transactions/unified/mongos-pin-auto-tests.py index 99a34b485..ad2aeabd1 100644 --- a/src/test/spec/json/transactions/unified/mongos-pin-auto-tests.py +++ b/src/test/spec/json/transactions/unified/mongos-pin-auto-tests.py @@ -291,6 +291,11 @@ insert: *collection_name documents: - { _id : 1 }'''), + # clientBulkWrite: + 'clientBulkWrite': ('bulkWrite', '*client0', r'''models: + - insertOne: + namespace: database0.collection0 + document: { _id: 8, x: 88 }'''), } # Maps from error_name to error_data. 
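
Note on usage: the "client bulkWrite in a transaction" test above maps onto the driver's new client-level API roughly as sketched below. This is illustrative only, not code from this PR: `bulk_write` and its `.session(...)` setter are the action introduced here, but the `start_session`/`start_transaction`/`commit_transaction` action calls and the `WriteModel::InsertOne` variant shape (field names included) are assumptions about the surrounding driver surface.

use mongodb::{bson::doc, error::Result, options::WriteModel, Client, Namespace};

async fn insert_in_transaction(client: &Client) -> Result<()> {
    let mut session = client.start_session().await?;
    session.start_transaction().await?;

    // One insert against transaction-tests.coll0, mirroring the first model
    // in the "client bulkWrite in a transaction" test above.
    let models = vec![WriteModel::InsertOne {
        namespace: Namespace {
            db: "transaction-tests".to_string(),
            coll: "coll0".to_string(),
        },
        document: doc! { "_id": 8, "x": 88 },
    }];

    // The bulk write runs on the transaction's session. Attaching a write
    // concern here instead would fail client-side, as exercised by the
    // "writeConcern in a transaction causes a transaction error" test.
    client.bulk_write(models).session(&mut session).await?;

    session.commit_transaction().await?;
    Ok(())
}
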
@@ -313,7 +318,11 @@ def create_pin_test(op_name, error_name): error_data = NON_TRANSIENT_ERRORS[error_name] if op_name.startswith('bulkWrite'): op_name = 'bulkWrite' - return TEMPLATE.format(**locals()) + test = TEMPLATE.format(**locals()) + if op_name == 'clientBulkWrite': + test += ' runOnRequirements:\n' + test += ' - minServerVersion: "8.0" # `bulkWrite` added to server 8.0"\n' + return test def create_unpin_test(op_name, error_name): @@ -324,7 +333,12 @@ def create_unpin_test(op_name, error_name): error_data = TRANSIENT_ERRORS[error_name] if op_name.startswith('bulkWrite'): op_name = 'bulkWrite' - return TEMPLATE.format(**locals()) + test = TEMPLATE.format(**locals()) + if op_name == 'clientBulkWrite': + test += ' runOnRequirements:\n' + test += ' - minServerVersion: "8.0" # `bulkWrite` added to server 8.0"\n' + return test + tests = [] diff --git a/src/test/spec/json/transactions/unified/mongos-pin-auto.json b/src/test/spec/json/transactions/unified/mongos-pin-auto.json index 93eac8bb7..27db52040 100644 --- a/src/test/spec/json/transactions/unified/mongos-pin-auto.json +++ b/src/test/spec/json/transactions/unified/mongos-pin-auto.json @@ -2004,6 +2004,104 @@ } ] }, + { + "description": "remain pinned after non-transient Interrupted error on clientBulkWrite bulkWrite", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorCode": 11601 + } + } + } + }, + { + "name": "clientBulkWrite", + "object": "client0", + "arguments": { + "session": "session0", + "models": [ + { + "insertOne": { + "namespace": "database0.collection0", + "document": { + "_id": 8, + "x": 88 + } + } + } + ] + }, + "expectError": { + "errorLabelsOmit": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionPinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ] + }, { "description": "unpin after transient connection error on insertOne insert", "operations": [ @@ -5175,6 +5273,202 @@ ] } ] + }, + { + "description": "unpin after transient connection error on clientBulkWrite bulkWrite", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "closeConnection": true + } + } + } + }, + { + "name": "clientBulkWrite", + "object": "client0", + "arguments": { + "session": "session0", + "models": [ + { + 
"insertOne": { + "namespace": "database0.collection0", + "document": { + "_id": 8, + "x": 88 + } + } + } + ] + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ] + }, + { + "description": "unpin after transient ShutdownInProgress error on clientBulkWrite bulkWrite", + "operations": [ + { + "object": "session0", + "name": "startTransaction" + }, + { + "object": "collection0", + "name": "insertOne", + "arguments": { + "session": "session0", + "document": { + "_id": 3 + } + }, + "expectResult": { + "$$unsetOrMatches": { + "insertedId": { + "$$unsetOrMatches": 3 + } + } + } + }, + { + "name": "targetedFailPoint", + "object": "testRunner", + "arguments": { + "session": "session0", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "bulkWrite" + ], + "errorCode": 91 + } + } + } + }, + { + "name": "clientBulkWrite", + "object": "client0", + "arguments": { + "session": "session0", + "models": [ + { + "insertOne": { + "namespace": "database0.collection0", + "document": { + "_id": 8, + "x": 88 + } + } + } + ] + }, + "expectError": { + "errorLabelsContain": [ + "TransientTransactionError" + ] + } + }, + { + "object": "testRunner", + "name": "assertSessionUnpinned", + "arguments": { + "session": "session0" + } + }, + { + "object": "session0", + "name": "abortTransaction" + } + ], + "outcome": [ + { + "collectionName": "test", + "databaseName": "transaction-tests", + "documents": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ] + } + ], + "runOnRequirements": [ + { + "minServerVersion": "8.0" + } + ] } ] } diff --git a/src/test/spec/json/transactions/unified/mongos-pin-auto.yml b/src/test/spec/json/transactions/unified/mongos-pin-auto.yml index 7a7634755..a80dd6203 100644 --- a/src/test/spec/json/transactions/unified/mongos-pin-auto.yml +++ b/src/test/spec/json/transactions/unified/mongos-pin-auto.yml @@ -676,6 +676,36 @@ tests: - *abortTransaction outcome: *outcome + - description: remain pinned after non-transient Interrupted error on clientBulkWrite bulkWrite + operations: + - *startTransaction + - *initialCommand + - name: targetedFailPoint + object: testRunner + arguments: + session: *session0 + failPoint: + configureFailPoint: failCommand + mode: {times: 1} + data: + failCommands: ["bulkWrite"] + errorCode: 11601 + - name: clientBulkWrite + object: *client0 + arguments: + session: *session0 + models: + - insertOne: + namespace: database0.collection0 + document: { _id: 8, x: 88 } + expectError: + errorLabelsOmit: ["TransientTransactionError"] + - *assertSessionPinned + - *abortTransaction + outcome: *outcome + runOnRequirements: + - minServerVersion: "8.0" # `bulkWrite` added to server 8.0" + - description: unpin after transient connection error on insertOne insert operations: - *startTransaction @@ -1614,3 +1644,63 @@ tests: - *abortTransaction outcome: *outcome + - description: unpin after transient connection error on clientBulkWrite bulkWrite + operations: + - *startTransaction + - *initialCommand + - name: targetedFailPoint + object: testRunner + arguments: + session: *session0 + failPoint: + 
configureFailPoint: failCommand + mode: {times: 1} + data: + failCommands: ["bulkWrite"] + closeConnection: true + - name: clientBulkWrite + object: *client0 + arguments: + session: *session0 + models: + - insertOne: + namespace: database0.collection0 + document: { _id: 8, x: 88 } + expectError: + errorLabelsContain: ["TransientTransactionError"] + - *assertSessionUnpinned + - *abortTransaction + outcome: *outcome + runOnRequirements: + - minServerVersion: "8.0" # `bulkWrite` added to server 8.0" + + - description: unpin after transient ShutdownInProgress error on clientBulkWrite bulkWrite + operations: + - *startTransaction + - *initialCommand + - name: targetedFailPoint + object: testRunner + arguments: + session: *session0 + failPoint: + configureFailPoint: failCommand + mode: {times: 1} + data: + failCommands: ["bulkWrite"] + errorCode: 91 + - name: clientBulkWrite + object: *client0 + arguments: + session: *session0 + models: + - insertOne: + namespace: database0.collection0 + document: { _id: 8, x: 88 } + expectError: + errorLabelsContain: ["TransientTransactionError"] + - *assertSessionUnpinned + - *abortTransaction + outcome: *outcome + runOnRequirements: + - minServerVersion: "8.0" # `bulkWrite` added to server 8.0" + diff --git a/src/test/spec/oidc.rs b/src/test/spec/oidc.rs index c25798986..c86f04649 100644 --- a/src/test/spec/oidc.rs +++ b/src/test/spec/oidc.rs @@ -66,7 +66,7 @@ mod basic { use crate::{ client::auth::{oidc, AuthMechanism, Credential}, options::ClientOptions, - test::{FailCommandOptions, FailPoint}, + test::util::fail_point::{FailPoint, FailPointMode}, Client, }; use bson::{doc, Document}; @@ -368,13 +368,9 @@ mod basic { let admin_client = admin_client!(); // Now set a failpoint for find with 391 error code - let options = FailCommandOptions::builder().error_code(391).build(); - let failpoint = FailPoint::fail_command( - &["find"], - crate::test::FailPointMode::Times(1), - Some(options), - ); - let _fp_guard = failpoint.enable(&admin_client, None).await.unwrap(); + let fail_point = + FailPoint::fail_command(&["find"], FailPointMode::Times(1)).error_code(391); + let _guard = admin_client.enable_fail_point(fail_point).await.unwrap(); // we need to assert the callback count let call_count = Arc::new(Mutex::new(0)); @@ -799,16 +795,12 @@ mod basic { let client = Client::with_options(opts)?; // Now set a failpoint for saslStart - let options = FailCommandOptions::builder().error_code(20).build(); - let failpoint = FailPoint::fail_command( - &["saslStart"], - // we use 5 times just because AlwaysOn is dangerous if for some reason we don't run - // the cleanup, since we will not be able to auth a new connection to turn - // off the failpoint. - crate::test::FailPointMode::Times(5), - Some(options), - ); - let _fp_guard = failpoint.enable(&admin_client, None).await.unwrap(); + // we use 5 times just because AlwaysOn is dangerous if for some reason we don't run + // the cleanup, since we will not be able to auth a new connection to turn + // off the failpoint. + let fail_point = + FailPoint::fail_command(&["saslStart"], FailPointMode::Times(5)).error_code(20); + let _guard = admin_client.enable_fail_point(fail_point).await.unwrap(); // Now find should succeed even though we have a fail point on saslStart because the spec // auth should succeed. 
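
Note on usage: the hunks above migrate tests from the `FailCommandOptions` builder plus `FailPoint::fail_command(commands, mode, options)` and `enable(...)` to chained setters on `FailPoint` and a single `enable_fail_point` call. A sketch of the resulting test pattern, assembled from calls that appear verbatim in these hunks; the drop-to-disable behavior of the returned guard is inferred from how `_guard` is bound throughout this diff, and the test scaffolding around it is illustrative:

use std::time::Duration;

use crate::{
    test::util::fail_point::{FailPoint, FailPointMode},
    Client,
};

#[tokio::test(flavor = "multi_thread")]
async fn fail_point_guard_pattern() {
    let client = Client::test_builder().build().await;

    // Fail the next `find` with error code 91 and hold the connection for
    // one second before responding (the same shape used by
    // retry_read_pool_cleared above).
    let fail_point = FailPoint::fail_command(&["find"], FailPointMode::Times(1))
        .error_code(91)
        .block_connection(Duration::from_secs(1));

    // enable_fail_point returns a guard; the fail point stays active until
    // the guard drops, so binding it scopes the failure injection to this
    // test without an explicit disable step.
    let _guard = client.enable_fail_point(fail_point).await.unwrap();

    // ... run the operation under test here ...
}

Compared with the old `FailCommandOptions::builder()...build()` plus `Some(options)` plumbing, the chained setters keep each fail point's configuration in one expression and make the guard the only thing a test needs to keep alive.
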
@@ -831,13 +823,9 @@ mod basic { let admin_client = admin_client!(); // Now set a failpoint for find - let options = FailCommandOptions::builder().error_code(20).build(); - let failpoint = FailPoint::fail_command( - &["saslStart"], - crate::test::FailPointMode::Times(5), - Some(options), - ); - let _fp_guard = failpoint.enable(&admin_client, None).await.unwrap(); + let fail_point = + FailPoint::fail_command(&["saslStart"], FailPointMode::Times(5)).error_code(20); + let _guard = admin_client.enable_fail_point(fail_point).await.unwrap(); // we need to assert the callback count let call_count = Arc::new(Mutex::new(0)); let cb_call_count = call_count.clone(); @@ -920,13 +908,9 @@ mod basic { .await?; // Now set a failpoint for find with 391 error code - let options = FailCommandOptions::builder().error_code(391).build(); - let failpoint = FailPoint::fail_command( - &["find"], - crate::test::FailPointMode::Times(1), - Some(options), - ); - let _fp_guard = failpoint.enable(&admin_client, None).await.unwrap(); + let fail_point = + FailPoint::fail_command(&["find"], FailPointMode::Times(1)).error_code(391); + let _guard = admin_client.enable_fail_point(fail_point).await.unwrap(); client .database("test") @@ -1010,13 +994,9 @@ mod basic { .await?; // Now set a failpoint for find with 391 error code - let options = FailCommandOptions::builder().error_code(391).build(); - let failpoint = FailPoint::fail_command( - &["find"], - crate::test::FailPointMode::Times(1), - Some(options), - ); - let _fp_guard = failpoint.enable(&admin_client, None).await.unwrap(); + let fail_point = + FailPoint::fail_command(&["find"], FailPointMode::Times(1)).error_code(391); + let _guard = admin_client.enable_fail_point(fail_point).await.unwrap(); client .database("test") @@ -1065,13 +1045,9 @@ mod basic { assert_eq!(1, *(*call_count).lock().await); // Now set a failpoint for find with 391 error code - let options = FailCommandOptions::builder().error_code(391).build(); - let failpoint = FailPoint::fail_command( - &["find", "saslStart"], - crate::test::FailPointMode::Times(2), - Some(options), - ); - let _fp_guard = failpoint.enable(&admin_client, None).await.unwrap(); + let fail_point = FailPoint::fail_command(&["find", "saslStart"], FailPointMode::Times(2)) + .error_code(391); + let _guard = admin_client.enable_fail_point(fail_point).await.unwrap(); client .database("test") @@ -1120,13 +1096,9 @@ mod basic { assert_eq!(1, *(*call_count).lock().await); // Now set a failpoint for find with 391 error code - let options = FailCommandOptions::builder().error_code(391).build(); - let failpoint = FailPoint::fail_command( - &["find", "saslStart"], - crate::test::FailPointMode::Times(3), - Some(options), - ); - let _fp_guard = failpoint.enable(&admin_client, None).await.unwrap(); + let fail_point = FailPoint::fail_command(&["find", "saslStart"], FailPointMode::Times(3)) + .error_code(391); + let _guard = admin_client.enable_fail_point(fail_point).await.unwrap(); let res = client .database("test") diff --git a/src/test/spec/retryable_reads.rs b/src/test/spec/retryable_reads.rs index e5f1a5422..e6b2d00d1 100644 --- a/src/test/spec/retryable_reads.rs +++ b/src/test/spec/retryable_reads.rs @@ -13,11 +13,11 @@ use crate::{ get_client_options, log_uncaptured, spec::{unified_runner::run_unified_tests, v2_runner::run_v2_tests}, - util::event_buffer::EventBuffer, + util::{ + event_buffer::EventBuffer, + fail_point::{FailPoint, FailPointMode}, + }, Event, - FailCommandOptions, - FailPoint, - FailPointMode, TestClient, }, Client, @@ -55,9 
+55,9 @@ async fn retry_releases_connection() { // Use a connection error to ensure streaming monitor checks get cancelled. Otherwise, we'd have // to wait for the entire heartbeatFrequencyMS before the find succeeds. - let options = FailCommandOptions::builder().close_connection(true).build(); - let failpoint = FailPoint::fail_command(&["find"], FailPointMode::Times(1), Some(options)); - let _fp_guard = client.enable_failpoint(failpoint, None).await.unwrap(); + let fail_point = + FailPoint::fail_command(&["find"], FailPointMode::Times(1)).close_connection(true); + let _guard = client.enable_fail_point(fail_point).await.unwrap(); runtime::timeout( Duration::from_secs(1), @@ -100,12 +100,10 @@ async fn retry_read_pool_cleared() { .collection("retry_read_pool_cleared"); collection.insert_one(doc! { "x": 1 }).await.unwrap(); - let options = FailCommandOptions::builder() + let fail_point = FailPoint::fail_command(&["find"], FailPointMode::Times(1)) .error_code(91) - .block_connection(Duration::from_secs(1)) - .build(); - let failpoint = FailPoint::fail_command(&["find"], FailPointMode::Times(1), Some(options)); - let _fp_guard = client.enable_failpoint(failpoint, None).await.unwrap(); + .block_connection(Duration::from_secs(1)); + let _guard = client.enable_fail_point(fail_point).await.unwrap(); #[allow(deprecated)] let mut subscriber = buffer.subscribe(); @@ -183,12 +181,11 @@ async fn retry_read_different_mongos() { log_uncaptured("skipping retry_read_different_mongos: requires failCommand"); return; } - let fail_opts = FailCommandOptions::builder() + + let fail_point = FailPoint::fail_command(&["find"], FailPointMode::Times(1)) .error_code(6) - .close_connection(true) - .build(); - let fp = FailPoint::fail_command(&["find"], FailPointMode::Times(1), Some(fail_opts)); - guards.push(client.enable_failpoint(fp, None).await.unwrap()); + .close_connection(true); + guards.push(client.enable_fail_point(fail_point).await.unwrap()); } #[allow(deprecated)] @@ -245,12 +242,11 @@ async fn retry_read_same_mongos() { let mut client_options = client_options.clone(); client_options.direct_connection = Some(true); let client = Client::test_builder().options(client_options).build().await; - let fail_opts = FailCommandOptions::builder() + + let fail_point = FailPoint::fail_command(&["find"], FailPointMode::Times(1)) .error_code(6) - .close_connection(true) - .build(); - let fp = FailPoint::fail_command(&["find"], FailPointMode::Times(1), Some(fail_opts)); - client.enable_failpoint(fp, None).await.unwrap() + .close_connection(true); + client.enable_fail_point(fail_point).await.unwrap() }; #[allow(deprecated)] diff --git a/src/test/spec/retryable_writes.rs b/src/test/spec/retryable_writes.rs index f71214fe6..6006ad516 100644 --- a/src/test/spec/retryable_writes.rs +++ b/src/test/spec/retryable_writes.rs @@ -26,11 +26,12 @@ use crate::{ log_uncaptured, run_spec_test, spec::unified_runner::run_unified_tests, - util::{event_buffer::EventBuffer, get_default_name}, + util::{ + event_buffer::EventBuffer, + fail_point::{FailPoint, FailPointMode}, + get_default_name, + }, Event, - FailCommandOptions, - FailPoint, - FailPointMode, TestClient, }, Client, @@ -44,7 +45,7 @@ async fn run_unified() { #[tokio::test(flavor = "multi_thread")] async fn run_legacy() { async fn run_test(test_file: TestFile) { - for mut test_case in test_file.tests { + for test_case in test_file.tests { if test_case.operation.name == "bulkWrite" { continue; } @@ -80,13 +81,13 @@ async fn run_legacy() { .expect(&test_case.description); } - let 
_fp_guard = if let Some(ref mut fail_point) = test_case.fail_point { - Some(fail_point.enable(&client, None).await.unwrap_or_else(|e| { - panic!( - "{}: error enabling failpoint: {:#?}", - test_case.description, e - ) - })) + let guard = if let Some(fail_point) = test_case.fail_point { + Some( + client + .enable_fail_point(fail_point) + .await + .expect(&test_case.description), + ) } else { None }; @@ -95,7 +96,7 @@ async fn run_legacy() { let result = test_case.operation.execute_on_collection(&coll, None).await; // Disable the failpoint, if any. - drop(_fp_guard); + drop(guard); if let Some(error) = test_case.outcome.error { assert_eq!( @@ -416,13 +417,11 @@ async fn retry_write_pool_cleared() { .database("retry_write_pool_cleared") .collection("retry_write_pool_cleared"); - let options = FailCommandOptions::builder() + let fail_point = FailPoint::fail_command(&["insert"], FailPointMode::Times(1)) .error_code(91) .block_connection(Duration::from_secs(1)) - .error_labels(vec![RETRYABLE_WRITE_ERROR.to_string()]) - .build(); - let failpoint = FailPoint::fail_command(&["insert"], FailPointMode::Times(1), Some(options)); - let _fp_guard = client.enable_failpoint(failpoint, None).await.unwrap(); + .error_labels(vec![RETRYABLE_WRITE_ERROR]); + let _guard = client.enable_fail_point(fail_point).await.unwrap(); #[allow(deprecated)] let mut subscriber = buffer.subscribe(); @@ -505,20 +504,18 @@ async fn retry_write_retryable_write_error() { // Enable the failpoint. let fp_guard = { let client = client.lock().await; - FailPoint::fail_command( + let fail_point = FailPoint::fail_command( &["insert"], FailPointMode::Times(1), - FailCommandOptions::builder() - .error_code(10107) - .error_labels(vec![ - "RetryableWriteError".to_string(), - "NoWritesPerformed".to_string(), - ]) - .build(), ) - .enable(client.as_ref().unwrap(), None) - .await - .unwrap() + .error_code(10107) + .error_labels(vec!["RetryableWriteError", "NoWritesPerformed"]); + client + .as_ref() + .unwrap() + .enable_fail_point(fail_point) + .await + .unwrap() }; fp_tx.send(fp_guard).unwrap(); // Defer acknowledging the message until the failpoint has been set @@ -540,19 +537,12 @@ async fn retry_write_retryable_write_error() { return; } - let _fp_guard = FailPoint::fail_command( - &["insert"], - FailPointMode::Times(1), - FailCommandOptions::builder() - .write_concern_error(doc! { - "code": 91, - "errorLabels": ["RetryableWriteError"], - }) - .build(), - ) - .enable(&client, None) - .await - .unwrap(); + let fail_point = FailPoint::fail_command(&["insert"], FailPointMode::Times(1)) + .write_concern_error(doc! 
{ + "code": 91, + "errorLabels": ["RetryableWriteError"], + }); + let _guard = client.enable_fail_point(fail_point).await.unwrap(); let result = client .database("test") @@ -589,13 +579,12 @@ async fn retry_write_different_mongos() { log_uncaptured("skipping retry_write_different_mongos: requires failCommand"); return; } - let fail_opts = FailCommandOptions::builder() + + let fail_point = FailPoint::fail_command(&["insert"], FailPointMode::Times(1)) .error_code(6) - .error_labels(vec!["RetryableWriteError".to_string()]) - .close_connection(true) - .build(); - let fp = FailPoint::fail_command(&["insert"], FailPointMode::Times(1), Some(fail_opts)); - guards.push(client.enable_failpoint(fp, None).await.unwrap()); + .error_labels(vec![RETRYABLE_WRITE_ERROR]) + .close_connection(true); + guards.push(client.enable_fail_point(fail_point).await.unwrap()); } #[allow(deprecated)] @@ -652,13 +641,12 @@ async fn retry_write_same_mongos() { let mut client_options = client_options.clone(); client_options.direct_connection = Some(true); let client = Client::test_builder().options(client_options).build().await; - let fail_opts = FailCommandOptions::builder() + + let fail_point = FailPoint::fail_command(&["insert"], FailPointMode::Times(1)) .error_code(6) - .error_labels(vec!["RetryableWriteError".to_string()]) - .close_connection(true) - .build(); - let fp = FailPoint::fail_command(&["insert"], FailPointMode::Times(1), Some(fail_opts)); - client.enable_failpoint(fp, None).await.unwrap() + .error_labels(vec![RETRYABLE_WRITE_ERROR]) + .close_connection(true); + client.enable_fail_point(fail_point).await.unwrap() }; #[allow(deprecated)] diff --git a/src/test/spec/retryable_writes/test_file.rs b/src/test/spec/retryable_writes/test_file.rs index b3c72b520..75aa5fc03 100644 --- a/src/test/spec/retryable_writes/test_file.rs +++ b/src/test/spec/retryable_writes/test_file.rs @@ -4,7 +4,7 @@ use super::super::{Operation, RunOn}; use crate::{ bson::{Bson, Document}, options::ClientOptions, - test::FailPoint, + test::util::fail_point::FailPoint, }; #[derive(Debug, Deserialize)] diff --git a/src/test/spec/sdam.rs b/src/test/spec/sdam.rs index 79f1dba72..41c4ce06d 100644 --- a/src/test/spec/sdam.rs +++ b/src/test/spec/sdam.rs @@ -10,11 +10,11 @@ use crate::{ get_client_options, log_uncaptured, spec::unified_runner::run_unified_tests, - util::event_buffer::EventBuffer, + util::{ + event_buffer::EventBuffer, + fail_point::{FailPoint, FailPointMode}, + }, Event, - FailCommandOptions, - FailPoint, - FailPointMode, TestClient, }, Client, @@ -204,15 +204,13 @@ async fn rtt_is_updated() { assert!(events.len() > 2); // configure a failpoint that blocks hello commands - let fp = FailPoint::fail_command( + let fail_point = FailPoint::fail_command( &["hello", LEGACY_HELLO_COMMAND_NAME], FailPointMode::Times(1000), - FailCommandOptions::builder() - .block_connection(Duration::from_millis(500)) - .app_name(app_name.to_string()) - .build(), - ); - let _gp_guard = fp.enable(&client, None).await.unwrap(); + ) + .block_connection(Duration::from_millis(500)) + .app_name(app_name); + let _guard = client.enable_fail_point(fail_point).await.unwrap(); let mut watcher = client.topology().watch(); runtime::timeout(Duration::from_secs(10), async move { diff --git a/src/test/spec/transactions.rs b/src/test/spec/transactions.rs index 8c6c3442f..0b5a33a86 100644 --- a/src/test/spec/transactions.rs +++ b/src/test/spec/transactions.rs @@ -10,9 +10,7 @@ use crate::{ get_client_options, log_uncaptured, spec::unified_runner::run_unified_tests, - 
diff --git a/src/test/spec/transactions.rs b/src/test/spec/transactions.rs
index 8c6c3442f..0b5a33a86 100644
--- a/src/test/spec/transactions.rs
+++ b/src/test/spec/transactions.rs
@@ -10,9 +10,7 @@ use crate::{
         get_client_options,
         log_uncaptured,
         spec::unified_runner::run_unified_tests,
-        FailCommandOptions,
-        FailPoint,
-        FailPointMode,
+        util::fail_point::{FailPoint, FailPointMode},
         TestClient,
     },
     Client,
@@ -205,17 +203,10 @@ async fn convenient_api_retry_timeout_commit_unknown() {
         .database("test_convenient")
         .collection::<Document>("test_convenient");

-    let _fp = FailPoint::fail_command(
-        &["commitTransaction"],
-        FailPointMode::Times(1),
-        FailCommandOptions::builder()
-            .error_code(251)
-            .error_labels(vec![UNKNOWN_TRANSACTION_COMMIT_RESULT.to_string()])
-            .build(),
-    )
-    .enable(&client, None)
-    .await
-    .unwrap();
+    let fail_point = FailPoint::fail_command(&["commitTransaction"], FailPointMode::Times(1))
+        .error_code(251)
+        .error_labels(vec![UNKNOWN_TRANSACTION_COMMIT_RESULT]);
+    let _guard = client.enable_fail_point(fail_point).await.unwrap();

     let result = session
         .start_transaction()
@@ -257,17 +248,10 @@ async fn convenient_api_retry_timeout_commit_transient() {
         .database("test_convenient")
         .collection::<Document>("test_convenient");

-    let _fp = FailPoint::fail_command(
-        &["commitTransaction"],
-        FailPointMode::Times(1),
-        FailCommandOptions::builder()
-            .error_code(251)
-            .error_labels(vec![TRANSIENT_TRANSACTION_ERROR.to_string()])
-            .build(),
-    )
-    .enable(&client, None)
-    .await
-    .unwrap();
+    let fail_point = FailPoint::fail_command(&["commitTransaction"], FailPointMode::Times(1))
+        .error_code(251)
+        .error_labels(vec![TRANSIENT_TRANSACTION_ERROR]);
+    let _guard = client.enable_fail_point(fail_point).await.unwrap();

     let result = session
         .start_transaction()
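Both convenient-API hunks inject server error 251 on `commitTransaction` and differ only in the attached label, which is what the convenient transactions API keys its retry-until-timeout behavior on. A sketch of the assertion these tests build toward — the shape of the `result` binding from the surrounding test is assumed:

```rust
// Sketch: with the fail point above active, the convenient API should retry the
// commit until its timeout and then surface the injected label to the caller.
let error = result.unwrap_err();
assert!(error.contains_label(UNKNOWN_TRANSACTION_COMMIT_RESULT));
```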
diff --git a/src/test/spec/unified_runner/operation.rs b/src/test/spec/unified_runner/operation.rs
index 9be1a8b7e..49dd20fa8 100644
--- a/src/test/spec/unified_runner/operation.rs
+++ b/src/test/spec/unified_runner/operation.rs
@@ -1,8 +1,6 @@
+mod bulk_write;
 #[cfg(feature = "in-use-encryption-unstable")]
 mod csfle;
-#[cfg(feature = "in-use-encryption-unstable")]
-use self::csfle::*;
-
 mod search_index;

 use std::{
@@ -10,6 +8,7 @@ use std::{
     convert::TryInto,
     fmt::Debug,
     ops::Deref,
+    panic::{catch_unwind, AssertUnwindSafe},
     sync::{
         atomic::{AtomicBool, Ordering},
         Arc,
@@ -45,15 +44,12 @@ use super::{
 use crate::{
     action::Action,
     bson::{doc, to_bson, Bson, Document},
-    client::{options::TransactionOptions, session::TransactionState},
-    coll::options::Hint,
-    collation::Collation,
-    db::options::{ListCollectionsOptions, RunCursorCommandOptions},
+    client::session::TransactionState,
     error::{ErrorKind, Result},
-    gridfs::options::{GridFsDownloadByNameOptions, GridFsUploadOptions},
     options::{
         AggregateOptions,
         ChangeStreamOptions,
+        Collation,
         CountOptions,
         CreateCollectionOptions,
         DeleteOptions,
@@ -65,20 +61,25 @@ use crate::{
         FindOneAndUpdateOptions,
         FindOneOptions,
         FindOptions,
+        GridFsDownloadByNameOptions,
+        GridFsUploadOptions,
+        Hint,
         IndexOptions,
         InsertManyOptions,
         InsertOneOptions,
+        ListCollectionsOptions,
         ListIndexesOptions,
         ReadConcern,
         ReplaceOptions,
+        RunCursorCommandOptions,
         SelectionCriteria,
+        TransactionOptions,
         UpdateModifications,
         UpdateOptions,
     },
     runtime,
-    selection_criteria::ReadPreference,
     serde_util,
-    test::FailPoint,
+    test::util::fail_point::FailPoint,
     ClientSession,
     Collection,
     Database,
@@ -87,6 +88,11 @@ use crate::{
     TopologyType,
 };

+use bulk_write::*;
+#[cfg(feature = "in-use-encryption-unstable")]
+use csfle::*;
+use search_index::*;
+
 pub(crate) trait TestOperation: Debug + Send + Sync {
     fn execute_test_runner_operation<'a>(
         &'a self,
@@ -182,6 +188,7 @@ macro_rules! with_opt_session {
         }
     };
 }
+use with_mut_session;

 #[derive(Debug)]
 pub(crate) struct Operation {
@@ -254,7 +261,7 @@ impl Operation {
                     "{}: {} should return an error",
                     description, self.name
                 ));
-                expect_error.verify_result(&error, description).unwrap();
+                expect_error.verify_result(&error, description);
             }
             Expectation::Ignore => (),
         }
@@ -304,7 +311,7 @@ impl<'de> Deserialize<'de> for Operation {
         struct OperationDefinition {
             pub(crate) name: String,
             pub(crate) object: OperationObject,
-            #[serde(default = "default_arguments")]
+            #[serde(default = "Document::new")]
             pub(crate) arguments: Document,
             pub(crate) expect_error: Option<ExpectError>,
             pub(crate) expect_result: Option<Bson>,
@@ -312,10 +319,6 @@ impl<'de> Deserialize<'de> for Operation {
             pub(crate) ignore_result_and_error: Option<bool>,
         }

-        fn default_arguments() -> Document {
-            doc! {}
-        }
-
         let definition = OperationDefinition::deserialize(deserializer)?;
         let boxed_op = match definition.name.as_str() {
             "insertOne" => deserialize_op::<InsertOne>(definition.arguments),
@@ -421,21 +424,12 @@ impl<'de> Deserialize<'de> for Operation {
             #[cfg(feature = "in-use-encryption-unstable")]
             "removeKeyAltName" => deserialize_op::<RemoveKeyAltName>(definition.arguments),
             "iterateOnce" => deserialize_op::<IterateOnce>(definition.arguments),
-            "createSearchIndex" => {
-                deserialize_op::<CreateSearchIndex>(definition.arguments)
-            }
-            "createSearchIndexes" => {
-                deserialize_op::<CreateSearchIndexes>(definition.arguments)
-            }
-            "dropSearchIndex" => {
-                deserialize_op::<DropSearchIndex>(definition.arguments)
-            }
-            "listSearchIndexes" => {
-                deserialize_op::<ListSearchIndexes>(definition.arguments)
-            }
-            "updateSearchIndex" => {
-                deserialize_op::<UpdateSearchIndex>(definition.arguments)
-            }
+            "createSearchIndex" => deserialize_op::<CreateSearchIndex>(definition.arguments),
+            "createSearchIndexes" => deserialize_op::<CreateSearchIndexes>(definition.arguments),
+            "dropSearchIndex" => deserialize_op::<DropSearchIndex>(definition.arguments),
+            "listSearchIndexes" => deserialize_op::<ListSearchIndexes>(definition.arguments),
+            "updateSearchIndex" => deserialize_op::<UpdateSearchIndex>(definition.arguments),
+            "clientBulkWrite" => deserialize_op::<BulkWrite>(definition.arguments),
             s => Ok(Box::new(UnimplementedOperation {
                 _name: s.to_string(),
             }) as Box<dyn TestOperation>),
@@ -1360,9 +1354,8 @@ impl TestOperation for FailPointCommand {
     ) -> BoxFuture<'a, ()> {
         async move {
             let client = test_runner.get_client(&self.client).await;
-            let guard = self
-                .fail_point
-                .enable(&client, Some(ReadPreference::Primary.into()))
+            let guard = client
+                .enable_fail_point(self.fail_point.clone())
                 .await
                 .unwrap();
             test_runner.fail_point_guards.write().await.push(guard);
@@ -1393,16 +1386,16 @@ impl TestOperation for TargetedFailPoint {
                     .unwrap_or_else(|| panic!("ClientSession not pinned"))
             })
             .await;
-        let fail_point_guard = test_runner
+        let guard = test_runner
             .internal_client
-            .enable_failpoint(self.fail_point.clone(), Some(selection_criteria))
+            .enable_fail_point(
+                self.fail_point
+                    .clone()
+                    .selection_criteria(selection_criteria),
+            )
             .await
             .unwrap();
-        test_runner
-            .fail_point_guards
-            .write()
-            .await
-            .push(fail_point_guard);
+        test_runner.fail_point_guards.write().await.push(guard);
     }
     .boxed()
 }
@@ -1842,7 +1835,7 @@ impl TestOperation for AssertSessionNotDirty {
 }

 #[derive(Debug, Deserialize)]
-#[serde(rename_all = "camelCase")]
+#[serde(rename_all = "camelCase", deny_unknown_fields)]
 pub(super) struct StartTransaction {
     #[serde(flatten)]
     options: TransactionOptions,
@@ -2489,12 +2482,14 @@ impl TestOperation for Loop {
                         self.report_success(&mut entities);
                     }
                     (Err(error), Expectation::Error(ref expected_error)) => {
-                        match expected_error.verify_result(&error, operation.name.as_str()) {
+                        match catch_unwind(AssertUnwindSafe(|| {
+                            expected_error.verify_result(&error, operation.name.as_str())
+                        })) {
                             Ok(_) => self.report_success(&mut entities),
-                            Err(e) => report_error_or_failure!(
+                            Err(_) => report_error_or_failure!(
                                 self.store_failures_as_entity,
                                 self.store_errors_as_entity,
-                                e,
+                                format!("expected {:?}, got {:?}", expected_error, error),
                                 &mut entities
                             ),
                         }
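Context for the `Loop` hunk above: `ExpectError::verify_result` now panics instead of returning a `Result` (see the test_file.rs hunk later in this diff), but `Loop` must keep running and record failures as entities, so it converts the panic back into a branchable value with `catch_unwind`. A self-contained sketch of that pattern:

```rust
use std::panic::{catch_unwind, AssertUnwindSafe};

// Sketch: turn a panicking assertion into a reportable boolean. AssertUnwindSafe
// is reasonable here because the closure only reads its captures and asserts;
// it does not leave shared state half-updated when it unwinds.
fn passes(check: impl FnOnce()) -> bool {
    catch_unwind(AssertUnwindSafe(check)).is_ok()
}

fn main() {
    assert!(passes(|| assert_eq!(2 + 2, 4)));
    assert!(!passes(|| panic!("expected X, got Y")));
}
```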
diff --git a/src/test/spec/unified_runner/operation/bulk_write.rs b/src/test/spec/unified_runner/operation/bulk_write.rs
new file mode 100644
index 000000000..b16134a05
--- /dev/null
+++ b/src/test/spec/unified_runner/operation/bulk_write.rs
@@ -0,0 +1,198 @@
+use futures_core::future::BoxFuture;
+use futures_util::FutureExt;
+use serde::Deserialize;
+
+use crate::{
+    bson::{Array, Bson, Document},
+    coll::options::UpdateModifications,
+    error::Result,
+    options::{BulkWriteOptions, WriteModel},
+    test::spec::unified_runner::{Entity, TestRunner},
+    ClientSession,
+    Namespace,
+};
+
+use super::{with_mut_session, TestOperation};
+
+#[derive(Debug, Deserialize)]
+#[serde(rename_all = "camelCase", deny_unknown_fields)]
+pub(super) struct BulkWrite {
+    session: Option<String>,
+    models: Vec<WriteModel>,
+    #[serde(flatten)]
+    options: BulkWriteOptions,
+}
+
+impl<'de> Deserialize<'de> for WriteModel {
+    fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
+    where
+        D: serde::Deserializer<'de>,
+    {
+        #[derive(Debug, Deserialize)]
+        #[serde(rename_all = "camelCase")]
+        enum WriteModelHelper {
+            InsertOne {
+                namespace: Namespace,
+                document: Document,
+            },
+            #[serde(rename_all = "camelCase")]
+            UpdateOne {
+                namespace: Namespace,
+                filter: Document,
+                update: UpdateModifications,
+                array_filters: Option<Array>,
+                collation: Option<Document>,
+                hint: Option<Bson>,
+                upsert: Option<bool>,
+            },
+            #[serde(rename_all = "camelCase")]
+            UpdateMany {
+                namespace: Namespace,
+                filter: Document,
+                update: UpdateModifications,
+                array_filters: Option<Array>,
+                collation: Option<Document>,
+                hint: Option<Bson>,
+                upsert: Option<bool>,
+            },
+            #[serde(rename_all = "camelCase")]
+            ReplaceOne {
+                namespace: Namespace,
+                filter: Document,
+                replacement: Document,
+                collation: Option<Document>,
+                hint: Option<Bson>,
+                upsert: Option<bool>,
+            },
+            DeleteOne {
+                namespace: Namespace,
+                filter: Document,
+                collation: Option<Document>,
+                hint: Option<Bson>,
+            },
+            DeleteMany {
+                namespace: Namespace,
+                filter: Document,
+                collation: Option<Document>,
+                hint: Option<Bson>,
+            },
+        }
+
+        let helper = WriteModelHelper::deserialize(deserializer)?;
+        let model = match helper {
+            WriteModelHelper::InsertOne {
+                namespace,
+                document,
+            } => WriteModel::InsertOne {
+                namespace,
+                document,
+            },
+            WriteModelHelper::UpdateOne {
+                namespace,
+                filter,
+                update,
+                array_filters,
+                collation,
+                hint,
+                upsert,
+            } => WriteModel::UpdateOne {
+                namespace,
+                filter,
+                update,
+                array_filters,
+                collation,
+                hint,
+                upsert,
+            },
+            WriteModelHelper::UpdateMany {
+                namespace,
+                filter,
+                update,
+                array_filters,
+                collation,
+                hint,
+                upsert,
+            } => WriteModel::UpdateMany {
+                namespace,
+                filter,
+                update,
+                array_filters,
+                collation,
+                hint,
+                upsert,
+            },
+            WriteModelHelper::ReplaceOne {
+                namespace,
+                filter,
+                replacement,
+                collation,
+                hint,
+                upsert,
+            } => WriteModel::ReplaceOne {
+                namespace,
+                filter,
+                replacement,
+                collation,
+                hint,
+                upsert,
+            },
+            WriteModelHelper::DeleteOne {
+                namespace,
+                filter,
+                collation,
+                hint,
+            } => WriteModel::DeleteOne {
+                namespace,
+                filter,
+                collation,
+                hint,
+            },
+            WriteModelHelper::DeleteMany {
+                namespace,
+                filter,
+                collation,
+                hint,
+            } => WriteModel::DeleteMany {
+                namespace,
+                filter,
+                collation,
+                hint,
+            },
+        };
+
+        Ok(model)
+    }
+}
+
+impl TestOperation for BulkWrite {
+    fn execute_entity_operation<'a>(
+        &'a self,
+        id: &'a str,
+        test_runner: &'a TestRunner,
+    ) -> BoxFuture<'a, Result<Option<Entity>>> {
+        async move {
+            let client = test_runner.get_client(id).await;
+            let result = match self.session {
+                Some(ref session_id) => {
+                    with_mut_session!(test_runner, session_id, |session| async {
+                        client
+                            .bulk_write(self.models.clone())
+                            .with_options(self.options.clone())
+                            .session(session)
+                            .await
+                    })
+                    .await
+                }
+                None => {
+                    client
+                        .bulk_write(self.models.clone())
+                        .with_options(self.options.clone())
+                        .await
+                }
+            }?;
+            let bson = bson::to_bson(&result)?;
+            Ok(Some(bson.into()))
+        }
+        .boxed()
+    }
+}
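Because `WriteModelHelper` is an externally tagged, camelCase serde enum, the unified test format spells each model as a single-key document. A sketch of a document that should deserialize through the impl above — this assumes `Namespace`'s `"db.coll"` string form, as used in the spec tests, and the field values are purely illustrative:

```rust
use bson::doc;

fn main() {
    // Sketch: one unified-format model document deserializing into
    // WriteModel::UpdateOne via the helper enum above.
    let model_doc = doc! {
        "updateOne": {
            "namespace": "db.coll",
            "filter": { "_id": 1 },
            "update": { "$inc": { "x": 1 } },
            "upsert": true,
        }
    };
    let model: WriteModel = bson::from_document(model_doc).unwrap();
    println!("{model:?}");
}
```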
diff --git a/src/test/spec/unified_runner/test_file.rs b/src/test/spec/unified_runner/test_file.rs
index 6bb2b68b7..26d0f8038 100644
--- a/src/test/spec/unified_runner/test_file.rs
+++ b/src/test/spec/unified_runner/test_file.rs
@@ -1,8 +1,7 @@
-#[cfg(feature = "tracing-unstable")]
-use std::collections::HashMap;
-use std::{borrow::Cow, fmt::Write, sync::Arc, time::Duration};
+use std::{borrow::Cow, collections::HashMap, fmt::Write, sync::Arc, time::Duration};

 use percent_encoding::NON_ALPHANUMERIC;
+use pretty_assertions::assert_eq;
 use regex::Regex;
 use semver::{Version, VersionReq};
 use serde::{Deserialize, Deserializer};
@@ -16,7 +15,7 @@ use crate::{
     bson::{doc, Bson, Deserializer as BsonDeserializer, Document},
     client::options::{ServerApi, ServerApiVersion},
     concern::{Acknowledgment, ReadConcernLevel},
-    error::Error,
+    error::{ClientBulkWriteError, Error, ErrorKind},
     gridfs::options::GridFsBucketOptions,
     options::{
         ClientOptions,
error_code, "{context}"); } if let Some(expected_code_name) = &self.error_code_name { - match error.code_name() { - Some(name) => { - if name != expected_code_name { - return Err(format!( - "{}: error code name \"{}\" did not match expected error code name \ - \"{}\"", - description, name, expected_code_name, - )); - } - } - None => { - return Err(format!( - "{}: {:?} was expected to include code name \"{}\" but had no code name", - description, error, expected_code_name - )) - } - } + let Some(actual_code_name) = error.code_name() else { + panic!("{}: expected error to have code name", context); + }; + assert_eq!(actual_code_name, expected_code_name, "{}", context); } + if let Some(error_labels_contain) = &self.error_labels_contain { for label in error_labels_contain { - if !error.contains_label(label) { - return Err(format!( - "{}: expected {:?} to contain label \"{}\"", - description, error, label - )); - } + assert!(error.contains_label(label), "{}", context); } } + if let Some(error_labels_omit) = &self.error_labels_omit { for label in error_labels_omit { - if error.contains_label(label) { - return Err(format!( - "{}: expected {:?} to omit label \"{}\"", - description, error, label - )); - } + assert!(!error.contains_label(label), "{}", context); } } - if self.expect_result.is_some() { - // TODO RUST-260: match against partial results + + if let Some(ref expected_result) = self.expect_result { + let actual_result = match *error.kind { + ErrorKind::ClientBulkWrite(ClientBulkWriteError { + partial_result: Some(ref partial_result), + .. + }) => Some( + bson::to_bson(partial_result) + .map_err(|e| e.to_string()) + .unwrap(), + ), + _ => None, + }; + results_match(actual_result.as_ref(), expected_result, false, None).expect(&context); + } + + if let Some(ref write_errors) = self.write_errors { + let ErrorKind::ClientBulkWrite(ClientBulkWriteError { + write_errors: ref actual_write_errors, + .. + }) = *error.kind + else { + panic!("{context}expected client bulk write error"); + }; + + for (expected_index, expected_error) in write_errors { + let actual_error = actual_write_errors.get(expected_index).expect(&context); + let actual_error = bson::to_bson(&actual_error) + .map_err(|e| e.to_string()) + .expect(&context); + results_match(Some(&actual_error), expected_error, true, None).expect(&context); + } + } + + if let Some(ref write_concern_errors) = self.write_concern_errors { + let ErrorKind::ClientBulkWrite(ClientBulkWriteError { + write_concern_errors: ref actual_write_concern_errors, + .. 
diff --git a/src/test/spec/unified_runner/test_runner.rs b/src/test/spec/unified_runner/test_runner.rs
index 42f6d3e5a..1154bfcfc 100644
--- a/src/test/spec/unified_runner/test_runner.rs
+++ b/src/test/spec/unified_runner/test_runner.rs
@@ -21,7 +21,7 @@ use crate::{
         test_file::{ExpectedEventType, TestFile},
     },
     update_options_for_testing,
-    util::FailPointGuard,
+    util::fail_point::FailPointGuard,
     TestClient,
     DEFAULT_URI,
     LOAD_BALANCED_MULTIPLE_URI,
@@ -69,7 +69,7 @@ const SKIPPED_OPERATIONS: &[&str] = &[
 ];

 static MIN_SPEC_VERSION: Version = Version::new(1, 0, 0);
-static MAX_SPEC_VERSION: Version = Version::new(1, 17, 0);
+static MAX_SPEC_VERSION: Version = Version::new(1, 20, 0);

 pub(crate) type EntityMap = HashMap<String, Entity>;

@@ -394,7 +394,7 @@ impl TestRunner {
                 .await
                 .unwrap();

-            assert_eq!(expected_data.documents, actual_data);
+            assert_eq!(actual_data, expected_data.documents);
         }
     }
 }
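A note on the `assert_eq!` argument swap above: with `pretty_assertions` now imported in this module's sibling file, failures print a left/right diff, so passing `(actual, expected)` keeps the left side consistently the actual value across the suite. A trivial sketch:

```rust
use pretty_assertions::assert_eq;

fn main() {
    let actual = vec![1, 2, 3];
    let expected = vec![1, 2, 3];
    // On failure this prints a colored left/right diff; (actual, expected)
    // ordering keeps that orientation uniform.
    assert_eq!(actual, expected);
}
```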
diff --git a/src/test/spec/v2_runner.rs b/src/test/spec/v2_runner.rs
index 709d0435d..e51943466 100644
--- a/src/test/spec/v2_runner.rs
+++ b/src/test/spec/v2_runner.rs
@@ -4,7 +4,7 @@ pub(crate) mod operation;
 pub(crate) mod test_event;
 pub(crate) mod test_file;

-use std::{future::IntoFuture, ops::Deref, sync::Arc, time::Duration};
+use std::{future::IntoFuture, sync::Arc, time::Duration};

 use futures::{future::BoxFuture, FutureExt};
 use semver::VersionReq;
@@ -23,7 +23,10 @@ use crate::{
         get_client_options,
         log_uncaptured,
         spec::deserialize_spec_tests,
-        util::{get_default_name, FailPointGuard},
+        util::{
+            fail_point::{FailPoint, FailPointGuard},
+            get_default_name,
+        },
         TestClient,
         SERVERLESS,
     },
@@ -232,8 +235,9 @@ impl TestContext {
         // Persist fail point guards so they disable post-test.
         let mut fail_point_guards: Vec<FailPointGuard> = Vec::new();
-        if let Some(fail_point) = &test.fail_point {
-            fail_point_guards.push(fail_point.enable(client.deref(), None).await.unwrap());
+        if let Some(ref fail_point) = test.fail_point {
+            let guard = client.enable_fail_point(fail_point.clone()).await.unwrap();
+            fail_point_guards.push(guard);
         }

         // Start the test sessions
@@ -397,7 +401,7 @@ impl<'a> OpRunner<'a> {
                     .unwrap();
             }
             "targetedFailPoint" => {
-                let fail_point = from_bson(
+                let fail_point: FailPoint = from_bson(
                     operation
                         .execute_on_client(&self.internal_client)
                         .await
@@ -413,13 +417,12 @@ impl<'a> OpRunner<'a> {
                     .cloned()
                     .unwrap_or_else(|| panic!("ClientSession is not pinned"));

-                self.fail_point_guards.push(
-                    self.client
-                        .deref()
-                        .enable_failpoint(fail_point, Some(selection_criteria))
-                        .await
-                        .unwrap(),
-                );
+                let guard = self
+                    .client
+                    .enable_fail_point(fail_point.selection_criteria(selection_criteria))
+                    .await
+                    .unwrap();
+                self.fail_point_guards.push(guard);
             }
             other => panic!("unknown operation: {}", other),
         }
diff --git a/src/test/spec/v2_runner/operation.rs b/src/test/spec/v2_runner/operation.rs
index 85f61eae9..cb41a2085 100644
--- a/src/test/spec/v2_runner/operation.rs
+++ b/src/test/spec/v2_runner/operation.rs
@@ -5,7 +5,7 @@ use serde::{de::Deserializer, Deserialize};

 use crate::{
     action::Action,
-    bson::{doc, to_bson, Bson, Deserializer as BsonDeserializer, Document},
+    bson::{doc, Bson, Deserializer as BsonDeserializer, Document},
     client::session::TransactionState,
     db::options::ListCollectionsOptions,
     error::Result,
@@ -36,7 +36,7 @@ use crate::{
         UpdateOptions,
     },
     selection_criteria::{ReadPreference, SelectionCriteria},
-    test::{assert_matches, log_uncaptured, FailPoint, TestClient},
+    test::{assert_matches, log_uncaptured, util::fail_point::FailPoint, TestClient},
     ClientSession,
     Collection,
     Database,
@@ -871,7 +871,11 @@ pub(super) struct TargetedFailPoint {

 impl TestOperation for TargetedFailPoint {
     fn execute_on_client<'a>(&'a self, _client: &'a TestClient) -> BoxFuture<'a, Result<Option<Bson>>> {
-        async move { Ok(Some(to_bson(&self.fail_point)?)) }.boxed()
+        async move {
+            let command_document = bson::to_document(&self.fail_point).unwrap();
+            Ok(Some(command_document.into()))
+        }
+        .boxed()
     }
 }
diff --git a/src/test/spec/v2_runner/test_file.rs b/src/test/spec/v2_runner/test_file.rs
index 0f1c1c988..3b684e252 100644
--- a/src/test/spec/v2_runner/test_file.rs
+++ b/src/test/spec/v2_runner/test_file.rs
@@ -11,8 +11,7 @@ use crate::{
     test::{
         log_uncaptured,
         spec::merge_uri_options,
-        util::is_expected_type,
-        FailPoint,
+        util::{fail_point::FailPoint, is_expected_type},
         Serverless,
         TestClient,
         DEFAULT_URI,
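The v2 `targetedFailPoint` operation now serializes the fail point with `bson::to_document`, which fits because `configureFailPoint` is itself a command document. A sketch of what that serialization yields — this assumes the test-util imports (`FailPoint`, `FailPointMode`) from later in this diff, and the error code is illustrative:

```rust
fn main() {
    let fail_point = FailPoint::fail_command(&["find"], FailPointMode::Times(1)).error_code(2);
    let command = bson::to_document(&fail_point).unwrap();
    // The serde rename on the failure_type field produces the command's key.
    assert_eq!(command.get_str("configureFailPoint").unwrap(), "failCommand");
}
```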
diff --git a/src/test/util.rs b/src/test/util.rs
index 67b264a19..de3ce7ea8 100644
--- a/src/test/util.rs
+++ b/src/test/util.rs
@@ -1,6 +1,6 @@
 mod event;
 pub(crate) mod event_buffer;
-mod failpoint;
+pub(crate) mod fail_point;
 mod matchable;
 #[cfg(feature = "tracing-unstable")]
 mod trace;
@@ -9,7 +9,6 @@ mod trace;
 pub(crate) use self::event::EventClient;
 pub(crate) use self::{
     event::Event,
-    failpoint::{FailCommandOptions, FailPoint, FailPointGuard, FailPointMode},
     matchable::{assert_matches, eq_matches, is_expected_type, MatchErrExt, Matchable},
 };

@@ -27,7 +26,6 @@ use crate::{
     bson::{doc, Bson},
     client::options::ServerAddress,
     hello::{hello_command, HelloCommandResponse},
-    selection_criteria::SelectionCriteria,
 };
 use bson::Document;
 use semver::{Version, VersionReq};
@@ -325,14 +323,6 @@ impl TestClient {
         self.server_info.topology_version.is_some()
     }

-    pub(crate) async fn enable_failpoint(
-        &self,
-        fp: FailPoint,
-        criteria: impl Into<Option<SelectionCriteria>>,
-    ) -> Result<FailPointGuard> {
-        fp.enable(self, criteria).await
-    }
-
     pub(crate) fn auth_enabled(&self) -> bool {
         self.client.options().credential.is_some()
     }
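With the `TestClient` wrapper removed, call sites go through `Client::enable_fail_point` directly (`TestClient` dereferences to `Client`, as the unchanged call syntax in the test hunks above shows). Per the doc comment in the new file that follows, such tests must run on the multi-threaded runtime because the guard blocks in `Drop`. A sketch of a conforming test skeleton — the test name, command, and error code are assumed for illustration:

```rust
// Sketch: guards block in Drop (tokio::task::block_in_place), which panics on
// the current-thread runtime, hence the multi_thread flavor.
#[tokio::test(flavor = "multi_thread")]
async fn my_fail_point_test() {
    let client = Client::test_builder().build().await;
    let fail_point = FailPoint::fail_command(&["ping"], FailPointMode::Times(1))
        .error_code(11601); // illustrative error code
    let _guard = client.enable_fail_point(fail_point).await.unwrap();
    // ... the fail point stays enabled until _guard drops at the end of scope ...
}
```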
diff --git a/src/test/util/fail_point.rs b/src/test/util/fail_point.rs
new file mode 100644
index 000000000..d1d88a056
--- /dev/null
+++ b/src/test/util/fail_point.rs
@@ -0,0 +1,164 @@
+use std::time::Duration;
+
+use serde::{Deserialize, Serialize};
+
+use crate::{
+    bson::{doc, Document},
+    error::Result,
+    selection_criteria::{ReadPreference, SelectionCriteria},
+    test::log_uncaptured,
+    Client,
+};
+
+impl Client {
+    /// Configure a fail point on this client. Any test that calls this method must use the
+    /// #[tokio::test(flavor = "multi_thread")] test annotation. The guard returned from this
+    /// method should remain in scope while the fail point is intended for use. Upon drop, the
+    /// guard will disable the fail point on the server.
+    pub(crate) async fn enable_fail_point(&self, fail_point: FailPoint) -> Result<FailPointGuard> {
+        let command = bson::to_document(&fail_point)?;
+        self.database("admin")
+            .run_command(command)
+            .selection_criteria(fail_point.selection_criteria.clone())
+            .await?;
+
+        Ok(FailPointGuard {
+            client: self.clone(),
+            failure_type: fail_point.failure_type,
+            selection_criteria: fail_point.selection_criteria,
+        })
+    }
+}
+
+#[derive(Clone, Debug, Deserialize, Serialize)]
+pub(crate) struct FailPoint {
+    /// The type of failure to configure. The current valid values are "failCommand" and
+    /// "failGetMoreAfterCursorCheckout".
+    #[serde(rename = "configureFailPoint")]
+    failure_type: String,
+
+    /// The fail point's mode.
+    mode: FailPointMode,
+
+    /// The data associated with the fail point. This includes the commands that should fail and
+    /// the error information that should be returned.
+    #[serde(default)]
+    data: Document,
+
+    /// The selection criteria to use when configuring this fail point.
+    #[serde(skip, default = "primary_selection_criteria")]
+    selection_criteria: SelectionCriteria,
+}
+
+fn primary_selection_criteria() -> SelectionCriteria {
+    ReadPreference::Primary.into()
+}
+
+impl FailPoint {
+    /// Creates a new failCommand FailPoint. Call the various builder methods on the returned
+    /// FailPoint to configure the type of failure that should occur.
+    pub(crate) fn fail_command(command_names: &[&str], mode: FailPointMode) -> Self {
+        let data = doc! { "failCommands": command_names };
+        Self {
+            failure_type: "failCommand".to_string(),
+            mode,
+            data,
+            selection_criteria: ReadPreference::Primary.into(),
+        }
+    }
+
+    /// The appName that a client must use to hit this fail point.
+    pub(crate) fn app_name(mut self, app_name: impl Into<String>) -> Self {
+        self.data.insert("appName", app_name.into());
+        self
+    }
+
+    /// How long the server should block the affected commands. Only available on 4.2.9+ servers.
+    pub(crate) fn block_connection(mut self, block_connection_duration: Duration) -> Self {
+        self.data.insert("blockConnection", true);
+        self.data
+            .insert("blockTimeMS", block_connection_duration.as_millis() as i64);
+        self
+    }
+
+    /// Whether the server should close the connection when the client sends an affected command.
+    /// Defaults to false.
+    pub(crate) fn close_connection(mut self, close_connection: bool) -> Self {
+        self.data.insert("closeConnection", close_connection);
+        self
+    }
+
+    /// The error code to include in the server's reply to an affected command.
+    pub(crate) fn error_code(mut self, error_code: i64) -> Self {
+        self.data.insert("errorCode", error_code);
+        self
+    }
+
+    /// The error labels to include in the server's reply to an affected command. Note that the
+    /// value passed to this method will completely override the labels that the server would
+    /// otherwise return. Only available on 4.4+ servers.
+    pub(crate) fn error_labels(
+        mut self,
+        error_labels: impl IntoIterator<Item = impl Into<String>>,
+    ) -> Self {
+        let error_labels: Vec<String> = error_labels.into_iter().map(Into::into).collect();
+        self.data.insert("errorLabels", error_labels);
+        self
+    }
+
+    /// The write concern error to include in the server's reply to an affected command.
+    pub(crate) fn write_concern_error(mut self, write_concern_error: Document) -> Self {
+        self.data.insert("writeConcernError", write_concern_error);
+        self
+    }
+
+    /// The selection criteria to use when enabling this fail point. Defaults to a primary read
+    /// preference if unspecified.
+    pub(crate) fn selection_criteria(mut self, selection_criteria: SelectionCriteria) -> Self {
+        self.selection_criteria = selection_criteria;
+        self
+    }
+}
+
+#[derive(Clone, Debug, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub(crate) enum FailPointMode {
+    AlwaysOn,
+    Times(i32),
+    Skip(i32),
+    Off,
+    ActivationProbability(f32),
+}
+
+#[derive(Debug)]
+#[must_use]
+pub(crate) struct FailPointGuard {
+    client: Client,
+    failure_type: String,
+    selection_criteria: SelectionCriteria,
+}
+
+impl Drop for FailPointGuard {
+    fn drop(&mut self) {
+        let client = self.client.clone();
+
+        // This forces the Tokio runtime to not finish shutdown until this future has completed.
+        // Unfortunately, this also means that tests using FailPointGuards have to use the
+        // multi-threaded runtime.
+        let result = tokio::task::block_in_place(|| {
+            futures::executor::block_on(async move {
+                client
+                    .database("admin")
+                    .run_command(
+                        doc! { "configureFailPoint": self.failure_type.clone(), "mode": "off" },
+                    )
+                    .selection_criteria(self.selection_criteria.clone())
+                    .await
+            })
+        });
+
+        if let Err(error) = result {
+            log_uncaptured(format!("failed disabling failpoint: {:?}", error));
+        }
+    }
+}
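Since `FailPointMode` is externally tagged and camelCased, `Times(1)` serializes as `{ "times": 1 }`, and the `#[serde(skip)]` on `selection_criteria` keeps it off the wire. A sketch of the full command `enable_fail_point` sends — based on the struct definitions above; note the builder inserts `errorCode` as an `i64`:

```rust
use bson::doc;

fn main() {
    let fail_point = FailPoint::fail_command(&["insert"], FailPointMode::Times(1)).error_code(91);
    let command = bson::to_document(&fail_point).unwrap();
    assert_eq!(
        command,
        doc! {
            "configureFailPoint": "failCommand",
            "mode": { "times": 1 },
            "data": { "failCommands": ["insert"], "errorCode": 91_i64 },
        }
    );
}
```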
{ - "configureFailPoint": "failCommand", - "mode": bson::to_bson(&mode).unwrap(), - "data": data, - }; - FailPoint { command } - } - - pub async fn enable( - &self, - client: &Client, - criteria: impl Into>, - ) -> Result { - let criteria = criteria.into(); - client - .database("admin") - .run_command(self.command.clone()) - .optional(criteria.clone(), |a, c| a.selection_criteria(c)) - .await?; - Ok(FailPointGuard { - failpoint_name: self.name().to_string(), - client: client.clone(), - criteria, - }) - } -} - -#[derive(Debug)] -pub struct FailPointGuard { - client: Client, - failpoint_name: String, - criteria: Option, -} - -impl Drop for FailPointGuard { - fn drop(&mut self) { - let client = self.client.clone(); - let name = self.failpoint_name.clone(); - - // This forces the Tokio runtime to not finish shutdown until this future has completed. - // Unfortunately, this also means that tests using FailPointGuards have to use the - // multi-threaded runtime. - let result = tokio::task::block_in_place(|| { - futures::executor::block_on(async move { - client - .database("admin") - .run_command(doc! { "configureFailPoint": name, "mode": "off" }) - .optional(self.criteria.clone(), |a, c| a.selection_criteria(c)) - .await - }) - }); - - if let Err(e) = result { - println!("failed disabling failpoint: {:?}", e); - } - } -} - -#[derive(Serialize)] -#[serde(rename_all = "camelCase")] -#[allow(unused)] -pub enum FailPointMode { - AlwaysOn, - Times(i32), - Skip(i32), - Off, - ActivationProbability(f32), -} - -#[serde_with::skip_serializing_none] -#[derive(Debug, Default, TypedBuilder, Serialize)] -#[builder(field_defaults(default, setter(into)))] -#[serde(rename_all = "camelCase")] -pub struct FailCommandOptions { - /// The appName that a client must use in order to hit this fail point. - app_name: Option, - - /// If non-null, how long the server should block the affected commands. - /// Only available in 4.2.9+. - #[serde(serialize_with = "serialize_block_connection")] - #[serde(flatten)] - block_connection: Option, - - /// Whether the server should hang up when the client sends an affected command - close_connection: Option, - - /// The error code to include in the server's reply to an affected command. - error_code: Option, - - /// Array of error labels to be included in the server's reply to an affected command. Passing - /// in an empty array suppresses all error labels that would otherwise be returned by the - /// server. The existence of the "errorLabels" field in the failCommand failpoint completely - /// overrides the server's normal error labels adding behaviors for the affected commands. - /// Only available in 4.4+. - error_labels: Option>, - - /// Document to be returned as a write concern error. - write_concern_error: Option, -} - -fn serialize_block_connection( - val: &Option, - serializer: S, -) -> std::result::Result { - match val { - Some(duration) => { - (doc! { "blockConnection": true, "blockTimeMS": duration.as_millis() as i64}) - .serialize(serializer) - } - None => serializer.serialize_none(), - } -}