diff --git a/clients/algoliasearch-client-csharp/algoliasearch/Utils/SearchClientExtensions.cs b/clients/algoliasearch-client-csharp/algoliasearch/Utils/SearchClientExtensions.cs
index bc267f9e8a..8e042ad681 100644
--- a/clients/algoliasearch-client-csharp/algoliasearch/Utils/SearchClientExtensions.cs
+++ b/clients/algoliasearch-client-csharp/algoliasearch/Utils/SearchClientExtensions.cs
@@ -532,6 +532,50 @@ private static int NextDelay(int retryCount)
     return Math.Min(retryCount * 200, 5000);
   }

+  /// <summary>
+  /// Helper: Saves the given array of objects in the given index. The `chunkedBatch` helper is used under the hood, which creates `batch` requests with at most 1000 objects each.
+  /// </summary>
+  /// <param name="indexName">The index in which to perform the request.</param>
+  /// <param name="objects">The list of `objects` to store in the given Algolia `indexName`.</param>
+  /// <param name="options">Add extra http header or query parameters to Algolia.</param>
+  /// <param name="cancellationToken">Cancellation Token to cancel the request.</param>
+  public async Task<List<BatchResponse>> SaveObjectsAsync<T>(string indexName, IEnumerable<T> objects,
+    RequestOptions options = null,
+    CancellationToken cancellationToken = default) where T : class
+  {
+    return await ChunkedBatchAsync(indexName, objects, Action.AddObject, 1000, options, cancellationToken).ConfigureAwait(false);
+  }
+
+  /// <summary>
+  /// Helper: Deletes the records for the given objectIDs. The `chunkedBatch` helper is used under the hood, which creates `batch` requests with at most 1000 objectIDs each.
+  /// </summary>
+  /// <param name="indexName">The index in which to perform the request.</param>
+  /// <param name="objectIDs">The list of `objectIDs` to remove from the given Algolia `indexName`.</param>
+  /// <param name="options">Add extra http header or query parameters to Algolia.</param>
+  /// <param name="cancellationToken">Cancellation Token to cancel the request.</param>
+  public async Task<List<BatchResponse>> DeleteObjectsAsync(string indexName, IEnumerable<string> objectIDs,
+    RequestOptions options = null,
+    CancellationToken cancellationToken = default)
+  {
+    return await ChunkedBatchAsync(indexName, objectIDs.Select(id => new { objectID = id }), Action.DeleteObject, 1000, options, cancellationToken).ConfigureAwait(false);
+  }
+
+  /// <summary>
+  /// Helper: Replaces the object content of all the given objects according to their respective `objectID` field. The `chunkedBatch` helper is used under the hood, which creates `batch` requests with at most 1000 objects each.
+  /// </summary>
+  /// <param name="indexName">The index in which to perform the request.</param>
+  /// <param name="objects">The list of `objects` to update in the given Algolia `indexName`.</param>
+  /// <param name="createIfNotExists">To be provided if non-existing objects are passed; otherwise the call will fail.</param>
+  /// <param name="options">Add extra http header or query parameters to Algolia.</param>
+  /// <param name="cancellationToken">Cancellation Token to cancel the request.</param>
+  public async Task<List<BatchResponse>> PartialUpdateObjectsAsync<T>(string indexName, IEnumerable<T> objects, bool createIfNotExists,
+    RequestOptions options = null,
+    CancellationToken cancellationToken = default) where T : class
+  {
+    return await ChunkedBatchAsync(indexName, objects, createIfNotExists ? Action.PartialUpdateObject : Action.PartialUpdateObjectNoCreate, 1000, options, cancellationToken).ConfigureAwait(false);
+  }
+
   private static async Task<List<T>> CreateIterable<T>(Func<T, Task<T>> executeQuery,
     Func<T, bool> stopCondition)
   {
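Every port in this patch implements the three helpers the same way: slice the input into chunks of at most 1000 elements, wrap each element in a batch request carrying the chosen action, and issue one `batch` call per chunk. A standalone TypeScript sketch of that shared contract, for orientation only — the types and the `send` callback are simplified stand-ins, not the generated client code:

    type Action = 'addObject' | 'deleteObject' | 'partialUpdateObject' | 'partialUpdateObjectNoCreate';
    type BatchRequest = { action: Action; body: Record<string, unknown> };
    type BatchResponse = { taskID: number; objectIDs: string[] };

    async function chunkedBatch(
      indexName: string,
      objects: Array<Record<string, unknown>>,
      action: Action,
      send: (indexName: string, requests: BatchRequest[]) => Promise<BatchResponse>,
      batchSize = 1000,
    ): Promise<BatchResponse[]> {
      const responses: BatchResponse[] = [];
      // One `batch` call per slice of at most `batchSize` objects.
      for (let i = 0; i < objects.length; i += batchSize) {
        const chunk = objects.slice(i, i + batchSize);
        responses.push(await send(indexName, chunk.map((body) => ({ action, body }))));
      }
      return responses;
    }

The save/delete/partialUpdate helpers below are thin wrappers over this loop that pin the action and leave `waitForTasks` off.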
diff --git a/clients/algoliasearch-client-kotlin/client/src/commonMain/kotlin/com/algolia/client/extensions/SearchClient.kt b/clients/algoliasearch-client-kotlin/client/src/commonMain/kotlin/com/algolia/client/extensions/SearchClient.kt
index 4cbe175c53..73ef49f3ea 100644
--- a/clients/algoliasearch-client-kotlin/client/src/commonMain/kotlin/com/algolia/client/extensions/SearchClient.kt
+++ b/clients/algoliasearch-client-kotlin/client/src/commonMain/kotlin/com/algolia/client/extensions/SearchClient.kt
@@ -11,6 +11,7 @@ import io.ktor.util.*
 import kotlinx.datetime.Clock
 import kotlinx.datetime.Instant
 import kotlinx.serialization.json.JsonObject
+import kotlinx.serialization.json.*
 import kotlin.random.Random
 import kotlin.time.Duration
 import kotlin.time.Duration.Companion.milliseconds
@@ -316,6 +317,80 @@ public suspend fun SearchClient.chunkedBatch(
   return tasks
 }

+/**
+ * Helper: Saves the given array of objects in the given index. The `chunkedBatch` helper is used under the hood, which creates `batch` requests with at most 1000 objects each.
+ *
+ * @param indexName The index in which to perform the request.
+ * @param objects The list of objects to index.
+ * @param requestOptions The requestOptions to send along with the query, they will be merged with the transporter requestOptions.
+ * @return The list of responses from the batch requests.
+ */
+public suspend fun SearchClient.saveObjects(
+  indexName: String,
+  objects: List<JsonObject>,
+  requestOptions: RequestOptions? = null,
+): List<BatchResponse> {
+  return this.chunkedBatch(
+    indexName = indexName,
+    objects = objects,
+    action = Action.AddObject,
+    waitForTask = false,
+    batchSize = 1000,
+    requestOptions = requestOptions,
+  )
+}
+
+/**
+ * Helper: Deletes the records for the given objectIDs. The `chunkedBatch` helper is used under the hood, which creates `batch` requests with at most 1000 objectIDs each.
+ *
+ * @param indexName The index in which to perform the request.
+ * @param objectIDs The list of objectIDs to delete from the index.
+ * @param requestOptions The requestOptions to send along with the query, they will be merged with the transporter requestOptions.
+ * @return The list of responses from the batch requests.
+ */
+public suspend fun SearchClient.deleteObjects(
+  indexName: String,
+  objectIDs: List<String>,
+  requestOptions: RequestOptions? = null,
+): List<BatchResponse> {
+  return this.chunkedBatch(
+    indexName = indexName,
+    objects = objectIDs.map { id -> JsonObject(mapOf("objectID" to Json.encodeToJsonElement(id))) },
+    action = Action.DeleteObject,
+    waitForTask = false,
+    batchSize = 1000,
+    requestOptions = requestOptions,
+  )
+}
+
+/**
+ * Helper: Replaces the object content of all the given objects according to their respective `objectID` field. The `chunkedBatch` helper is used under the hood, which creates `batch` requests with at most 1000 objects each.
+ *
+ * @param indexName The index in which to perform the request.
+ * @param objects The list of objects to update in the index.
+ * @param createIfNotExists To be provided if non-existing objects are passed; otherwise the call will fail.
+ * @param requestOptions The requestOptions to send along with the query, they will be merged with the transporter requestOptions.
+ * @return The list of responses from the batch requests.
+ */
+public suspend fun SearchClient.partialUpdateObjects(
+  indexName: String,
+  objects: List<JsonObject>,
+  createIfNotExists: Boolean,
+  requestOptions: RequestOptions? = null,
+): List<BatchResponse> {
+  return this.chunkedBatch(
+    indexName = indexName,
+    objects = objects,
+    action = if (createIfNotExists) Action.PartialUpdateObject else Action.PartialUpdateObjectNoCreate,
+    waitForTask = false,
+    batchSize = 1000,
+    requestOptions = requestOptions,
+  )
+}
+
 /**
  * Push a new set of objects and remove all previous ones. Settings, synonyms and query rules are untouched.
  * Replace all objects in an index without any downtime.
diff --git a/clients/algoliasearch-client-scala/src/main/scala/algoliasearch/extension/package.scala b/clients/algoliasearch-client-scala/src/main/scala/algoliasearch/extension/package.scala
index 71abc85ffb..a5c663c614 100644
--- a/clients/algoliasearch-client-scala/src/main/scala/algoliasearch/extension/package.scala
+++ b/clients/algoliasearch-client-scala/src/main/scala/algoliasearch/extension/package.scala
@@ -197,10 +197,10 @@ package object extension {
    *
    * @param indexName
    *   The index in which to perform the request.
-   * @param records
-   *   The list of records to replace.
+   * @param objects
+   *   The list of objects to save.
    * @param action
-   *   The action to perform on the records.
+   *   The action to perform on the objects.
    * @param waitForTasks
    *   Whether to wait for the tasks to complete.
    * @param batchSize
@@ -212,14 +212,14 @@ package object extension {
    */
   def chunkedBatch(
       indexName: String,
-      records: Seq[Any],
+      objects: Seq[Any],
       action: Action = Action.AddObject,
       waitForTasks: Boolean,
       batchSize: Int = 1000,
       requestOptions: Option[RequestOptions] = None
   )(implicit ec: ExecutionContext): Future[Seq[BatchResponse]] = {
     var futures = Seq.empty[Future[BatchResponse]]
-    records.grouped(batchSize).foreach { chunk =>
+    objects.grouped(batchSize).foreach { chunk =>
       val requests = chunk.map { record =>
         BatchRequest(action = action, body = record)
       }
@@ -244,6 +244,66 @@ package object extension {
     responses
   }

+  /** Helper: Saves the given array of objects in the given index. The `chunkedBatch` helper is used under the hood, which creates `batch` requests with at most 1000 objects each.
+    *
+    * @param indexName
+    *   The index in which to perform the request.
+    * @param objects
+    *   The list of objects to save.
+    * @param requestOptions
+    *   Additional request configuration.
+    * @return
+    *   A future containing the response of the batch operations.
+    */
+  def saveObjects(
+      indexName: String,
+      objects: Seq[Any],
+      requestOptions: Option[RequestOptions] = None
+  )(implicit ec: ExecutionContext): Future[Seq[BatchResponse]] = {
+    chunkedBatch(indexName, objects, Action.AddObject, false, 1000, requestOptions)
+  }
+
+  /** Helper: Deletes the objects for the given objectIDs. The `chunkedBatch` helper is used under the hood, which creates `batch` requests with at most 1000 objectIDs each.
+    *
+    * @param indexName
+    *   The index in which to perform the request.
+    * @param objectIDs
+    *   The list of objectIDs to delete.
+    * @param requestOptions
+    *   Additional request configuration.
+    * @return
+    *   A future containing the response of the batch operations.
+    */
+  def deleteObjects(
+      indexName: String,
+      objectIDs: Seq[String],
+      requestOptions: Option[RequestOptions] = None
+  )(implicit ec: ExecutionContext): Future[Seq[BatchResponse]] = {
+    chunkedBatch(indexName, objectIDs.map(id => Map("objectID" -> id)), Action.DeleteObject, false, 1000, requestOptions)
+  }
+
+  /** Helper: Replaces the object content of all the given objects according to their respective `objectID` field. The `chunkedBatch` helper is used under the hood, which creates `batch` requests with at most 1000 objects each.
+    *
+    * @param indexName
+    *   The index in which to perform the request.
+    * @param objects
+    *   The list of objects to update.
+    * @param createIfNotExists
+    *   To be provided if non-existing objects are passed; otherwise the call will fail.
+    * @param requestOptions
+    *   Additional request configuration.
+    * @return
+    *   A future containing the response of the batch operations.
+    */
+  def partialUpdateObjects(
+      indexName: String,
+      objects: Seq[Any],
+      createIfNotExists: Boolean,
+      requestOptions: Option[RequestOptions] = None
+  )(implicit ec: ExecutionContext): Future[Seq[BatchResponse]] = {
+    chunkedBatch(indexName, objects, if (createIfNotExists) Action.PartialUpdateObject else Action.PartialUpdateObjectNoCreate, false, 1000, requestOptions)
+  }
+
   /** Push a new set of objects and remove all previous ones. Settings, synonyms and query rules are untouched.
    * Replace all objects in an index without any downtime. Internally, this method copies the existing index
    * settings, synonyms and query rules and indexes all passed objects. Finally, the temporary one replaces the
@@ -254,8 +314,8 @@ package object extension {
    *
    * @param indexName
    *   The index in which to perform the request.
-   * @param records
-   *   The list of records to replace.
+   * @param objects
+   *   The list of objects to replace.
    * @param batchSize
    *   The size of the batch. Default is 1000.
    * @param requestOptions
@@ -265,11 +325,11 @@ package object extension {
    */
   def replaceAllObjects(
       indexName: String,
-      records: Seq[Any],
+      objects: Seq[Any],
       batchSize: Int = 1000,
       requestOptions: Option[RequestOptions] = None
   )(implicit ec: ExecutionContext): Future[ReplaceAllObjectsResponse] = {
-    val requests = records.map { record =>
+    val requests = objects.map { record =>
       BatchRequest(action = Action.AddObject, body = record)
     }
     val tmpIndexName = s"${indexName}_tmp_${scala.util.Random.nextInt(100)}"
@@ -287,7 +347,7 @@ package object extension {
       batchResponses <- chunkedBatch(
         indexName = tmpIndexName,
-        records = records,
+        objects = objects,
         action = Action.AddObject,
         waitForTasks = true,
         batchSize = batchSize,
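As the Kotlin and Scala ports above show, `deleteObjects` never needs the full record: each ID is wrapped in a minimal `{ objectID }` body so the generic batch plumbing applies unchanged with the `deleteObject` action. A sketch reusing the hypothetical `chunkedBatch` from earlier (illustrative only):

    async function deleteObjects(
      indexName: string,
      objectIDs: string[],
      send: (indexName: string, requests: BatchRequest[]) => Promise<BatchResponse>,
    ): Promise<BatchResponse[]> {
      // Each objectID becomes a one-field record; the action does the rest server-side.
      return chunkedBatch(indexName, objectIDs.map((objectID) => ({ objectID })), 'deleteObject', send);
    }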
diff --git a/clients/algoliasearch-client-swift/Sources/Search/Extra/SearchClientExtension.swift b/clients/algoliasearch-client-swift/Sources/Search/Extra/SearchClientExtension.swift
index bbfd516885..8601a33d48 100644
--- a/clients/algoliasearch-client-swift/Sources/Search/Extra/SearchClientExtension.swift
+++ b/clients/algoliasearch-client-swift/Sources/Search/Extra/SearchClientExtension.swift
@@ -456,6 +456,71 @@ public extension SearchClient {
         return responses
     }

+    /// Helper: Saves the given array of objects in the given index. The `chunkedBatch` helper is used under the
+    /// hood, which creates `batch` requests with at most 1000 objects each.
+    /// - parameter indexName: The name of the index where to save the objects
+    /// - parameter objects: The new objects
+    /// - parameter requestOptions: The request options
+    /// - returns: [BatchResponse]
+    func saveObjects(
+        indexName: String,
+        objects: [some Encodable],
+        requestOptions: RequestOptions? = nil
+    ) async throws -> [BatchResponse] {
+        try await self.chunkedBatch(
+            indexName: indexName,
+            objects: objects,
+            action: .addObject,
+            waitForTasks: false,
+            batchSize: 1000,
+            requestOptions: requestOptions
+        )
+    }
+
+    /// Helper: Deletes the records for the given objectIDs. The `chunkedBatch` helper is used under the hood,
+    /// which creates `batch` requests with at most 1000 objectIDs each.
+    /// - parameter indexName: The name of the index to delete objectIDs from
+    /// - parameter objectIDs: The objectIDs to delete
+    /// - parameter requestOptions: The request options
+    /// - returns: [BatchResponse]
+    func deleteObjects(
+        indexName: String,
+        objectIDs: [String],
+        requestOptions: RequestOptions? = nil
+    ) async throws -> [BatchResponse] {
+        try await self.chunkedBatch(
+            indexName: indexName,
+            objects: objectIDs.map { AnyCodable(["objectID": $0]) },
+            action: .deleteObject,
+            waitForTasks: false,
+            batchSize: 1000,
+            requestOptions: requestOptions
+        )
+    }
+
+    /// Helper: Replaces the object content of all the given objects according to their respective `objectID`
+    /// field. The `chunkedBatch` helper is used under the hood, which creates `batch` requests with at most
+    /// 1000 objects each.
+    /// - parameter indexName: The name of the index where to update the objects
+    /// - parameter objects: The objects to update
+    /// - parameter createIfNotExist: To be provided if non-existing objects are passed; otherwise the call will fail.
+    /// - parameter requestOptions: The request options
+    /// - returns: [BatchResponse]
+    func partialUpdateObjects(
+        indexName: String,
+        objects: [some Encodable],
+        createIfNotExist: Bool = false,
+        requestOptions: RequestOptions? = nil
+    ) async throws -> [BatchResponse] {
+        try await self.chunkedBatch(
+            indexName: indexName,
+            objects: objects,
+            action: createIfNotExist ? .partialUpdateObject : .partialUpdateObjectNoCreate,
+            waitForTasks: false,
+            batchSize: 1000,
+            requestOptions: requestOptions
+        )
+    }
+
     /// Replace all objects in an index
     ///
     /// See https://api-clients-automation.netlify.app/docs/contributing/add-new-api-client#5-helpers for implementation
diff --git a/specs/search/helpers/deleteObjects.yml b/specs/search/helpers/deleteObjects.yml
new file mode 100644
index 0000000000..4f715a44ac
--- /dev/null
+++ b/specs/search/helpers/deleteObjects.yml
@@ -0,0 +1,35 @@
+method:
+  post:
+    x-helper: true
+    tags:
+      - Records
+    operationId: deleteObjects
+    summary: Deletes the records for the given objectIDs
+    description: |
+      Helper: Deletes the records for the given objectIDs. The `chunkedBatch` helper is used under the hood, which creates `batch` requests with at most 1000 objectIDs each.
+    parameters:
+      - in: query
+        name: indexName
+        description: The `indexName` to delete `objectIDs` from.
+        required: true
+        schema:
+          type: string
+      - in: query
+        name: objectIDs
+        description: The objectIDs to delete.
+        required: true
+        schema:
+          type: array
+          items:
+            type: string
+    responses:
+      '200':
+        description: OK
+        content:
+          application/json:
+            schema:
+              type: array
+              items:
+                $ref: '../paths/objects/common/schemas.yml#/batchResponse'
+      '400':
+        $ref: '../../common/responses/IndexNotFound.yml'
diff --git a/specs/search/helpers/partialUpdateObjects.yml b/specs/search/helpers/partialUpdateObjects.yml
new file mode 100644
index 0000000000..8659833e51
--- /dev/null
+++ b/specs/search/helpers/partialUpdateObjects.yml
@@ -0,0 +1,41 @@
+method:
+  post:
+    x-helper: true
+    tags:
+      - Records
+    operationId: partialUpdateObjects
+    summary: Replaces object content of all the given objects according to their respective `objectID` field
+    description: |
+      Helper: Replaces object content of all the given objects according to their respective `objectID` field. The `chunkedBatch` helper is used under the hood, which creates `batch` requests with at most 1000 objects each.
+    parameters:
+      - in: query
+        name: indexName
+        description: The `indexName` to update `objects` in.
+        required: true
+        schema:
+          type: string
+      - in: query
+        name: objects
+        description: The array of `objects` to update in the given `indexName`.
+        required: true
+        schema:
+          type: array
+          items:
+            type: object
+      - in: query
+        name: createIfNotExists
+        description: To be provided if non-existing objects are passed; otherwise the call will fail.
+        required: false
+        schema:
+          type: boolean
+    responses:
+      '200':
+        description: OK
+        content:
+          application/json:
+            schema:
+              type: array
+              items:
+                $ref: '../paths/objects/common/schemas.yml#/batchResponse'
+      '400':
+        $ref: '../../common/responses/IndexNotFound.yml'
diff --git a/specs/search/helpers/saveObjects.yml b/specs/search/helpers/saveObjects.yml
new file mode 100644
index 0000000000..ffe0ae467c
--- /dev/null
+++ b/specs/search/helpers/saveObjects.yml
@@ -0,0 +1,35 @@
+method:
+  post:
+    x-helper: true
+    tags:
+      - Records
+    operationId: saveObjects
+    summary: Saves the given array of objects in the given index
+    description: |
+      Helper: Saves the given array of objects in the given index. The `chunkedBatch` helper is used under the hood, which creates `batch` requests with at most 1000 objects each.
+    parameters:
+      - in: query
+        name: indexName
+        description: The `indexName` to save `objects` in.
+        required: true
+        schema:
+          type: string
+      - in: query
+        name: objects
+        description: The array of `objects` to store in the given `indexName`.
+        required: true
+        schema:
+          type: array
+          items:
+            type: object
+    responses:
+      '200':
+        description: OK
+        content:
+          application/json:
+            schema:
+              type: array
+              items:
+                $ref: '../paths/objects/common/schemas.yml#/batchResponse'
+      '400':
+        $ref: '../../common/responses/IndexNotFound.yml'
diff --git a/specs/search/spec.yml b/specs/search/spec.yml
index 65c6e2dbb9..cd028cf9e3 100644
--- a/specs/search/spec.yml
+++ b/specs/search/spec.yml
@@ -366,3 +366,12 @@ paths:

   /chunkedBatch:
     $ref: 'helpers/chunkedBatch.yml#/method'
+
+  /saveObjects:
+    $ref: 'helpers/saveObjects.yml#/method'
+
+  /deleteObjects:
+    $ref: 'helpers/deleteObjects.yml#/method'
+
+  /partialUpdateObjects:
+    $ref: 'helpers/partialUpdateObjects.yml#/method'
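The three spec entries above define the helper surface every generated client exposes. For a concrete feel, a usage sketch against the JavaScript client produced from the templates below (assumes an ESM module with top-level await; the app ID, API key, and `products` index are placeholders):

    import { algoliasearch } from 'algoliasearch';

    const client = algoliasearch('YOUR_APP_ID', 'YOUR_API_KEY'); // placeholder credentials

    // Chunked into `batch` calls of at most 1000 objects each.
    await client.saveObjects({
      indexName: 'products',
      objects: [
        { objectID: '1', name: 'Phone' },
        { objectID: '2', name: 'Case' },
      ],
    });

    // `createIfNotExists` selects partialUpdateObject vs partialUpdateObjectNoCreate.
    await client.partialUpdateObjects({
      indexName: 'products',
      objects: [{ objectID: '1', price: 99 }],
      createIfNotExists: true,
    });

    // objectIDs are wrapped as `{ objectID }` records before batching.
    await client.deleteObjects({ indexName: 'products', objectIDs: ['2'] });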
diff --git a/templates/go/search_helpers.mustache b/templates/go/search_helpers.mustache
index 7b75162590..20b2991ea4 100644
--- a/templates/go/search_helpers.mustache
+++ b/templates/go/search_helpers.mustache
@@ -777,6 +777,35 @@ func (c *APIClient) GetSecuredApiKeyRemainingValidity(securedApiKey string) (tim
 	return time.Until(time.Unix(int64(ts), 0)), nil
 }

+// Helper: Saves the given array of objects in the given index. The `chunkedBatch` helper is used under the hood, which creates `batch` requests with at most 1000 objects each.
+func (c *APIClient) SaveObjects(indexName string, objects []map[string]any) ([]BatchResponse, error) {
+	return c.ChunkedBatch(indexName, objects, utils.ToPtr(ACTION_ADD_OBJECT), nil, nil)
+}
+
+// Helper: Deletes the records for the given objectIDs. The `chunkedBatch` helper is used under the hood, which creates `batch` requests with at most 1000 objectIDs each.
+func (c *APIClient) DeleteObjects(indexName string, objectIDs []string) ([]BatchResponse, error) {
+	objects := make([]map[string]any, 0, len(objectIDs))
+
+	for _, id := range objectIDs {
+		objects = append(objects, map[string]any{"objectID": id})
+	}
+
+	return c.ChunkedBatch(indexName, objects, utils.ToPtr(ACTION_DELETE_OBJECT), nil, nil)
+}
+
+// Helper: Replaces the object content of all the given objects according to their respective `objectID` field. The `chunkedBatch` helper is used under the hood, which creates `batch` requests with at most 1000 objects each.
+func (c *APIClient) PartialUpdateObjects(indexName string, objects []map[string]any, createIfNotExists bool) ([]BatchResponse, error) {
+	var action Action
+
+	if createIfNotExists {
+		action = ACTION_PARTIAL_UPDATE_OBJECT
+	} else {
+		action = ACTION_PARTIAL_UPDATE_OBJECT_NO_CREATE
+	}
+
+	return c.ChunkedBatch(indexName, objects, utils.ToPtr(action), nil, nil)
+}
+
 // ChunkedBatch chunks the given `objects` list in subset of 1000 elements max in order to make it fit in `batch` requests.
 func (c *APIClient) ChunkedBatch(indexName string, objects []map[string]any, action *Action, waitForTasks *bool, batchSize *int) ([]BatchResponse, error) {
 	var (
@@ -873,4 +902,4 @@ func (c *APIClient) ReplaceAllObjects(indexName string, objects []map[string]any
 		BatchResponses:        batchResp,
 		MoveOperationResponse: *moveResp,
 	}, nil
-}
+}
\ No newline at end of file
diff --git a/templates/java/api_helpers.mustache b/templates/java/api_helpers.mustache
index 8efff4536a..d316f54c5e 100644
--- a/templates/java/api_helpers.mustache
+++ b/templates/java/api_helpers.mustache
@@ -610,6 +610,61 @@ int batchSize
 return replaceAllObjects(indexName, objects, batchSize, null);
 }

+/**
+* Helper: Saves the given array of objects in the given index. The `chunkedBatch` helper is used under the hood, which creates `batch` requests with at most 1000 objects each.
+*
+* @param indexName The `indexName` to save `objects` in.
+* @param objects The array of `objects` to store in the given Algolia `indexName`.
+* @param requestOptions The requestOptions to send along with the query, they will be merged with the transporter requestOptions. (optional)
+*/
+public <T> List<BatchResponse> saveObjects(
+String indexName,
+Iterable<T> objects,
+RequestOptions requestOptions
+) {
+return chunkedBatch(indexName, objects, Action.ADD_OBJECT, false, 1000, requestOptions);
+}
+
+/**
+* Helper: Deletes the records for the given objectIDs. The `chunkedBatch` helper is used under the hood, which creates `batch` requests with at most 1000 objectIDs each.
+*
+* @param indexName The `indexName` to delete `objectIDs` from.
+* @param objectIDs The array of `objectIDs` to delete from the `indexName`.
+* @param requestOptions The requestOptions to send along with the query, they will be merged with the transporter requestOptions. (optional)
+*/
+public List<BatchResponse> deleteObjects(
+String indexName,
+List<String> objectIDs,
+RequestOptions requestOptions
+) {
+List<Map<String, String>> objects = new ArrayList<>();
+
+for (String id : objectIDs) {
+  Map<String, String> obj = new HashMap<>();
+  obj.put("objectID", id);
+  objects.add(obj);
+}
+
+return chunkedBatch(indexName, objects, Action.DELETE_OBJECT, false, 1000, requestOptions);
+}
+
+/**
+* Helper: Replaces the object content of all the given objects according to their respective `objectID` field. The `chunkedBatch` helper is used under the hood, which creates `batch` requests with at most 1000 objects each.
+*
+* @param indexName The `indexName` to update `objects` in.
+* @param objects The array of `objects` to update in the given Algolia `indexName`.
+* @param createIfNotExists To be provided if non-existing objects are passed; otherwise the call will fail.
+* @param requestOptions The requestOptions to send along with the query, they will be merged with the transporter requestOptions. (optional)
+*/
+public <T> List<BatchResponse> partialUpdateObjects(
+String indexName,
+Iterable<T> objects,
+boolean createIfNotExists,
+RequestOptions requestOptions
+) {
+return chunkedBatch(indexName, objects, createIfNotExists ? Action.PARTIAL_UPDATE_OBJECT : Action.PARTIAL_UPDATE_OBJECT_NO_CREATE, false, 1000, requestOptions);
+}
+
 /**
  * Push a new set of objects and remove all previous ones. Settings, synonyms and query rules are
  * untouched. Replace all records in an index without any downtime.
diff --git a/templates/javascript/clients/client/api/helpers.mustache b/templates/javascript/clients/client/api/helpers.mustache
index 95ede9f4b4..a9a6daa0ab 100644
--- a/templates/javascript/clients/client/api/helpers.mustache
+++ b/templates/javascript/clients/client/api/helpers.mustache
@@ -331,6 +331,74 @@ async chunkedBatch({ indexName, objects, action = 'addObject', waitForTasks, bat
   return responses;
 },

+/**
+ * Helper: Saves the given array of objects in the given index. The `chunkedBatch` helper is used under the hood, which creates `batch` requests with at most 1000 objects each.
+ *
+ * @summary Helper: Saves the given array of objects in the given index.
+ * @param saveObjects - The `saveObjects` object.
+ * @param saveObjects.indexName - The `indexName` to save `objects` in.
+ * @param saveObjects.objects - The array of `objects` to store in the given Algolia `indexName`.
+ * @param requestOptions - The requestOptions to send along with the query, they will be forwarded to the `batch` method and merged with the transporter requestOptions.
+ */
+async saveObjects(
+  { indexName, objects }: SaveObjectsOptions,
+  requestOptions?: RequestOptions
+): Promise<BatchResponse[]> {
+  return await this.chunkedBatch(
+    { indexName, objects, action: 'addObject' },
+    requestOptions
+  );
+},
+
+/**
+ * Helper: Deletes the records for the given objectIDs. The `chunkedBatch` helper is used under the hood, which creates `batch` requests with at most 1000 objectIDs each.
+ *
+ * @summary Helper: Deletes the records for the given objectIDs.
+ * @param deleteObjects - The `deleteObjects` object.
+ * @param deleteObjects.indexName - The `indexName` to delete `objectIDs` from.
+ * @param deleteObjects.objectIDs - The objectIDs to delete.
+ * @param requestOptions - The requestOptions to send along with the query, they will be forwarded to the `batch` method and merged with the transporter requestOptions.
+ */
+async deleteObjects(
+  { indexName, objectIDs }: DeleteObjectsOptions,
+  requestOptions?: RequestOptions
+): Promise<BatchResponse[]> {
+  return await this.chunkedBatch(
+    {
+      indexName,
+      objects: objectIDs.map((objectID) => ({ objectID })),
+      action: 'deleteObject',
+    },
+    requestOptions
+  );
+},
+
+/**
+ * Helper: Replaces the object content of all the given objects according to their respective `objectID` field. The `chunkedBatch` helper is used under the hood, which creates `batch` requests with at most 1000 objects each.
+ *
+ * @summary Helper: Replaces the object content of all the given objects according to their respective `objectID` field.
+ * @param partialUpdateObjects - The `partialUpdateObjects` object.
+ * @param partialUpdateObjects.indexName - The `indexName` to update `objects` in.
+ * @param partialUpdateObjects.objects - The array of `objects` to update in the given Algolia `indexName`.
+ * @param partialUpdateObjects.createIfNotExists - To be provided if non-existing objects are passed; otherwise the call will fail.
+ * @param requestOptions - The requestOptions to send along with the query, they will be forwarded to the `batch` method and merged with the transporter requestOptions.
+ */
+async partialUpdateObjects(
+  { indexName, objects, createIfNotExists }: PartialUpdateObjectsOptions,
+  requestOptions?: RequestOptions
+): Promise<BatchResponse[]> {
+  return await this.chunkedBatch(
+    {
+      indexName,
+      objects,
+      action: createIfNotExists
+        ? 'partialUpdateObject'
+        : 'partialUpdateObjectNoCreate',
+    },
+    requestOptions
+  );
+},
+
 /**
  * Helper: Replaces all objects (records) in the given `index_name` with the given `objects`. A temporary index is created during this process in order to backup your data.
  * See https://api-clients-automation.netlify.app/docs/contributing/add-new-api-client#5-helpers for implementation details.
@@ -340,7 +408,7 @@ async chunkedBatch({ indexName, objects, action = 'addObject', waitForTasks, bat
  * @param replaceAllObjects.indexName - The `indexName` to replace `objects` in.
  * @param replaceAllObjects.objects - The array of `objects` to store in the given Algolia `indexName`.
  * @param replaceAllObjects.batchSize - The size of the chunk of `objects`. The number of `batch` calls will be equal to `objects.length / batchSize`. Defaults to 1000.
- * @param requestOptions - The requestOptions to send along with the query, they will be forwarded to the `getTask` method and merged with the transporter requestOptions.
+ * @param requestOptions - The requestOptions to send along with the query, they will be forwarded to the `batch`, `operationIndex` and `getTask` methods and merged with the transporter requestOptions.
  */
 async replaceAllObjects(
   { indexName, objects, batchSize }: ReplaceAllObjectsOptions,
@@ -350,16 +418,16 @@ async replaceAllObjects(
   const tmpIndexName = `${indexName}_tmp_${randomSuffix}`;

   let copyOperationResponse = await this.operationIndex(
-      {
-        indexName,
-        operationIndexParams: {
-          operation: 'copy',
-          destination: tmpIndexName,
-          scope: ['settings', 'rules', 'synonyms'],
-        },
+    {
+      indexName,
+      operationIndexParams: {
+        operation: 'copy',
+        destination: tmpIndexName,
+        scope: ['settings', 'rules', 'synonyms'],
       },
-      requestOptions
-    );
+    },
+    requestOptions
+  );

   const batchResponses = await this.chunkedBatch(
     { indexName: tmpIndexName, objects, waitForTasks: true, batchSize },
     requestOptions
   );

   await this.waitForTask({
     indexName: tmpIndexName,
     taskID: copyOperationResponse.taskID,
   });

   copyOperationResponse = await this.operationIndex(
-      {
-        indexName,
-        operationIndexParams: {
-          operation: 'copy',
-          destination: tmpIndexName,
-          scope: ['settings', 'rules', 'synonyms'],
-        },
+    {
+      indexName,
+      operationIndexParams: {
+        operation: 'copy',
+        destination: tmpIndexName,
+        scope: ['settings', 'rules', 'synonyms'],
       },
-      requestOptions
-    );
+    },
+    requestOptions
+  );

   await this.waitForTask({
     indexName: tmpIndexName,
     taskID: copyOperationResponse.taskID,
   });

   return { copyOperationResponse, batchResponses, moveOperationResponse };
-},
+},
\ No newline at end of file
diff --git a/templates/javascript/clients/client/api/imports.mustache b/templates/javascript/clients/client/api/imports.mustache
index eb9b0c94f1..1dbbabfff5 100644
--- a/templates/javascript/clients/client/api/imports.mustache
+++ b/templates/javascript/clients/client/api/imports.mustache
@@ -30,10 +30,13 @@ import type {
 {{#isSearchClient}}
   BrowseOptions,
   ChunkedBatchOptions,
+  DeleteObjectsOptions,
+  PartialUpdateObjectsOptions,
   ReplaceAllObjectsOptions,
+  SaveObjectsOptions,
   WaitForApiKeyOptions,
-  WaitForTaskOptions,
   WaitForAppTaskOptions,
+  WaitForTaskOptions,
 {{/isSearchClient}}
 {{#operation}}
 {{#vendorExtensions}}
diff --git a/templates/javascript/clients/client/model/clientMethodProps.mustache b/templates/javascript/clients/client/model/clientMethodProps.mustache
index 9b185432f2..379b4aa0be 100644
--- a/templates/javascript/clients/client/model/clientMethodProps.mustache
+++ b/templates/javascript/clients/client/model/clientMethodProps.mustache
@@ -125,6 +125,28 @@
 export type GetSecuredApiKeyRemainingValidityOptions = {
   securedApiKey: string;
 }

+export type DeleteObjectsOptions = Pick<ChunkedBatchOptions, 'indexName'> & {
+  /**
+   * The objectIDs to delete.
+   */
+  objectIDs: string[];
+};
+
+export type PartialUpdateObjectsOptions = Pick<
+  ChunkedBatchOptions,
+  'indexName' | 'objects'
+> & {
+  /**
+   * To be provided if non-existing objects are passed; otherwise the call will fail.
+   */
+  createIfNotExists?: boolean;
+};
+
+export type SaveObjectsOptions = Pick<
+  ChunkedBatchOptions,
+  'indexName' | 'objects'
+>;
+
 export type ChunkedBatchOptions = ReplaceAllObjectsOptions & {
   /**
    * The `batch` `action` to perform on the given array of `objects`, defaults to `addObject`.
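A note on the option types above: `SaveObjectsOptions`, `DeleteObjectsOptions`, and `PartialUpdateObjectsOptions` are all carved out of `ChunkedBatchOptions` with `Pick`, so each helper exposes only the fields it actually forwards. A self-contained sketch of how that composes (simplified stand-ins, not the template's exact types):

    // The real ChunkedBatchOptions extends ReplaceAllObjectsOptions and types
    // `action` as the Action enum; this stand-in keeps only the shape.
    type ChunkedBatchOptions = {
      indexName: string;
      objects: Array<Record<string, unknown>>;
      action?: string;
      waitForTasks?: boolean;
      batchSize?: number;
    };

    type SaveObjectsOptions = Pick<ChunkedBatchOptions, 'indexName' | 'objects'>;
    type DeleteObjectsOptions = Pick<ChunkedBatchOptions, 'indexName'> & { objectIDs: string[] };
    type PartialUpdateObjectsOptions = Pick<ChunkedBatchOptions, 'indexName' | 'objects'> & {
      createIfNotExists?: boolean;
    };

    // `action`, `waitForTasks`, and `batchSize` are deliberately absent from these
    // surfaces: the helpers pin them, so passing them in a literal is a type error.
    const del: DeleteObjectsOptions = { indexName: 'products', objectIDs: ['1', '2'] };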
diff --git a/templates/php/api.mustache b/templates/php/api.mustache
index 4669600d95..a7c7e92f07 100644
--- a/templates/php/api.mustache
+++ b/templates/php/api.mustache
@@ -498,15 +498,55 @@ use {{invokerPackage}}\Support\Helpers;
   }

   /**
-   * Helper: Chunks the given `objects` list in subset of 1000 elements max in order to make it fit in `batch` requests.
-   *
-   * @param string $indexName the `indexName` to replace `objects` in
-   * @param array  $objects the array of `objects` to store in the given Algolia `indexName`
-   * @param array  $action the `batch` `action` to perform on the given array of `objects`, defaults to `addObject`
-   * @param array  $waitForTasks whether or not we should wait until every `batch` tasks has been processed, this operation may slow the total execution time of this method but is more reliable
-   * @param array  $batchSize The size of the chunk of `objects`. The number of `batch` calls will be equal to `length(objects) / batchSize`. Defaults to 1000.
-   * @param array  $requestOptions Request options
-   */
+   * Helper: Saves the given array of objects in the given index. The `chunkedBatch` helper is used under the hood, which creates `batch` requests with at most 1000 objects each.
+   *
+   * @param string $indexName The `indexName` to save `objects` in.
+   * @param array  $objects The array of `objects` to store in the given Algolia `indexName`.
+   * @param array  $requestOptions Request options
+   */
+  public function saveObjects($indexName, $objects, $requestOptions = []) {
+    return $this->chunkedBatch($indexName, $objects, 'addObject', false, 1000, $requestOptions);
+  }
+
+  /**
+   * Helper: Deletes the records for the given objectIDs. The `chunkedBatch` helper is used under the hood, which creates `batch` requests with at most 1000 objectIDs each.
+   *
+   * @param string $indexName The `indexName` to delete `objectIDs` from.
+   * @param array  $objectIDs The `objectIDs` to delete.
+   * @param array  $requestOptions Request options
+   */
+  public function deleteObjects($indexName, $objectIDs, $requestOptions = []) {
+    $objects = [];
+
+    foreach ($objectIDs as $id) {
+      $objects[] = ['objectID' => $id];
+    }
+
+    return $this->chunkedBatch($indexName, $objects, 'deleteObject', false, 1000, $requestOptions);
+  }
+
+  /**
+   * Helper: Replaces the object content of all the given objects according to their respective `objectID` field. The `chunkedBatch` helper is used under the hood, which creates `batch` requests with at most 1000 objects each.
+   *
+   * @param string $indexName The `indexName` to update `objects` in.
+   * @param array  $objects The array of `objects` to update in the given Algolia `indexName`.
+   * @param bool   $createIfNotExists To be provided if non-existing objects are passed; otherwise the call will fail.
+   * @param array  $requestOptions Request options
+   */
+  public function partialUpdateObjects($indexName, $objects, $createIfNotExists, $requestOptions = []) {
+    return $this->chunkedBatch($indexName, $objects, $createIfNotExists ? 'partialUpdateObject' : 'partialUpdateObjectNoCreate', false, 1000, $requestOptions);
+  }
+
+  /**
+   * Helper: Chunks the given `objects` list in subset of 1000 elements max in order to make it fit in `batch` requests.
+   *
+   * @param string $indexName the `indexName` to replace `objects` in
+   * @param array  $objects the array of `objects` to store in the given Algolia `indexName`
+   * @param array  $action the `batch` `action` to perform on the given array of `objects`, defaults to `addObject`
+   * @param array  $waitForTasks whether or not we should wait until every `batch` tasks has been processed, this operation may slow the total execution time of this method but is more reliable
+   * @param array  $batchSize The size of the chunk of `objects`. The number of `batch` calls will be equal to `length(objects) / batchSize`. Defaults to 1000.
+   * @param array  $requestOptions Request options
+   */
   public function chunkedBatch(
     $indexName,
     $objects,
@@ -617,4 +657,4 @@ use {{invokerPackage}}\Support\Helpers;
     );
   }
 }
-{{/operations}}
+{{/operations}}
\ No newline at end of file
diff --git a/templates/python/search_helpers.mustache b/templates/python/search_helpers.mustache
index c1337b8181..10d1bb0a21 100644
--- a/templates/python/search_helpers.mustache
+++ b/templates/python/search_helpers.mustache
@@ -236,11 +236,42 @@
         """
         return "{}_tmp_{}".format(index_name, randint(1000000, 9999999))

+    async def save_objects(
+        self,
+        index_name: str,
+        objects: List[Dict[str, Any]],
+    ) -> List[BatchResponse]:
+        """
+        Helper: Saves the given array of objects in the given index. The `chunked_batch` helper is used under the hood, which creates `batch` requests with at most 1000 objects each.
+        """
+        return await self.chunked_batch(index_name=index_name, objects=objects, action=Action.ADDOBJECT)
+
+    async def delete_objects(
+        self,
+        index_name: str,
+        object_ids: List[str],
+    ) -> List[BatchResponse]:
+        """
+        Helper: Deletes the records for the given objectIDs. The `chunked_batch` helper is used under the hood, which creates `batch` requests with at most 1000 objectIDs each.
+        """
+        return await self.chunked_batch(index_name=index_name, objects=[{"objectID": id} for id in object_ids], action=Action.DELETEOBJECT)
+
+    async def partial_update_objects(
+        self,
+        index_name: str,
+        objects: List[Dict[str, Any]],
+        create_if_not_exists: Optional[bool] = False,
+    ) -> List[BatchResponse]:
+        """
+        Helper: Replaces the object content of all the given objects according to their respective `objectID` field. The `chunked_batch` helper is used under the hood, which creates `batch` requests with at most 1000 objects each.
+        """
+        return await self.chunked_batch(index_name=index_name, objects=objects, action=Action.PARTIALUPDATEOBJECT if create_if_not_exists else Action.PARTIALUPDATEOBJECTNOCREATE)
+
     async def chunked_batch(
         self,
         index_name: str,
         objects: List[Dict[str, Any]],
-        action: Action = "addObject",
+        action: Action = Action.ADDOBJECT,
         wait_for_tasks: bool = False,
         batch_size: int = 1000,
         request_options: Optional[Union[dict, RequestOptions]] = None,
diff --git a/templates/ruby/search_helpers.mustache b/templates/ruby/search_helpers.mustache
index c8828e8628..f7d2be6e32 100644
--- a/templates/ruby/search_helpers.mustache
+++ b/templates/ruby/search_helpers.mustache
@@ -203,6 +203,64 @@ def get_secured_api_key_remaining_validity(secured_api_key)
   valid_until - now
 end

+# Helper: Saves the given array of objects in the given index. The `chunked_batch` helper is used under the hood, which creates `batch` requests with at most 1000 objects each.
+#
+# @param index_name [String]: The `index_name` to save `objects` in.
+# @param objects [Array]: The array of `objects` to store in the given Algolia `indexName`.
+# @param request_options: The request options to send along with the query, they will be merged with the transporter base parameters (headers, query params, timeouts, etc.). (optional)
+#
+# @return [Array<BatchResponse>]
+#
+def save_objects(index_name, objects, request_options = {})
+  chunked_batch(
+    index_name,
+    objects,
+    Search::Action::ADD_OBJECT,
+    false,
+    1000,
+    request_options
+  )
+end
+
+# Helper: Deletes the records for the given objectIDs. The `chunked_batch` helper is used under the hood, which creates `batch` requests with at most 1000 objectIDs each.
+#
+# @param index_name [String]: The `index_name` to delete `object_ids` from.
+# @param object_ids [Array]: The object_ids to delete.
+# @param request_options: The request options to send along with the query, they will be merged with the transporter base parameters (headers, query params, timeouts, etc.). (optional)
+#
+# @return [Array<BatchResponse>]
+#
+def delete_objects(index_name, object_ids, request_options = {})
+  chunked_batch(
+    index_name,
+    object_ids.map { |id| { "objectID" => id } },
+    Search::Action::DELETE_OBJECT,
+    false,
+    1000,
+    request_options
+  )
+end
+
+# Helper: Replaces the object content of all the given objects according to their respective `objectID` field. The `chunked_batch` helper is used under the hood, which creates `batch` requests with at most 1000 objects each.
+#
+# @param index_name [String]: The `index_name` to update `objects` in.
+# @param objects [Array]: The objects to partially update.
+# @param create_if_not_exists [Boolean]: To be provided if non-existing objects are passed; otherwise the call will fail.
+# @param request_options: The request options to send along with the query, they will be merged with the transporter base parameters (headers, query params, timeouts, etc.). (optional)
+#
+# @return [Array<BatchResponse>]
+#
+def partial_update_objects(index_name, objects, create_if_not_exists, request_options = {})
+  chunked_batch(
+    index_name,
+    objects,
+    create_if_not_exists ? Search::Action::PARTIAL_UPDATE_OBJECT : Search::Action::PARTIAL_UPDATE_OBJECT_NO_CREATE,
+    false,
+    1000,
+    request_options
+  )
+end
+
 # Helper: Chunks the given `objects` list in subset of 1000 elements max in order to make it fit in `batch` requests.
 #
 # @param index_name [String] the `index_name` where the operation will be performed.