diff --git a/clients/algoliasearch-client-csharp/algoliasearch/Utils/SearchClientExtensions.cs b/clients/algoliasearch-client-csharp/algoliasearch/Utils/SearchClientExtensions.cs
index bc267f9e8a..8e042ad681 100644
--- a/clients/algoliasearch-client-csharp/algoliasearch/Utils/SearchClientExtensions.cs
+++ b/clients/algoliasearch-client-csharp/algoliasearch/Utils/SearchClientExtensions.cs
@@ -532,6 +532,50 @@ private static int NextDelay(int retryCount)
return Math.Min(retryCount * 200, 5000);
}
+ ///
+ /// Helper: Saves the given array of objects in the given index. The `chunkedBatch` helper is used under the hood, which creates a `batch` requests with at most 1000 objects in it.
+ ///
+ /// The index in which to perform the request.
+ /// The list of `objects` to store in the given Algolia `indexName`.
+ /// Add extra http header or query parameters to Algolia.
+ /// Cancellation Token to cancel the request.
+ ///
+ public async Task> SaveObjectsAsync(string indexName, IEnumerable objects,
+ RequestOptions options = null,
+ CancellationToken cancellationToken = default) where T : class
+ {
+ return await ChunkedBatchAsync(indexName, objects, Action.AddObject, 1000, options, cancellationToken).ConfigureAwait(false);
+ }
+
+ ///
+ /// Helper: Deletes every records for the given objectIDs. The `chunkedBatch` helper is used under the hood, which creates a `batch` requests with at most 1000 objectIDs in it.
+ ///
+ /// The index in which to perform the request.
+ /// The list of `objectIDs` to remove from the given Algolia `indexName`.
+ /// Add extra http header or query parameters to Algolia.
+ /// Cancellation Token to cancel the request.
+ public async Task> DeleteObjectsAsync(string indexName, IEnumerable objectIDs,
+ RequestOptions options = null,
+ CancellationToken cancellationToken = default)
+ {
+ return await ChunkedBatchAsync(indexName, objectIDs.Select(id => new { objectID = id }), Action.DeleteObject, 1000, options, cancellationToken).ConfigureAwait(false);
+ }
+
+ ///
+ /// Helper: Replaces object content of all the given objects according to their respective `objectID` field. The `chunkedBatch` helper is used under the hood, which creates a `batch` requests with at most 1000 objects in it.
+ ///
+ /// The index in which to perform the request.
+ /// The list of `objects` to update in the given Algolia `indexName`.
+ /// To be provided if non-existing objects are passed, otherwise, the call will fail.
+ /// Add extra http header or query parameters to Algolia.
+ /// Cancellation Token to cancel the request.
+ public async Task> PartialUpdateObjectsAsync(string indexName, IEnumerable objects, bool createIfNotExists,
+ RequestOptions options = null,
+ CancellationToken cancellationToken = default) where T : class
+ {
+ return await ChunkedBatchAsync(indexName, objects, createIfNotExists ? Action.PartialUpdateObject : Action.PartialUpdateObjectNoCreate, 1000, options, cancellationToken).ConfigureAwait(false);
+ }
+
private static async Task> CreateIterable(Func> executeQuery,
Func stopCondition)
{
diff --git a/clients/algoliasearch-client-kotlin/client/src/commonMain/kotlin/com/algolia/client/extensions/SearchClient.kt b/clients/algoliasearch-client-kotlin/client/src/commonMain/kotlin/com/algolia/client/extensions/SearchClient.kt
index 4cbe175c53..73ef49f3ea 100644
--- a/clients/algoliasearch-client-kotlin/client/src/commonMain/kotlin/com/algolia/client/extensions/SearchClient.kt
+++ b/clients/algoliasearch-client-kotlin/client/src/commonMain/kotlin/com/algolia/client/extensions/SearchClient.kt
@@ -11,6 +11,7 @@ import io.ktor.util.*
import kotlinx.datetime.Clock
import kotlinx.datetime.Instant
import kotlinx.serialization.json.JsonObject
+import kotlinx.serialization.json.*
import kotlin.random.Random
import kotlin.time.Duration
import kotlin.time.Duration.Companion.milliseconds
@@ -316,6 +317,80 @@ public suspend fun SearchClient.chunkedBatch(
return tasks
}
+/**
+ * Helper: Saves the given array of objects in the given index. The `chunkedBatch` helper is used under the hood, which creates a `batch` requests with at most 1000 objects in it.
+ *
+ * @param indexName The index in which to perform the request.
+ * @param objects The list of objects to index.
+ * @param requestOptions The requestOptions to send along with the query, they will be merged with the transporter requestOptions.
+ * @return The list of responses from the batch requests.
+ *
+ */
+public suspend fun SearchClient.saveObjects(
+ indexName: String,
+ objects: List,
+ requestOptions: RequestOptions? = null,
+): List {
+ return this.chunkedBatch(
+ indexName = indexName,
+ objects = objects,
+ action = Action.AddObject,
+ waitForTask = false,
+ batchSize = 1000,
+ requestOptions = requestOptions,
+ )
+}
+
+/**
+ * Helper: Deletes every records for the given objectIDs. The `chunkedBatch` helper is used under the hood, which creates a `batch` requests with at most 1000 objectIDs in it.
+ *
+ * @param indexName The index in which to perform the request.
+ * @param objectIDs The list of objectIDs to delete from the index.
+ * @param requestOptions The requestOptions to send along with the query, they will be merged with the transporter requestOptions.
+ * @return The list of responses from the batch requests.
+ *
+ */
+public suspend fun SearchClient.deleteObjects(
+ indexName: String,
+ objectIDs: List,
+ requestOptions: RequestOptions? = null,
+): List {
+ return this.chunkedBatch(
+ indexName = indexName,
+ objects = objectIDs.map { id -> JsonObject(mapOf("objectID" to Json.encodeToJsonElement(id))) },
+ action = Action.DeleteObject,
+ waitForTask = false,
+ batchSize = 1000,
+ requestOptions = requestOptions,
+ )
+}
+
+/**
+ * Helper: Replaces object content of all the given objects according to their respective `objectID` field. The `chunkedBatch` helper is used under the hood, which creates a `batch` requests with at most 1000 objects in it.
+ *
+ * @param indexName The index in which to perform the request.
+ * @param objects The list of objects to update in the index.
+ * @param createIfNotExists To be provided if non-existing objects are passed, otherwise, the call will fail.
+ * @param requestOptions The requestOptions to send along with the query, they will be merged with the transporter requestOptions.
+ * @return The list of responses from the batch requests.
+ *
+ */
+public suspend fun SearchClient.partialUpdateObjects(
+ indexName: String,
+ objects: List,
+ createIfNotExists: Boolean,
+ requestOptions: RequestOptions? = null,
+): List {
+ return this.chunkedBatch(
+ indexName = indexName,
+ objects = objects,
+ action = if (createIfNotExists) Action.PartialUpdateObject else Action.PartialUpdateObjectNoCreate,
+ waitForTask = false,
+ batchSize = 1000,
+ requestOptions = requestOptions,
+ )
+}
+
/**
* Push a new set of objects and remove all previous ones. Settings, synonyms and query rules are untouched.
* Replace all objects in an index without any downtime.
diff --git a/clients/algoliasearch-client-scala/src/main/scala/algoliasearch/extension/package.scala b/clients/algoliasearch-client-scala/src/main/scala/algoliasearch/extension/package.scala
index 71abc85ffb..a5c663c614 100644
--- a/clients/algoliasearch-client-scala/src/main/scala/algoliasearch/extension/package.scala
+++ b/clients/algoliasearch-client-scala/src/main/scala/algoliasearch/extension/package.scala
@@ -197,10 +197,10 @@ package object extension {
*
* @param indexName
* The index in which to perform the request.
- * @param records
- * The list of records to replace.
+ * @param objects
+ * The list of objects to save.
* @param action
- * The action to perform on the records.
+ * The action to perform on the objects.
* @param waitForTasks
* Whether to wait for the tasks to complete.
* @param batchSize
@@ -212,14 +212,14 @@ package object extension {
*/
def chunkedBatch(
indexName: String,
- records: Seq[Any],
+ objects: Seq[Any],
action: Action = Action.AddObject,
waitForTasks: Boolean,
batchSize: Int = 1000,
requestOptions: Option[RequestOptions] = None
)(implicit ec: ExecutionContext): Future[Seq[BatchResponse]] = {
var futures = Seq.empty[Future[BatchResponse]]
- records.grouped(batchSize).foreach { chunk =>
+ objects.grouped(batchSize).foreach { chunk =>
val requests = chunk.map { record =>
BatchRequest(action = action, body = record)
}
@@ -244,6 +244,66 @@ package object extension {
responses
}
+ /** Helper: Saves the given array of objects in the given index. The `chunkedBatch` helper is used under the hood, which creates a `batch` requests with at most 1000 objects in it.
+ *
+ * @param indexName
+ * The index in which to perform the request.
+ * @param objects
+ * The list of objects to save.
+ * @param requestOptions
+ * Additional request configuration.
+ * @return
+ * A future containing the response of the batch operations.
+ */
+ def saveObjects(
+ indexName: String,
+ objects: Seq[Any],
+ requestOptions: Option[RequestOptions] = None
+ )(implicit ec: ExecutionContext): Future[Seq[BatchResponse]] = {
+ chunkedBatch(indexName, objects, Action.AddObject, false, 1000, requestOptions)
+ }
+
+ /** Helper: Deletes every objects for the given objectIDs. The `chunkedBatch` helper is used under the hood, which creates a `batch` requests with at most 1000 objectIDs in it.
+ *
+ * @param indexName
+ * The index in which to perform the request.
+ * @param objectIDs
+ * The list of objectIDs to delete.
+ * @param requestOptions
+ * Additional request configuration.
+ * @return
+ * A future containing the response of the batch operations.
+ */
+ def deleteObjects(
+ indexName: String,
+ objectIDs: Seq[String],
+ requestOptions: Option[RequestOptions] = None
+ )(implicit ec: ExecutionContext): Future[Seq[BatchResponse]] = {
+ chunkedBatch(indexName, objectIDs.map(id => Map("objectID" -> id)), Action.DeleteObject, false, 1000, requestOptions)
+ }
+
+ /** Helper: Replaces object content of all the given objects according to their respective `objectID` field. The `chunkedBatch` helper is used under the hood, which creates a `batch` requests with at most 1000 objects in it.
+ *
+ * @param indexName
+ * The index in which to perform the request.
+ * @param objects
+ * The list of objects to save.
+ * @param createIfNotExists
+ * To be provided if non-existing objects are passed, otherwise, the call will fail.
+ * @param requestOptions
+ * Additional request configuration.
+ * @return
+ * A future containing the response of the batch operations.
+ */
+ def partialUpdateObjects(
+ indexName: String,
+ objects: Seq[Any],
+ createIfNotExists: Boolean,
+ requestOptions: Option[RequestOptions] = None
+ )(implicit ec: ExecutionContext): Future[Seq[BatchResponse]] = {
+ chunkedBatch(indexName, objects, if (createIfNotExists) Action.PartialUpdateObject else Action.PartialUpdateObjectNoCreate, false, 1000, requestOptions)
+ }
+
/** Push a new set of objects and remove all previous ones. Settings, synonyms and query rules are untouched.
* Replace all objects in an index without any downtime. Internally, this method copies the existing index
* settings, synonyms and query rules and indexes all passed objects. Finally, the temporary one replaces the
@@ -254,8 +314,8 @@ package object extension {
*
* @param indexName
* The index in which to perform the request.
- * @param records
- * The list of records to replace.
+ * @param objects
+ * The list of objects to replace.
* @param batchSize
* The size of the batch. Default is 1000.
* @param requestOptions
@@ -265,11 +325,11 @@ package object extension {
*/
def replaceAllObjects(
indexName: String,
- records: Seq[Any],
+ objects: Seq[Any],
batchSize: Int = 1000,
requestOptions: Option[RequestOptions] = None
)(implicit ec: ExecutionContext): Future[ReplaceAllObjectsResponse] = {
- val requests = records.map { record =>
+ val requests = objects.map { record =>
BatchRequest(action = Action.AddObject, body = record)
}
val tmpIndexName = s"${indexName}_tmp_${scala.util.Random.nextInt(100)}"
@@ -287,7 +347,7 @@ package object extension {
batchResponses <- chunkedBatch(
indexName = tmpIndexName,
- records = records,
+ objects = objects,
action = Action.AddObject,
waitForTasks = true,
batchSize = batchSize,
diff --git a/clients/algoliasearch-client-swift/Sources/Search/Extra/SearchClientExtension.swift b/clients/algoliasearch-client-swift/Sources/Search/Extra/SearchClientExtension.swift
index bbfd516885..8601a33d48 100644
--- a/clients/algoliasearch-client-swift/Sources/Search/Extra/SearchClientExtension.swift
+++ b/clients/algoliasearch-client-swift/Sources/Search/Extra/SearchClientExtension.swift
@@ -456,6 +456,71 @@ public extension SearchClient {
return responses
}
+ /// Helper: Saves the given array of objects in the given index. The `chunkedBatch` helper is used under the hood,
+ /// which creates a `batch` requests with at most 1000 objects in it.
+ /// - parameter indexName: The name of the index where to save the objects
+ /// - parameter objects: The new objects
+ /// - parameter requestOptions: The request options
+ /// - returns: [BatchResponse]
+ func saveObjects(
+ indexName: String,
+ objects: [some Encodable],
+ requestOptions: RequestOptions? = nil
+ ) async throws -> [BatchResponse] {
+ try await self.chunkedBatch(
+ indexName: indexName,
+ objects: objects,
+ action: .addObject,
+ waitForTasks: false,
+ batchSize: 1000,
+ requestOptions: requestOptions
+ )
+ }
+
+ /// Helper: Deletes every records for the given objectIDs. The `chunkedBatch` helper is used under the hood, which
+ /// creates a `batch` requests with at most 1000 objectIDs in it.
+ /// - parameter indexName: The name of the index to delete objectIDs from
+ /// - parameter objectIDs: The objectIDs to delete
+ /// - parameter requestOptions: The request options
+ /// - returns: [BatchResponse]
+ func deleteObjects(
+ indexName: String,
+ objectIDs: [String],
+ requestOptions: RequestOptions? = nil
+ ) async throws -> [BatchResponse] {
+ try await self.chunkedBatch(
+ indexName: indexName,
+ objects: objectIDs.map { AnyCodable(["objectID": $0]) },
+ action: .deleteObject,
+ waitForTasks: false,
+ batchSize: 1000,
+ requestOptions: requestOptions
+ )
+ }
+
+ /// Helper: Replaces object content of all the given objects according to their respective `objectID` field. The
+ /// `chunkedBatch` helper is used under the hood, which creates a `batch` requests with at most 1000 objects in it.
+ /// - parameter indexName: The name of the index where to update the objects
+ /// - parameter objects: The objects to update
+ /// - parameter createIfNotExist: To be provided if non-existing objects are passed, otherwise, the call will fail.
+ /// - parameter requestOptions: The request options
+ /// - returns: [BatchResponse]
+ func partialUpdateObjects(
+ indexName: String,
+ objects: [some Encodable],
+ createIfNotExist: Bool = false,
+ requestOptions: RequestOptions? = nil
+ ) async throws -> [BatchResponse] {
+ try await self.chunkedBatch(
+ indexName: indexName,
+ objects: objects,
+ action: createIfNotExist ? .partialUpdateObject : .partialUpdateObjectNoCreate,
+ waitForTasks: false,
+ batchSize: 1000,
+ requestOptions: requestOptions
+ )
+ }
+
/// Replace all objects in an index
///
/// See https://api-clients-automation.netlify.app/docs/contributing/add-new-api-client#5-helpers for implementation
diff --git a/specs/search/helpers/deleteObjects.yml b/specs/search/helpers/deleteObjects.yml
new file mode 100644
index 0000000000..4f715a44ac
--- /dev/null
+++ b/specs/search/helpers/deleteObjects.yml
@@ -0,0 +1,35 @@
+method:
+ post:
+ x-helper: true
+ tags:
+ - Records
+ operationId: deleteObjects
+ summary: Deletes every record for the given objectIDs
+ description: |
+ Helper: Deletes every record for the given objectIDs. The `chunkedBatch` helper is used under the hood, which creates `batch` requests with at most 1000 objectIDs in each.
+ parameters:
+ - in: query
+ name: indexName
+ description: The `indexName` to delete `objectIDs` from.
+ required: true
+ schema:
+ type: string
+ - in: query
+ name: objectIDs
+ description: The objectIDs to delete.
+ required: true
+ schema:
+ type: array
+ items:
+ type: string
+ responses:
+ '200':
+ description: OK
+ content:
+ application/json:
+ schema:
+ type: array
+ items:
+ $ref: '../paths/objects/common/schemas.yml#/batchResponse'
+ '400':
+ $ref: '../../common/responses/IndexNotFound.yml'
diff --git a/specs/search/helpers/partialUpdateObjects.yml b/specs/search/helpers/partialUpdateObjects.yml
new file mode 100644
index 0000000000..8659833e51
--- /dev/null
+++ b/specs/search/helpers/partialUpdateObjects.yml
@@ -0,0 +1,41 @@
+method:
+ post:
+ x-helper: true
+ tags:
+ - Records
+ operationId: partialUpdateObjects
+ summary: Replaces object content of all the given objects according to their respective `objectID` field
+ description: |
+ Helper: Replaces object content of all the given objects according to their respective `objectID` field. The `chunkedBatch` helper is used under the hood, which creates a `batch` requests with at most 1000 objects in it.
+ parameters:
+ - in: query
+ name: indexName
+ description: The `indexName` to update `objects` in.
+ required: true
+ schema:
+ type: string
+ - in: query
+ name: objects
+ description: The objects to update.
+ required: true
+ schema:
+ type: array
+ items:
+ type: object
+ - in: query
+ name: createIfNotExists
+ description: To be provided if non-existing objects are passed, otherwise, the call will fail.
+ required: false
+ schema:
+ type: boolean
+ responses:
+ '200':
+ description: OK
+ content:
+ application/json:
+ schema:
+ type: array
+ items:
+ $ref: '../paths/objects/common/schemas.yml#/batchResponse'
+ '400':
+ $ref: '../../common/responses/IndexNotFound.yml'
diff --git a/specs/search/helpers/saveObjects.yml b/specs/search/helpers/saveObjects.yml
new file mode 100644
index 0000000000..ffe0ae467c
--- /dev/null
+++ b/specs/search/helpers/saveObjects.yml
@@ -0,0 +1,35 @@
+method:
+ post:
+ x-helper: true
+ tags:
+ - Records
+ operationId: saveObjects
+ summary: Saves the given array of objects in the given index
+ description: |
+ Helper: Saves the given array of objects in the given index. The `chunkedBatch` helper is used under the hood, which creates a `batch` requests with at most 1000 objects in it.
+ parameters:
+ - in: query
+ name: indexName
+ description: The `indexName` to save `objects` in.
+ required: true
+ schema:
+ type: string
+ - in: query
+ name: objects
+ description: The objects to save.
+ required: true
+ schema:
+ type: array
+ items:
+ type: object
+ responses:
+ '200':
+ description: OK
+ content:
+ application/json:
+ schema:
+ type: array
+ items:
+ $ref: '../paths/objects/common/schemas.yml#/batchResponse'
+ '400':
+ $ref: '../../common/responses/IndexNotFound.yml'
diff --git a/specs/search/spec.yml b/specs/search/spec.yml
index 65c6e2dbb9..cd028cf9e3 100644
--- a/specs/search/spec.yml
+++ b/specs/search/spec.yml
@@ -366,3 +366,12 @@ paths:
/chunkedBatch:
$ref: 'helpers/chunkedBatch.yml#/method'
+
+ /saveObjects:
+ $ref: 'helpers/saveObjects.yml#/method'
+
+ /deleteObjects:
+ $ref: 'helpers/deleteObjects.yml#/method'
+
+ /partialUpdateObjects:
+ $ref: 'helpers/partialUpdateObjects.yml#/method'
diff --git a/templates/go/search_helpers.mustache b/templates/go/search_helpers.mustache
index 7b75162590..20b2991ea4 100644
--- a/templates/go/search_helpers.mustache
+++ b/templates/go/search_helpers.mustache
@@ -777,6 +777,35 @@ func (c *APIClient) GetSecuredApiKeyRemainingValidity(securedApiKey string) (tim
return time.Until(time.Unix(int64(ts), 0)), nil
}
+// Helper: Saves the given array of objects in the given index. The `chunkedBatch` helper is used under the hood, which creates a `batch` requests with at most 1000 objects in it.
+func (c *APIClient) SaveObjects(indexName string, objects []map[string]any) ([]BatchResponse, error) {
+ return c.ChunkedBatch(indexName, objects, utils.ToPtr(ACTION_ADD_OBJECT), nil, nil)
+}
+
+// Helper: Deletes every records for the given objectIDs. The `chunkedBatch` helper is used under the hood, which creates a `batch` requests with at most 1000 objectIDs in it.
+func (c *APIClient) DeleteObjects(indexName string, objectIDs []string) ([]BatchResponse, error) {
+ objects := make([]map[string]any, 0, len(objectIDs))
+
+ for _, id := range objectIDs {
+ objects = append(objects, map[string]any{"objectID": id})
+ }
+
+ return c.ChunkedBatch(indexName, objects, utils.ToPtr(ACTION_DELETE_OBJECT), nil, nil)
+}
+
+// Helper: Replaces object content of all the given objects according to their respective `objectID` field. The `chunkedBatch` helper is used under the hood, which creates a `batch` requests with at most 1000 objects in it.
+func (c *APIClient) PartialUpdateObjects(indexName string, objects []map[string]any, createIfNotExists bool) ([]BatchResponse, error) {
+ var action Action
+
+ if createIfNotExists {
+ action = ACTION_PARTIAL_UPDATE_OBJECT
+ } else {
+ action = ACTION_PARTIAL_UPDATE_OBJECT_NO_CREATE
+ }
+
+ return c.ChunkedBatch(indexName, objects, utils.ToPtr(action), nil, nil)
+}
+
// ChunkedBatch chunks the given `objects` list in subset of 1000 elements max in order to make it fit in `batch` requests.
func (c *APIClient) ChunkedBatch(indexName string, objects []map[string]any, action *Action, waitForTasks *bool, batchSize *int) ([]BatchResponse, error) {
var (
@@ -873,4 +902,4 @@ func (c *APIClient) ReplaceAllObjects(indexName string, objects []map[string]any
BatchResponses: batchResp,
MoveOperationResponse: *moveResp,
}, nil
-}
+}
\ No newline at end of file
diff --git a/templates/java/api_helpers.mustache b/templates/java/api_helpers.mustache
index 8efff4536a..d316f54c5e 100644
--- a/templates/java/api_helpers.mustache
+++ b/templates/java/api_helpers.mustache
@@ -610,6 +610,61 @@ int batchSize
return replaceAllObjects(indexName, objects, batchSize, null);
}
+/**
+* Helper: Saves the given array of objects in the given index. The `chunkedBatch` helper is used under the hood, which creates a `batch` requests with at most 1000 objects in it.
+*
+* @param indexName The `indexName` to replace `objects` in.
+* @param objects The array of `objects` to store in the given Algolia `indexName`.
+* @param requestOptions The requestOptions to send along with the query, they will be merged with the transporter requestOptions. (optional)
+*/
+public List saveObjects(
+String indexName,
+Iterable objects,
+RequestOptions requestOptions
+) {
+return chunkedBatch(indexName, objects, Action.ADD_OBJECT, false, 1000, requestOptions);
+}
+
+/**
+* Helper: Deletes every records for the given objectIDs. The `chunkedBatch` helper is used under the hood, which creates a `batch` requests with at most 1000 objectIDs in it.
+*
+* @param indexName The `indexName` to delete `objectIDs` from.
+* @param objectIDs The array of `objectIDs` to delete from the `indexName`.
+* @param requestOptions The requestOptions to send along with the query, they will be merged with the transporter requestOptions. (optional)
+*/
+public List deleteObjects(
+String indexName,
+List objectIDs,
+RequestOptions requestOptions
+) {
+List