diff --git a/.mock/definition/empathic-voice/__package__.yml b/.mock/definition/empathic-voice/__package__.yml index 6aceb05..9b75906 100644 --- a/.mock/definition/empathic-voice/__package__.yml +++ b/.mock/definition/empathic-voice/__package__.yml @@ -2239,7 +2239,14 @@ types: source: openapi: assistant-asyncapi.json UserInput: - docs: User text to insert into the conversation. + docs: >- + User text to insert into the conversation. Text sent through a User Input + message is treated as the user’s speech to EVI. EVI processes this input + and provides a corresponding response. + + + Expression measurement results are not available for User Input messages, + as the prosody model relies on audio input and cannot process text alone. properties: type: type: literal<"user_input"> @@ -2323,7 +2330,9 @@ types: source: openapi: assistant-asyncapi.json AudioOutput: - docs: When provided, the output is audio. + docs: >- + The type of message sent through the socket; for an Audio Output message, + this must be `audio_output`. properties: type: type: literal<"audio_output"> diff --git a/poetry.lock b/poetry.lock index 81a9c21..ff6c2ca 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2119,25 +2119,29 @@ files = [ [[package]] name = "pywin32" -version = "306" +version = "307" description = "Python for Window Extensions" optional = false python-versions = "*" files = [ - {file = "pywin32-306-cp310-cp310-win32.whl", hash = "sha256:06d3420a5155ba65f0b72f2699b5bacf3109f36acbe8923765c22938a69dfc8d"}, - {file = "pywin32-306-cp310-cp310-win_amd64.whl", hash = "sha256:84f4471dbca1887ea3803d8848a1616429ac94a4a8d05f4bc9c5dcfd42ca99c8"}, - {file = "pywin32-306-cp311-cp311-win32.whl", hash = "sha256:e65028133d15b64d2ed8f06dd9fbc268352478d4f9289e69c190ecd6818b6407"}, - {file = "pywin32-306-cp311-cp311-win_amd64.whl", hash = "sha256:a7639f51c184c0272e93f244eb24dafca9b1855707d94c192d4a0b4c01e1100e"}, - {file = "pywin32-306-cp311-cp311-win_arm64.whl", hash = "sha256:70dba0c913d19f942a2db25217d9a1b726c278f483a919f1abfed79c9cf64d3a"}, - {file = "pywin32-306-cp312-cp312-win32.whl", hash = "sha256:383229d515657f4e3ed1343da8be101000562bf514591ff383ae940cad65458b"}, - {file = "pywin32-306-cp312-cp312-win_amd64.whl", hash = "sha256:37257794c1ad39ee9be652da0462dc2e394c8159dfd913a8a4e8eb6fd346da0e"}, - {file = "pywin32-306-cp312-cp312-win_arm64.whl", hash = "sha256:5821ec52f6d321aa59e2db7e0a35b997de60c201943557d108af9d4ae1ec7040"}, - {file = "pywin32-306-cp37-cp37m-win32.whl", hash = "sha256:1c73ea9a0d2283d889001998059f5eaaba3b6238f767c9cf2833b13e6a685f65"}, - {file = "pywin32-306-cp37-cp37m-win_amd64.whl", hash = "sha256:72c5f621542d7bdd4fdb716227be0dd3f8565c11b280be6315b06ace35487d36"}, - {file = "pywin32-306-cp38-cp38-win32.whl", hash = "sha256:e4c092e2589b5cf0d365849e73e02c391c1349958c5ac3e9d5ccb9a28e017b3a"}, - {file = "pywin32-306-cp38-cp38-win_amd64.whl", hash = "sha256:e8ac1ae3601bee6ca9f7cb4b5363bf1c0badb935ef243c4733ff9a393b1690c0"}, - {file = "pywin32-306-cp39-cp39-win32.whl", hash = "sha256:e25fd5b485b55ac9c057f67d94bc203f3f6595078d1fb3b458c9c28b7153a802"}, - {file = "pywin32-306-cp39-cp39-win_amd64.whl", hash = "sha256:39b61c15272833b5c329a2989999dcae836b1eed650252ab1b7bfbe1d59f30f4"}, + {file = "pywin32-307-cp310-cp310-win32.whl", hash = "sha256:f8f25d893c1e1ce2d685ef6d0a481e87c6f510d0f3f117932781f412e0eba31b"}, + {file = "pywin32-307-cp310-cp310-win_amd64.whl", hash = "sha256:36e650c5e5e6b29b5d317385b02d20803ddbac5d1031e1f88d20d76676dd103d"}, + {file = "pywin32-307-cp310-cp310-win_arm64.whl", hash = 
"sha256:0c12d61e0274e0c62acee79e3e503c312426ddd0e8d4899c626cddc1cafe0ff4"}, + {file = "pywin32-307-cp311-cp311-win32.whl", hash = "sha256:fec5d27cc893178fab299de911b8e4d12c5954e1baf83e8a664311e56a272b75"}, + {file = "pywin32-307-cp311-cp311-win_amd64.whl", hash = "sha256:987a86971753ed7fdd52a7fb5747aba955b2c7fbbc3d8b76ec850358c1cc28c3"}, + {file = "pywin32-307-cp311-cp311-win_arm64.whl", hash = "sha256:fd436897c186a2e693cd0437386ed79f989f4d13d6f353f8787ecbb0ae719398"}, + {file = "pywin32-307-cp312-cp312-win32.whl", hash = "sha256:07649ec6b01712f36debf39fc94f3d696a46579e852f60157a729ac039df0815"}, + {file = "pywin32-307-cp312-cp312-win_amd64.whl", hash = "sha256:00d047992bb5dcf79f8b9b7c81f72e0130f9fe4b22df613f755ab1cc021d8347"}, + {file = "pywin32-307-cp312-cp312-win_arm64.whl", hash = "sha256:b53658acbfc6a8241d72cc09e9d1d666be4e6c99376bc59e26cdb6223c4554d2"}, + {file = "pywin32-307-cp313-cp313-win32.whl", hash = "sha256:ea4d56e48dc1ab2aa0a5e3c0741ad6e926529510516db7a3b6981a1ae74405e5"}, + {file = "pywin32-307-cp313-cp313-win_amd64.whl", hash = "sha256:576d09813eaf4c8168d0bfd66fb7cb3b15a61041cf41598c2db4a4583bf832d2"}, + {file = "pywin32-307-cp313-cp313-win_arm64.whl", hash = "sha256:b30c9bdbffda6a260beb2919f918daced23d32c79109412c2085cbc513338a0a"}, + {file = "pywin32-307-cp37-cp37m-win32.whl", hash = "sha256:5101472f5180c647d4525a0ed289ec723a26231550dbfd369ec19d5faf60e511"}, + {file = "pywin32-307-cp37-cp37m-win_amd64.whl", hash = "sha256:05de55a7c110478dc4b202230e98af5e0720855360d2b31a44bb4e296d795fba"}, + {file = "pywin32-307-cp38-cp38-win32.whl", hash = "sha256:13d059fb7f10792542082f5731d5d3d9645320fc38814759313e5ee97c3fac01"}, + {file = "pywin32-307-cp38-cp38-win_amd64.whl", hash = "sha256:7e0b2f93769d450a98ac7a31a087e07b126b6d571e8b4386a5762eb85325270b"}, + {file = "pywin32-307-cp39-cp39-win32.whl", hash = "sha256:55ee87f2f8c294e72ad9d4261ca423022310a6e79fb314a8ca76ab3f493854c6"}, + {file = "pywin32-307-cp39-cp39-win_amd64.whl", hash = "sha256:e9d5202922e74985b037c9ef46778335c102b74b95cec70f629453dbe7235d87"}, ] [[package]] @@ -2823,13 +2827,13 @@ files = [ [[package]] name = "types-python-dateutil" -version = "2.9.0.20240906" +version = "2.9.0.20241003" description = "Typing stubs for python-dateutil" optional = false python-versions = ">=3.8" files = [ - {file = "types-python-dateutil-2.9.0.20240906.tar.gz", hash = "sha256:9706c3b68284c25adffc47319ecc7947e5bb86b3773f843c73906fd598bc176e"}, - {file = "types_python_dateutil-2.9.0.20240906-py3-none-any.whl", hash = "sha256:27c8cc2d058ccb14946eebcaaa503088f4f6dbc4fb6093d3d456a49aef2753f6"}, + {file = "types-python-dateutil-2.9.0.20241003.tar.gz", hash = "sha256:58cb85449b2a56d6684e41aeefb4c4280631246a0da1a719bdbe6f3fb0317446"}, + {file = "types_python_dateutil-2.9.0.20241003-py3-none-any.whl", hash = "sha256:250e1d8e80e7bbc3a6c99b907762711d1a1cdd00e978ad39cb5940f6f0a87f3d"}, ] [[package]] diff --git a/pyproject.toml b/pyproject.toml index 388cc43..eb7b5be 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "hume" -version = "0.7.2" +version = "0.7.3" description = "A Python SDK for Hume AI" readme = "README.md" authors = [] diff --git a/reference.md b/reference.md index 4b14e16..adb0d7b 100644 --- a/reference.md +++ b/reference.md @@ -1,6 +1,6 @@ # Reference -## ExpressionMeasurement Batch -
client.expression_measurement.batch.list_jobs(...) +## EmpathicVoice Tools +
client.empathic_voice.tools.list_tools(...)
@@ -12,7 +12,9 @@
-Sort and filter jobs. +Fetches a paginated list of **Tools**. + +Refer to our [tool use](/docs/empathic-voice-interface-evi/tool-use#function-calling) guide for comprehensive instructions on defining and integrating tools into EVI.
@@ -32,7 +34,15 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.expression_measurement.batch.list_jobs() +response = client.empathic_voice.tools.list_tools( + page_number=0, + page_size=2, +) +for item in response: + yield item +# alternatively, you can paginate page-by-page +for page in response.iter_pages(): + yield page ``` @@ -48,33 +58,11 @@ client.expression_measurement.batch.list_jobs()
-**limit:** `typing.Optional[int]` — The maximum number of jobs to include in the response. - -
-
- -
-
- -**status:** `typing.Optional[typing.Union[Status, typing.Sequence[Status]]]` - -Include only jobs of this status in the response. There are four possible statuses: - -- `QUEUED`: The job has been received and is waiting to be processed. - -- `IN_PROGRESS`: The job is currently being processed. - -- `COMPLETED`: The job has finished processing. - -- `FAILED`: The job encountered an error and could not be completed successfully. - -
-
+**page_number:** `typing.Optional[int]` -
-
+Specifies the page number to retrieve, enabling pagination. -**when:** `typing.Optional[When]` — Specify whether to include jobs created before or after a given `timestamp_ms`. +This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page.
@@ -82,11 +70,11 @@ Include only jobs of this status in the response. There are four possible status
-**timestamp_ms:** `typing.Optional[int]` +**page_size:** `typing.Optional[int]` -Provide a timestamp in milliseconds to filter jobs. +Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. -When combined with the `when` parameter, you can filter jobs before or after the given timestamp. Defaults to the current Unix timestamp if one is not provided. +For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10.
@@ -94,15 +82,7 @@ When combined with the `when` parameter, you can filter jobs before or after the
-**sort_by:** `typing.Optional[SortBy]` - -Specify which timestamp to sort the jobs by. - -- `created`: Sort jobs by the time of creation, indicated by `created_timestamp_ms`. - -- `started`: Sort jobs by the time processing started, indicated by `started_timestamp_ms`. - -- `ended`: Sort jobs by the time processing ended, indicated by `ended_timestamp_ms`. +**restrict_to_most_recent:** `typing.Optional[bool]` — By default, `restrict_to_most_recent` is set to true, returning only the latest version of each tool. To include all versions of each tool in the list, set `restrict_to_most_recent` to false.
@@ -110,13 +90,7 @@ Specify which timestamp to sort the jobs by.
-**direction:** `typing.Optional[Direction]` - -Specify the order in which to sort the jobs. Defaults to descending order. - -- `asc`: Sort in ascending order (chronological, with the oldest records first). - -- `desc`: Sort in descending order (reverse-chronological, with the newest records first). +**name:** `typing.Optional[str]` — Filter to only include tools with this name.
@@ -136,7 +110,7 @@ Specify the order in which to sort the jobs. Defaults to descending order.
-
client.expression_measurement.batch.start_inference_job(...) +
client.empathic_voice.tools.create_tool(...)
@@ -148,7 +122,9 @@ Specify the order in which to sort the jobs. Defaults to descending order.
-Start a new measurement inference job. +Creates a **Tool** that can be added to an [EVI configuration](/reference/empathic-voice-interface-evi/configs/create-config). + +Refer to our [tool use](/docs/empathic-voice-interface-evi/tool-use#function-calling) guide for comprehensive instructions on defining and integrating tools into EVI.
@@ -168,9 +144,12 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.expression_measurement.batch.start_inference_job( - urls=["https://hume-tutorials.s3.amazonaws.com/faces.zip"], - notify=True, +client.empathic_voice.tools.create_tool( + name="get_current_weather", + parameters='{ "type": "object", "properties": { "location": { "type": "string", "description": "The city and state, e.g. San Francisco, CA" }, "format": { "type": "string", "enum": ["celsius", "fahrenheit"], "description": "The temperature unit to use. Infer this from the users location." } }, "required": ["location", "format"] }', + version_description="Fetches current weather and uses celsius or fahrenheit based on location of user.", + description="This tool is for getting the current weather.", + fallback_content="Unable to fetch current weather.", ) ``` @@ -187,19 +166,7 @@ client.expression_measurement.batch.start_inference_job(
-**models:** `typing.Optional[Models]` - -Specify the models to use for inference. - -If this field is not explicitly set, then all models will run by default. - -
-
- -
-
- -**transcription:** `typing.Optional[Transcription]` +**name:** `str` — Name applied to all versions of a particular Tool.
@@ -207,11 +174,11 @@ If this field is not explicitly set, then all models will run by default.
-**urls:** `typing.Optional[typing.Sequence[str]]` +**parameters:** `str` -URLs to the media files to be processed. Each must be a valid public URL to a media file (see recommended input filetypes) or an archive (`.zip`, `.tar.gz`, `.tar.bz2`, `.tar.xz`) of media files. +Stringified JSON defining the parameters used by this version of the Tool. -If you wish to supply more than 100 URLs, consider providing them as an archive (`.zip`, `.tar.gz`, `.tar.bz2`, `.tar.xz`). +These parameters define the inputs needed for the Tool’s execution, including the expected data type and description for each input field. Structured as a stringified JSON schema, this format ensures the Tool receives data in the expected format.
@@ -219,7 +186,7 @@ If you wish to supply more than 100 URLs, consider providing them as an archive
-**text:** `typing.Optional[typing.Sequence[str]]` — Text supplied directly to our Emotional Language and NER models for analysis. +**version_description:** `typing.Optional[str]` — An optional description of the Tool version.
@@ -227,7 +194,7 @@ If you wish to supply more than 100 URLs, consider providing them as an archive
-**callback_url:** `typing.Optional[str]` — If provided, a `POST` request will be made to the URL with the generated predictions on completion or the error message on failure. +**description:** `typing.Optional[str]` — An optional description of what the Tool does, used by the supplemental LLM to choose when and how to call the function.
@@ -235,7 +202,7 @@ If you wish to supply more than 100 URLs, consider providing them as an archive
-**notify:** `typing.Optional[bool]` — Whether to send an email notification to the user upon job completion/failure. +**fallback_content:** `typing.Optional[str]` — Optional text passed to the supplemental LLM in place of the tool call result. The LLM then uses this text to generate a response back to the user, ensuring continuity in the conversation if the Tool errors.
@@ -255,7 +222,7 @@ If you wish to supply more than 100 URLs, consider providing them as an archive
-
client.expression_measurement.batch.get_job_details(...) +
client.empathic_voice.tools.list_tool_versions(...)
@@ -267,7 +234,9 @@ If you wish to supply more than 100 URLs, consider providing them as an archive
-Get the request details and state of a given job. +Fetches a list of a **Tool's** versions. + +Refer to our [tool use](/docs/empathic-voice-interface-evi/tool-use#function-calling) guide for comprehensive instructions on defining and integrating tools into EVI.
@@ -287,8 +256,8 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.expression_measurement.batch.get_job_details( - id="job_id", +client.empathic_voice.tools.list_tool_versions( + id="00183a3f-79ba-413d-9f3b-609864268bea", ) ``` @@ -305,7 +274,39 @@ client.expression_measurement.batch.get_job_details(
-**id:** `str` — The unique identifier for the job. +**id:** `str` — Identifier for a Tool. Formatted as a UUID. + +
+
+ +
+
+ +**page_number:** `typing.Optional[int]` + +Specifies the page number to retrieve, enabling pagination. + +This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page. + +
+
+ +
+
+ +**page_size:** `typing.Optional[int]` + +Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. + +For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. + +
+
+ +
+
+ +**restrict_to_most_recent:** `typing.Optional[bool]` — By default, `restrict_to_most_recent` is set to true, returning only the latest version of each tool. To include all versions of each tool in the list, set `restrict_to_most_recent` to false.
@@ -325,7 +326,7 @@ client.expression_measurement.batch.get_job_details(
-
client.expression_measurement.batch.get_job_predictions(...) +
client.empathic_voice.tools.create_tool_version(...)
@@ -337,7 +338,9 @@ client.expression_measurement.batch.get_job_details(
-Get the JSON predictions of a completed inference job. +Updates a **Tool** by creating a new version of the **Tool**. + +Refer to our [tool use](/docs/empathic-voice-interface-evi/tool-use#function-calling) guide for comprehensive instructions on defining and integrating tools into EVI.
@@ -357,8 +360,12 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.expression_measurement.batch.get_job_predictions( - id="job_id", +client.empathic_voice.tools.create_tool_version( + id="00183a3f-79ba-413d-9f3b-609864268bea", + parameters='{ "type": "object", "properties": { "location": { "type": "string", "description": "The city and state, e.g. San Francisco, CA" }, "format": { "type": "string", "enum": ["celsius", "fahrenheit", "kelvin"], "description": "The temperature unit to use. Infer this from the users location." } }, "required": ["location", "format"] }', + version_description="Fetches current weather and uses celsius, fahrenheit, or kelvin based on location of user.", + fallback_content="Unable to fetch current weather.", + description="This tool is for getting the current weather.", ) ``` @@ -375,7 +382,43 @@ client.expression_measurement.batch.get_job_predictions(
-**id:** `str` — The unique identifier for the job. +**id:** `str` — Identifier for a Tool. Formatted as a UUID. + +
+
+ +
+
+ +**parameters:** `str` + +Stringified JSON defining the parameters used by this version of the Tool. + +These parameters define the inputs needed for the Tool’s execution, including the expected data type and description for each input field. Structured as a stringified JSON schema, this format ensures the Tool receives data in the expected format. + +
+
+ +
+
+ +**version_description:** `typing.Optional[str]` — An optional description of the Tool version. + +
+
+ +
+
+ +**description:** `typing.Optional[str]` — An optional description of what the Tool does, used by the supplemental LLM to choose when and how to call the function. + +
+
+ +
+
+ +**fallback_content:** `typing.Optional[str]` — Optional text passed to the supplemental LLM in place of the tool call result. The LLM then uses this text to generate a response back to the user, ensuring continuity in the conversation if the Tool errors.
@@ -395,7 +438,7 @@ client.expression_measurement.batch.get_job_predictions(
-
client.expression_measurement.batch.get_job_artifacts(...) +
client.empathic_voice.tools.delete_tool(...)
@@ -407,7 +450,9 @@ client.expression_measurement.batch.get_job_predictions(
-Get the artifacts ZIP of a completed inference job. +Deletes a **Tool** and its versions. + +Refer to our [tool use](/docs/empathic-voice-interface-evi/tool-use#function-calling) guide for comprehensive instructions on defining and integrating tools into EVI.
@@ -427,8 +472,8 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.expression_measurement.batch.get_job_artifacts( - id="string", +client.empathic_voice.tools.delete_tool( + id="00183a3f-79ba-413d-9f3b-609864268bea", ) ``` @@ -445,7 +490,7 @@ client.expression_measurement.batch.get_job_artifacts(
-**id:** `str` — The unique identifier for the job. +**id:** `str` — Identifier for a Tool. Formatted as a UUID.
@@ -465,7 +510,7 @@ client.expression_measurement.batch.get_job_artifacts(
-
client.expression_measurement.batch.start_inference_job_from_local_file(...) +
client.empathic_voice.tools.update_tool_name(...)
@@ -477,7 +522,9 @@ client.expression_measurement.batch.get_job_artifacts(
-Start a new batch inference job. +Updates the name of a **Tool**. + +Refer to our [tool use](/docs/empathic-voice-interface-evi/tool-use#function-calling) guide for comprehensive instructions on defining and integrating tools into EVI.
@@ -497,7 +544,10 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.expression_measurement.batch.start_inference_job_from_local_file() +client.empathic_voice.tools.update_tool_name( + id="00183a3f-79ba-413d-9f3b-609864268bea", + name="get_current_temperature", +) ``` @@ -513,9 +563,7 @@ client.expression_measurement.batch.start_inference_job_from_local_file()
-**file:** `from __future__ import annotations - -typing.List[core.File]` — See core.File for more documentation +**id:** `str` — Identifier for a Tool. Formatted as a UUID.
@@ -523,7 +571,7 @@ typing.List[core.File]` — See core.File for more documentation
-**json:** `typing.Optional[InferenceBaseRequest]` — Stringified JSON object containing the inference job configuration. +**name:** `str` — Name applied to all versions of a particular Tool.
@@ -543,8 +591,7 @@ typing.List[core.File]` — See core.File for more documentation
-## EmpathicVoice Tools -
client.empathic_voice.tools.list_tools(...) +
client.empathic_voice.tools.get_tool_version(...)
@@ -556,7 +603,7 @@ typing.List[core.File]` — See core.File for more documentation
-Fetches a paginated list of **Tools**. +Fetches a specified version of a **Tool**. Refer to our [tool use](/docs/empathic-voice-interface-evi/tool-use#function-calling) guide for comprehensive instructions on defining and integrating tools into EVI.
@@ -578,15 +625,10 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -response = client.empathic_voice.tools.list_tools( - page_number=0, - page_size=2, +client.empathic_voice.tools.get_tool_version( + id="00183a3f-79ba-413d-9f3b-609864268bea", + version=1, ) -for item in response: - yield item -# alternatively, you can paginate page-by-page -for page in response.iter_pages(): - yield page ```
@@ -602,11 +644,7 @@ for page in response.iter_pages():
-**page_number:** `typing.Optional[int]` - -Specifies the page number to retrieve, enabling pagination. - -This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page. +**id:** `str` — Identifier for a Tool. Formatted as a UUID.
@@ -614,27 +652,13 @@ This parameter uses zero-based indexing. For example, setting `page_number` to 0
-**page_size:** `typing.Optional[int]` - -Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. - -For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. - -
-
- -
-
+**version:** `int` -**restrict_to_most_recent:** `typing.Optional[bool]` — By default, `restrict_to_most_recent` is set to true, returning only the latest version of each tool. To include all versions of each tool in the list, set `restrict_to_most_recent` to false. - -
-
+Version number for a Tool. -
-
+Tools, Configs, Custom Voices, and Prompts are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. -**name:** `typing.Optional[str]` — Filter to only include tools with this name. +Version numbers are integer values representing different iterations of the Tool. Each update to the Tool increments its version number.
@@ -654,7 +678,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-
client.empathic_voice.tools.create_tool(...) +
client.empathic_voice.tools.delete_tool_version(...)
@@ -666,7 +690,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-Creates a **Tool** that can be added to an [EVI configuration](/reference/empathic-voice-interface-evi/configs/create-config). +Deletes a specified version of a **Tool**. Refer to our [tool use](/docs/empathic-voice-interface-evi/tool-use#function-calling) guide for comprehensive instructions on defining and integrating tools into EVI.
@@ -688,12 +712,9 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.tools.create_tool( - name="get_current_weather", - parameters='{ "type": "object", "properties": { "location": { "type": "string", "description": "The city and state, e.g. San Francisco, CA" }, "format": { "type": "string", "enum": ["celsius", "fahrenheit"], "description": "The temperature unit to use. Infer this from the users location." } }, "required": ["location", "format"] }', - version_description="Fetches current weather and uses celsius or fahrenheit based on location of user.", - description="This tool is for getting the current weather.", - fallback_content="Unable to fetch current weather.", +client.empathic_voice.tools.delete_tool_version( + id="00183a3f-79ba-413d-9f3b-609864268bea", + version=1, ) ``` @@ -710,19 +731,7 @@ client.empathic_voice.tools.create_tool(
-**name:** `str` — Name applied to all versions of a particular Tool. - -
-
- -
-
- -**parameters:** `str` - -Stringified JSON defining the parameters used by this version of the Tool. - -These parameters define the inputs needed for the Tool’s execution, including the expected data type and description for each input field. Structured as a stringified JSON schema, this format ensures the Tool receives data in the expected format. +**id:** `str` — Identifier for a Tool. Formatted as a UUID.
@@ -730,23 +739,13 @@ These parameters define the inputs needed for the Tool’s execution, including
-**version_description:** `typing.Optional[str]` — An optional description of the Tool version. - -
-
- -
-
+**version:** `int` -**description:** `typing.Optional[str]` — An optional description of what the Tool does, used by the supplemental LLM to choose when and how to call the function. - -
-
+Version number for a Tool. -
-
+Tools, Configs, Custom Voices, and Prompts are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. -**fallback_content:** `typing.Optional[str]` — Optional text passed to the supplemental LLM in place of the tool call result. The LLM then uses this text to generate a response back to the user, ensuring continuity in the conversation if the Tool errors. +Version numbers are integer values representing different iterations of the Tool. Each update to the Tool increments its version number.
@@ -766,7 +765,7 @@ These parameters define the inputs needed for the Tool’s execution, including
-
client.empathic_voice.tools.list_tool_versions(...) +
client.empathic_voice.tools.update_tool_description(...)
@@ -778,7 +777,7 @@ These parameters define the inputs needed for the Tool’s execution, including
-Fetches a list of a **Tool's** versions. +Updates the description of a specified **Tool** version. Refer to our [tool use](/docs/empathic-voice-interface-evi/tool-use#function-calling) guide for comprehensive instructions on defining and integrating tools into EVI.
@@ -800,8 +799,10 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.tools.list_tool_versions( +client.empathic_voice.tools.update_tool_description( id="00183a3f-79ba-413d-9f3b-609864268bea", + version=1, + version_description="Fetches current temperature, precipitation, wind speed, AQI, and other weather conditions. Uses Celsius, Fahrenheit, or kelvin depending on user's region.", ) ``` @@ -826,23 +827,13 @@ client.empathic_voice.tools.list_tool_versions(
-**page_number:** `typing.Optional[int]` - -Specifies the page number to retrieve, enabling pagination. - -This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page. - -
-
- -
-
+**version:** `int` -**page_size:** `typing.Optional[int]` +Version number for a Tool. -Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. +Tools, Configs, Custom Voices, and Prompts are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. -For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. +Version numbers are integer values representing different iterations of the Tool. Each update to the Tool increments its version number.
@@ -850,7 +841,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-**restrict_to_most_recent:** `typing.Optional[bool]` — By default, `restrict_to_most_recent` is set to true, returning only the latest version of each tool. To include all versions of each tool in the list, set `restrict_to_most_recent` to false. +**version_description:** `typing.Optional[str]` — An optional description of the Tool version.
@@ -870,7 +861,8 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-
client.empathic_voice.tools.create_tool_version(...) +## EmpathicVoice Prompts +
client.empathic_voice.prompts.list_prompts(...)
@@ -882,9 +874,9 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-Updates a **Tool** by creating a new version of the **Tool**. +Fetches a paginated list of **Prompts**. -Refer to our [tool use](/docs/empathic-voice-interface-evi/tool-use#function-calling) guide for comprehensive instructions on defining and integrating tools into EVI. +See our [prompting guide](/docs/empathic-voice-interface-evi/phone-calling) for tips on crafting your system prompt.
@@ -904,13 +896,15 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.tools.create_tool_version( - id="00183a3f-79ba-413d-9f3b-609864268bea", - parameters='{ "type": "object", "properties": { "location": { "type": "string", "description": "The city and state, e.g. San Francisco, CA" }, "format": { "type": "string", "enum": ["celsius", "fahrenheit", "kelvin"], "description": "The temperature unit to use. Infer this from the users location." } }, "required": ["location", "format"] }', - version_description="Fetches current weather and uses celsius, fahrenheit, or kelvin based on location of user.", - fallback_content="Unable to fetch current weather.", - description="This tool is for getting the current weather.", +response = client.empathic_voice.prompts.list_prompts( + page_number=0, + page_size=2, ) +for item in response: + yield item +# alternatively, you can paginate page-by-page +for page in response.iter_pages(): + yield page ``` @@ -926,19 +920,11 @@ client.empathic_voice.tools.create_tool_version(
-**id:** `str` — Identifier for a Tool. Formatted as a UUID. - -
-
- -
-
- -**parameters:** `str` +**page_number:** `typing.Optional[int]` -Stringified JSON defining the parameters used by this version of the Tool. +Specifies the page number to retrieve, enabling pagination. -These parameters define the inputs needed for the Tool’s execution, including the expected data type and description for each input field. Structured as a stringified JSON schema, this format ensures the Tool receives data in the expected format. +This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page.
@@ -946,7 +932,11 @@ These parameters define the inputs needed for the Tool’s execution, including
-**version_description:** `typing.Optional[str]` — An optional description of the Tool version. +**page_size:** `typing.Optional[int]` + +Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. + +For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10.
@@ -954,7 +944,7 @@ These parameters define the inputs needed for the Tool’s execution, including
-**description:** `typing.Optional[str]` — An optional description of what the Tool does, used by the supplemental LLM to choose when and how to call the function. +**restrict_to_most_recent:** `typing.Optional[bool]` — By default, `restrict_to_most_recent` is set to true, returning only the latest version of each prompt. To include all versions of each prompt in the list, set `restrict_to_most_recent` to false.
@@ -962,7 +952,7 @@ These parameters define the inputs needed for the Tool’s execution, including
-**fallback_content:** `typing.Optional[str]` — Optional text passed to the supplemental LLM in place of the tool call result. The LLM then uses this text to generate a response back to the user, ensuring continuity in the conversation if the Tool errors. +**name:** `typing.Optional[str]` — Filter to only include prompts with this name.
@@ -982,7 +972,7 @@ These parameters define the inputs needed for the Tool’s execution, including
-
client.empathic_voice.tools.delete_tool(...) +
client.empathic_voice.prompts.create_prompt(...)
@@ -994,9 +984,9 @@ These parameters define the inputs needed for the Tool’s execution, including
-Deletes a **Tool** and its versions. +Creates a **Prompt** that can be added to an [EVI configuration](/reference/empathic-voice-interface-evi/configs/create-config). -Refer to our [tool use](/docs/empathic-voice-interface-evi/tool-use#function-calling) guide for comprehensive instructions on defining and integrating tools into EVI. +See our [prompting guide](/docs/empathic-voice-interface-evi/phone-calling) for tips on crafting your system prompt.
@@ -1016,8 +1006,9 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.tools.delete_tool( - id="00183a3f-79ba-413d-9f3b-609864268bea", +client.empathic_voice.prompts.create_prompt( + name="Weather Assistant Prompt", + text="You are an AI weather assistant providing users with accurate and up-to-date weather information. Respond to user queries concisely and clearly. Use simple language and avoid technical jargon. Provide temperature, precipitation, wind conditions, and any weather alerts. Include helpful tips if severe weather is expected.", ) ``` @@ -1034,7 +1025,29 @@ client.empathic_voice.tools.delete_tool(
-**id:** `str` — Identifier for a Tool. Formatted as a UUID. +**name:** `str` — Name applied to all versions of a particular Prompt. + +
+
+ +
+
+ +**text:** `str` + +Instructions used to shape EVI’s behavior, responses, and style. + +You can use the Prompt to define a specific goal or role for EVI, specifying how it should act or what it should focus on during the conversation. For example, EVI can be instructed to act as a customer support representative, a fitness coach, or a travel advisor, each with its own set of behaviors and response styles. + +For help writing a system prompt, see our [Prompting Guide](/docs/empathic-voice-interface-evi/prompting). + +
+
+ +
+
+ +**version_description:** `typing.Optional[str]` — An optional description of the Prompt version.
@@ -1054,7 +1067,7 @@ client.empathic_voice.tools.delete_tool(
-
client.empathic_voice.tools.update_tool_name(...) +
client.empathic_voice.prompts.list_prompt_versions(...)
@@ -1066,9 +1079,9 @@ client.empathic_voice.tools.delete_tool(
-Updates the name of a **Tool**. +Fetches a list of a **Prompt's** versions. -Refer to our [tool use](/docs/empathic-voice-interface-evi/tool-use#function-calling) guide for comprehensive instructions on defining and integrating tools into EVI. +See our [prompting guide](/docs/empathic-voice-interface-evi/phone-calling) for tips on crafting your system prompt.
@@ -1088,9 +1101,8 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.tools.update_tool_name( - id="00183a3f-79ba-413d-9f3b-609864268bea", - name="get_current_temperature", +client.empathic_voice.prompts.list_prompt_versions( + id="af699d45-2985-42cc-91b9-af9e5da3bac5", ) ``` @@ -1107,7 +1119,7 @@ client.empathic_voice.tools.update_tool_name(
-**id:** `str` — Identifier for a Tool. Formatted as a UUID. +**id:** `str` — Identifier for a Prompt. Formatted as a UUID.
@@ -1115,7 +1127,31 @@ client.empathic_voice.tools.update_tool_name(
-**name:** `str` — Name applied to all versions of a particular Tool. +**page_number:** `typing.Optional[int]` + +Specifies the page number to retrieve, enabling pagination. + +This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page. + +
+
+ +
+
+ +**page_size:** `typing.Optional[int]` + +Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. + +For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. + +
+
+ +
+
+ +**restrict_to_most_recent:** `typing.Optional[bool]` — By default, `restrict_to_most_recent` is set to true, returning only the latest version of each prompt. To include all versions of each prompt in the list, set `restrict_to_most_recent` to false.
@@ -1135,7 +1171,7 @@ client.empathic_voice.tools.update_tool_name(
-
client.empathic_voice.tools.get_tool_version(...) +
client.empathic_voice.prompts.create_prompt_verison(...)
@@ -1147,9 +1183,9 @@ client.empathic_voice.tools.update_tool_name(
-Fetches a specified version of a **Tool**. +Updates a **Prompt** by creating a new version of the **Prompt**. -Refer to our [tool use](/docs/empathic-voice-interface-evi/tool-use#function-calling) guide for comprehensive instructions on defining and integrating tools into EVI. +See our [prompting guide](/docs/empathic-voice-interface-evi/phone-calling) for tips on crafting your system prompt.
@@ -1169,9 +1205,10 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.tools.get_tool_version( - id="00183a3f-79ba-413d-9f3b-609864268bea", - version=1, +client.empathic_voice.prompts.create_prompt_verison( + id="af699d45-2985-42cc-91b9-af9e5da3bac5", + text="You are an updated version of an AI weather assistant providing users with accurate and up-to-date weather information. Respond to user queries concisely and clearly. Use simple language and avoid technical jargon. Provide temperature, precipitation, wind conditions, and any weather alerts. Include helpful tips if severe weather is expected.", + version_description="This is an updated version of the Weather Assistant Prompt.", ) ``` @@ -1188,7 +1225,7 @@ client.empathic_voice.tools.get_tool_version(
-**id:** `str` — Identifier for a Tool. Formatted as a UUID. +**id:** `str` — Identifier for a Prompt. Formatted as a UUID.
@@ -1196,13 +1233,21 @@ client.empathic_voice.tools.get_tool_version(
-**version:** `int` +**text:** `str` -Version number for a Tool. +Instructions used to shape EVI’s behavior, responses, and style for this version of the Prompt. -Tools, Configs, Custom Voices, and Prompts are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. +You can use the Prompt to define a specific goal or role for EVI, specifying how it should act or what it should focus on during the conversation. For example, EVI can be instructed to act as a customer support representative, a fitness coach, or a travel advisor, each with its own set of behaviors and response styles. -Version numbers are integer values representing different iterations of the Tool. Each update to the Tool increments its version number. +For help writing a system prompt, see our [Prompting Guide](/docs/empathic-voice-interface-evi/prompting). + +
+
+ +
+
+ +**version_description:** `typing.Optional[str]` — An optional description of the Prompt version.
@@ -1222,7 +1267,7 @@ Version numbers are integer values representing different iterations of the Tool
-
client.empathic_voice.tools.delete_tool_version(...) +
client.empathic_voice.prompts.delete_prompt(...)
@@ -1234,9 +1279,9 @@ Version numbers are integer values representing different iterations of the Tool
-Deletes a specified version of a **Tool**. +Deletes a **Prompt** and its versions. -Refer to our [tool use](/docs/empathic-voice-interface-evi/tool-use#function-calling) guide for comprehensive instructions on defining and integrating tools into EVI. +See our [prompting guide](/docs/empathic-voice-interface-evi/phone-calling) for tips on crafting your system prompt.
@@ -1256,9 +1301,8 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.tools.delete_tool_version( - id="00183a3f-79ba-413d-9f3b-609864268bea", - version=1, +client.empathic_voice.prompts.delete_prompt( + id="af699d45-2985-42cc-91b9-af9e5da3bac5", ) ``` @@ -1275,21 +1319,7 @@ client.empathic_voice.tools.delete_tool_version(
-**id:** `str` — Identifier for a Tool. Formatted as a UUID. - -
-
- -
-
- -**version:** `int` - -Version number for a Tool. - -Tools, Configs, Custom Voices, and Prompts are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. - -Version numbers are integer values representing different iterations of the Tool. Each update to the Tool increments its version number. +**id:** `str` — Identifier for a Prompt. Formatted as a UUID.
@@ -1309,7 +1339,7 @@ Version numbers are integer values representing different iterations of the Tool
-
client.empathic_voice.tools.update_tool_description(...) +
client.empathic_voice.prompts.update_prompt_name(...)
@@ -1321,9 +1351,9 @@ Version numbers are integer values representing different iterations of the Tool
-Updates the description of a specified **Tool** version. +Updates the name of a **Prompt**. -Refer to our [tool use](/docs/empathic-voice-interface-evi/tool-use#function-calling) guide for comprehensive instructions on defining and integrating tools into EVI. +See our [prompting guide](/docs/empathic-voice-interface-evi/phone-calling) for tips on crafting your system prompt.
@@ -1343,10 +1373,9 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.tools.update_tool_description( - id="00183a3f-79ba-413d-9f3b-609864268bea", - version=1, - version_description="Fetches current temperature, precipitation, wind speed, AQI, and other weather conditions. Uses Celsius, Fahrenheit, or kelvin depending on user's region.", +client.empathic_voice.prompts.update_prompt_name( + id="af699d45-2985-42cc-91b9-af9e5da3bac5", + name="Updated Weather Assistant Prompt Name", ) ``` @@ -1363,21 +1392,7 @@ client.empathic_voice.tools.update_tool_description(
-**id:** `str` — Identifier for a Tool. Formatted as a UUID. - -
-
- -
-
- -**version:** `int` - -Version number for a Tool. - -Tools, Configs, Custom Voices, and Prompts are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. - -Version numbers are integer values representing different iterations of the Tool. Each update to the Tool increments its version number. +**id:** `str` — Identifier for a Prompt. Formatted as a UUID.
@@ -1385,7 +1400,7 @@ Version numbers are integer values representing different iterations of the Tool
-**version_description:** `typing.Optional[str]` — An optional description of the Tool version. +**name:** `str` — Name applied to all versions of a particular Prompt.
@@ -1405,8 +1420,7 @@ Version numbers are integer values representing different iterations of the Tool
-## EmpathicVoice Prompts -
client.empathic_voice.prompts.list_prompts(...) +
client.empathic_voice.prompts.get_prompt_version(...)
@@ -1418,7 +1432,7 @@ Version numbers are integer values representing different iterations of the Tool
-Fetches a paginated list of **Prompts**. +Fetches a specified version of a **Prompt**. See our [prompting guide](/docs/empathic-voice-interface-evi/phone-calling) for tips on crafting your system prompt.
@@ -1440,15 +1454,10 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -response = client.empathic_voice.prompts.list_prompts( - page_number=0, - page_size=2, +client.empathic_voice.prompts.get_prompt_version( + id="af699d45-2985-42cc-91b9-af9e5da3bac5", + version=0, ) -for item in response: - yield item -# alternatively, you can paginate page-by-page -for page in response.iter_pages(): - yield page ```
@@ -1464,11 +1473,7 @@ for page in response.iter_pages():
-**page_number:** `typing.Optional[int]` - -Specifies the page number to retrieve, enabling pagination. - -This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page. +**id:** `str` — Identifier for a Prompt. Formatted as a UUID.
@@ -1476,27 +1481,13 @@ This parameter uses zero-based indexing. For example, setting `page_number` to 0
-**page_size:** `typing.Optional[int]` - -Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. - -For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. - -
-
- -
-
+**version:** `int` -**restrict_to_most_recent:** `typing.Optional[bool]` — By default, `restrict_to_most_recent` is set to true, returning only the latest version of each prompt. To include all versions of each prompt in the list, set `restrict_to_most_recent` to false. - -
-
+Version number for a Prompt. -
-
+Prompts, Configs, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. -**name:** `typing.Optional[str]` — Filter to only include prompts with this name. +Version numbers are integer values representing different iterations of the Prompt. Each update to the Prompt increments its version number.
@@ -1516,7 +1507,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-
client.empathic_voice.prompts.create_prompt(...) +
client.empathic_voice.prompts.delete_prompt_version(...)
@@ -1528,7 +1519,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-Creates a **Prompt** that can be added to an [EVI configuration](/reference/empathic-voice-interface-evi/configs/create-config). +Deletes a specified version of a **Prompt**. See our [prompting guide](/docs/empathic-voice-interface-evi/phone-calling) for tips on crafting your system prompt.
@@ -1550,9 +1541,9 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.prompts.create_prompt( - name="Weather Assistant Prompt", - text="You are an AI weather assistant providing users with accurate and up-to-date weather information. Respond to user queries concisely and clearly. Use simple language and avoid technical jargon. Provide temperature, precipitation, wind conditions, and any weather alerts. Include helpful tips if severe weather is expected.", +client.empathic_voice.prompts.delete_prompt_version( + id="af699d45-2985-42cc-91b9-af9e5da3bac5", + version=1, ) ``` @@ -1569,7 +1560,7 @@ client.empathic_voice.prompts.create_prompt(
-**name:** `str` — Name applied to all versions of a particular Prompt. +**id:** `str` — Identifier for a Prompt. Formatted as a UUID.
@@ -1577,21 +1568,13 @@ client.empathic_voice.prompts.create_prompt(
-**text:** `str` - -Instructions used to shape EVI’s behavior, responses, and style. - -You can use the Prompt to define a specific goal or role for EVI, specifying how it should act or what it should focus on during the conversation. For example, EVI can be instructed to act as a customer support representative, a fitness coach, or a travel advisor, each with its own set of behaviors and response styles. +**version:** `int` -For help writing a system prompt, see our [Prompting Guide](/docs/empathic-voice-interface-evi/prompting). - -
-
+Version number for a Prompt. -
-
+Prompts, Configs, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. -**version_description:** `typing.Optional[str]` — An optional description of the Prompt version. +Version numbers are integer values representing different iterations of the Prompt. Each update to the Prompt increments its version number.
@@ -1611,7 +1594,7 @@ For help writing a system prompt, see our [Prompting Guide](/docs/empathic-voice
-
client.empathic_voice.prompts.list_prompt_versions(...) +
client.empathic_voice.prompts.update_prompt_description(...)
@@ -1623,7 +1606,7 @@ For help writing a system prompt, see our [Prompting Guide](/docs/empathic-voice
-Fetches a list of a **Prompt's** versions. +Updates the description of a **Prompt**. See our [prompting guide](/docs/empathic-voice-interface-evi/phone-calling) for tips on crafting your system prompt.
@@ -1645,8 +1628,10 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.prompts.list_prompt_versions( +client.empathic_voice.prompts.update_prompt_description( id="af699d45-2985-42cc-91b9-af9e5da3bac5", + version=1, + version_description="This is an updated version_description.", ) ``` @@ -1671,23 +1656,13 @@ client.empathic_voice.prompts.list_prompt_versions(
-**page_number:** `typing.Optional[int]` - -Specifies the page number to retrieve, enabling pagination. - -This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page. - -
-
- -
-
+**version:** `int` -**page_size:** `typing.Optional[int]` +Version number for a Prompt. -Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. +Prompts, Configs, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. -For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. +Version numbers are integer values representing different iterations of the Prompt. Each update to the Prompt increments its version number.
@@ -1695,7 +1670,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-**restrict_to_most_recent:** `typing.Optional[bool]` — By default, `restrict_to_most_recent` is set to true, returning only the latest version of each prompt. To include all versions of each prompt in the list, set `restrict_to_most_recent` to false. +**version_description:** `typing.Optional[str]` — An optional description of the Prompt version.
@@ -1715,7 +1690,8 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-
client.empathic_voice.prompts.create_prompt_verison(...) +## EmpathicVoice CustomVoices +
client.empathic_voice.custom_voices.list_custom_voices(...)
@@ -1727,9 +1703,9 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-Updates a **Prompt** by creating a new version of the **Prompt**. +Fetches a paginated list of **Custom Voices**. -See our [prompting guide](/docs/empathic-voice-interface-evi/phone-calling) for tips on crafting your system prompt. +Refer to our [voices guide](/docs/empathic-voice-interface-evi/voices) for details on creating a custom voice.
@@ -1749,11 +1725,7 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.prompts.create_prompt_verison( - id="af699d45-2985-42cc-91b9-af9e5da3bac5", - text="You are an updated version of an AI weather assistant providing users with accurate and up-to-date weather information. Respond to user queries concisely and clearly. Use simple language and avoid technical jargon. Provide temperature, precipitation, wind conditions, and any weather alerts. Include helpful tips if severe weather is expected.", - version_description="This is an updated version of the Weather Assistant Prompt.", -) +client.empathic_voice.custom_voices.list_custom_voices() ``` @@ -1769,7 +1741,11 @@ client.empathic_voice.prompts.create_prompt_verison(
-**id:** `str` — Identifier for a Prompt. Formatted as a UUID. +**page_number:** `typing.Optional[int]` + +Specifies the page number to retrieve, enabling pagination. + +This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page.
@@ -1777,13 +1753,11 @@ client.empathic_voice.prompts.create_prompt_verison(
-**text:** `str` - -Instructions used to shape EVI’s behavior, responses, and style for this version of the Prompt. +**page_size:** `typing.Optional[int]` -You can use the Prompt to define a specific goal or role for EVI, specifying how it should act or what it should focus on during the conversation. For example, EVI can be instructed to act as a customer support representative, a fitness coach, or a travel advisor, each with its own set of behaviors and response styles. +Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. -For help writing a system prompt, see our [Prompting Guide](/docs/empathic-voice-interface-evi/prompting). +For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10.
@@ -1791,7 +1765,7 @@ For help writing a system prompt, see our [Prompting Guide](/docs/empathic-voice
-**version_description:** `typing.Optional[str]` — An optional description of the Prompt version. +**name:** `typing.Optional[str]` — Filter to only include custom voices with this name.
@@ -1811,7 +1785,7 @@ For help writing a system prompt, see our [Prompting Guide](/docs/empathic-voice
-
client.empathic_voice.prompts.delete_prompt(...) +
client.empathic_voice.custom_voices.create_custom_voice(...)
@@ -1823,9 +1797,9 @@ For help writing a system prompt, see our [Prompting Guide](/docs/empathic-voice
-Deletes a **Prompt** and its versions. +Creates a **Custom Voice** that can be added to an [EVI configuration](/reference/empathic-voice-interface-evi/configs/create-config). -See our [prompting guide](/docs/empathic-voice-interface-evi/phone-calling) for tips on crafting your system prompt. +Refer to our [voices guide](/docs/empathic-voice-interface-evi/voices) for details on creating a custom voice.
@@ -1845,8 +1819,9 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.prompts.delete_prompt( - id="af699d45-2985-42cc-91b9-af9e5da3bac5", +client.empathic_voice.custom_voices.create_custom_voice( + name="name", + base_voice="ITO", ) ``` @@ -1863,7 +1838,27 @@ client.empathic_voice.prompts.delete_prompt(
-**id:** `str` — Identifier for a Prompt. Formatted as a UUID. +**name:** `str` — The name of the Custom Voice. Maximum length of 75 characters. Will be converted to all-uppercase. (e.g., "sample voice" becomes "SAMPLE VOICE") + +
+
+ +
+
+ +**base_voice:** `PostedCustomVoiceBaseVoice` — Specifies the base voice used to create the Custom Voice. + +
+
+ +
+
+ +**parameters:** `typing.Optional[PostedCustomVoiceParameters]` + +The specified attributes of a Custom Voice. + +If no parameters are specified then all attributes will be set to their defaults, meaning no modfications will be made to the base voice.
@@ -1883,7 +1878,7 @@ client.empathic_voice.prompts.delete_prompt(
-
client.empathic_voice.prompts.update_prompt_name(...) +
client.empathic_voice.custom_voices.get_custom_voice(...)
@@ -1895,9 +1890,9 @@ client.empathic_voice.prompts.delete_prompt(
-Updates the name of a **Prompt**. +Fetches a specific **Custom Voice** by ID. -See our [prompting guide](/docs/empathic-voice-interface-evi/phone-calling) for tips on crafting your system prompt. +Refer to our [voices guide](/docs/empathic-voice-interface-evi/voices) for details on creating a custom voice.
@@ -1917,9 +1912,8 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.prompts.update_prompt_name( - id="af699d45-2985-42cc-91b9-af9e5da3bac5", - name="Updated Weather Assistant Prompt Name", +client.empathic_voice.custom_voices.get_custom_voice( + id="id", ) ``` @@ -1936,15 +1930,7 @@ client.empathic_voice.prompts.update_prompt_name(
-**id:** `str` — Identifier for a Prompt. Formatted as a UUID. - -
-
- -
-
- -**name:** `str` — Name applied to all versions of a particular Prompt. +**id:** `str` — Identifier for a Custom Voice. Formatted as a UUID.
@@ -1964,7 +1950,7 @@ client.empathic_voice.prompts.update_prompt_name(
-
client.empathic_voice.prompts.get_prompt_version(...) +
client.empathic_voice.custom_voices.create_custom_voice_version(...)
@@ -1976,9 +1962,9 @@ client.empathic_voice.prompts.update_prompt_name(
-Fetches a specified version of a **Prompt**. +Updates a **Custom Voice** by creating a new version of the **Custom Voice**. -See our [prompting guide](/docs/empathic-voice-interface-evi/phone-calling) for tips on crafting your system prompt. +Refer to our [voices guide](/docs/empathic-voice-interface-evi/voices) for details on creating a custom voice.
@@ -1998,9 +1984,10 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.prompts.get_prompt_version( - id="af699d45-2985-42cc-91b9-af9e5da3bac5", - version=0, +client.empathic_voice.custom_voices.create_custom_voice_version( + id="id", + name="name", + base_voice="ITO", ) ``` @@ -2017,7 +2004,7 @@ client.empathic_voice.prompts.get_prompt_version(
-**id:** `str` — Identifier for a Prompt. Formatted as a UUID. +**id:** `str` — Identifier for a Custom Voice. Formatted as a UUID.
@@ -2025,13 +2012,7 @@ client.empathic_voice.prompts.get_prompt_version(
-**version:** `int` - -Version number for a Prompt. - -Prompts, Configs, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. - -Version numbers are integer values representing different iterations of the Prompt. Each update to the Prompt increments its version number. +**name:** `str` — The name of the Custom Voice. Maximum length of 75 characters. Will be converted to all-uppercase. (e.g., "sample voice" becomes "SAMPLE VOICE")
@@ -2039,7 +2020,27 @@ Version numbers are integer values representing different iterations of the Prom
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**base_voice:** `PostedCustomVoiceBaseVoice` — Specifies the base voice used to create the Custom Voice. + +
+
+ +
+
+ 
+**parameters:** `typing.Optional[PostedCustomVoiceParameters]` 
+
+The specified attributes of a Custom Voice.
+
+If no parameters are specified then all attributes will be set to their defaults, meaning no modifications will be made to the base voice.
+ 
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -2051,7 +2052,7 @@ Version numbers are integer values representing different iterations of the Prom
-
client.empathic_voice.prompts.delete_prompt_version(...) +
client.empathic_voice.custom_voices.delete_custom_voice(...)
@@ -2063,9 +2064,9 @@ Version numbers are integer values representing different iterations of the Prom
-Deletes a specified version of a **Prompt**. +Deletes a **Custom Voice** and its versions. -See our [prompting guide](/docs/empathic-voice-interface-evi/phone-calling) for tips on crafting your system prompt. +Refer to our [voices guide](/docs/empathic-voice-interface-evi/voices) for details on creating a custom voice.
@@ -2085,9 +2086,8 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.prompts.delete_prompt_version( - id="af699d45-2985-42cc-91b9-af9e5da3bac5", - version=1, +client.empathic_voice.custom_voices.delete_custom_voice( + id="id", ) ``` @@ -2104,21 +2104,7 @@ client.empathic_voice.prompts.delete_prompt_version(
-**id:** `str` — Identifier for a Prompt. Formatted as a UUID. - -
-
- -
-
- -**version:** `int` - -Version number for a Prompt. - -Prompts, Configs, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. - -Version numbers are integer values representing different iterations of the Prompt. Each update to the Prompt increments its version number. +**id:** `str` — Identifier for a Custom Voice. Formatted as a UUID.
@@ -2138,7 +2124,7 @@ Version numbers are integer values representing different iterations of the Prom
-
client.empathic_voice.prompts.update_prompt_description(...) +
client.empathic_voice.custom_voices.update_custom_voice_name(...)
@@ -2150,9 +2136,9 @@ Version numbers are integer values representing different iterations of the Prom
-Updates the description of a **Prompt**. +Updates the name of a **Custom Voice**. -See our [prompting guide](/docs/empathic-voice-interface-evi/phone-calling) for tips on crafting your system prompt. +Refer to our [voices guide](/docs/empathic-voice-interface-evi/voices) for details on creating a custom voice.
@@ -2172,10 +2158,9 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.prompts.update_prompt_description( - id="af699d45-2985-42cc-91b9-af9e5da3bac5", - version=1, - version_description="This is an updated version_description.", +client.empathic_voice.custom_voices.update_custom_voice_name( + id="string", + name="string", ) ``` @@ -2192,21 +2177,7 @@ client.empathic_voice.prompts.update_prompt_description(
-**id:** `str` — Identifier for a Prompt. Formatted as a UUID. - -
-
- -
-
- -**version:** `int` - -Version number for a Prompt. - -Prompts, Configs, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. - -Version numbers are integer values representing different iterations of the Prompt. Each update to the Prompt increments its version number. +**id:** `str` — Identifier for a Custom Voice. Formatted as a UUID.
@@ -2214,7 +2185,7 @@ Version numbers are integer values representing different iterations of the Prom
-**version_description:** `typing.Optional[str]` — An optional description of the Prompt version. +**name:** `str` — The name of the Custom Voice. Maximum length of 75 characters. Will be converted to all-uppercase. (e.g., "sample voice" becomes "SAMPLE VOICE")
@@ -2234,8 +2205,8 @@ Version numbers are integer values representing different iterations of the Prom
-## EmpathicVoice CustomVoices -
client.empathic_voice.custom_voices.list_custom_voices(...) +## EmpathicVoice Configs +
client.empathic_voice.configs.list_configs(...)
@@ -2247,9 +2218,9 @@ Version numbers are integer values representing different iterations of the Prom
-Fetches a paginated list of **Custom Voices**. +Fetches a paginated list of **Configs**. -Refer to our [voices guide](/docs/empathic-voice-interface-evi/voices) for details on creating a custom voice. +For more details on configuration options and how to configure EVI, see our [configuration guide](/docs/empathic-voice-interface-evi/configuration).
@@ -2269,7 +2240,10 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.custom_voices.list_custom_voices() +client.empathic_voice.configs.list_configs( + page_number=0, + page_size=1, +) ``` @@ -2309,7 +2283,15 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-**name:** `typing.Optional[str]` — Filter to only include custom voices with this name. +**restrict_to_most_recent:** `typing.Optional[bool]` — By default, `restrict_to_most_recent` is set to true, returning only the latest version of each config. To include all versions of each config in the list, set `restrict_to_most_recent` to false. + +
+
+ +
+
+ +**name:** `typing.Optional[str]` — Filter to only include configs with this name.
@@ -2329,7 +2311,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-
client.empathic_voice.custom_voices.create_custom_voice(...) +
client.empathic_voice.configs.create_config(...)
@@ -2341,9 +2323,9 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-Creates a **Custom Voice** that can be added to an [EVI configuration](/reference/empathic-voice-interface-evi/configs/create-config). +Creates a **Config** which can be applied to EVI. -Refer to our [voices guide](/docs/empathic-voice-interface-evi/voices) for details on creating a custom voice. +For more details on configuration options and how to configure EVI, see our [configuration guide](/docs/empathic-voice-interface-evi/configuration).
@@ -2359,13 +2341,46 @@ Refer to our [voices guide](/docs/empathic-voice-interface-evi/voices) for detai ```python from hume import HumeClient +from hume.empathic_voice import ( + PostedConfigPromptSpec, + PostedEventMessageSpec, + PostedEventMessageSpecs, + PostedLanguageModel, + PostedVoice, +) client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.custom_voices.create_custom_voice( - name="name", - base_voice="ITO", +client.empathic_voice.configs.create_config( + name="Weather Assistant Config", + prompt=PostedConfigPromptSpec( + id="af699d45-2985-42cc-91b9-af9e5da3bac5", + version=0, + ), + evi_version="2", + voice=PostedVoice( + name="SAMPLE VOICE", + ), + language_model=PostedLanguageModel( + model_provider="ANTHROPIC", + model_resource="claude-3-5-sonnet-20240620", + temperature=1.0, + ), + event_messages=PostedEventMessageSpecs( + on_new_chat=PostedEventMessageSpec( + enabled=False, + text="", + ), + on_inactivity_timeout=PostedEventMessageSpec( + enabled=False, + text="", + ), + on_max_duration_timeout=PostedEventMessageSpec( + enabled=False, + text="", + ), + ), ) ``` @@ -2382,7 +2397,7 @@ client.empathic_voice.custom_voices.create_custom_voice(
-**name:** `str` — The name of the Custom Voice. Maximum length of 75 characters. Will be converted to all-uppercase. (e.g., "sample voice" becomes "SAMPLE VOICE") +**evi_version:** `str` — Specifies the EVI version to use. Use `"1"` for version 1, or `"2"` for the latest enhanced version. For a detailed comparison of the two versions, refer to our [guide](/docs/empathic-voice-interface-evi/evi-2).
@@ -2390,7 +2405,7 @@ client.empathic_voice.custom_voices.create_custom_voice(
-**base_voice:** `PostedCustomVoiceBaseVoice` — Specifies the base voice used to create the Custom Voice. +**name:** `str` — Name applied to all versions of a particular Config.
@@ -2398,11 +2413,7 @@ client.empathic_voice.custom_voices.create_custom_voice(
-**parameters:** `typing.Optional[PostedCustomVoiceParameters]` - -The specified attributes of a Custom Voice. - -If no parameters are specified then all attributes will be set to their defaults, meaning no modfications will be made to the base voice. +**version_description:** `typing.Optional[str]` — An optional description of the Config version.
@@ -2410,71 +2421,71 @@ If no parameters are specified then all attributes will be set to their defaults
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**prompt:** `typing.Optional[PostedConfigPromptSpec]`
- -
+
+
+**voice:** `typing.Optional[PostedVoice]` — A voice specification associated with this Config. +
-
-
client.empathic_voice.custom_voices.get_custom_voice(...)
-#### 📝 Description +**language_model:** `typing.Optional[PostedLanguageModel]` -
-
+The supplemental language model associated with this Config. + +This model is used to generate longer, more detailed responses from EVI. Choosing an appropriate supplemental language model for your use case is crucial for generating fast, high-quality responses from EVI. + +
+
-Fetches a specific **Custom Voice** by ID. +**ellm_model:** `typing.Optional[PostedEllmModel]` -Refer to our [voices guide](/docs/empathic-voice-interface-evi/voices) for details on creating a custom voice. -
-
+The eLLM setup associated with this Config. + +Hume's eLLM (empathic Large Language Model) is a multimodal language model that takes into account both expression measures and language. The eLLM generates short, empathic language responses and guides text-to-speech (TTS) prosody. +
-#### 🔌 Usage -
+**tools:** `typing.Optional[typing.Sequence[typing.Optional[PostedUserDefinedToolSpec]]]` — List of user-defined tools associated with this Config. + +
+
+
-```python -from hume import HumeClient - -client = HumeClient( - api_key="YOUR_API_KEY", -) -client.empathic_voice.custom_voices.get_custom_voice( - id="id", -) - -``` -
-
+**builtin_tools:** `typing.Optional[typing.Sequence[typing.Optional[PostedBuiltinTool]]]` — List of built-in tools associated with this Config. + -#### ⚙️ Parameters -
+**event_messages:** `typing.Optional[PostedEventMessageSpecs]` + +
+
+
-**id:** `str` — Identifier for a Custom Voice. Formatted as a UUID. +**timeouts:** `typing.Optional[PostedTimeoutSpecs]`
@@ -2494,7 +2505,7 @@ client.empathic_voice.custom_voices.get_custom_voice(
-
client.empathic_voice.custom_voices.create_custom_voice_version(...) +
client.empathic_voice.configs.list_config_versions(...)
@@ -2506,9 +2517,9 @@ client.empathic_voice.custom_voices.get_custom_voice(
-Updates a **Custom Voice** by creating a new version of the **Custom Voice**. +Fetches a list of a **Config's** versions. -Refer to our [voices guide](/docs/empathic-voice-interface-evi/voices) for details on creating a custom voice. +For more details on configuration options and how to configure EVI, see our [configuration guide](/docs/empathic-voice-interface-evi/configuration).
@@ -2528,10 +2539,8 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.custom_voices.create_custom_voice_version( - id="id", - name="name", - base_voice="ITO", +client.empathic_voice.configs.list_config_versions( + id="1b60e1a0-cc59-424a-8d2c-189d354db3f3", ) ``` @@ -2548,7 +2557,7 @@ client.empathic_voice.custom_voices.create_custom_voice_version(
-**id:** `str` — Identifier for a Custom Voice. Formatted as a UUID. +**id:** `str` — Identifier for a Config. Formatted as a UUID.
@@ -2556,7 +2565,11 @@ client.empathic_voice.custom_voices.create_custom_voice_version(
-**name:** `str` — The name of the Custom Voice. Maximum length of 75 characters. Will be converted to all-uppercase. (e.g., "sample voice" becomes "SAMPLE VOICE") +**page_number:** `typing.Optional[int]` + +Specifies the page number to retrieve, enabling pagination. + +This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page.
@@ -2564,7 +2577,11 @@ client.empathic_voice.custom_voices.create_custom_voice_version(
-**base_voice:** `PostedCustomVoiceBaseVoice` — Specifies the base voice used to create the Custom Voice. +**page_size:** `typing.Optional[int]` + +Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. + +For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10.
@@ -2572,11 +2589,7 @@ client.empathic_voice.custom_voices.create_custom_voice_version(
-**parameters:** `typing.Optional[PostedCustomVoiceParameters]` - -The specified attributes of a Custom Voice. - -If no parameters are specified then all attributes will be set to their defaults, meaning no modfications will be made to the base voice. +**restrict_to_most_recent:** `typing.Optional[bool]` — By default, `restrict_to_most_recent` is set to true, returning only the latest version of each config. To include all versions of each config in the list, set `restrict_to_most_recent` to false.
@@ -2596,7 +2609,7 @@ If no parameters are specified then all attributes will be set to their defaults
-
client.empathic_voice.custom_voices.delete_custom_voice(...) +
client.empathic_voice.configs.create_config_version(...)
@@ -2608,9 +2621,9 @@ If no parameters are specified then all attributes will be set to their defaults
-Deletes a **Custom Voice** and its versions. +Updates a **Config** by creating a new version of the **Config**. -Refer to our [voices guide](/docs/empathic-voice-interface-evi/voices) for details on creating a custom voice. +For more details on configuration options and how to configure EVI, see our [configuration guide](/docs/empathic-voice-interface-evi/configuration).
@@ -2626,12 +2639,51 @@ Refer to our [voices guide](/docs/empathic-voice-interface-evi/voices) for detai ```python from hume import HumeClient - +from hume.empathic_voice import ( + PostedConfigPromptSpec, + PostedEllmModel, + PostedEventMessageSpec, + PostedEventMessageSpecs, + PostedLanguageModel, + PostedVoice, +) + client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.custom_voices.delete_custom_voice( - id="id", +client.empathic_voice.configs.create_config_version( + id="1b60e1a0-cc59-424a-8d2c-189d354db3f3", + version_description="This is an updated version of the Weather Assistant Config.", + evi_version="2", + prompt=PostedConfigPromptSpec( + id="af699d45-2985-42cc-91b9-af9e5da3bac5", + version=0, + ), + voice=PostedVoice( + name="ITO", + ), + language_model=PostedLanguageModel( + model_provider="ANTHROPIC", + model_resource="claude-3-5-sonnet-20240620", + temperature=1.0, + ), + ellm_model=PostedEllmModel( + allow_short_responses=True, + ), + event_messages=PostedEventMessageSpecs( + on_new_chat=PostedEventMessageSpec( + enabled=False, + text="", + ), + on_inactivity_timeout=PostedEventMessageSpec( + enabled=False, + text="", + ), + on_max_duration_timeout=PostedEventMessageSpec( + enabled=False, + text="", + ), + ), ) ``` @@ -2648,7 +2700,7 @@ client.empathic_voice.custom_voices.delete_custom_voice(
-**id:** `str` — Identifier for a Custom Voice. Formatted as a UUID. +**id:** `str` — Identifier for a Config. Formatted as a UUID.
@@ -2656,72 +2708,79 @@ client.empathic_voice.custom_voices.delete_custom_voice(
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**evi_version:** `str` — The version of the EVI used with this config.
- -
+
+
+**version_description:** `typing.Optional[str]` — An optional description of the Config version. +
-
-
client.empathic_voice.custom_voices.update_custom_voice_name(...)
-#### 📝 Description +**prompt:** `typing.Optional[PostedConfigPromptSpec]` + +
+
+**voice:** `typing.Optional[PostedVoice]` — A voice specification associated with this Config version. + +
+
+
-Updates the name of a **Custom Voice**. +**language_model:** `typing.Optional[PostedLanguageModel]` -Refer to our [voices guide](/docs/empathic-voice-interface-evi/voices) for details on creating a custom voice. -
-
+The supplemental language model associated with this Config version. + +This model is used to generate longer, more detailed responses from EVI. Choosing an appropriate supplemental language model for your use case is crucial for generating fast, high-quality responses from EVI. + -#### 🔌 Usage -
-
-
- -```python -from hume import HumeClient +**ellm_model:** `typing.Optional[PostedEllmModel]` -client = HumeClient( - api_key="YOUR_API_KEY", -) -client.empathic_voice.custom_voices.update_custom_voice_name( - id="string", - name="string", -) +The eLLM setup associated with this Config version. -``` +Hume's eLLM (empathic Large Language Model) is a multimodal language model that takes into account both expression measures and language. The eLLM generates short, empathic language responses and guides text-to-speech (TTS) prosody. +
+ +
+
+ +**tools:** `typing.Optional[typing.Sequence[typing.Optional[PostedUserDefinedToolSpec]]]` — List of user-defined tools associated with this Config version. +
-#### ⚙️ Parameters -
+**builtin_tools:** `typing.Optional[typing.Sequence[typing.Optional[PostedBuiltinTool]]]` — List of built-in tools associated with this Config version. + +
+
+
-**id:** `str` — Identifier for a Custom Voice. Formatted as a UUID. +**event_messages:** `typing.Optional[PostedEventMessageSpecs]`
@@ -2729,7 +2788,7 @@ client.empathic_voice.custom_voices.update_custom_voice_name(
-**name:** `str` — The name of the Custom Voice. Maximum length of 75 characters. Will be converted to all-uppercase. (e.g., "sample voice" becomes "SAMPLE VOICE") +**timeouts:** `typing.Optional[PostedTimeoutSpecs]`
@@ -2749,8 +2808,7 @@ client.empathic_voice.custom_voices.update_custom_voice_name(
-## EmpathicVoice Configs -
client.empathic_voice.configs.list_configs(...) +
client.empathic_voice.configs.delete_config(...)
@@ -2762,7 +2820,7 @@ client.empathic_voice.custom_voices.update_custom_voice_name(
-Fetches a paginated list of **Configs**. +Deletes a **Config** and its versions. For more details on configuration options and how to configure EVI, see our [configuration guide](/docs/empathic-voice-interface-evi/configuration).
@@ -2784,9 +2842,8 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.configs.list_configs( - page_number=0, - page_size=1, +client.empathic_voice.configs.delete_config( + id="1b60e1a0-cc59-424a-8d2c-189d354db3f3", ) ``` @@ -2803,39 +2860,7 @@ client.empathic_voice.configs.list_configs(
-**page_number:** `typing.Optional[int]` - -Specifies the page number to retrieve, enabling pagination. - -This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page. - -
-
- -
-
- -**page_size:** `typing.Optional[int]` - -Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. - -For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. - -
-
- -
-
- -**restrict_to_most_recent:** `typing.Optional[bool]` — By default, `restrict_to_most_recent` is set to true, returning only the latest version of each config. To include all versions of each config in the list, set `restrict_to_most_recent` to false. - -
-
- -
-
- -**name:** `typing.Optional[str]` — Filter to only include configs with this name. +**id:** `str` — Identifier for a Config. Formatted as a UUID.
@@ -2855,7 +2880,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-
client.empathic_voice.configs.create_config(...) +
client.empathic_voice.configs.update_config_name(...)
@@ -2867,7 +2892,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-Creates a **Config** which can be applied to EVI. +Updates the name of a **Config**. For more details on configuration options and how to configure EVI, see our [configuration guide](/docs/empathic-voice-interface-evi/configuration).
@@ -2885,46 +2910,13 @@ For more details on configuration options and how to configure EVI, see our [con ```python from hume import HumeClient -from hume.empathic_voice import ( - PostedConfigPromptSpec, - PostedEventMessageSpec, - PostedEventMessageSpecs, - PostedLanguageModel, - PostedVoice, -) client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.configs.create_config( - name="Weather Assistant Config", - prompt=PostedConfigPromptSpec( - id="af699d45-2985-42cc-91b9-af9e5da3bac5", - version=0, - ), - evi_version="2", - voice=PostedVoice( - name="SAMPLE VOICE", - ), - language_model=PostedLanguageModel( - model_provider="ANTHROPIC", - model_resource="claude-3-5-sonnet-20240620", - temperature=1.0, - ), - event_messages=PostedEventMessageSpecs( - on_new_chat=PostedEventMessageSpec( - enabled=False, - text="", - ), - on_inactivity_timeout=PostedEventMessageSpec( - enabled=False, - text="", - ), - on_max_duration_timeout=PostedEventMessageSpec( - enabled=False, - text="", - ), - ), +client.empathic_voice.configs.update_config_name( + id="1b60e1a0-cc59-424a-8d2c-189d354db3f3", + name="Updated Weather Assistant Config Name", ) ``` @@ -2941,7 +2933,7 @@ client.empathic_voice.configs.create_config(
-**evi_version:** `str` — Specifies the EVI version to use. Use `"1"` for version 1, or `"2"` for the latest enhanced version. For a detailed comparison of the two versions, refer to our [guide](/docs/empathic-voice-interface-evi/evi-2). +**id:** `str` — Identifier for a Config. Formatted as a UUID.
@@ -2957,71 +2949,72 @@ client.empathic_voice.configs.create_config(
-**version_description:** `typing.Optional[str]` — An optional description of the Config version. +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
-
-**prompt:** `typing.Optional[PostedConfigPromptSpec]` -
+
+
client.empathic_voice.configs.get_config_version(...)
-**voice:** `typing.Optional[PostedVoice]` — A voice specification associated with this Config. - -
-
+#### 📝 Description
-**language_model:** `typing.Optional[PostedLanguageModel]` +
+
-The supplemental language model associated with this Config. +Fetches a specified version of a **Config**. -This model is used to generate longer, more detailed responses from EVI. Choosing an appropriate supplemental language model for your use case is crucial for generating fast, high-quality responses from EVI. - +For more details on configuration options and how to configure EVI, see our [configuration guide](/docs/empathic-voice-interface-evi/configuration).
+
+
+ +#### 🔌 Usage
-**ellm_model:** `typing.Optional[PostedEllmModel]` +
+
-The eLLM setup associated with this Config. +```python +from hume import HumeClient -Hume's eLLM (empathic Large Language Model) is a multimodal language model that takes into account both expression measures and language. The eLLM generates short, empathic language responses and guides text-to-speech (TTS) prosody. - +client = HumeClient( + api_key="YOUR_API_KEY", +) +client.empathic_voice.configs.get_config_version( + id="1b60e1a0-cc59-424a-8d2c-189d354db3f3", + version=1, +) + +```
- -
-
- -**tools:** `typing.Optional[typing.Sequence[typing.Optional[PostedUserDefinedToolSpec]]]` — List of user-defined tools associated with this Config. -
+#### ⚙️ Parameters +
-**builtin_tools:** `typing.Optional[typing.Sequence[typing.Optional[PostedBuiltinTool]]]` — List of built-in tools associated with this Config. - -
-
-
-**event_messages:** `typing.Optional[PostedEventMessageSpecs]` +**id:** `str` — Identifier for a Config. Formatted as a UUID.
@@ -3029,7 +3022,13 @@ Hume's eLLM (empathic Large Language Model) is a multimodal language model that
-**timeouts:** `typing.Optional[PostedTimeoutSpecs]` +**version:** `int` + +Version number for a Config. + +Configs, Prompts, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. + +Version numbers are integer values representing different iterations of the Config. Each update to the Config increments its version number.
@@ -3049,7 +3048,7 @@ Hume's eLLM (empathic Large Language Model) is a multimodal language model that
-
client.empathic_voice.configs.list_config_versions(...) +
client.empathic_voice.configs.delete_config_version(...)
@@ -3061,7 +3060,7 @@ Hume's eLLM (empathic Large Language Model) is a multimodal language model that
-Fetches a list of a **Config's** versions. +Deletes a specified version of a **Config**. For more details on configuration options and how to configure EVI, see our [configuration guide](/docs/empathic-voice-interface-evi/configuration).
@@ -3083,8 +3082,9 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.configs.list_config_versions( +client.empathic_voice.configs.delete_config_version( id="1b60e1a0-cc59-424a-8d2c-189d354db3f3", + version=1, ) ``` @@ -3109,31 +3109,13 @@ client.empathic_voice.configs.list_config_versions(
-**page_number:** `typing.Optional[int]` - -Specifies the page number to retrieve, enabling pagination. - -This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page. - -
-
- -
-
- -**page_size:** `typing.Optional[int]` - -Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. +**version:** `int` -For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. - -
-
+Version number for a Config. -
-
+Configs, Prompts, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. -**restrict_to_most_recent:** `typing.Optional[bool]` — By default, `restrict_to_most_recent` is set to true, returning only the latest version of each config. To include all versions of each config in the list, set `restrict_to_most_recent` to false. +Version numbers are integer values representing different iterations of the Config. Each update to the Config increments its version number.
@@ -3153,7 +3135,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-
client.empathic_voice.configs.create_config_version(...) +
client.empathic_voice.configs.update_config_description(...)
@@ -3165,7 +3147,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-Updates a **Config** by creating a new version of the **Config**. +Updates the description of a **Config**. For more details on configuration options and how to configure EVI, see our [configuration guide](/docs/empathic-voice-interface-evi/configuration).
@@ -3183,51 +3165,14 @@ For more details on configuration options and how to configure EVI, see our [con ```python from hume import HumeClient -from hume.empathic_voice import ( - PostedConfigPromptSpec, - PostedEllmModel, - PostedEventMessageSpec, - PostedEventMessageSpecs, - PostedLanguageModel, - PostedVoice, -) client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.configs.create_config_version( +client.empathic_voice.configs.update_config_description( id="1b60e1a0-cc59-424a-8d2c-189d354db3f3", - version_description="This is an updated version of the Weather Assistant Config.", - evi_version="2", - prompt=PostedConfigPromptSpec( - id="af699d45-2985-42cc-91b9-af9e5da3bac5", - version=0, - ), - voice=PostedVoice( - name="ITO", - ), - language_model=PostedLanguageModel( - model_provider="ANTHROPIC", - model_resource="claude-3-5-sonnet-20240620", - temperature=1.0, - ), - ellm_model=PostedEllmModel( - allow_short_responses=True, - ), - event_messages=PostedEventMessageSpecs( - on_new_chat=PostedEventMessageSpec( - enabled=False, - text="", - ), - on_inactivity_timeout=PostedEventMessageSpec( - enabled=False, - text="", - ), - on_max_duration_timeout=PostedEventMessageSpec( - enabled=False, - text="", - ), - ), + version=1, + version_description="This is an updated version_description.", ) ``` @@ -3252,7 +3197,13 @@ client.empathic_voice.configs.create_config_version(
-**evi_version:** `str` — The version of the EVI used with this config. +**version:** `int` + +Version number for a Config. + +Configs, Prompts, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. + +Version numbers are integer values representing different iterations of the Config. Each update to the Config increments its version number.
@@ -3268,55 +3219,81 @@ client.empathic_voice.configs.create_config_version(
-**prompt:** `typing.Optional[PostedConfigPromptSpec]` +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
-
-**voice:** `typing.Optional[PostedVoice]` — A voice specification associated with this Config version. -
+
+## EmpathicVoice Chats +
client.empathic_voice.chats.list_chats(...)
-**language_model:** `typing.Optional[PostedLanguageModel]` +#### 📝 Description -The supplemental language model associated with this Config version. +
+
-This model is used to generate longer, more detailed responses from EVI. Choosing an appropriate supplemental language model for your use case is crucial for generating fast, high-quality responses from EVI. - +
+
+ +Fetches a paginated list of **Chats**.
+
+
+ +#### 🔌 Usage
-**ellm_model:** `typing.Optional[PostedEllmModel]` +
+
-The eLLM setup associated with this Config version. +```python +from hume import HumeClient -Hume's eLLM (empathic Large Language Model) is a multimodal language model that takes into account both expression measures and language. The eLLM generates short, empathic language responses and guides text-to-speech (TTS) prosody. - +client = HumeClient( + api_key="YOUR_API_KEY", +) +response = client.empathic_voice.chats.list_chats( + page_number=0, + page_size=1, + ascending_order=True, +) +for item in response: + yield item +# alternatively, you can paginate page-by-page +for page in response.iter_pages(): + yield page + +``` +
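The generated snippet above uses `yield` at module level, so it only runs inside a generator; as a standalone, runnable variant of the same pager:

```python
from hume import HumeClient

client = HumeClient(api_key="YOUR_API_KEY")

# The pager returned by list_chats lazily fetches further pages as it is iterated.
response = client.empathic_voice.chats.list_chats(
    page_number=0,
    page_size=1,
    ascending_order=True,
)
for item in response:
    print(item)
```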
+
+#### ⚙️ Parameters +
-**tools:** `typing.Optional[typing.Sequence[typing.Optional[PostedUserDefinedToolSpec]]]` — List of user-defined tools associated with this Config version. - -
-
-
-**builtin_tools:** `typing.Optional[typing.Sequence[typing.Optional[PostedBuiltinTool]]]` — List of built-in tools associated with this Config version. +**page_number:** `typing.Optional[int]` + +Specifies the page number to retrieve, enabling pagination. + +This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page.
@@ -3324,7 +3301,11 @@ Hume's eLLM (empathic Large Language Model) is a multimodal language model that
-**event_messages:** `typing.Optional[PostedEventMessageSpecs]` +**page_size:** `typing.Optional[int]` + +Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. + +For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10.
@@ -3332,7 +3313,7 @@ Hume's eLLM (empathic Large Language Model) is a multimodal language model that
-**timeouts:** `typing.Optional[PostedTimeoutSpecs]` +**ascending_order:** `typing.Optional[bool]` — Specifies the sorting order of the results based on their creation date. Set to true for ascending order (chronological, with the oldest records first) and false for descending order (reverse-chronological, with the newest records first). Defaults to true.
@@ -3352,7 +3333,7 @@ Hume's eLLM (empathic Large Language Model) is a multimodal language model that
-
client.empathic_voice.configs.delete_config(...) +
client.empathic_voice.chats.list_chat_events(...)
@@ -3364,9 +3345,7 @@ Hume's eLLM (empathic Large Language Model) is a multimodal language model that
-Deletes a **Config** and its versions. - -For more details on configuration options and how to configure EVI, see our [configuration guide](/docs/empathic-voice-interface-evi/configuration). +Fetches a paginated list of **Chat** events.
@@ -3386,9 +3365,17 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.configs.delete_config( - id="1b60e1a0-cc59-424a-8d2c-189d354db3f3", +response = client.empathic_voice.chats.list_chat_events( + id="470a49f6-1dec-4afe-8b61-035d3b2d63b0", + page_number=0, + page_size=3, + ascending_order=True, ) +for item in response: + yield item +# alternatively, you can paginate page-by-page +for page in response.iter_pages(): + yield page ``` @@ -3404,7 +3391,39 @@ client.empathic_voice.configs.delete_config(
-**id:** `str` — Identifier for a Config. Formatted as a UUID. +**id:** `str` — Identifier for a Chat. Formatted as a UUID. + +
+
+ +
+
+ +**page_size:** `typing.Optional[int]` + +Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. + +For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. + +
+
+ +
+
+ +**page_number:** `typing.Optional[int]` + +Specifies the page number to retrieve, enabling pagination. + +This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page. + +
+
+ +
+
+ +**ascending_order:** `typing.Optional[bool]` — Specifies the sorting order of the results based on their creation date. Set to true for ascending order (chronological, with the oldest records first) and false for descending order (reverse-chronological, with the newest records first). Defaults to true.
@@ -3424,7 +3443,8 @@ client.empathic_voice.configs.delete_config(
-
client.empathic_voice.configs.update_config_name(...) +## EmpathicVoice ChatGroups +
client.empathic_voice.chat_groups.list_chat_groups(...)
@@ -3436,9 +3456,7 @@ client.empathic_voice.configs.delete_config(
-Updates the name of a **Config**. - -For more details on configuration options and how to configure EVI, see our [configuration guide](/docs/empathic-voice-interface-evi/configuration). +Fetches a paginated list of **Chat Groups**.
@@ -3458,9 +3476,11 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.configs.update_config_name( - id="1b60e1a0-cc59-424a-8d2c-189d354db3f3", - name="Updated Weather Assistant Config Name", +client.empathic_voice.chat_groups.list_chat_groups( + page_number=0, + page_size=1, + ascending_order=True, + config_id="1b60e1a0-cc59-424a-8d2c-189d354db3f3", ) ``` @@ -3477,7 +3497,11 @@ client.empathic_voice.configs.update_config_name(
-**id:** `str` — Identifier for a Config. Formatted as a UUID. +**page_number:** `typing.Optional[int]` + +Specifies the page number to retrieve, enabling pagination. + +This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page.
@@ -3485,7 +3509,31 @@ client.empathic_voice.configs.update_config_name(
-**name:** `str` — Name applied to all versions of a particular Config. +**page_size:** `typing.Optional[int]` + +Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. + +For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. + +
+
+ +
+
+ +**ascending_order:** `typing.Optional[bool]` — Specifies the sorting order of the results based on their creation date. Set to true for ascending order (chronological, with the oldest records first) and false for descending order (reverse-chronological, with the newest records first). Defaults to true. + +
+
+ +
+
+ +**config_id:** `typing.Optional[str]` + +The unique identifier for an EVI configuration. + +Filter Chat Groups to only include Chats that used this `config_id` in their most recent Chat.
@@ -3505,7 +3553,7 @@ client.empathic_voice.configs.update_config_name(
-
client.empathic_voice.configs.get_config_version(...) +
client.empathic_voice.chat_groups.get_chat_group(...)
@@ -3517,9 +3565,7 @@ client.empathic_voice.configs.update_config_name(
-Fetches a specified version of a **Config**. - -For more details on configuration options and how to configure EVI, see our [configuration guide](/docs/empathic-voice-interface-evi/configuration). +Fetches a **ChatGroup** by ID, including a paginated list of **Chats** associated with the **ChatGroup**.
@@ -3539,9 +3585,11 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.configs.get_config_version( - id="1b60e1a0-cc59-424a-8d2c-189d354db3f3", - version=1, +client.empathic_voice.chat_groups.get_chat_group( + id="697056f0-6c7e-487d-9bd8-9c19df79f05f", + page_number=0, + page_size=1, + ascending_order=True, ) ``` @@ -3558,7 +3606,7 @@ client.empathic_voice.configs.get_config_version(
-**id:** `str` — Identifier for a Config. Formatted as a UUID. +**id:** `str` — Identifier for a Chat Group. Formatted as a UUID.
@@ -3566,13 +3614,31 @@ client.empathic_voice.configs.get_config_version(
-**version:** `int` +**page_size:** `typing.Optional[int]` -Version number for a Config. +Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. -Configs, Prompts, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. +For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. + +
+
-Version numbers are integer values representing different iterations of the Config. Each update to the Config increments its version number. +
+
+ +**page_number:** `typing.Optional[int]` + +Specifies the page number to retrieve, enabling pagination. + +This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page. + +
+
+ +
+
+ +**ascending_order:** `typing.Optional[bool]` — Specifies the sorting order of the results based on their creation date. Set to true for ascending order (chronological, with the oldest records first) and false for descending order (reverse-chronological, with the newest records first). Defaults to true.
@@ -3592,7 +3658,7 @@ Version numbers are integer values representing different iterations of the Conf
-
client.empathic_voice.configs.delete_config_version(...) +
client.empathic_voice.chat_groups.list_chat_group_events(...)
@@ -3604,9 +3670,7 @@ Version numbers are integer values representing different iterations of the Conf
-Deletes a specified version of a **Config**. - -For more details on configuration options and how to configure EVI, see our [configuration guide](/docs/empathic-voice-interface-evi/configuration). +Fetches a paginated list of **Chat** events associated with a **Chat Group**.
@@ -3626,9 +3690,11 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.configs.delete_config_version( - id="1b60e1a0-cc59-424a-8d2c-189d354db3f3", - version=1, +client.empathic_voice.chat_groups.list_chat_group_events( + id="697056f0-6c7e-487d-9bd8-9c19df79f05f", + page_number=0, + page_size=3, + ascending_order=True, ) ``` @@ -3645,7 +3711,7 @@ client.empathic_voice.configs.delete_config_version(
-**id:** `str` — Identifier for a Config. Formatted as a UUID. +**id:** `str` — Identifier for a Chat Group. Formatted as a UUID.
@@ -3653,13 +3719,31 @@ client.empathic_voice.configs.delete_config_version(
-**version:** `int` +**page_size:** `typing.Optional[int]` -Version number for a Config. +Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. -Configs, Prompts, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. +For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. + +
+
-Version numbers are integer values representing different iterations of the Config. Each update to the Config increments its version number. +
+
+ +**page_number:** `typing.Optional[int]` + +Specifies the page number to retrieve, enabling pagination. + +This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page. + +
+
+ +
+
+ +**ascending_order:** `typing.Optional[bool]` — Specifies the sorting order of the results based on their creation date. Set to true for ascending order (chronological, with the oldest records first) and false for descending order (reverse-chronological, with the newest records first). Defaults to true.
@@ -3679,7 +3763,8 @@ Version numbers are integer values representing different iterations of the Conf
-
client.empathic_voice.configs.update_config_description(...) +## ExpressionMeasurement Batch +
client.expression_measurement.batch.list_jobs(...)
@@ -3691,9 +3776,7 @@ Version numbers are integer values representing different iterations of the Conf
-Updates the description of a **Config**. - -For more details on configuration options and how to configure EVI, see our [configuration guide](/docs/empathic-voice-interface-evi/configuration). +Sort and filter jobs.
@@ -3713,11 +3796,7 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.configs.update_config_description( - id="1b60e1a0-cc59-424a-8d2c-189d354db3f3", - version=1, - version_description="This is an updated version_description.", -) +client.expression_measurement.batch.list_jobs() ``` @@ -3733,7 +3812,45 @@ client.empathic_voice.configs.update_config_description(
-**id:** `str` — Identifier for a Config. Formatted as a UUID. +**limit:** `typing.Optional[int]` — The maximum number of jobs to include in the response. + +
+
+ +
+
+ +**status:** `typing.Optional[typing.Union[Status, typing.Sequence[Status]]]` + +Include only jobs of this status in the response. There are four possible statuses: + +- `QUEUED`: The job has been received and is waiting to be processed. + +- `IN_PROGRESS`: The job is currently being processed. + +- `COMPLETED`: The job has finished processing. + +- `FAILED`: The job encountered an error and could not be completed successfully. + +
+
+ +
+
+ +**when:** `typing.Optional[When]` — Specify whether to include jobs created before or after a given `timestamp_ms`. + +
+
+ +
+
+ +**timestamp_ms:** `typing.Optional[int]` + +Provide a timestamp in milliseconds to filter jobs. + +When combined with the `when` parameter, you can filter jobs before or after the given timestamp. Defaults to the current Unix timestamp if one is not provided.
@@ -3741,13 +3858,15 @@ client.empathic_voice.configs.update_config_description(
-**version:** `int` +**sort_by:** `typing.Optional[SortBy]` + +Specify which timestamp to sort the jobs by. -Version number for a Config. +- `created`: Sort jobs by the time of creation, indicated by `created_timestamp_ms`. -Configs, Prompts, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. +- `started`: Sort jobs by the time processing started, indicated by `started_timestamp_ms`. -Version numbers are integer values representing different iterations of the Config. Each update to the Config increments its version number. +- `ended`: Sort jobs by the time processing ended, indicated by `ended_timestamp_ms`.
@@ -3755,7 +3874,13 @@ Version numbers are integer values representing different iterations of the Conf
-**version_description:** `typing.Optional[str]` — An optional description of the Config version. +**direction:** `typing.Optional[Direction]` + +Specify the order in which to sort the jobs. Defaults to descending order. + +- `asc`: Sort in ascending order (chronological, with the oldest records first). + +- `desc`: Sort in descending order (reverse-chronological, with the newest records first).
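An illustrative sketch combining these filters, using the literal values documented above (`COMPLETED`, `created`, `desc`):

```python
from hume import HumeClient

client = HumeClient(api_key="YOUR_API_KEY")

# List up to ten completed jobs, sorted by creation time, newest first.
jobs = client.expression_measurement.batch.list_jobs(
    limit=10,
    status="COMPLETED",
    sort_by="created",
    direction="desc",
)
for job in jobs:
    print(job)
```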
@@ -3775,8 +3900,7 @@ Version numbers are integer values representing different iterations of the Conf
-## EmpathicVoice Chats -
client.empathic_voice.chats.list_chats(...) +
client.expression_measurement.batch.start_inference_job(...)
@@ -3788,7 +3912,7 @@ Version numbers are integer values representing different iterations of the Conf
-Fetches a paginated list of **Chats**. +Start a new measurement inference job.
@@ -3808,16 +3932,10 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -response = client.empathic_voice.chats.list_chats( - page_number=0, - page_size=1, - ascending_order=True, +client.expression_measurement.batch.start_inference_job( + urls=["https://hume-tutorials.s3.amazonaws.com/faces.zip"], + notify=True, ) -for item in response: - yield item -# alternatively, you can paginate page-by-page -for page in response.iter_pages(): - yield page ``` @@ -3833,11 +3951,11 @@ for page in response.iter_pages():
-**page_number:** `typing.Optional[int]` +**models:** `typing.Optional[Models]` -Specifies the page number to retrieve, enabling pagination. +Specify the models to use for inference. -This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page. +If this field is not explicitly set, then all models will run by default.
@@ -3845,11 +3963,19 @@ This parameter uses zero-based indexing. For example, setting `page_number` to 0
-**page_size:** `typing.Optional[int]` +**transcription:** `typing.Optional[Transcription]` + +
+
-Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. +
+
-For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. +**urls:** `typing.Optional[typing.Sequence[str]]` + +URLs to the media files to be processed. Each must be a valid public URL to a media file (see recommended input filetypes) or an archive (`.zip`, `.tar.gz`, `.tar.bz2`, `.tar.xz`) of media files. + +If you wish to supply more than 100 URLs, consider providing them as an archive (`.zip`, `.tar.gz`, `.tar.bz2`, `.tar.xz`).
@@ -3857,7 +3983,23 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-**ascending_order:** `typing.Optional[bool]` — Specifies the sorting order of the results based on their creation date. Set to true for ascending order (chronological, with the oldest records first) and false for descending order (reverse-chronological, with the newest records first). Defaults to true. +**text:** `typing.Optional[typing.Sequence[str]]` — Text supplied directly to our Emotional Language and NER models for analysis. + +
+
+ +
+
+ +**callback_url:** `typing.Optional[str]` — If provided, a `POST` request will be made to the URL with the generated predictions on completion or the error message on failure. + +
+
+ +
+
+ +**notify:** `typing.Optional[bool]` — Whether to send an email notification to the user upon job completion/failure.
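A minimal end-to-end sketch: submit text for inference, poll the job, then fetch predictions. It assumes `start_inference_job` returns the job ID as a string and that the job details expose a status under `state.status`; those attribute names are illustrative, not confirmed by this diff.

```python
import time

from hume import HumeClient

client = HumeClient(api_key="YOUR_API_KEY")

# Submit text directly to the language-based models (see the `text` parameter above).
job_id = client.expression_measurement.batch.start_inference_job(
    text=["Mary had a little lamb."],
    notify=False,
)

# Poll until the job reaches a terminal status (assumed attribute path: state.status).
while True:
    details = client.expression_measurement.batch.get_job_details(id=job_id)
    status = details.state.status
    if status in ("COMPLETED", "FAILED"):
        break
    time.sleep(5)

if status == "COMPLETED":
    predictions = client.expression_measurement.batch.get_job_predictions(id=job_id)
    print(predictions)
```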
@@ -3877,7 +4019,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-
client.empathic_voice.chats.list_chat_events(...) +
client.expression_measurement.batch.get_job_details(...)
@@ -3889,7 +4031,7 @@ For example, if `page_size` is set to 10, each page will include up to 10 items.
-Fetches a paginated list of **Chat** events. +Get the request details and state of a given job.
@@ -3909,17 +4051,9 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -response = client.empathic_voice.chats.list_chat_events( - id="470a49f6-1dec-4afe-8b61-035d3b2d63b0", - page_number=0, - page_size=3, - ascending_order=True, +client.expression_measurement.batch.get_job_details( + id="job_id", ) -for item in response: - yield item -# alternatively, you can paginate page-by-page -for page in response.iter_pages(): - yield page ``` @@ -3935,39 +4069,7 @@ for page in response.iter_pages():
-**id:** `str` — Identifier for a Chat. Formatted as a UUID. - -
-
- -
-
- -**page_size:** `typing.Optional[int]` - -Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. - -For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. - -
-
- -
-
- -**page_number:** `typing.Optional[int]` - -Specifies the page number to retrieve, enabling pagination. - -This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page. - -
-
- -
-
- -**ascending_order:** `typing.Optional[bool]` — Specifies the sorting order of the results based on their creation date. Set to true for ascending order (chronological, with the oldest records first) and false for descending order (reverse-chronological, with the newest records first). Defaults to true. +**id:** `str` — The unique identifier for the job.
@@ -3987,8 +4089,7 @@ This parameter uses zero-based indexing. For example, setting `page_number` to 0
-## EmpathicVoice ChatGroups -
client.empathic_voice.chat_groups.list_chat_groups(...) +
client.expression_measurement.batch.get_job_predictions(...)
@@ -4000,7 +4101,7 @@ This parameter uses zero-based indexing. For example, setting `page_number` to 0
-Fetches a paginated list of **Chat Groups**. +Get the JSON predictions of a completed inference job.
@@ -4020,11 +4121,8 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.chat_groups.list_chat_groups( - page_number=0, - page_size=1, - ascending_order=True, - config_id="1b60e1a0-cc59-424a-8d2c-189d354db3f3", +client.expression_measurement.batch.get_job_predictions( + id="job_id", ) ``` @@ -4041,43 +4139,7 @@ client.empathic_voice.chat_groups.list_chat_groups(
-**page_number:** `typing.Optional[int]` - -Specifies the page number to retrieve, enabling pagination. - -This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page. - -
-
- -
-
- -**page_size:** `typing.Optional[int]` - -Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. - -For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. - -
-
- -
-
- -**ascending_order:** `typing.Optional[bool]` — Specifies the sorting order of the results based on their creation date. Set to true for ascending order (chronological, with the oldest records first) and false for descending order (reverse-chronological, with the newest records first). Defaults to true. - -
-
- -
-
- -**config_id:** `typing.Optional[str]` - -The unique identifier for an EVI configuration. - -Filter Chat Groups to only include Chats that used this `config_id` in their most recent Chat. +**id:** `str` — The unique identifier for the job.
@@ -4097,7 +4159,7 @@ Filter Chat Groups to only include Chats that used this `config_id` in their mos
-
client.empathic_voice.chat_groups.get_chat_group(...) +
client.expression_measurement.batch.get_job_artifacts(...)
@@ -4109,7 +4171,7 @@ Filter Chat Groups to only include Chats that used this `config_id` in their mos
-Fetches a **ChatGroup** by ID, including a paginated list of **Chats** associated with the **ChatGroup**. +Get the artifacts ZIP of a completed inference job.
@@ -4129,11 +4191,8 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.chat_groups.get_chat_group( - id="697056f0-6c7e-487d-9bd8-9c19df79f05f", - page_number=0, - page_size=1, - ascending_order=True, +client.expression_measurement.batch.get_job_artifacts( + id="string", ) ``` @@ -4150,39 +4209,7 @@ client.empathic_voice.chat_groups.get_chat_group(
-**id:** `str` — Identifier for a Chat Group. Formatted as a UUID. - -
-
- -
-
- -**page_size:** `typing.Optional[int]` - -Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. - -For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. - -
-
- -
-
- -**page_number:** `typing.Optional[int]` - -Specifies the page number to retrieve, enabling pagination. - -This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page. - -
-
- -
-
- -**ascending_order:** `typing.Optional[bool]` — Specifies the sorting order of the results based on their creation date. Set to true for ascending order (chronological, with the oldest records first) and false for descending order (reverse-chronological, with the newest records first). Defaults to true. +**id:** `str` — The unique identifier for the job.
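For the `get_job_artifacts` endpoint documented above, a hedged sketch of saving the returned ZIP to disk. The exact return type is not pinned down here, so the code accepts either raw bytes or an iterator of byte chunks.

```python
from hume import HumeClient

client = HumeClient(api_key="YOUR_API_KEY")

# "job_id" is a placeholder for a completed inference job.
artifacts = client.expression_measurement.batch.get_job_artifacts(id="job_id")

# Normalize to bytes whether the SDK returns the ZIP whole or as chunks.
data = artifacts if isinstance(artifacts, (bytes, bytearray)) else b"".join(artifacts)

with open("artifacts.zip", "wb") as f:
    f.write(data)
```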
@@ -4202,7 +4229,7 @@ This parameter uses zero-based indexing. For example, setting `page_number` to 0
-
client.empathic_voice.chat_groups.list_chat_group_events(...) +
client.expression_measurement.batch.start_inference_job_from_local_file(...)
@@ -4214,7 +4241,7 @@ This parameter uses zero-based indexing. For example, setting `page_number` to 0
-Fetches a paginated list of **Chat** events associated with a **Chat Group**. +Start a new batch inference job.
@@ -4234,12 +4261,7 @@ from hume import HumeClient client = HumeClient( api_key="YOUR_API_KEY", ) -client.empathic_voice.chat_groups.list_chat_group_events( - id="697056f0-6c7e-487d-9bd8-9c19df79f05f", - page_number=0, - page_size=3, - ascending_order=True, -) +client.expression_measurement.batch.start_inference_job_from_local_file() ``` @@ -4255,31 +4277,9 @@ client.empathic_voice.chat_groups.list_chat_group_events(
-**id:** `str` — Identifier for a Chat Group. Formatted as a UUID. - -
-
- -
-
- -**page_size:** `typing.Optional[int]` - -Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. - -For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. - -
-
- -
-
- -**page_number:** `typing.Optional[int]` - -Specifies the page number to retrieve, enabling pagination. +**file:** `from __future__ import annotations -This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page. +typing.List[core.File]` — See core.File for more documentation
@@ -4287,7 +4287,7 @@ This parameter uses zero-based indexing. For example, setting `page_number` to 0
-**ascending_order:** `typing.Optional[bool]` — Specifies the sorting order of the results based on their creation date. Set to true for ascending order (chronological, with the oldest records first) and false for descending order (reverse-chronological, with the newest records first). Defaults to true. +**json:** `typing.Optional[InferenceBaseRequest]` — Stringified JSON object containing the inference job configuration.
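To complement the bare `start_inference_job_from_local_file()` snippet above, here is a hedged sketch that uploads a local media file. It assumes an open binary file handle satisfies `core.File` and that the response identifies the newly created job; `sample.mp4` is a hypothetical path.

```python
from hume import HumeClient

client = HumeClient(api_key="YOUR_API_KEY")

# Assumption: an open binary handle is an acceptable `core.File` value.
with open("sample.mp4", "rb") as media:
    response = client.expression_measurement.batch.start_inference_job_from_local_file(
        file=[media],
        # Optionally pass `json=InferenceBaseRequest(...)` to configure models.
    )

# The response identifies the newly created job; poll it until it completes
# before requesting predictions or artifacts.
print(response)
```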

diff --git a/src/hume/base_client.py b/src/hume/base_client.py
index d293f18..889ec35 100644
--- a/src/hume/base_client.py
+++ b/src/hume/base_client.py
@@ -4,11 +4,11 @@
 from .environment import HumeClientEnvironment
 import httpx
 from .core.client_wrapper import SyncClientWrapper
-from .expression_measurement.client import ExpressionMeasurementClient
 from .empathic_voice.client import EmpathicVoiceClient
+from .expression_measurement.client import ExpressionMeasurementClient
 from .core.client_wrapper import AsyncClientWrapper
-from .expression_measurement.client import AsyncExpressionMeasurementClient
 from .empathic_voice.client import AsyncEmpathicVoiceClient
+from .expression_measurement.client import AsyncExpressionMeasurementClient
 
 
 class BaseHumeClient:
@@ -69,8 +69,8 @@ def __init__(
             else httpx.Client(timeout=_defaulted_timeout),
             timeout=_defaulted_timeout,
         )
-        self.expression_measurement = ExpressionMeasurementClient(client_wrapper=self._client_wrapper)
         self.empathic_voice = EmpathicVoiceClient(client_wrapper=self._client_wrapper)
+        self.expression_measurement = ExpressionMeasurementClient(client_wrapper=self._client_wrapper)
 
 
 class AsyncBaseHumeClient:
@@ -131,8 +131,8 @@ def __init__(
             else httpx.AsyncClient(timeout=_defaulted_timeout),
             timeout=_defaulted_timeout,
         )
-        self.expression_measurement = AsyncExpressionMeasurementClient(client_wrapper=self._client_wrapper)
         self.empathic_voice = AsyncEmpathicVoiceClient(client_wrapper=self._client_wrapper)
+        self.expression_measurement = AsyncExpressionMeasurementClient(client_wrapper=self._client_wrapper)
 
 
 def _get_base_url(*, base_url: typing.Optional[str] = None, environment: HumeClientEnvironment) -> str:
diff --git a/src/hume/empathic_voice/types/audio_output.py b/src/hume/empathic_voice/types/audio_output.py
index 89001e7..3a7105f 100644
--- a/src/hume/empathic_voice/types/audio_output.py
+++ b/src/hume/empathic_voice/types/audio_output.py
@@ -8,7 +8,7 @@
 
 class AudioOutput(UniversalBaseModel):
     """
-    When provided, the output is audio.
+    The type of message sent through the socket; for an Audio Output message, this must be `audio_output`.
     """
 
     type: typing.Literal["audio_output"] = pydantic.Field(default="audio_output")
diff --git a/src/hume/empathic_voice/types/user_input.py b/src/hume/empathic_voice/types/user_input.py
index fde3363..abd7cf2 100644
--- a/src/hume/empathic_voice/types/user_input.py
+++ b/src/hume/empathic_voice/types/user_input.py
@@ -8,7 +8,9 @@
 
 class UserInput(UniversalBaseModel):
     """
-    User text to insert into the conversation.
+    User text to insert into the conversation. Text sent through a User Input message is treated as the user’s speech to EVI. EVI processes this input and provides a corresponding response.
+
+    Expression measurement results are not available for User Input messages, as the prosody model relies on audio input and cannot process text alone.
    """
 
     type: typing.Literal["user_input"] = pydantic.Field(default="user_input")
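The updated `UserInput` docstring above carries the behavioral note worth remembering: text input is treated as the user's speech but yields no prosody scores. Below is a small sketch of constructing that message with the SDK model, assuming `UserInput` is re-exported from `hume.empathic_voice` and that `.dict()` is available on the shared base model; over an EVI WebSocket, the serialized JSON is what gets sent.

```python
from hume.empathic_voice import UserInput

# Build a User Input message; EVI treats this text as the user's speech.
# No expression measurement (prosody) results will accompany it, since the
# prosody model needs audio rather than text.
message = UserInput(text="Hello, EVI!")

# The serialized form is what travels over the chat WebSocket.
print(message.dict())  # e.g. {'type': 'user_input', 'text': 'Hello, EVI!'}
```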