diff --git a/pyproject.toml b/pyproject.toml index 6c6632d6..e19593d4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -3,7 +3,7 @@ name = "elevenlabs" [tool.poetry] name = "elevenlabs" -version = "1.52.0" +version = "1.53.0" description = "" readme = "README.md" authors = [] diff --git a/reference.md b/reference.md index 1b716338..d2d45c7a 100644 --- a/reference.md +++ b/reference.md @@ -1,8 +1,23 @@ # Reference -
client.claim_a_task_v_1_speech_to_text_reviews_tasks_task_id_claim_post(...) +## History +
client.history.get_all(...) +
+
+ +#### 📝 Description + +
+
+
+Returns metadata about all your generated audio. +
+
+
+
+ #### 🔌 Usage
@@ -17,9 +32,7 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.claim_a_task_v_1_speech_to_text_reviews_tasks_task_id_claim_post( - task_id="task_id", -) +client.history.get_all() ```
@@ -35,7 +48,39 @@ client.claim_a_task_v_1_speech_to_text_reviews_tasks_task_id_claim_post(
-**task_id:** `str` — The ID task to claim. 
+**page_size:** `typing.Optional[int]` — How many history items to return at maximum. Cannot exceed 1000; defaults to 100.
+
+
+ +
+
+**start_after_history_item_id:** `typing.Optional[str]` — The ID after which to start fetching; use this parameter to paginate across a large collection of history items. If this parameter is not provided, history items will be fetched starting from the most recently created one, ordered descending by their creation date.
+
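+
+To walk a large history, feed the last item's ID from one page into `start_after_history_item_id` on the next call. A minimal pagination sketch; the `history`, `has_more` and `last_history_item_id` response fields are assumptions based on the underlying REST response shape:
+
+```python
+from elevenlabs import ElevenLabs
+
+client = ElevenLabs(api_key="YOUR_API_KEY")
+
+items = []
+cursor = None
+while True:
+    page = client.history.get_all(
+        page_size=100,  # maximum allowed is 1000
+        start_after_history_item_id=cursor,
+    )
+    items.extend(page.history)
+    if not page.has_more:
+        break
+    cursor = page.last_history_item_id
+```
+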
+
+ +
+
+ +**voice_id:** `typing.Optional[str]` — Voice ID to be filtered for, you can use GET https://api.elevenlabs.io/v1/voices to receive a list of voices and their IDs. + +
+
+ +
+
+**search:** `typing.Optional[str]` — Search term used for filtering
+
+
+ +
+
+ +**source:** `typing.Optional[HistoryGetAllRequestSource]` — Source of the generated history item
@@ -55,10 +100,24 @@ client.claim_a_task_v_1_speech_to_text_reviews_tasks_task_id_claim_post(
-
client.submit_a_completed_task_v_1_speech_to_text_reviews_producers_user_id_tasks_task_id_submit_put(...) +
client.history.get(...) +
+
+ +#### 📝 Description + +
+
+
+Returns information about a history item by its ID.
+
+
+
+ #### 🔌 Usage
@@ -73,9 +132,8 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.submit_a_completed_task_v_1_speech_to_text_reviews_producers_user_id_tasks_task_id_submit_put( - user_id="user_id", - task_id="task_id", +client.history.get( + history_item_id="HISTORY_ITEM_ID", ) ``` @@ -92,15 +150,7 @@ client.submit_a_completed_task_v_1_speech_to_text_reviews_producers_user_id_task
-**user_id:** `str` - -
-
- -
-
- -**task_id:** `str` — The ID task review to claim. +**history_item_id:** `str` — History item ID to be used, you can use GET https://api.elevenlabs.io/v1/history to receive a list of history items and their IDs.
@@ -120,10 +170,24 @@ client.submit_a_completed_task_v_1_speech_to_text_reviews_producers_user_id_task
-
client.list_unclaimed_reviews_v_1_speech_to_text_reviews_tasks_post(...) +
client.history.delete(...) +
+
+ +#### 📝 Description + +
+
+
+Deletes a history item by its ID.
+
+
+
+ #### 🔌 Usage
@@ -133,20 +197,13 @@ client.submit_a_completed_task_v_1_speech_to_text_reviews_producers_user_id_task
```python -from elevenlabs import ElevenLabs, TagModel +from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.list_unclaimed_reviews_v_1_speech_to_text_reviews_tasks_post( - tags=[ - [ - TagModel( - kind="lang", - value="value", - ) - ] - ], +client.history.delete( + history_item_id="HISTORY_ITEM_ID", ) ``` @@ -163,7 +220,7 @@ client.list_unclaimed_reviews_v_1_speech_to_text_reviews_tasks_post(
-**tags:** `typing.Sequence[typing.Sequence[TagModel]]` +**history_item_id:** `str` — History item ID to be used, you can use GET https://api.elevenlabs.io/v1/history to receive a list of history items and their IDs.
@@ -171,53 +228,35 @@ client.list_unclaimed_reviews_v_1_speech_to_text_reviews_tasks_post(
-**page_size:** `typing.Optional[int]` — The number of tasks to return per page. +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
-
-**cursor:** `typing.Optional[str]` — Cursor for pagination, using the cursor from the previous page. -
+
+
client.history.get_audio(...)
-**unclaimed_only:** `typing.Optional[bool]` - -
-
+#### 📝 Description
-**include_instances:** `typing.Optional[bool]` - -
-
-
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
+Returns the audio of a history item.
-
-
-
- -
client.list_tasks_instances_for_a_user_v_1_speech_to_text_reviews_producers_user_id_tasks_get(...) -
-
#### 🔌 Usage @@ -233,8 +272,8 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.list_tasks_instances_for_a_user_v_1_speech_to_text_reviews_producers_user_id_tasks_get( - user_id="user_id", +client.history.get_audio( + history_item_id="HISTORY_ITEM_ID", ) ``` @@ -251,7 +290,7 @@ client.list_tasks_instances_for_a_user_v_1_speech_to_text_reviews_producers_user
-**user_id:** `str` +**history_item_id:** `str` — History item ID to be used, you can use GET https://api.elevenlabs.io/v1/history to receive a list of history items and their IDs.
@@ -259,38 +298,36 @@ client.list_tasks_instances_for_a_user_v_1_speech_to_text_reviews_producers_user
-**page_size:** `typing.Optional[int]` — The number of tasks to return per page. +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
+
+
-
-
-**cursor:** `typing.Optional[str]` — Cursor for pagination, using the cursor from the previous page. -
+
+
client.history.download(...)
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
- - - +#### 📝 Description - - -
+
+
-
client.compute_a_quote_for_a_asr_transcription_review_task_v_1_speech_to_text_reviews_get_quote_post(...)
+Download one or more history items. If one history item ID is provided, we will return a single audio file. If more than one history item ID is provided, we will return the history items packed into a .zip file.
+
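+
+A hedged sketch of the same call made directly against the REST endpoint, illustrating the single-file vs. .zip behaviour described above (the use of `httpx` and the manual file-type sniff are illustrative assumptions, not part of the SDK):
+
+```python
+import httpx
+
+resp = httpx.post(
+    "https://api.elevenlabs.io/v1/history/download",
+    headers={"xi-api-key": "YOUR_API_KEY"},
+    json={"history_item_ids": ["HISTORY_ITEM_ID"]},
+)
+resp.raise_for_status()
+# One ID yields a single audio file; several IDs yield a .zip archive ("PK" magic bytes).
+suffix = ".zip" if resp.content.startswith(b"PK") else ".mp3"
+with open(f"history_download{suffix}", "wb") as f:
+    f.write(resp.content)
+```
+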
+
+
+
+ #### 🔌 Usage
@@ -300,18 +337,13 @@ client.list_tasks_instances_for_a_user_v_1_speech_to_text_reviews_producers_user
```python -from elevenlabs import ElevenLabs, QuoteRequestModel +from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.compute_a_quote_for_a_asr_transcription_review_task_v_1_speech_to_text_reviews_get_quote_post( - request=QuoteRequestModel( - content_hash="content_hash", - duration_s=1.1, - speaker_count=1, - language="language", - ), +client.history.download( + history_item_ids=["HISTORY_ITEM_ID"], ) ``` @@ -328,7 +360,15 @@ client.compute_a_quote_for_a_asr_transcription_review_task_v_1_speech_to_text_re
-**request:** `QuoteRequestModel` +**history_item_ids:** `typing.Sequence[str]` — A list of history items to download, you can get IDs of history items and other metadata using the GET https://api.elevenlabs.io/v1/history endpoint. + +
+
+ +
+
+**output_format:** `typing.Optional[str]` — Output format to transcode the audio file; can be `wav` or `default`.
@@ -348,8 +388,8 @@ client.compute_a_quote_for_a_asr_transcription_review_task_v_1_speech_to_text_re
-## History -
client.history.get_all(...) +## TextToSoundEffects +
client.text_to_sound_effects.convert(...)
@@ -361,7 +401,7 @@ client.compute_a_quote_for_a_asr_transcription_review_task_v_1_speech_to_text_re
-Returns metadata about all your generated audio. +Turn text into sound effects for your videos, voice-overs or video games using the most advanced sound effects model in the world.
@@ -381,7 +421,9 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.history.get_all() +client.text_to_sound_effects.convert( + text="Spacious braam suitable for high-impact movie trailer moments", +) ``` @@ -397,15 +439,7 @@ client.history.get_all()
-**page_size:** `typing.Optional[int]` — How many history items to return at maximum. Can not exceed 1000, defaults to 100. - -
-
- -
-
- -**start_after_history_item_id:** `typing.Optional[str]` — After which ID to start fetching, use this parameter to paginate across a large collection of history items. In case this parameter is not provided history items will be fetched starting from the most recently created one ordered descending by their creation date. +**text:** `str` — The text that will get converted into a sound effect.
@@ -413,7 +447,7 @@ client.history.get_all()
-**voice_id:** `typing.Optional[str]` — Voice ID to be filtered for, you can use GET https://api.elevenlabs.io/v1/voices to receive a list of voices and their IDs. +**output_format:** `typing.Optional[TextToSoundEffectsConvertRequestOutputFormat]` — Output format of the generated audio. Formatted as codec_sample_rate_bitrate. So an mp3 with 22.05kHz sample rate at 32kbs is represented as mp3_22050_32. MP3 with 192kbps bitrate requires you to be subscribed to Creator tier or above. PCM with 44.1kHz sample rate requires you to be subscribed to Pro tier or above. Note that the μ-law format (sometimes written mu-law, often approximated as u-law) is commonly used for Twilio audio inputs.
@@ -421,7 +455,7 @@ client.history.get_all()
-**search:** `typing.Optional[str]` — search term used for filtering +**duration_seconds:** `typing.Optional[float]` — The duration of the sound which will be generated in seconds. Must be at least 0.5 and at most 22. If set to None we will guess the optimal duration using the prompt. Defaults to None.
@@ -429,7 +463,7 @@ client.history.get_all()
-**source:** `typing.Optional[HistoryGetAllRequestSource]` — Source of the generated history item +**prompt_influence:** `typing.Optional[float]` — A higher prompt influence makes your generation follow the prompt more closely while also making generations less variable. Must be a value between 0 and 1. Defaults to 0.3.
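+
+A short sketch combining the duration and prompt-influence knobs described above; the `save` helper ships with the `elevenlabs` package, and treating the return value as streamable audio bytes is an assumption:
+
+```python
+from elevenlabs import ElevenLabs, save
+
+client = ElevenLabs(api_key="YOUR_API_KEY")
+audio = client.text_to_sound_effects.convert(
+    text="Rolling thunder with distant rain",
+    duration_seconds=5.0,  # must be between 0.5 and 22; None lets the model choose
+    prompt_influence=0.6,  # between 0 and 1; higher follows the prompt more closely
+)
+save(audio, "thunder.mp3")
+```
+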
@@ -437,7 +471,7 @@ client.history.get_all()
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
@@ -449,7 +483,9 @@ client.history.get_all()
-
client.history.get(...) +## AudioIsolation +## samples +
client.samples.delete(...)
@@ -461,7 +497,7 @@ client.history.get_all()
-Returns information about an history item by its ID. +Removes a sample by its ID.
@@ -481,8 +517,9 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.history.get( - history_item_id="HISTORY_ITEM_ID", +client.samples.delete( + voice_id="VOICE_ID", + sample_id="SAMPLE_ID", ) ``` @@ -499,7 +536,15 @@ client.history.get(
-**history_item_id:** `str` — History item ID to be used, you can use GET https://api.elevenlabs.io/v1/history to receive a list of history items and their IDs. +**voice_id:** `str` — Voice ID to be used, you can use https://api.elevenlabs.io/v1/voices to list all the available voices. + +
+
+ +
+
+ +**sample_id:** `str` — Sample ID to be used, you can use GET https://api.elevenlabs.io/v1/voices/{voice_id} to list all the available samples for a voice.
@@ -519,7 +564,7 @@ client.history.get(
-
client.history.delete(...) +
client.samples.get_audio(...)
@@ -531,7 +576,7 @@ client.history.get(
-Delete a history item by its ID +Returns the audio corresponding to a sample attached to a voice.
@@ -551,8 +596,9 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.history.delete( - history_item_id="HISTORY_ITEM_ID", +client.samples.get_audio( + voice_id="VOICE_ID", + sample_id="SAMPLE_ID", ) ``` @@ -569,7 +615,7 @@ client.history.delete(
-**history_item_id:** `str` — History item ID to be used, you can use GET https://api.elevenlabs.io/v1/history to receive a list of history items and their IDs. +**voice_id:** `str` — Voice ID to be used, you can use https://api.elevenlabs.io/v1/voices to list all the available voices.
@@ -577,7 +623,15 @@ client.history.delete(
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**sample_id:** `str` — Sample ID to be used, you can use GET https://api.elevenlabs.io/v1/voices/{voice_id} to list all the available samples for a voice. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
@@ -589,7 +643,8 @@ client.history.delete(
-
client.history.get_audio(...) +## TextToSpeech +
client.text_to_speech.convert(...)
@@ -601,7 +656,7 @@ client.history.delete(
-Returns the audio of an history item. +Converts text into speech using a voice of your choice and returns audio.
@@ -621,8 +676,11 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.history.get_audio( - history_item_id="HISTORY_ITEM_ID", +client.text_to_speech.convert( + voice_id="JBFqnCBsd6RMkjVDRZzb", + output_format="mp3_44100_128", + text="The first move is what sets everything in motion.", + model_id="eleven_multilingual_v2", ) ``` @@ -639,7 +697,7 @@ client.history.get_audio(
-**history_item_id:** `str` — History item ID to be used, you can use GET https://api.elevenlabs.io/v1/history to receive a list of history items and their IDs. +**voice_id:** `str` — Voice ID to be used, you can use https://api.elevenlabs.io/v1/voices to list all the available voices.
@@ -647,69 +705,48 @@ client.history.get_audio(
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response. +**text:** `str` — The text that will get converted into speech.
- -
+
+
+**enable_logging:** `typing.Optional[bool]` — When enable_logging is set to false zero retention mode will be used for the request. This will mean history features are unavailable for this request, including request stitching. Zero retention mode may only be used by enterprise customers. +
-
-
client.history.download(...)
-#### 📝 Description - -
-
+**optimize_streaming_latency:** `typing.Optional[int]` -
-
+You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. Possible values:
+0 - default mode (no latency optimizations)
+1 - normal latency optimizations (about 50% of possible latency improvement of option 3)
+2 - strong latency optimizations (about 75% of possible latency improvement of option 3)
+3 - max latency optimizations
+4 - max latency optimizations, but also with text normalizer turned off for even more latency savings (best latency, but can mispronounce e.g. numbers and dates).
-Download one or more history items. If one history item ID is provided, we will return a single audio file. If more than one history item IDs are provided, we will provide the history items packed into a .zip file.
-
-
+Defaults to None. +
-#### 🔌 Usage - -
-
-
-```python -from elevenlabs import ElevenLabs - -client = ElevenLabs( - api_key="YOUR_API_KEY", -) -client.history.download( - history_item_ids=["HISTORY_ITEM_ID"], -) - -``` -
-
+**output_format:** `typing.Optional[OutputFormat]` — The output format of the generated audio. +
-#### ⚙️ Parameters - -
-
-
-**history_item_ids:** `typing.Sequence[str]` — A list of history items to download, you can get IDs of history items and other metadata using the GET https://api.elevenlabs.io/v1/history endpoint. +**model_id:** `typing.Optional[str]` — Identifier of the model that will be used, you can query them using GET /v1/models. The model needs to have support for text to speech, you can check this using the can_do_text_to_speech property.
@@ -717,7 +754,7 @@ client.history.download(
-**output_format:** `typing.Optional[str]` — Output format to transcode the audio file, can be wav or default. +**language_code:** `typing.Optional[str]` — Language code (ISO 639-1) used to enforce a language for the model. Currently only Turbo v2.5 and Flash v2.5 support language enforcement. For other models, an error will be returned if language code is provided.
@@ -725,70 +762,55 @@ client.history.download(
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**voice_settings:** `typing.Optional[VoiceSettings]` — Voice settings overriding stored settings for the given voice. They are applied only on the given request.
-
-
+
+
+**pronunciation_dictionary_locators:** `typing.Optional[typing.Sequence[PronunciationDictionaryVersionLocator]]` — A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order. You may have up to 3 locators per request +
-
-## TextToSoundEffects -
client.text_to_sound_effects.convert(...)
-#### 📝 Description - -
-
+**seed:** `typing.Optional[int]` — If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be an integer between 0 and 4294967295.
+
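+
+A hedged sketch of using `seed` for best-effort reproducibility (treating the return value as byte chunks is an assumption, and equality is likely rather than guaranteed):
+
+```python
+from elevenlabs import ElevenLabs
+
+client = ElevenLabs(api_key="YOUR_API_KEY")
+
+def generate(seed: int) -> bytes:
+    audio = client.text_to_speech.convert(
+        voice_id="JBFqnCBsd6RMkjVDRZzb",
+        model_id="eleven_multilingual_v2",
+        text="The first move is what sets everything in motion.",
+        seed=seed,
+    )
+    return b"".join(audio)
+
+# Usually holds; determinism is best-effort, not guaranteed.
+assert generate(42) == generate(42)
+```
+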
+
-Converts a text of your choice into sound -
-
+**previous_text:** `typing.Optional[str]` — The text that came before the text of the current request. Can be used to improve the speech's continuity when concatenating together multiple generations or to influence the speech's continuity in the current generation. +
-#### 🔌 Usage - -
-
-
-```python -from elevenlabs import ElevenLabs - -client = ElevenLabs( - api_key="YOUR_API_KEY", -) -client.text_to_sound_effects.convert( - text="Spacious braam suitable for high-impact movie trailer moments", -) - -``` -
-
+**next_text:** `typing.Optional[str]` — The text that comes after the text of the current request. Can be used to improve the speech's continuity when concatenating together multiple generations or to influence the speech's continuity in the current generation. +
-#### ⚙️ Parameters -
+**previous_request_ids:** `typing.Optional[typing.Sequence[str]]` — A list of request_id of the samples that were generated before this generation. Can be used to improve the speech's continuity when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. In case both previous_text and previous_request_ids are sent, previous_text will be ignored. A maximum of 3 request_ids can be sent.
+
+
+
-**text:** `str` — The text that will get converted into a sound effect.
+**next_request_ids:** `typing.Optional[typing.Sequence[str]]` — A list of request_id of the samples that come after this generation. next_request_ids is especially useful for maintaining the speech's continuity when regenerating a sample that has had some audio quality issues. For example, if you have generated 3 speech clips, and you want to improve clip 2, passing the request id of clip 3 as a next_request_id (and that of clip 1 as a previous_request_id) will help maintain natural flow in the combined speech. The results will be best when the same model is used across the generations. In case both next_text and next_request_ids are sent, next_text will be ignored. A maximum of 3 request_ids can be sent.
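+
+A minimal sketch of the text-based variant of this stitching: splitting a long passage into per-paragraph requests and passing `previous_text`/`next_text` for continuity (the request-ID variant works the same way once you hold the request IDs of earlier generations):
+
+```python
+from elevenlabs import ElevenLabs
+
+client = ElevenLabs(api_key="YOUR_API_KEY")
+paragraphs = ["First paragraph.", "Second paragraph.", "Third paragraph."]
+
+clips = []
+for i, text in enumerate(paragraphs):
+    clips.append(
+        client.text_to_speech.convert(
+            voice_id="JBFqnCBsd6RMkjVDRZzb",
+            model_id="eleven_multilingual_v2",  # keep the model fixed across requests
+            text=text,
+            previous_text=paragraphs[i - 1] if i > 0 else None,
+            next_text=paragraphs[i + 1] if i + 1 < len(paragraphs) else None,
+        )
+    )
+```
+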
@@ -796,7 +818,7 @@ client.text_to_sound_effects.convert(
-**duration_seconds:** `typing.Optional[float]` — The duration of the sound which will be generated in seconds. Must be at least 0.5 and at most 22. If set to None we will guess the optimal duration using the prompt. Defaults to None. +**use_pvc_as_ivc:** `typing.Optional[bool]` — If true, we won't use PVC version of the voice for the generation but the IVC version. This is a temporary workaround for higher latency in PVC versions.
@@ -804,7 +826,7 @@ client.text_to_sound_effects.convert(
-**prompt_influence:** `typing.Optional[float]` — A higher prompt influence makes your generation follow the prompt more closely while also making generations less variable. Must be a value between 0 and 1. Defaults to 0.3. +**apply_text_normalization:** `typing.Optional[BodyTextToSpeechV1TextToSpeechVoiceIdPostApplyTextNormalization]` — This parameter controls text normalization with three modes: 'auto', 'on', and 'off'. When set to 'auto', the system will automatically decide whether to apply text normalization (e.g., spelling out numbers). With 'on', text normalization will always be applied, while with 'off', it will be skipped. Cannot be turned on for 'eleven_turbo_v2_5' model.
@@ -824,9 +846,7 @@ client.text_to_sound_effects.convert(
-## AudioIsolation -## samples -
client.samples.delete(...) +
client.text_to_speech.convert_with_timestamps(...)
@@ -838,7 +858,7 @@ client.text_to_sound_effects.convert(
-Removes a sample by its ID. +Generate speech from text with precise character-level timing information for audio-text synchronization.
@@ -858,9 +878,9 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.samples.delete( - voice_id="VOICE_ID", - sample_id="SAMPLE_ID", +client.text_to_speech.convert_with_timestamps( + voice_id="21m00Tcm4TlvDq8ikWAM", + text="This is a test for the API of ElevenLabs.", ) ``` @@ -885,7 +905,7 @@ client.samples.delete(
-**sample_id:** `str` — Sample ID to be used, you can use GET https://api.elevenlabs.io/v1/voices/{voice_id} to list all the available samples for a voice. +**text:** `str` — The text that will get converted into speech.
@@ -893,70 +913,112 @@ client.samples.delete(
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**enable_logging:** `typing.Optional[bool]` — When enable_logging is set to false zero retention mode will be used for the request. This will mean history features are unavailable for this request, including request stitching. Zero retention mode may only be used by enterprise customers.
+ +
+
+**optimize_streaming_latency:** `typing.Optional[int]`
+
+You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. Possible values:
+0 - default mode (no latency optimizations)
+1 - normal latency optimizations (about 50% of possible latency improvement of option 3)
+2 - strong latency optimizations (about 75% of possible latency improvement of option 3)
+3 - max latency optimizations
+4 - max latency optimizations, but also with text normalizer turned off for even more latency savings (best latency, but can mispronounce e.g. numbers and dates).
+
+Defaults to None.
+
+
+
+**output_format:** `typing.Optional[OutputFormat]` — The output format of the generated audio. +
-
-
client.samples.get_audio(...)
-#### 📝 Description +**model_id:** `typing.Optional[str]` — Identifier of the model that will be used, you can query them using GET /v1/models. The model needs to have support for text to speech, you can check this using the can_do_text_to_speech property. + +
+
+**language_code:** `typing.Optional[str]` — Language code (ISO 639-1) used to enforce a language for the model. Currently only Turbo v2.5 and Flash v2.5 support language enforcement. For other models, an error will be returned if language code is provided. + +
+
+
-Returns the audio corresponding to a sample attached to a voice. +**voice_settings:** `typing.Optional[VoiceSettings]` — Voice settings overriding stored settings for the given voice. They are applied only on the given request. +
+ +
+
+ +**pronunciation_dictionary_locators:** `typing.Optional[typing.Sequence[PronunciationDictionaryVersionLocator]]` — A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order. You may have up to 3 locators per request +
-#### 🔌 Usage -
+**seed:** `typing.Optional[int]` — If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be an integer between 0 and 4294967295.
+
+
+
-```python -from elevenlabs import ElevenLabs +**previous_text:** `typing.Optional[str]` — The text that came before the text of the current request. Can be used to improve the speech's continuity when concatenating together multiple generations or to influence the speech's continuity in the current generation. + +
+
-client = ElevenLabs( - api_key="YOUR_API_KEY", -) -client.samples.get_audio( - voice_id="VOICE_ID", - sample_id="SAMPLE_ID", -) +
+
-``` +**next_text:** `typing.Optional[str]` — The text that comes after the text of the current request. Can be used to improve the speech's continuity when concatenating together multiple generations or to influence the speech's continuity in the current generation. +
+ +
+
+**previous_request_ids:** `typing.Optional[typing.Sequence[str]]` — A list of request_id of the samples that were generated before this generation. Can be used to improve the speech's continuity when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. In case both previous_text and previous_request_ids are sent, previous_text will be ignored. A maximum of 3 request_ids can be sent.
+
-#### ⚙️ Parameters -
+**next_request_ids:** `typing.Optional[typing.Sequence[str]]` — A list of request_id of the samples that come after this generation. next_request_ids is especially useful for maintaining the speech's continuity when regenerating a sample that has had some audio quality issues. For example, if you have generated 3 speech clips, and you want to improve clip 2, passing the request id of clip 3 as a next_request_id (and that of clip 1 as a previous_request_id) will help maintain natural flow in the combined speech. The results will be best when the same model is used across the generations. In case both next_text and next_request_ids are sent, next_text will be ignored. A maximum of 3 request_ids can be sent.
+
+
+
-**voice_id:** `str` — Voice ID to be used, you can use https://api.elevenlabs.io/v1/voices to list all the available voices. +**use_pvc_as_ivc:** `typing.Optional[bool]` — If true, we won't use PVC version of the voice for the generation but the IVC version. This is a temporary workaround for higher latency in PVC versions.
@@ -964,7 +1026,9 @@ client.samples.get_audio(
-**sample_id:** `str` — Sample ID to be used, you can use GET https://api.elevenlabs.io/v1/voices/{voice_id} to list all the available samples for a voice. +**apply_text_normalization:** `typing.Optional[ + BodyTextToSpeechWithTimestampsV1TextToSpeechVoiceIdWithTimestampsPostApplyTextNormalization +]` — This parameter controls text normalization with three modes: 'auto', 'on', and 'off'. When set to 'auto', the system will automatically decide whether to apply text normalization (e.g., spelling out numbers). With 'on', text normalization will always be applied, while with 'off', it will be skipped. Cannot be turned on for 'eleven_turbo_v2_5' model.
@@ -972,7 +1036,7 @@ client.samples.get_audio(
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response. +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -984,8 +1048,7 @@ client.samples.get_audio(
-## TextToSpeech -
client.text_to_speech.convert(...) +
client.text_to_speech.convert_as_stream(...)
@@ -997,7 +1060,7 @@ client.samples.get_audio(
-Converts text into speech using a voice of your choice and returns audio. +Converts text into speech using a voice of your choice and returns audio as an audio stream.
@@ -1017,7 +1080,7 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.text_to_speech.convert( +client.text_to_speech.convert_as_stream( voice_id="JBFqnCBsd6RMkjVDRZzb", output_format="mp3_44100_128", text="The first move is what sets everything in motion.", @@ -1103,7 +1166,7 @@ Defaults to None.
-**voice_settings:** `typing.Optional[VoiceSettings]` — Voice settings overriding stored setttings for the given voice. They are applied only on the given request. +**voice_settings:** `typing.Optional[VoiceSettings]` — Voice settings overriding stored settings for the given voice. They are applied only on the given request.
@@ -1167,7 +1230,9 @@ Defaults to None.
-**apply_text_normalization:** `typing.Optional[BodyTextToSpeechV1TextToSpeechVoiceIdPostApplyTextNormalization]` — This parameter controls text normalization with three modes: 'auto', 'on', and 'off'. When set to 'auto', the system will automatically decide whether to apply text normalization (e.g., spelling out numbers). With 'on', text normalization will always be applied, while with 'off', it will be skipped. Cannot be turned on for 'eleven_turbo_v2_5' model. +**apply_text_normalization:** `typing.Optional[ + BodyTextToSpeechStreamingV1TextToSpeechVoiceIdStreamPostApplyTextNormalization +]` — This parameter controls text normalization with three modes: 'auto', 'on', and 'off'. When set to 'auto', the system will automatically decide whether to apply text normalization (e.g., spelling out numbers). With 'on', text normalization will always be applied, while with 'off', it will be skipped. Cannot be turned on for 'eleven_turbo_v2_5' model.
@@ -1187,7 +1252,7 @@ Defaults to None.
-
client.text_to_speech.convert_with_timestamps(...) +
client.text_to_speech.stream_with_timestamps(...)
@@ -1199,7 +1264,7 @@ Defaults to None.
-Generate speech from text with precise character-level timing information for audio-text synchronization.
+Converts text into speech using a voice of your choice and returns a stream of JSONs, each containing audio as a base64 encoded string together with timing information on when each character is spoken.
@@ -1219,10 +1284,14 @@ from elevenlabs import ElevenLabs

 client = ElevenLabs(
     api_key="YOUR_API_KEY",
 )
-client.text_to_speech.convert_with_timestamps(
-    voice_id="21m00Tcm4TlvDq8ikWAM",
-    text="This is a test for the API of ElevenLabs.",
+response = client.text_to_speech.stream_with_timestamps(
+    voice_id="JBFqnCBsd6RMkjVDRZzb",
+    output_format="mp3_44100_128",
+    text="The first move is what sets everything in motion.",
+    model_id="eleven_multilingual_v2",
 )
+for chunk in response:
+    print(chunk)

 ```

@@ -1303,7 +1372,7 @@ Defaults to None.
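+
+Each streamed JSON chunk carries base64-encoded audio plus character-level timing. A hedged sketch of assembling them, reusing `response` from the usage example above; the `audio_base_64` and `alignment` field names are assumptions drawn from the REST streaming response:
+
+```python
+import base64
+
+audio = bytearray()
+characters, start_times = [], []
+for chunk in response:
+    if chunk.audio_base_64:
+        audio.extend(base64.b64decode(chunk.audio_base_64))
+    if chunk.alignment:
+        characters.extend(chunk.alignment.characters)
+        start_times.extend(chunk.alignment.character_start_times_seconds)
+
+with open("speech.mp3", "wb") as f:
+    f.write(audio)
+```
+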
-**voice_settings:** `typing.Optional[VoiceSettings]` — Voice settings overriding stored setttings for the given voice. They are applied only on the given request. +**voice_settings:** `typing.Optional[VoiceSettings]` — Voice settings overriding stored settings for the given voice. They are applied only on the given request.
@@ -1368,7 +1437,7 @@ Defaults to None.
**apply_text_normalization:** `typing.Optional[ - BodyTextToSpeechWithTimestampsV1TextToSpeechVoiceIdWithTimestampsPostApplyTextNormalization + BodyTextToSpeechStreamingWithTimestampsV1TextToSpeechVoiceIdStreamWithTimestampsPostApplyTextNormalization ]` — This parameter controls text normalization with three modes: 'auto', 'on', and 'off'. When set to 'auto', the system will automatically decide whether to apply text normalization (e.g., spelling out numbers). With 'on', text normalization will always be applied, while with 'off', it will be skipped. Cannot be turned on for 'eleven_turbo_v2_5' model.
@@ -1389,7 +1458,8 @@ Defaults to None.
-
client.text_to_speech.convert_as_stream(...) +## SpeechToSpeech +
client.speech_to_speech.convert(...)
@@ -1401,7 +1471,7 @@ Defaults to None.
-Converts text into speech using a voice of your choice and returns audio as an audio stream. +Transform audio from one voice to another. Maintain full control over emotion, timing and delivery.
@@ -1421,11 +1491,10 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.text_to_speech.convert_as_stream( +client.speech_to_speech.convert( voice_id="JBFqnCBsd6RMkjVDRZzb", output_format="mp3_44100_128", - text="The first move is what sets everything in motion.", - model_id="eleven_multilingual_v2", + model_id="eleven_multilingual_sts_v2", ) ``` @@ -1450,7 +1519,9 @@ client.text_to_speech.convert_as_stream(
-**text:** `str` — The text that will get converted into speech. +**audio:** `from __future__ import annotations + +core.File` — See core.File for more documentation
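+
+In practice `core.File` accepts, among other shapes, an open binary file handle. A hedged sketch (treating the return value as an iterator of audio bytes is an assumption):
+
+```python
+from elevenlabs import ElevenLabs
+
+client = ElevenLabs(api_key="YOUR_API_KEY")
+with open("input.mp3", "rb") as source:
+    converted = client.speech_to_speech.convert(
+        voice_id="JBFqnCBsd6RMkjVDRZzb",
+        audio=source,
+        model_id="eleven_multilingual_sts_v2",
+        output_format="mp3_44100_128",
+    )
+    with open("converted.mp3", "wb") as out:
+        for chunk in converted:
+            out.write(chunk)
+```
+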
@@ -1491,7 +1562,7 @@ Defaults to None.
-**model_id:** `typing.Optional[str]` — Identifier of the model that will be used, you can query them using GET /v1/models. The model needs to have support for text to speech, you can check this using the can_do_text_to_speech property. +**model_id:** `typing.Optional[str]` — Identifier of the model that will be used, you can query them using GET /v1/models. The model needs to have support for speech to speech, you can check this using the can_do_voice_conversion property.
@@ -1499,7 +1570,7 @@ Defaults to None.
-**language_code:** `typing.Optional[str]` — Language code (ISO 639-1) used to enforce a language for the model. Currently only Turbo v2.5 and Flash v2.5 support language enforcement. For other models, an error will be returned if language code is provided.
+**voice_settings:** `typing.Optional[str]` — Voice settings overriding stored settings for the given voice. They are applied only on the given request. Needs to be sent as a JSON encoded string.
@@ -1507,7 +1578,7 @@ Defaults to None.
-**voice_settings:** `typing.Optional[VoiceSettings]` — Voice settings overriding stored setttings for the given voice. They are applied only on the given request.
+**seed:** `typing.Optional[int]` — If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be an integer between 0 and 4294967295.
@@ -1515,7 +1586,7 @@ Defaults to None.
-**pronunciation_dictionary_locators:** `typing.Optional[typing.Sequence[PronunciationDictionaryVersionLocator]]` — A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order. You may have up to 3 locators per request +**remove_background_noise:** `typing.Optional[bool]` — If set, will remove the background noise from your audio input using our audio isolation model. Only applies to Voice Changer.
@@ -1523,422 +1594,10 @@ Defaults to None.
-**seed:** `typing.Optional[int]` — If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be integer between 0 and 4294967295. +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response. -
-
- -
-
- -**previous_text:** `typing.Optional[str]` — The text that came before the text of the current request. Can be used to improve the speech's continuity when concatenating together multiple generations or to influence the speech's continuity in the current generation. - -
-
- -
-
- -**next_text:** `typing.Optional[str]` — The text that comes after the text of the current request. Can be used to improve the speech's continuity when concatenating together multiple generations or to influence the speech's continuity in the current generation. - -
-
- -
-
- -**previous_request_ids:** `typing.Optional[typing.Sequence[str]]` — A list of request_id of the samples that were generated before this generation. Can be used to improve the speech's continuity when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. In case both previous_text and previous_request_ids is send, previous_text will be ignored. A maximum of 3 request_ids can be send. - -
-
- -
-
- -**next_request_ids:** `typing.Optional[typing.Sequence[str]]` — A list of request_id of the samples that come after this generation. next_request_ids is especially useful for maintaining the speech's continuity when regenerating a sample that has had some audio quality issues. For example, if you have generated 3 speech clips, and you want to improve clip 2, passing the request id of clip 3 as a next_request_id (and that of clip 1 as a previous_request_id) will help maintain natural flow in the combined speech. The results will be best when the same model is used across the generations. In case both next_text and next_request_ids is send, next_text will be ignored. A maximum of 3 request_ids can be send. - -
-
- -
-
- -**use_pvc_as_ivc:** `typing.Optional[bool]` — If true, we won't use PVC version of the voice for the generation but the IVC version. This is a temporary workaround for higher latency in PVC versions. - -
-
- -
-
- -**apply_text_normalization:** `typing.Optional[ - BodyTextToSpeechStreamingV1TextToSpeechVoiceIdStreamPostApplyTextNormalization -]` — This parameter controls text normalization with three modes: 'auto', 'on', and 'off'. When set to 'auto', the system will automatically decide whether to apply text normalization (e.g., spelling out numbers). With 'on', text normalization will always be applied, while with 'off', it will be skipped. Cannot be turned on for 'eleven_turbo_v2_5' model. - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response. - -
-
- -
- - - - -
- -
client.text_to_speech.stream_with_timestamps(...) -
-
- -#### 📝 Description - -
-
- -
-
- -Converts text into speech using a voice of your choice and returns a stream of JSONs containing audio as a base64 encoded string together with information on when which character was spoken. -
-
-
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from elevenlabs import ElevenLabs - -client = ElevenLabs( - api_key="YOUR_API_KEY", -) -response = client.text_to_speech.stream_with_timestamps( - voice_id="JBFqnCBsd6RMkjVDRZzb", - output_format="mp3_44100_128", - text="The first move is what sets everything in motion.", - model_id="eleven_multilingual_v2", -) -for chunk in response: - yield chunk - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**voice_id:** `str` — Voice ID to be used, you can use https://api.elevenlabs.io/v1/voices to list all the available voices. - -
-
- -
-
- -**text:** `str` — The text that will get converted into speech. - -
-
- -
-
- -**enable_logging:** `typing.Optional[bool]` — When enable_logging is set to false zero retention mode will be used for the request. This will mean history features are unavailable for this request, including request stitching. Zero retention mode may only be used by enterprise customers. - -
-
- -
-
- -**optimize_streaming_latency:** `typing.Optional[int]` - -You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. Possible values: -0 - default mode (no latency optimizations) -1 - normal latency optimizations (about 50% of possible latency improvement of option 3) -2 - strong latency optimizations (about 75% of possible latency improvement of option 3) -3 - max latency optimizations -4 - max latency optimizations, but also with text normalizer turned off for even more latency savings (best latency, but can mispronounce eg numbers and dates). - -Defaults to None. - -
-
- -
-
- -**output_format:** `typing.Optional[OutputFormat]` — The output format of the generated audio. - -
-
- -
-
- -**model_id:** `typing.Optional[str]` — Identifier of the model that will be used, you can query them using GET /v1/models. The model needs to have support for text to speech, you can check this using the can_do_text_to_speech property. - -
-
- -
-
- -**language_code:** `typing.Optional[str]` — Language code (ISO 639-1) used to enforce a language for the model. Currently only Turbo v2.5 and Flash v2.5 support language enforcement. For other models, an error will be returned if language code is provided. - -
-
- -
-
- -**voice_settings:** `typing.Optional[VoiceSettings]` — Voice settings overriding stored setttings for the given voice. They are applied only on the given request. - -
-
- -
-
- -**pronunciation_dictionary_locators:** `typing.Optional[typing.Sequence[PronunciationDictionaryVersionLocator]]` — A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order. You may have up to 3 locators per request - -
-
- -
-
- -**seed:** `typing.Optional[int]` — If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be integer between 0 and 4294967295. - -
-
- -
-
- -**previous_text:** `typing.Optional[str]` — The text that came before the text of the current request. Can be used to improve the speech's continuity when concatenating together multiple generations or to influence the speech's continuity in the current generation. - -
-
- -
-
- -**next_text:** `typing.Optional[str]` — The text that comes after the text of the current request. Can be used to improve the speech's continuity when concatenating together multiple generations or to influence the speech's continuity in the current generation. - -
-
- -
-
- -**previous_request_ids:** `typing.Optional[typing.Sequence[str]]` — A list of request_id of the samples that were generated before this generation. Can be used to improve the speech's continuity when splitting up a large task into multiple requests. The results will be best when the same model is used across the generations. In case both previous_text and previous_request_ids is send, previous_text will be ignored. A maximum of 3 request_ids can be send. - -
-
- -
-
- -**next_request_ids:** `typing.Optional[typing.Sequence[str]]` — A list of request_id of the samples that come after this generation. next_request_ids is especially useful for maintaining the speech's continuity when regenerating a sample that has had some audio quality issues. For example, if you have generated 3 speech clips, and you want to improve clip 2, passing the request id of clip 3 as a next_request_id (and that of clip 1 as a previous_request_id) will help maintain natural flow in the combined speech. The results will be best when the same model is used across the generations. In case both next_text and next_request_ids is send, next_text will be ignored. A maximum of 3 request_ids can be send. - -
-
- -
-
- -**use_pvc_as_ivc:** `typing.Optional[bool]` — If true, we won't use PVC version of the voice for the generation but the IVC version. This is a temporary workaround for higher latency in PVC versions. - -
-
- -
-
- -**apply_text_normalization:** `typing.Optional[ - BodyTextToSpeechStreamingWithTimestampsV1TextToSpeechVoiceIdStreamWithTimestampsPostApplyTextNormalization -]` — This parameter controls text normalization with three modes: 'auto', 'on', and 'off'. When set to 'auto', the system will automatically decide whether to apply text normalization (e.g., spelling out numbers). With 'on', text normalization will always be applied, while with 'off', it will be skipped. Cannot be turned on for 'eleven_turbo_v2_5' model. - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -## SpeechToSpeech -
client.speech_to_speech.convert(...) -
-
- -#### 📝 Description - -
-
- -
-
- -Transform audio from one voice to another. Maintain full control over emotion, timing and delivery. -
-
-
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from elevenlabs import ElevenLabs - -client = ElevenLabs( - api_key="YOUR_API_KEY", -) -client.speech_to_speech.convert( - voice_id="JBFqnCBsd6RMkjVDRZzb", - output_format="mp3_44100_128", - model_id="eleven_multilingual_sts_v2", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**voice_id:** `str` — Voice ID to be used, you can use https://api.elevenlabs.io/v1/voices to list all the available voices. - -
-
- -
-
- -**audio:** `from __future__ import annotations - -core.File` — See core.File for more documentation - -
-
- -
-
- -**enable_logging:** `typing.Optional[bool]` — When enable_logging is set to false zero retention mode will be used for the request. This will mean history features are unavailable for this request, including request stitching. Zero retention mode may only be used by enterprise customers. - -
-
- -
-
- -**optimize_streaming_latency:** `typing.Optional[int]` - -You can turn on latency optimizations at some cost of quality. The best possible final latency varies by model. Possible values: -0 - default mode (no latency optimizations) -1 - normal latency optimizations (about 50% of possible latency improvement of option 3) -2 - strong latency optimizations (about 75% of possible latency improvement of option 3) -3 - max latency optimizations -4 - max latency optimizations, but also with text normalizer turned off for even more latency savings (best latency, but can mispronounce eg numbers and dates). - -Defaults to None. - -
-
- -
-
- -**output_format:** `typing.Optional[OutputFormat]` — The output format of the generated audio. - -
-
- -
-
- -**model_id:** `typing.Optional[str]` — Identifier of the model that will be used, you can query them using GET /v1/models. The model needs to have support for speech to speech, you can check this using the can_do_voice_conversion property. - -
-
- -
-
- -**voice_settings:** `typing.Optional[str]` — Voice settings overriding stored setttings for the given voice. They are applied only on the given request. Needs to be send as a JSON encoded string. - -
-
- -
-
- -**seed:** `typing.Optional[int]` — If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be integer between 0 and 4294967295. - -
-
- -
-
- -**remove_background_noise:** `typing.Optional[bool]` — If set will remove the background noise from your audio input using our audio isolation model. Only applies to Voice Changer. - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response. - -
-
+
+
@@ -2058,7 +1717,7 @@ Defaults to None.
-**voice_settings:** `typing.Optional[str]` — Voice settings overriding stored setttings for the given voice. They are applied only on the given request. Needs to be send as a JSON encoded string.
+**voice_settings:** `typing.Optional[str]` — Voice settings overriding stored settings for the given voice. They are applied only on the given request. Needs to be sent as a JSON encoded string.
@@ -2074,7 +1733,7 @@ Defaults to None.
-**remove_background_noise:** `typing.Optional[bool]` — If set will remove the background noise from your audio input using our audio isolation model. Only applies to Voice Changer. +**remove_background_noise:** `typing.Optional[bool]` — If set, will remove the background noise from your audio input using our audio isolation model. Only applies to Voice Changer.
@@ -2329,7 +1988,7 @@ client.voice_generation.create_a_previously_generated_voice(
-**generated_voice_id:** `str` — The generated_voice_id to create, call POST /v1/voice-generation/generate-voice and fetch the generated_voice_id from the response header if don't have one yet.
+**generated_voice_id:** `str` — The generated_voice_id to create, call POST /v1/text-to-voice/create-previews and fetch the generated_voice_id from the response header if you don't have one yet.
@@ -2399,7 +2058,7 @@ client = ElevenLabs( api_key="YOUR_API_KEY", ) client.text_to_voice.create_previews( - voice_description="A sassy little squeaky mouse", + voice_description="A sassy squeaky mouse", text="Every act of kindness, no matter how small, carries value and can make a difference, as no gesture of goodwill is ever wasted.", ) @@ -2433,20 +2092,7 @@ client.text_to_voice.create_previews(
-**output_format:** `typing.Optional[TextToVoiceCreatePreviewsRequestOutputFormat]` - -Output format of the generated audio. Must be one of: -mp3_22050_32 - output format, mp3 with 22.05kHz sample rate at 32kbps. -mp3_44100_32 - output format, mp3 with 44.1kHz sample rate at 32kbps. -mp3_44100_64 - output format, mp3 with 44.1kHz sample rate at 64kbps. -mp3_44100_96 - output format, mp3 with 44.1kHz sample rate at 96kbps. -mp3_44100_128 - default output format, mp3 with 44.1kHz sample rate at 128kbps. -mp3_44100_192 - output format, mp3 with 44.1kHz sample rate at 192kbps. Requires you to be subscribed to Creator tier or above. -pcm_16000 - PCM format (S16LE) with 16kHz sample rate. -pcm_22050 - PCM format (S16LE) with 22.05kHz sample rate. -pcm_24000 - PCM format (S16LE) with 24kHz sample rate. -pcm_44100 - PCM format (S16LE) with 44.1kHz sample rate. Requires you to be subscribed to Pro tier or above. -ulaw_8000 - μ-law format (sometimes written mu-law, often approximated as u-law) with 8kHz sample rate. Note that this format is commonly used for Twilio audio inputs. +**output_format:** `typing.Optional[TextToVoiceCreatePreviewsRequestOutputFormat]` — Output format of the generated audio. Formatted as codec_sample_rate_bitrate. So an mp3 with 22.05kHz sample rate at 32kbs is represented as mp3_22050_32. MP3 with 192kbps bitrate requires you to be subscribed to Creator tier or above. PCM with 44.1kHz sample rate requires you to be subscribed to Pro tier or above. Note that the μ-law format (sometimes written mu-law, often approximated as u-law) is commonly used for Twilio audio inputs.
@@ -2486,7 +2132,7 @@ ulaw_8000 - μ-law format (sometimes written mu-law, often approximated as u-law
-Create a voice from previously generated voice preview. This endpoint should be called after you fetched a generated_voice_id using /v1/text-to-voice/create-previews.
+Create a voice from a previously generated voice preview. This endpoint should be called after you have fetched a generated_voice_id using POST /v1/text-to-voice/create-previews.
@@ -2507,8 +2153,8 @@ client = ElevenLabs( api_key="YOUR_API_KEY", ) client.text_to_voice.create_voice_from_preview( - voice_name="Little squeaky mouse", - voice_description="A sassy little squeaky mouse", + voice_name="Sassy squeaky mouse", + voice_description="A sassy squeaky mouse", generated_voice_id="37HceQefKmEi3bGovXjL", ) @@ -2542,7 +2188,7 @@ client.text_to_voice.create_voice_from_preview(
-**generated_voice_id:** `str` — The generated_voice_id to create, call POST /v1/voice-generation/generate-voice and fetch the generated_voice_id from the response header if don't have one yet.
+**generated_voice_id:** `str` — The generated_voice_id to create, call POST /v1/text-to-voice/create-previews and fetch the generated_voice_id from the response header if you don't have one yet.
@@ -2712,7 +2358,7 @@ client.user.get()
-Gets a list of all available voices for a user. +Returns a list of all available voices for a user.
@@ -3058,7 +2704,7 @@ client.voices.delete(
-Edit your settings for a specific voice. "similarity_boost" corresponds to"Clarity + Similarity Enhancement" in the web app and "stability" corresponds to "Stability" slider in the web app. +Edit your settings for a specific voice. "similarity_boost" corresponds to "Clarity + Similarity Enhancement" in the web app and "stability" corresponds to "Stability" slider in the web app.
@@ -3205,7 +2851,7 @@ typing.List[core.File]` — See core.File for more documentation
-**description:** `typing.Optional[str]` — How would you describe the voice? +**description:** `typing.Optional[str]` — A description of the voice.
@@ -3318,7 +2964,7 @@ typing.Optional[typing.List[core.File]]` — See core.File for more documentatio
-**description:** `typing.Optional[str]` — How would you describe the voice? +**description:** `typing.Optional[str]` — A description of the voice.
@@ -3358,7 +3004,7 @@ typing.Optional[typing.List[core.File]]` — See core.File for more documentatio
-Add a sharing voice to your collection of voices in VoiceLab. +Add a shared voice to your collection of voices.
@@ -3446,7 +3092,7 @@ client.voices.add_sharing_voice(
-Gets a list of shared voices. +Retrieves a list of shared voices.
@@ -3494,7 +3140,7 @@ client.voices.get_shared(
-**category:** `typing.Optional[str]` — voice category used for filtering +**category:** `typing.Optional[VoicesGetSharedRequestCategory]` — Voice category used for filtering
@@ -3502,7 +3148,7 @@ client.voices.get_shared(
-**gender:** `typing.Optional[str]` — gender used for filtering +**gender:** `typing.Optional[str]` — Gender used for filtering
@@ -3510,7 +3156,7 @@ client.voices.get_shared(
-**age:** `typing.Optional[str]` — age used for filtering +**age:** `typing.Optional[str]` — Age used for filtering
@@ -3518,7 +3164,7 @@ client.voices.get_shared(
-**accent:** `typing.Optional[str]` — accent used for filtering +**accent:** `typing.Optional[str]` — Accent used for filtering
@@ -3526,7 +3172,7 @@ client.voices.get_shared(
-**language:** `typing.Optional[str]` — language used for filtering +**language:** `typing.Optional[str]` — Language used for filtering
@@ -3534,7 +3180,7 @@ client.voices.get_shared(
-**search:** `typing.Optional[str]` — search term used for filtering +**search:** `typing.Optional[str]` — Search term used for filtering
@@ -3542,7 +3188,7 @@ client.voices.get_shared(
-**use_cases:** `typing.Optional[typing.Union[str, typing.Sequence[str]]]` — use-case used for filtering +**use_cases:** `typing.Optional[typing.Union[str, typing.Sequence[str]]]` — Use-case used for filtering
@@ -3550,7 +3196,7 @@ client.voices.get_shared(
-**descriptives:** `typing.Optional[typing.Union[str, typing.Sequence[str]]]` — search term used for filtering +**descriptives:** `typing.Optional[typing.Union[str, typing.Sequence[str]]]` — Search term used for filtering
@@ -3590,7 +3236,7 @@ client.voices.get_shared(
-**sort:** `typing.Optional[str]` — sort criteria +**sort:** `typing.Optional[str]` — Sort criteria
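+
+A sketch combining several of the filters above (the concrete filter values are illustrative assumptions):
+
+```python
+from elevenlabs import ElevenLabs
+
+client = ElevenLabs(api_key="YOUR_API_KEY")
+shared = client.voices.get_shared(
+    gender="female",
+    age="young",
+    language="en",
+    search="narration",
+)
+```
+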
@@ -3676,7 +3322,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-**similarity_threshold:** `typing.Optional[float]` — Threshold for voice similarity between provided sample and library voices. Must be in range <0, 2>. The smaller the value the more similar voices will be returned. +**similarity_threshold:** `typing.Optional[float]` — Threshold for voice similarity between provided sample and library voices. Values range from 0 to 2. The smaller the value the more similar voices will be returned.
@@ -3684,7 +3330,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-**top_k:** `typing.Optional[int]` — Number of most similar voices to return. If similarity_threshold is provided, less than this number of voices may be returned. Must be in range <1, 100>. +**top_k:** `typing.Optional[int]` — Number of most similar voices to return. If similarity_threshold is provided, less than this number of voices may be returned. Values range from 1 to 100.
@@ -3818,12 +3464,12 @@ client.studio.create_podcast( model_id="21m00Tcm4TlvDq8ikWAM", mode=BodyCreatePodcastV1StudioPodcastsPostMode_Conversation( conversation=PodcastConversationModeData( - host_voice_id="host_voice_id", - guest_voice_id="guest_voice_id", + host_voice_id="aw1NgEzBg83R7vgmiJt6", + guest_voice_id="aw1NgEzBg83R7vgmiJt7", ), ), source=PodcastTextSource( - text="text", + text="This is a test podcast.", ), ) @@ -3849,7 +3495,7 @@ client.studio.create_podcast(
-**mode:** `BodyCreatePodcastV1StudioPodcastsPostMode` — The type of podcast to generate +**mode:** `BodyCreatePodcastV1StudioPodcastsPostMode` — The type of podcast to generate. Can be 'conversation', an interaction between two voices, or 'bulletin', a monologue.
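+
+For the 'bulletin' variant, a hedged sketch by analogy with the conversation types shown in the usage example; the `BodyCreatePodcastV1StudioPodcastsPostMode_Bulletin` and `PodcastBulletinModeData` names are assumptions following the same naming scheme:
+
+```python
+from elevenlabs import (
+    BodyCreatePodcastV1StudioPodcastsPostMode_Bulletin,
+    ElevenLabs,
+    PodcastBulletinModeData,
+    PodcastTextSource,
+)
+
+client = ElevenLabs(api_key="YOUR_API_KEY")
+client.studio.create_podcast(
+    model_id="21m00Tcm4TlvDq8ikWAM",
+    mode=BodyCreatePodcastV1StudioPodcastsPostMode_Bulletin(
+        bulletin=PodcastBulletinModeData(host_voice_id="aw1NgEzBg83R7vgmiJt6"),
+    ),
+    source=PodcastTextSource(text="This is a test podcast."),
+)
+```
+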
@@ -3974,12 +3620,12 @@ client.projects.create_podcast( model_id="21m00Tcm4TlvDq8ikWAM", mode=BodyCreatePodcastV1ProjectsPodcastCreatePostMode_Conversation( conversation=PodcastConversationModeData( - host_voice_id="host_voice_id", - guest_voice_id="guest_voice_id", + host_voice_id="aw1NgEzBg83R7vgmiJt6", + guest_voice_id="aw1NgEzBg83R7vgmiJt7", ), ), source=PodcastTextSource( - text="text", + text="This is a test podcast.", ), ) @@ -4005,7 +3651,7 @@ client.projects.create_podcast(
-**mode:** `BodyCreatePodcastV1ProjectsPodcastCreatePostMode` — The type of podcast to generate +**mode:** `BodyCreatePodcastV1ProjectsPodcastCreatePostMode` — The type of podcast to generate. Can be 'conversation', an interaction between two voices, or 'bulletin', a monologue.
@@ -4380,8 +4026,8 @@ ultra lossless - ultra quality output format, 705.6kbps with 44.1kHz sample rate This parameter controls text normalization with four modes: 'auto', 'on', 'apply_english' and 'off'. - When set to 'auto', the system will automatically decide whether to apply text normalization - (e.g., spelling out numbers). With 'on', text normalization will always be applied, while + When set to 'auto', the system will automatically decide whether to apply text normalization + (e.g., spelling out numbers). With 'on', text normalization will always be applied, while with 'off', it will be skipped. 'apply_english' is the same as 'on' but will assume that text is in English. @@ -5377,7 +5023,261 @@ client.projects.add_chapter_to_a_project(
-**project_id:** `str` — The ID of the Studio project. +**project_id:** `str` — The ID of the Studio project. + +
+
+ +
+
+ +**name:** `str` — The name of the chapter, used for identification only. + +
+
+ +
+
+
+**from_url:** `typing.Optional[str]` — An optional URL from which we will extract content to initialize the Studio project. If this is set, 'from_document' must be null. If neither 'from_url' nor 'from_document' is provided we will initialize the Studio project as blank.
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+ + + + + + +
+ +
client.projects.convert_chapter(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Starts conversion of a specific chapter. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from elevenlabs import ElevenLabs + +client = ElevenLabs( + api_key="YOUR_API_KEY", +) +client.projects.convert_chapter( + project_id="21m00Tcm4TlvDq8ikWAM", + chapter_id="21m00Tcm4TlvDq8ikWAM", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**project_id:** `str` — The ID of the Studio project. + +
+
+ +
+
+ +**chapter_id:** `str` — The ID of the chapter. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.projects.list_chapter_snapshots(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Gets information about all the snapshots of a chapter. Each snapshot can be downloaded as audio. Whenever a chapter is converted a snapshot will automatically be created. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from elevenlabs import ElevenLabs + +client = ElevenLabs( + api_key="YOUR_API_KEY", +) +client.projects.list_chapter_snapshots( + project_id="21m00Tcm4TlvDq8ikWAM", + chapter_id="21m00Tcm4TlvDq8ikWAM", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**project_id:** `str` — The ID of the Studio project. + +
+
+ +
+
+ +**chapter_id:** `str` — The ID of the chapter. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.projects.stream_chapter_audio(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Stream the audio from a chapter snapshot. Use `GET /v1/projects/{project_id}/chapters/{chapter_id}/snapshots` to return the chapter snapshots of a chapter. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from elevenlabs import ElevenLabs + +client = ElevenLabs( + api_key="YOUR_API_KEY", +) +client.projects.stream_chapter_audio( + project_id="21m00Tcm4TlvDq8ikWAM", + chapter_id="21m00Tcm4TlvDq8ikWAM", + chapter_snapshot_id="21m00Tcm4TlvDq8ikWAM", +) + +``` +
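The three chapter endpoints compose into a simple render loop: convert the chapter, look up its latest snapshot, then stream the snapshot's audio to disk. A minimal sketch, assuming conversion has finished before the snapshot is streamed, that the response exposes a `snapshots` list with a `chapter_snapshot_id` field, and that the streaming call yields audio chunks as `bytes`:

```python
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
project_id = "21m00Tcm4TlvDq8ikWAM"
chapter_id = "21m00Tcm4TlvDq8ikWAM"

# Kick off conversion; a snapshot is created automatically when it completes.
client.projects.convert_chapter(project_id=project_id, chapter_id=chapter_id)

# Once conversion has finished, fetch the snapshots and stream the newest one.
snapshots = client.projects.list_chapter_snapshots(
    project_id=project_id, chapter_id=chapter_id
)
latest = snapshots.snapshots[-1]  # assumed response shape

with open("chapter.mp3", "wb") as out:
    for chunk in client.projects.stream_chapter_audio(
        project_id=project_id,
        chapter_id=chapter_id,
        chapter_snapshot_id=latest.chapter_snapshot_id,  # assumed field name
        convert_to_mpeg=True,
    ):
        out.write(chunk)
```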
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**project_id:** `str` — The ID of the Studio project. + +
+
+ +
+
+ +**chapter_id:** `str` — The ID of the chapter.
@@ -5385,7 +5285,7 @@ client.projects.add_chapter_to_a_project(
-**name:** `str` — The name of the chapter, used for identification only. +**chapter_snapshot_id:** `str` — The ID of the chapter snapshot.
@@ -5393,7 +5293,7 @@ client.projects.add_chapter_to_a_project(
-**from_url:** `typing.Optional[str]` — An optional URL from which we will extract content to initialize the Studio project. If this is set, 'from_url' must be null. If neither 'from_url' or 'from_document' are provided we will initialize the Studio project as blank. +**convert_to_mpeg:** `typing.Optional[bool]` — Whether to convert the audio to mpeg format.
@@ -5413,7 +5313,7 @@ client.projects.add_chapter_to_a_project(
-
client.projects.convert_chapter(...) +
client.projects.update_pronunciation_dictionaries(...)
@@ -5425,7 +5325,7 @@ client.projects.add_chapter_to_a_project(
-Starts conversion of a specific chapter.
+Updates the set of pronunciation dictionaries acting on a project. This will automatically mark text within this project as requiring reconversion where the new dictionary would apply or the old one no longer does.
@@ -5440,14 +5340,19 @@ Starts conversion of a specific chapter.
```python -from elevenlabs import ElevenLabs +from elevenlabs import ElevenLabs, PronunciationDictionaryVersionLocator client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.projects.convert_chapter( +client.projects.update_pronunciation_dictionaries( project_id="21m00Tcm4TlvDq8ikWAM", - chapter_id="21m00Tcm4TlvDq8ikWAM", + pronunciation_dictionary_locators=[ + PronunciationDictionaryVersionLocator( + pronunciation_dictionary_id="pronunciation_dictionary_id", + version_id="version_id", + ) + ], ) ``` @@ -5472,7 +5377,15 @@ client.projects.convert_chapter(
-**chapter_id:** `str` — The ID of the chapter. +**pronunciation_dictionary_locators:** `typing.Sequence[PronunciationDictionaryVersionLocator]` — A list of pronunciation dictionary locators (pronunciation_dictionary_id, version_id) encoded as a list of JSON strings for pronunciation dictionaries to be applied to the text. A list of json encoded strings is required as adding projects may occur through formData as opposed to jsonBody. To specify multiple dictionaries use multiple --form lines in your curl, such as --form 'pronunciation_dictionary_locators="{\"pronunciation_dictionary_id\":\"Vmd4Zor6fplcA7WrINey\",\"version_id\":\"hRPaxjlTdR7wFMhV4w0b\"}"' --form 'pronunciation_dictionary_locators="{\"pronunciation_dictionary_id\":\"JzWtcGQMJ6bnlWwyMo7e\",\"version_id\":\"lbmwxiLu4q6txYxgdZqn\"}"'. Note that multiple dictionaries are not currently supported by our UI which will only show the first. + +
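Because this endpoint may be called with form data, each locator travels as a JSON-encoded string; the SDK performs that encoding for you when you pass `PronunciationDictionaryVersionLocator` objects, as in the usage example above. A sketch of how the raw `--form` values from the description are built (IDs are the placeholder values from the description):

```python
import json

# Two dictionaries, as in the curl example in the parameter description.
locators = [
    {"pronunciation_dictionary_id": "Vmd4Zor6fplcA7WrINey", "version_id": "hRPaxjlTdR7wFMhV4w0b"},
    {"pronunciation_dictionary_id": "JzWtcGQMJ6bnlWwyMo7e", "version_id": "lbmwxiLu4q6txYxgdZqn"},
]
for locator in locators:
    encoded = json.dumps(locator, separators=(",", ":"))  # the JSON-encoded string
    escaped = encoded.replace('"', '\\"')                 # escape for the quoted form value
    print(f"--form 'pronunciation_dictionary_locators=\"{escaped}\"'")
```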
+
+ +
+
+ +**invalidate_affected_text:** `typing.Optional[bool]` — This will automatically mark text in this project for reconversion when the new dictionary applies or the old one no longer does.
@@ -5492,7 +5405,8 @@ client.projects.convert_chapter(
-
client.projects.list_chapter_snapshots(...) +## Dubbing +
client.dubbing.get_dubbing_resource(...)
@@ -5504,7 +5418,7 @@ client.projects.convert_chapter(
-Gets information about all the snapshots of a chapter, each snapshot corresponds can be downloaded as audio. Whenever a chapter is converted a snapshot will be automatically created. +Given a dubbing ID generated from the '/v1/dubbing' endpoint with studio enabled, returns the dubbing resource.
@@ -5524,9 +5438,8 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.projects.list_chapter_snapshots( - project_id="21m00Tcm4TlvDq8ikWAM", - chapter_id="21m00Tcm4TlvDq8ikWAM", +client.dubbing.get_dubbing_resource( + dubbing_id="dubbing_id", ) ``` @@ -5543,15 +5456,7 @@ client.projects.list_chapter_snapshots(
-**project_id:** `str` — The ID of the Studio project. - -
-
- -
-
- -**chapter_id:** `str` — The ID of the chapter. +**dubbing_id:** `str` — ID of the dubbing project.
@@ -5571,7 +5476,7 @@ client.projects.list_chapter_snapshots(
-
client.projects.stream_chapter_audio(...) +
client.dubbing.add_language_to_resource(...)
@@ -5583,7 +5488,7 @@ client.projects.list_chapter_snapshots(
-Stream the audio from a chapter snapshot. Use `GET /v1/projects/{project_id}/chapters/{chapter_id}/snapshots` to return the chapter snapshots of a chapter.
+Adds the given ElevenLabs Turbo V2/V2.5 language code to the resource. Does not automatically generate transcripts/translations/audio.
@@ -5603,10 +5508,9 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.projects.stream_chapter_audio( - project_id="21m00Tcm4TlvDq8ikWAM", - chapter_id="21m00Tcm4TlvDq8ikWAM", - chapter_snapshot_id="21m00Tcm4TlvDq8ikWAM", +client.dubbing.add_language_to_resource( + dubbing_id="dubbing_id", + language="language", ) ``` @@ -5623,23 +5527,7 @@ client.projects.stream_chapter_audio(
-**project_id:** `str` — The ID of the Studio project. - -
-
- -
-
- -**chapter_id:** `str` — The ID of the chapter. - -
-
- -
-
- -**chapter_snapshot_id:** `str` — The ID of the chapter snapshot. +**dubbing_id:** `str` — ID of the dubbing project.
@@ -5647,7 +5535,7 @@ client.projects.stream_chapter_audio(
-**convert_to_mpeg:** `typing.Optional[bool]` — Whether to convert the audio to mpeg format.
+**language:** `str` — The target language.
@@ -5667,7 +5555,7 @@ client.projects.stream_chapter_audio(
-
client.projects.update_pronunciation_dictionaries(...) +
client.dubbing.create_segment_for_speaker(...)
@@ -5679,7 +5567,7 @@ client.projects.stream_chapter_audio(
-Updates the set of pronunciation dictionaries acting on a project. This will automatically mark text within this project as requiring reconverting where the new dictionary would apply or the old one no longer does.
+Creates a new segment in a dubbing resource with a start and end time for the speaker in every available language. Does not automatically generate transcripts/translations/audio.
@@ -5694,19 +5582,16 @@ Updates the set of pronunciation dictionaries acting on a project. This will aut
```python -from elevenlabs import ElevenLabs, PronunciationDictionaryVersionLocator +from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.projects.update_pronunciation_dictionaries( - project_id="21m00Tcm4TlvDq8ikWAM", - pronunciation_dictionary_locators=[ - PronunciationDictionaryVersionLocator( - pronunciation_dictionary_id="pronunciation_dictionary_id", - version_id="version_id", - ) - ], +client.dubbing.create_segment_for_speaker( + dubbing_id="dubbing_id", + speaker_id="speaker_id", + start_time=1.1, + end_time=1.1, ) ``` @@ -5723,7 +5608,7 @@ client.projects.update_pronunciation_dictionaries(
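A segment created this way starts empty; the transcription, translation and dubbing endpoints documented below fill it in. A minimal sketch of that pipeline (IDs are placeholders, and the `new_segment` response field is assumed):

```python
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
dubbing_id = "dubbing_id"

# 1. Create an empty segment for a speaker (no transcript/translation/audio yet).
segment = client.dubbing.create_segment_for_speaker(
    dubbing_id=dubbing_id,
    speaker_id="speaker_id",
    start_time=1.1,
    end_time=2.5,
)
segment_id = segment.new_segment  # assumed response field

# 2. Transcribe, translate, then dub just that segment.
client.dubbing.transcribe_segments(dubbing_id=dubbing_id, segments=[segment_id])
client.dubbing.translate_segments(
    dubbing_id=dubbing_id, segments=[segment_id], languages=["es"]
)
client.dubbing.dub_segments(
    dubbing_id=dubbing_id, segments=[segment_id], languages=["es"]
)
```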
-**project_id:** `str` — The ID of the Studio project. +**dubbing_id:** `str` — ID of the dubbing project.
@@ -5731,7 +5616,7 @@ client.projects.update_pronunciation_dictionaries(
-**pronunciation_dictionary_locators:** `typing.Sequence[PronunciationDictionaryVersionLocator]` — A list of pronunciation dictionary locators (pronunciation_dictionary_id, version_id) encoded as a list of JSON strings for pronunciation dictionaries to be applied to the text. A list of json encoded strings is required as adding projects may occur through formData as opposed to jsonBody. To specify multiple dictionaries use multiple --form lines in your curl, such as --form 'pronunciation_dictionary_locators="{\"pronunciation_dictionary_id\":\"Vmd4Zor6fplcA7WrINey\",\"version_id\":\"hRPaxjlTdR7wFMhV4w0b\"}"' --form 'pronunciation_dictionary_locators="{\"pronunciation_dictionary_id\":\"JzWtcGQMJ6bnlWwyMo7e\",\"version_id\":\"lbmwxiLu4q6txYxgdZqn\"}"'. Note that multiple dictionaries are not currently supported by our UI which will only show the first. +**speaker_id:** `str` — ID of the speaker.
@@ -5739,7 +5624,23 @@ client.projects.update_pronunciation_dictionaries(
-**invalidate_affected_text:** `typing.Optional[bool]` — This will automatically mark text in this project for reconversion when the new dictionary applies or the old one no longer does. +**start_time:** `float` + +
+
+ +
+
+ +**end_time:** `float` + +
+
+ +
+
+ +**text:** `typing.Optional[str]`
@@ -5759,8 +5660,7 @@ client.projects.update_pronunciation_dictionaries(
-## Dubbing -
client.dubbing.dub_a_video_or_an_audio_file(...) +
client.dubbing.update_segment_language(...)
@@ -5772,7 +5672,7 @@ client.projects.update_pronunciation_dictionaries(
-Dubs provided audio or video file into given language. +Modifies a single segment with new text and/or start/end times. Will update the values for only a specific language of a segment. Does not automatically regenerate the dub.
@@ -5792,8 +5692,10 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.dubbing.dub_a_video_or_an_audio_file( - target_lang="target_lang", +client.dubbing.update_segment_language( + dubbing_id="dubbing_id", + segment_id="segment_id", + language="language", ) ``` @@ -5810,57 +5712,7 @@ client.dubbing.dub_a_video_or_an_audio_file(
-**target_lang:** `str` — The Target language to dub the content into. - -
-
- -
-
- -**file:** `from __future__ import annotations - -typing.Optional[core.File]` — See core.File for more documentation - -
-
- -
-
- -**name:** `typing.Optional[str]` — Name of the dubbing project. - -
-
- -
-
- -**source_url:** `typing.Optional[str]` — URL of the source video/audio file. - -
-
- -
-
- -**source_lang:** `typing.Optional[str]` — Source language. - -
-
- -
-
- -**num_speakers:** `typing.Optional[int]` — Number of speakers to use for the dubbing. Set to 0 to automatically detect the number of speakers - -
-
- -
-
- -**watermark:** `typing.Optional[bool]` — Whether to apply watermark to the output video. +**dubbing_id:** `str` — ID of the dubbing project.
@@ -5868,7 +5720,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-**start_time:** `typing.Optional[int]` — Start time of the source video/audio file.
+**segment_id:** `str` — ID of the segment.
@@ -5876,7 +5728,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-**end_time:** `typing.Optional[int]` — End time of the source video/audio file. +**language:** `str` — ID of the language.
@@ -5884,7 +5736,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-**highest_resolution:** `typing.Optional[bool]` — Whether to use the highest resolution available. +**start_time:** `typing.Optional[float]`
@@ -5892,7 +5744,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-**drop_background_audio:** `typing.Optional[bool]` — An advanced setting. Whether to drop background audio from the final dub. This can improve dub quality where it's known that audio shouldn't have a background track such as for speeches or monologues. +**end_time:** `typing.Optional[float]`
@@ -5900,7 +5752,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-**use_profanity_filter:** `typing.Optional[bool]` — [BETA] Whether transcripts should have profanities censored with the words '[censored]' +**text:** `typing.Optional[str]`
@@ -5920,7 +5772,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-
client.dubbing.get_dubbing_project_metadata(...) +
client.dubbing.delete_segment(...)
@@ -5932,7 +5784,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-Returns metadata about a dubbing project, including whether it's still in progress or not +Deletes a single segment from the dubbing.
@@ -5952,8 +5804,9 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.dubbing.get_dubbing_project_metadata( +client.dubbing.delete_segment( dubbing_id="dubbing_id", + segment_id="segment_id", ) ``` @@ -5978,6 +5831,14 @@ client.dubbing.get_dubbing_project_metadata(
+**segment_id:** `str` — ID of the segment + +
+
+ +
+
+ **request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -5990,7 +5851,7 @@ client.dubbing.get_dubbing_project_metadata(
-
client.dubbing.delete_dubbing_project(...) +
client.dubbing.transcribe_segments(...)
@@ -6002,7 +5863,7 @@ client.dubbing.get_dubbing_project_metadata(
-Deletes a dubbing project.
+Regenerates the transcriptions for the specified segments. Does not automatically regenerate translations or dubs.
@@ -6022,8 +5883,9 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.dubbing.delete_dubbing_project( +client.dubbing.transcribe_segments( dubbing_id="dubbing_id", + segments=["segments"], ) ``` @@ -6048,6 +5910,14 @@ client.dubbing.delete_dubbing_project(
+**segments:** `typing.Sequence[str]` — Transcribe this specific list of segments. + +
+
+ +
+
+ **request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -6060,7 +5930,7 @@ client.dubbing.delete_dubbing_project(
-
client.dubbing.get_transcript_for_dub(...) +
client.dubbing.translate_segments(...)
@@ -6072,7 +5942,7 @@ client.dubbing.delete_dubbing_project(
-Returns transcript for the dub as an SRT file.
+Regenerates the translations for either the entire resource or the specified segments/languages. Will automatically transcribe missing transcriptions. Will not automatically regenerate the dubs.
@@ -6092,9 +5962,10 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.dubbing.get_transcript_for_dub( +client.dubbing.translate_segments( dubbing_id="dubbing_id", - language_code="language_code", + segments=["segments"], + languages=["languages"], ) ``` @@ -6119,7 +5990,7 @@ client.dubbing.get_transcript_for_dub(
-**language_code:** `str` — ID of the language. +**segments:** `typing.Sequence[str]` — Translate only this list of segments.
@@ -6127,7 +5998,7 @@ client.dubbing.get_transcript_for_dub(
-**format_type:** `typing.Optional[DubbingGetTranscriptForDubRequestFormatType]` — Format to use for the subtitle file, either 'srt' or 'webvtt' +**languages:** `typing.Sequence[str]` — Translate only these languages for each segment.
@@ -6147,8 +6018,7 @@ client.dubbing.get_transcript_for_dub(
-## models -
client.models.get_all() +
client.dubbing.dub_segments(...)
@@ -6160,7 +6030,7 @@ client.dubbing.get_transcript_for_dub(
-Gets a list of available models.
+Regenerates the dubs for either the entire resource or the specified segments/languages. Will automatically transcribe and translate any missing transcriptions and translations.
@@ -6180,7 +6050,11 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.models.get_all() +client.dubbing.dub_segments( + dubbing_id="dubbing_id", + segments=["segments"], + languages=["languages"], +) ``` @@ -6196,6 +6070,30 @@ client.models.get_all()
+**dubbing_id:** `str` — ID of the dubbing project. + +
+
+ +
+
+ +**segments:** `typing.Sequence[str]` — Dub only this list of segments. + +
+
+ +
+
+ +**languages:** `typing.Sequence[str]` — Dub only these languages for each segment. + +
+
+ +
+
+ **request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -6208,8 +6106,7 @@ client.models.get_all()
-## AudioNative -
client.audio_native.create(...) +
client.dubbing.dub_a_video_or_an_audio_file(...)
@@ -6221,7 +6118,7 @@ client.models.get_all()
-Creates AudioNative enabled project, optionally starts conversion and returns project id and embeddable html snippet.
+Dubs a provided audio or video file into the given language.
@@ -6241,8 +6138,8 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.audio_native.create( - name="name", +client.dubbing.dub_a_video_or_an_audio_file( + target_lang="target_lang", ) ``` @@ -6259,7 +6156,7 @@ client.audio_native.create(
-**name:** `str` — Project name.
+**target_lang:** `str` — The target language to dub the content into.
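A sketch dubbing a remote video into Spanish, using the `source_url`, `num_speakers` and `drop_background_audio` parameters described below (the URL is hypothetical, and the `dubbing_id` response field is assumed):

```python
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
dub = client.dubbing.dub_a_video_or_an_audio_file(
    target_lang="es",
    source_url="https://example.com/video.mp4",  # hypothetical URL
    num_speakers=0,              # 0 = auto-detect the number of speakers
    drop_background_audio=True,  # advanced: strip the background track
)
# Poll get_dubbing_project_metadata with the returned ID until it completes.
print(dub.dubbing_id)  # assumed response field
```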
@@ -6267,7 +6164,9 @@ client.audio_native.create(
-**image:** `typing.Optional[str]` — (Deprecated) Image URL used in the player. If not provided, default image set in the Player settings is used. +**file:** `from __future__ import annotations + +typing.Optional[core.File]` — See core.File for more documentation
@@ -6275,7 +6174,7 @@ client.audio_native.create(
-**author:** `typing.Optional[str]` — Author used in the player and inserted at the start of the uploaded article. If not provided, the default author set in the Player settings is used. +**name:** `typing.Optional[str]` — Name of the dubbing project.
@@ -6283,7 +6182,7 @@ client.audio_native.create(
-**title:** `typing.Optional[str]` — Title used in the player and inserted at the top of the uploaded article. If not provided, the default title set in the Player settings is used. +**source_url:** `typing.Optional[str]` — URL of the source video/audio file.
@@ -6291,7 +6190,7 @@ client.audio_native.create(
-**small:** `typing.Optional[bool]` — (Deprecated) Whether to use small player or not. If not provided, default value set in the Player settings is used. +**source_lang:** `typing.Optional[str]` — Source language.
@@ -6299,7 +6198,7 @@ client.audio_native.create(
-**text_color:** `typing.Optional[str]` — Text color used in the player. If not provided, default text color set in the Player settings is used. +**num_speakers:** `typing.Optional[int]` — Number of speakers to use for the dubbing. Set to 0 to automatically detect the number of speakers
@@ -6307,7 +6206,7 @@ client.audio_native.create(
-**background_color:** `typing.Optional[str]` — Background color used in the player. If not provided, default background color set in the Player settings is used. +**watermark:** `typing.Optional[bool]` — Whether to apply watermark to the output video.
@@ -6315,7 +6214,7 @@ client.audio_native.create(
-**sessionization:** `typing.Optional[int]` — (Deprecated) Specifies for how many minutes to persist the session across page reloads. If not provided, default sessionization set in the Player settings is used. +**start_time:** `typing.Optional[int]` — Start time of the source video/audio file.
@@ -6323,7 +6222,15 @@ client.audio_native.create(
-**voice_id:** `typing.Optional[str]` — Voice ID used to voice the content. If not provided, default voice ID set in the Player settings is used. +**end_time:** `typing.Optional[int]` — End time of the source video/audio file. + +
+
+ +
+
+ +**highest_resolution:** `typing.Optional[bool]` — Whether to use the highest resolution available.
@@ -6331,7 +6238,7 @@ client.audio_native.create(
-**model_id:** `typing.Optional[str]` — TTS Model ID used in the player. If not provided, default model ID set in the Player settings is used. +**drop_background_audio:** `typing.Optional[bool]` — An advanced setting. Whether to drop background audio from the final dub. This can improve dub quality where it's known that audio shouldn't have a background track such as for speeches or monologues.
@@ -6339,9 +6246,7 @@ client.audio_native.create(
-**file:** `from __future__ import annotations - -typing.Optional[core.File]` — See core.File for more documentation +**use_profanity_filter:** `typing.Optional[bool]` — [BETA] Whether transcripts should have profanities censored with the words '[censored]'
@@ -6349,7 +6254,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-**auto_convert:** `typing.Optional[bool]` — Whether to auto convert the project to audio or not.
+**dubbing_studio:** `typing.Optional[bool]` — Whether to prepare the dub for edits in Dubbing Studio or as a dubbing resource.
@@ -6369,7 +6274,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-
client.audio_native.get_settings(...) +
client.dubbing.get_dubbing_project_metadata(...)
@@ -6381,7 +6286,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-Get player settings for the specific project. +Returns metadata about a dubbing project, including whether it's still in progress or not
@@ -6401,8 +6306,8 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.audio_native.get_settings( - project_id="21m00Tcm4TlvDq8ikWAM", +client.dubbing.get_dubbing_project_metadata( + dubbing_id="dubbing_id", ) ``` @@ -6419,7 +6324,7 @@ client.audio_native.get_settings(
-**project_id:** `str` — The ID of the Studio project. +**dubbing_id:** `str` — ID of the dubbing project.
@@ -6439,7 +6344,7 @@ client.audio_native.get_settings(
-
client.audio_native.update_content(...) +
client.dubbing.delete_dubbing_project(...)
@@ -6451,7 +6356,7 @@ client.audio_native.get_settings(
-Updates content for the specific AudioNative Project. +Deletes a dubbing project.
@@ -6471,8 +6376,8 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.audio_native.update_content( - project_id="21m00Tcm4TlvDq8ikWAM", +client.dubbing.delete_dubbing_project( + dubbing_id="dubbing_id", ) ``` @@ -6489,33 +6394,7 @@ client.audio_native.update_content(
-**project_id:** `str` — The ID of the Studio project. - -
-
- -
-
- -**file:** `from __future__ import annotations - -typing.Optional[core.File]` — See core.File for more documentation - -
-
- -
-
- -**auto_convert:** `typing.Optional[bool]` — Whether to auto convert the project to audio or not. - -
-
- -
-
- -**auto_publish:** `typing.Optional[bool]` — Whether to auto publish the new project snapshot after it's converted. +**dubbing_id:** `str` — ID of the dubbing project.
@@ -6535,8 +6414,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-## Usage -
client.usage.get_characters_usage_metrics(...) +
client.dubbing.get_transcript_for_dub(...)
@@ -6548,7 +6426,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-Returns the credit usage metrics for the current user or the entire workspace they are part of. The response will return a time axis with unix timestamps for each day and daily usage along that axis. The usage will be broken down by the specified breakdown type. For example, breakdown type "voice" will return the usage of each voice along the time axis.
+Returns the transcript for the dub as an SRT or WebVTT file.
@@ -6568,9 +6446,9 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.usage.get_characters_usage_metrics( - start_unix=1, - end_unix=1, +client.dubbing.get_transcript_for_dub( + dubbing_id="dubbing_id", + language_code="language_code", ) ``` @@ -6587,7 +6465,7 @@ client.usage.get_characters_usage_metrics(
-**start_unix:** `int` — UTC Unix timestamp for the start of the usage window, in milliseconds. To include the first day of the window, the timestamp should be at 00:00:00 of that day. +**dubbing_id:** `str` — ID of the dubbing project.
@@ -6595,7 +6473,7 @@ client.usage.get_characters_usage_metrics(
-**end_unix:** `int` — UTC Unix timestamp for the end of the usage window, in milliseconds. To include the last day of the window, the timestamp should be at 23:59:59 of that day. +**language_code:** `str` — ID of the language.
@@ -6603,7 +6481,7 @@ client.usage.get_characters_usage_metrics(
-**include_workspace_metrics:** `typing.Optional[bool]` — Whether or not to include the statistics of the entire workspace. +**format_type:** `typing.Optional[DubbingGetTranscriptForDubRequestFormatType]` — Format to use for the subtitle file, either 'srt' or 'webvtt'
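For example, to fetch the transcript as WebVTT instead of SRT (IDs are placeholders):

```python
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
transcript = client.dubbing.get_transcript_for_dub(
    dubbing_id="dubbing_id",
    language_code="es",
    format_type="webvtt",
)
```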
@@ -6611,11 +6489,64 @@ client.usage.get_characters_usage_metrics(
-**breakdown_type:** `typing.Optional[BreakdownTypes]` — How to break down the information. Cannot be "user" if include_workspace_metrics is False. +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +
+
+ +
+ + + + +
+ +## models +
client.models.get_all() +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Gets a list of available models. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from elevenlabs import ElevenLabs + +client = ElevenLabs( + api_key="YOUR_API_KEY", +) +client.models.get_all() + +``` +
+
+#### ⚙️ Parameters + +
+
+
@@ -6631,8 +6562,8 @@ client.usage.get_characters_usage_metrics(
-## PronunciationDictionary -
client.pronunciation_dictionary.add_from_file(...) +## AudioNative +
client.audio_native.create(...)
@@ -6644,7 +6575,7 @@ client.usage.get_characters_usage_metrics(
-Creates a new pronunciation dictionary from a lexicon .PLS file
+Creates an Audio Native enabled project, optionally starts conversion and returns the project ID and an embeddable HTML snippet.
@@ -6664,7 +6595,7 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.pronunciation_dictionary.add_from_file( +client.audio_native.create( name="name", ) @@ -6682,7 +6613,7 @@ client.pronunciation_dictionary.add_from_file(
-**name:** `str` — The name of the pronunciation dictionary, used for identification only. +**name:** `str` — Project name.
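A fuller sketch using several of the optional player fields documented below and uploading the article body as a file (the path and values are hypothetical):

```python
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
with open("article.html", "rb") as article:  # hypothetical file
    project = client.audio_native.create(
        name="My article",
        title="My article",    # shown at the top of the player
        author="Jane Doe",     # inserted at the start of the article
        file=article,
        auto_convert=True,     # start audio conversion immediately
    )
```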
@@ -6690,9 +6621,7 @@ client.pronunciation_dictionary.add_from_file(
-**file:** `from __future__ import annotations - -typing.Optional[core.File]` — See core.File for more documentation +**image:** `typing.Optional[str]` — (Deprecated) Image URL used in the player. If not provided, default image set in the Player settings is used.
@@ -6700,7 +6629,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-**description:** `typing.Optional[str]` — A description of the pronunciation dictionary, used for identification only. +**author:** `typing.Optional[str]` — Author used in the player and inserted at the start of the uploaded article. If not provided, the default author set in the Player settings is used.
@@ -6708,7 +6637,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-**workspace_access:** `typing.Optional[PronunciationDictionaryAddFromFileRequestWorkspaceAccess]` — Should be one of 'editor' or 'viewer'. If not provided, defaults to no access. +**title:** `typing.Optional[str]` — Title used in the player and inserted at the top of the uploaded article. If not provided, the default title set in the Player settings is used.
@@ -6716,78 +6645,57 @@ typing.Optional[core.File]` — See core.File for more documentation
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**small:** `typing.Optional[bool]` — (Deprecated) Whether to use small player or not. If not provided, default value set in the Player settings is used.
- -
+
+
+**text_color:** `typing.Optional[str]` — Text color used in the player. If not provided, default text color set in the Player settings is used. +
-
-
client.pronunciation_dictionary.add_rules(...)
-#### 📝 Description - -
-
+**background_color:** `typing.Optional[str]` — Background color used in the player. If not provided, default background color set in the Player settings is used. + +
+
-Add rules to the pronunciation dictionary -
-
+**sessionization:** `typing.Optional[int]` — (Deprecated) Specifies for how many minutes to persist the session across page reloads. If not provided, default sessionization set in the Player settings is used. +
-#### 🔌 Usage -
+**voice_id:** `typing.Optional[str]` — Voice ID used to voice the content. If not provided, default voice ID set in the Player settings is used. + +
+
+
-```python -from elevenlabs import ElevenLabs -from elevenlabs.pronunciation_dictionary import ( - PronunciationDictionaryRule_Alias, -) - -client = ElevenLabs( - api_key="YOUR_API_KEY", -) -client.pronunciation_dictionary.add_rules( - pronunciation_dictionary_id="21m00Tcm4TlvDq8ikWAM", - rules=[ - PronunciationDictionaryRule_Alias( - string_to_replace="string_to_replace", - alias="alias", - ) - ], -) - -``` -
-
+**model_id:** `typing.Optional[str]` — TTS Model ID used in the player. If not provided, default model ID set in the Player settings is used. + -#### ⚙️ Parameters -
-
-
+**file:** `from __future__ import annotations -**pronunciation_dictionary_id:** `str` — The id of the pronunciation dictionary +typing.Optional[core.File]` — See core.File for more documentation
@@ -6795,11 +6703,7 @@ client.pronunciation_dictionary.add_rules(
-**rules:** `typing.Sequence[PronunciationDictionaryRule]` - -List of pronunciation rules. Rule can be either: - an alias rule: {'string_to_replace': 'a', 'type': 'alias', 'alias': 'b', } - or a phoneme rule: {'string_to_replace': 'a', 'type': 'phoneme', 'phoneme': 'b', 'alphabet': 'ipa' } +**auto_convert:** `typing.Optional[bool]` — Whether to auto convert the project to audio or not.
@@ -6819,7 +6723,7 @@ List of pronunciation rules. Rule can be either:
-
client.pronunciation_dictionary.remove_rules(...) +
client.audio_native.get_settings(...)
@@ -6831,7 +6735,7 @@ List of pronunciation rules. Rule can be either:
-Remove rules from the pronunciation dictionary +Get player settings for the specific project.
@@ -6851,9 +6755,8 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.pronunciation_dictionary.remove_rules( - pronunciation_dictionary_id="21m00Tcm4TlvDq8ikWAM", - rule_strings=["rule_strings"], +client.audio_native.get_settings( + project_id="21m00Tcm4TlvDq8ikWAM", ) ``` @@ -6870,15 +6773,7 @@ client.pronunciation_dictionary.remove_rules(
-**pronunciation_dictionary_id:** `str` — The id of the pronunciation dictionary - -
-
- -
-
- -**rule_strings:** `typing.Sequence[str]` — List of strings to remove from the pronunciation dictionary. +**project_id:** `str` — The ID of the Studio project.
@@ -6898,7 +6793,7 @@ client.pronunciation_dictionary.remove_rules(
-
client.pronunciation_dictionary.download(...) +
client.audio_native.update_content(...)
@@ -6910,7 +6805,7 @@ client.pronunciation_dictionary.remove_rules(
-Get PLS file with a pronunciation dictionary version rules +Updates content for the specific AudioNative Project.
@@ -6930,9 +6825,8 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.pronunciation_dictionary.download( - dictionary_id="Fm6AvNgS53NXe6Kqxp3e", - version_id="KZFyRUq3R6kaqhKI146w", +client.audio_native.update_content( + project_id="21m00Tcm4TlvDq8ikWAM", ) ``` @@ -6941,15 +6835,33 @@ client.pronunciation_dictionary.download(
-#### ⚙️ Parameters - -
-
- +#### ⚙️ Parameters + +
+
+ +
+
+ +**project_id:** `str` — The ID of the Studio project. + +
+
+ +
+
+ +**file:** `from __future__ import annotations + +typing.Optional[core.File]` — See core.File for more documentation + +
+
+
-**dictionary_id:** `str` — The id of the pronunciation dictionary +**auto_convert:** `typing.Optional[bool]` — Whether to auto convert the project to audio or not.
@@ -6957,7 +6869,7 @@ client.pronunciation_dictionary.download(
-**version_id:** `str` — The id of the version of the pronunciation dictionary +**auto_publish:** `typing.Optional[bool]` — Whether to auto publish the new project snapshot after it's converted.
@@ -6977,7 +6889,8 @@ client.pronunciation_dictionary.download(
-
client.pronunciation_dictionary.get(...) +## Usage +
client.usage.get_characters_usage_metrics(...)
@@ -6989,7 +6902,7 @@ client.pronunciation_dictionary.download(
-Get metadata for a pronunciation dictionary +Returns the credit usage metrics for the current user or the entire workspace they are part of. The response will return a time axis with unix timestamps for each day and daily usage along that axis. The usage will be broken down by the specified breakdown type. For example, breakdown type "voice" will return the usage of each voice along the time axis.
@@ -7009,8 +6922,9 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.pronunciation_dictionary.get( - pronunciation_dictionary_id="Fm6AvNgS53NXe6Kqxp3e", +client.usage.get_characters_usage_metrics( + start_unix=1, + end_unix=1, ) ``` @@ -7027,7 +6941,31 @@ client.pronunciation_dictionary.get(
-**pronunciation_dictionary_id:** `str` — The id of the pronunciation dictionary +**start_unix:** `int` — UTC Unix timestamp for the start of the usage window, in milliseconds. To include the first day of the window, the timestamp should be at 00:00:00 of that day. + +
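Both bounds are millisecond timestamps. A quick way to build this value and the `end_unix` parameter described next, using only the standard library (assuming a UTC day window):

```python
from datetime import datetime, timezone

# First and last day of the window, at 00:00:00 and 23:59:59 UTC respectively.
start = datetime(2024, 1, 1, 0, 0, 0, tzinfo=timezone.utc)
end = datetime(2024, 1, 31, 23, 59, 59, tzinfo=timezone.utc)

start_unix = int(start.timestamp() * 1000)  # milliseconds, not seconds
end_unix = int(end.timestamp() * 1000)
```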
+
+ +
+
+ +**end_unix:** `int` — UTC Unix timestamp for the end of the usage window, in milliseconds. To include the last day of the window, the timestamp should be at 23:59:59 of that day. + +
+
+ +
+
+ +**include_workspace_metrics:** `typing.Optional[bool]` — Whether or not to include the statistics of the entire workspace. + +
+
+ +
+
+ +**breakdown_type:** `typing.Optional[BreakdownTypes]` — How to break down the information. Cannot be "user" if include_workspace_metrics is False.
@@ -7047,7 +6985,8 @@ client.pronunciation_dictionary.get(
-
client.pronunciation_dictionary.get_all(...) +## PronunciationDictionary +
client.pronunciation_dictionary.add_from_file(...)
@@ -7059,7 +6998,7 @@ client.pronunciation_dictionary.get(
-Get a list of the pronunciation dictionaries you have access to and their metadata +Creates a new pronunciation dictionary from a lexicon .PLS file
@@ -7079,8 +7018,8 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.pronunciation_dictionary.get_all( - page_size=1, +client.pronunciation_dictionary.add_from_file( + name="name", ) ``` @@ -7097,7 +7036,7 @@ client.pronunciation_dictionary.get_all(
-**cursor:** `typing.Optional[str]` — Used for fetching next page. Cursor is returned in the response. +**name:** `str` — The name of the pronunciation dictionary, used for identification only.
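A sketch that also uploads the lexicon itself via the `file` parameter described below (the path and values are hypothetical):

```python
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
with open("acronyms.pls", "rb") as lexicon:  # hypothetical .PLS file
    dictionary = client.pronunciation_dictionary.add_from_file(
        name="acronyms",
        file=lexicon,
        description="Company-specific pronunciations",
        workspace_access="editor",
    )
```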
@@ -7105,7 +7044,25 @@ client.pronunciation_dictionary.get_all(
-**page_size:** `typing.Optional[int]` — How many pronunciation dictionaries to return at maximum. Can not exceed 100, defaults to 30. +**file:** `from __future__ import annotations + +typing.Optional[core.File]` — See core.File for more documentation + +
+
+ +
+
+ +**description:** `typing.Optional[str]` — A description of the pronunciation dictionary, used for identification only. + +
+
+ +
+
+ +**workspace_access:** `typing.Optional[PronunciationDictionaryAddFromFileRequestWorkspaceAccess]` — Should be one of 'admin', 'editor' or 'viewer'. If not provided, defaults to no access.
@@ -7125,8 +7082,7 @@ client.pronunciation_dictionary.get_all(
-## Workspace -
client.workspace.search_user_groups(...) +
client.pronunciation_dictionary.add_rules(...)
@@ -7138,7 +7094,7 @@ client.pronunciation_dictionary.get_all(
-Searches for user groups in the workspace. Multiple or no groups may be returned. +Add rules to the pronunciation dictionary
@@ -7154,12 +7110,21 @@ Searches for user groups in the workspace. Multiple or no groups may be returned ```python from elevenlabs import ElevenLabs +from elevenlabs.pronunciation_dictionary import ( + PronunciationDictionaryRule_Alias, +) client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.workspace.search_user_groups( - name="name", +client.pronunciation_dictionary.add_rules( + pronunciation_dictionary_id="21m00Tcm4TlvDq8ikWAM", + rules=[ + PronunciationDictionaryRule_Alias( + string_to_replace="Thailand", + alias="tie-land", + ) + ], ) ``` @@ -7176,7 +7141,19 @@ client.workspace.search_user_groups(
-**name:** `str` — Name of the group to find. +**pronunciation_dictionary_id:** `str` — The id of the pronunciation dictionary + +
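The usage example in this section adds an alias rule; a phoneme rule for the `rules` parameter below follows the same pattern, assuming the generated variant is named `PronunciationDictionaryRule_Phoneme` (mirroring `PronunciationDictionaryRule_Alias`; the name is not confirmed by this reference):

```python
from elevenlabs import ElevenLabs
from elevenlabs.pronunciation_dictionary import (
    PronunciationDictionaryRule_Phoneme,  # assumed name
)

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
client.pronunciation_dictionary.add_rules(
    pronunciation_dictionary_id="21m00Tcm4TlvDq8ikWAM",
    rules=[
        PronunciationDictionaryRule_Phoneme(
            string_to_replace="tomato",
            phoneme="təˈmeɪtoʊ",
            alphabet="ipa",
        )
    ],
)
```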
+
+ +
+
+ +**rules:** `typing.Sequence[PronunciationDictionaryRule]` + +List of pronunciation rules. Rule can be either: + an alias rule: {'string_to_replace': 'a', 'type': 'alias', 'alias': 'b', } + or a phoneme rule: {'string_to_replace': 'a', 'type': 'phoneme', 'phoneme': 'b', 'alphabet': 'ipa' }
@@ -7196,7 +7173,7 @@ client.workspace.search_user_groups(
-
client.workspace.delete_member_from_user_group(...) +
client.pronunciation_dictionary.remove_rules(...)
@@ -7208,7 +7185,7 @@ client.workspace.search_user_groups(
-Removes a member from the specified group. This endpoint may only be called by workspace administrators. +Remove rules from the pronunciation dictionary
@@ -7228,9 +7205,9 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.workspace.delete_member_from_user_group( - group_id="group_id", - email="email", +client.pronunciation_dictionary.remove_rules( + pronunciation_dictionary_id="21m00Tcm4TlvDq8ikWAM", + rule_strings=["rule_strings"], ) ``` @@ -7247,7 +7224,7 @@ client.workspace.delete_member_from_user_group(
-**group_id:** `str` — The ID of the target group. +**pronunciation_dictionary_id:** `str` — The id of the pronunciation dictionary
@@ -7255,7 +7232,7 @@ client.workspace.delete_member_from_user_group(
-**email:** `str` — The email of the target workspace member. +**rule_strings:** `typing.Sequence[str]` — List of strings to remove from the pronunciation dictionary.
@@ -7275,7 +7252,7 @@ client.workspace.delete_member_from_user_group(
-
client.workspace.add_member_to_user_group(...) +
client.pronunciation_dictionary.download(...)
@@ -7287,7 +7264,7 @@ client.workspace.delete_member_from_user_group(
-Adds a member of your workspace to the specified group. This endpoint may only be called by workspace administrators.
+Get a PLS file with a pronunciation dictionary version's rules
@@ -7307,9 +7284,9 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.workspace.add_member_to_user_group( - group_id="group_id", - email="email", +client.pronunciation_dictionary.download( + dictionary_id="Fm6AvNgS53NXe6Kqxp3e", + version_id="KZFyRUq3R6kaqhKI146w", ) ``` @@ -7326,7 +7303,7 @@ client.workspace.add_member_to_user_group(
-**group_id:** `str` — The ID of the target group. +**dictionary_id:** `str` — The id of the pronunciation dictionary
@@ -7334,7 +7311,7 @@ client.workspace.add_member_to_user_group(
-**email:** `str` — The email of the target workspace member. +**version_id:** `str` — The id of the version of the pronunciation dictionary
@@ -7342,7 +7319,7 @@ client.workspace.add_member_to_user_group(
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. You can pass in configuration such as `chunk_size`, and more, to customize the request and response.
@@ -7354,7 +7331,7 @@ client.workspace.add_member_to_user_group(
-
client.workspace.invite_user(...) +
client.pronunciation_dictionary.get(...)
@@ -7366,7 +7343,7 @@ client.workspace.add_member_to_user_group(
-Sends an email invitation to join your workspace to the provided email. If the user doesn't have an account they will be prompted to create one. If the user accepts this invite they will be added as a user to your workspace and your subscription using one of your seats. This endpoint may only be called by workspace administrators. If the user is already in the workspace a 400 error will be returned. +Get metadata for a pronunciation dictionary
@@ -7386,8 +7363,8 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.workspace.invite_user( - email="john.doe@testmail.com", +client.pronunciation_dictionary.get( + pronunciation_dictionary_id="Fm6AvNgS53NXe6Kqxp3e", ) ``` @@ -7404,15 +7381,7 @@ client.workspace.invite_user(
-**email:** `str` — The email of the customer - -
-
- -
-
- -**group_ids:** `typing.Optional[typing.Sequence[str]]` — The group ids of the user +**pronunciation_dictionary_id:** `str` — The id of the pronunciation dictionary
@@ -7432,7 +7401,7 @@ client.workspace.invite_user(
-
client.workspace.invite_multiple_users(...) +
client.pronunciation_dictionary.get_all(...)
@@ -7444,7 +7413,7 @@ client.workspace.invite_user(
-Sends email invitations to join your workspace to the provided emails. Requires all email addresses to be part of a verified domain. If the users don't have an account they will be prompted to create one. If the users accept these invites they will be added as users to your workspace and your subscription using one of your seats. This endpoint may only be called by workspace administrators. +Get a list of the pronunciation dictionaries you have access to and their metadata
@@ -7464,8 +7433,8 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.workspace.invite_multiple_users( - emails=["emails"], +client.pronunciation_dictionary.get_all( + page_size=1, ) ``` @@ -7482,7 +7451,7 @@ client.workspace.invite_multiple_users(
-**emails:** `typing.Sequence[str]` — The email of the customer +**cursor:** `typing.Optional[str]` — Used for fetching next page. Cursor is returned in the response.
@@ -7490,7 +7459,7 @@ client.workspace.invite_multiple_users(
-**group_ids:** `typing.Optional[typing.Sequence[str]]` — The group ids of the user +**page_size:** `typing.Optional[int]` — How many pronunciation dictionaries to return at maximum. Can not exceed 100, defaults to 30.
@@ -7510,7 +7479,8 @@ client.workspace.invite_multiple_users(
-
client.workspace.delete_existing_invitation(...) +## Workspace +
client.workspace.search_user_groups(...)
@@ -7522,7 +7492,7 @@ client.workspace.invite_multiple_users(
-Invalidates an existing email invitation. The invitation will still show up in the inbox it has been delivered to, but activating it to join the workspace won't work. This endpoint may only be called by workspace administrators. +Searches for user groups in the workspace. Multiple or no groups may be returned.
@@ -7542,8 +7512,8 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.workspace.delete_existing_invitation( - email="john.doe@testmail.com", +client.workspace.search_user_groups( + name="name", ) ``` @@ -7560,7 +7530,7 @@ client.workspace.delete_existing_invitation(
-**email:** `str` — The email of the customer +**name:** `str` — Name of the group to find.
@@ -7580,7 +7550,7 @@ client.workspace.delete_existing_invitation(
-
client.workspace.update_member(...) +
client.workspace.delete_member_from_user_group(...)
@@ -7592,7 +7562,7 @@ client.workspace.delete_existing_invitation(
-Updates attributes of a workspace member. Apart from the email identifier, all parameters will remain unchanged unless specified. This endpoint may only be called by workspace administrators. +Removes a member from the specified group. This endpoint may only be called by workspace administrators.
@@ -7612,7 +7582,8 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.workspace.update_member( +client.workspace.delete_member_from_user_group( + group_id="group_id", email="email", ) @@ -7630,15 +7601,7 @@ client.workspace.update_member(
-**email:** `str` — Email of the target user. - -
-
- -
-
- -**is_locked:** `typing.Optional[bool]` — Whether to lock or unlock the user account. +**group_id:** `str` — The ID of the target group.
@@ -7646,7 +7609,7 @@ client.workspace.update_member(
-**workspace_role:** `typing.Optional[BodyUpdateMemberV1WorkspaceMembersPostWorkspaceRole]` — Role dictating permissions in the workspace. +**email:** `str` — The email of the target workspace member.
@@ -7666,8 +7629,7 @@ client.workspace.update_member(
-## SpeechToText -
client.speech_to_text.convert(...) +
client.workspace.add_member_to_user_group(...)
@@ -7679,7 +7641,7 @@ client.workspace.update_member(
-Transcribe an audio or video file. +Adds a member of your workspace to the specified group. This endpoint may only be called by workspace administrators.
@@ -7699,8 +7661,9 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.speech_to_text.convert( - model_id="model_id", +client.workspace.add_member_to_user_group( + group_id="group_id", + email="email", ) ``` @@ -7717,49 +7680,7 @@ client.speech_to_text.convert(
-**model_id:** `str` — The ID of the model to use for transcription, currently only 'scribe_v1' is available. - -
-
- -
-
- -**file:** `from __future__ import annotations - -core.File` — See core.File for more documentation - -
-
- -
-
- -**language_code:** `typing.Optional[str]` — An ISO-639-1 or ISO-639-3 language_code corresponding to the language of the audio file. Can sometimes improve transcription performance if known beforehand. Defaults to null, in this case the language is predicted automatically. - -
-
- -
-
- -**tag_audio_events:** `typing.Optional[bool]` — Whether to tag audio events like (laughter), (footsteps), etc. in the transcription. - -
-
- -
-
- -**num_speakers:** `typing.Optional[int]` — The maximum amount of speakers talking in the uploaded file. Can help with predicting who speaks when. The maximum amount of speakers that can be predicted is 32. Defaults to null, in this case the amount of speakers is set to the maximum value the model supports. - -
-
- -
-
- -**timestamps_granularity:** `typing.Optional[SpeechToTextConvertRequestTimestampsGranularity]` — The granularity of the timestamps in the transcription. 'word' provides word-level timestamps and 'character' provides character-level timestamps per word. +**group_id:** `str` — The ID of the target group.
@@ -7767,7 +7688,7 @@ core.File` — See core.File for more documentation
-**diarize:** `typing.Optional[bool]` — Whether to annotate which speaker is currently talking in the uploaded file. Enabling this will limit the maximum duration of your inputs to 8 minutes. +**email:** `str` — The email of the target workspace member.
@@ -7787,8 +7708,7 @@ core.File` — See core.File for more documentation
-## ConversationalAi -
client.conversational_ai.get_signed_url(...) +
client.workspace.invite_user(...)
@@ -7800,7 +7720,7 @@ core.File` — See core.File for more documentation
-Get a signed url to start a conversation with an agent with an agent that requires authorization +Sends an email invitation to join your workspace to the provided email. If the user doesn't have an account they will be prompted to create one. If the user accepts this invite they will be added as a user to your workspace and your subscription using one of your seats. This endpoint may only be called by workspace administrators. If the user is already in the workspace a 400 error will be returned.
@@ -7820,8 +7740,8 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.conversational_ai.get_signed_url( - agent_id="21m00Tcm4TlvDq8ikWAM", +client.workspace.invite_user( + email="john.doe@testmail.com", ) ``` @@ -7838,7 +7758,15 @@ client.conversational_ai.get_signed_url(
-**agent_id:** `str` — The id of the agent you're taking the action on. +**email:** `str` — The email of the customer + +
+
+ +
+
+ +**group_ids:** `typing.Optional[typing.Sequence[str]]` — The group ids of the user
@@ -7858,7 +7786,7 @@ client.conversational_ai.get_signed_url(
-
client.conversational_ai.create_agent(...) +
client.workspace.invite_multiple_users(...)
@@ -7870,7 +7798,7 @@ client.conversational_ai.get_signed_url(
-Create an agent from a config object +Sends email invitations to join your workspace to the provided emails. Requires all email addresses to be part of a verified domain. If the users don't have an account they will be prompted to create one. If the users accept these invites they will be added as users to your workspace and your subscription using one of your seats. This endpoint may only be called by workspace administrators.
@@ -7885,13 +7813,13 @@ Create an agent from a config object
```python -from elevenlabs import ConversationalConfig, ElevenLabs +from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.conversational_ai.create_agent( - conversation_config=ConversationalConfig(), +client.workspace.invite_multiple_users( + emails=["emails"], ) ``` @@ -7908,23 +7836,7 @@ client.conversational_ai.create_agent(
-**conversation_config:** `ConversationalConfig` — Conversation configuration for an agent - -
-
- -
-
- -**use_tool_ids:** `typing.Optional[bool]` — Use tool ids instead of tools specs from request payload. - -
-
- -
-
- -**platform_settings:** `typing.Optional[AgentPlatformSettingsRequestModel]` — Platform settings for the agent are all settings that aren't related to the conversation orchestration and content. +**emails:** `typing.Sequence[str]` — The email of the customer
@@ -7932,7 +7844,7 @@ client.conversational_ai.create_agent(
-**name:** `typing.Optional[str]` — A name to make the agent easier to find +**group_ids:** `typing.Optional[typing.Sequence[str]]` — The group ids of the user
@@ -7952,7 +7864,7 @@ client.conversational_ai.create_agent(
-
client.conversational_ai.get_agent(...) +
client.workspace.delete_existing_invitation(...)
@@ -7964,7 +7876,7 @@ client.conversational_ai.create_agent(
-Retrieve config for an agent +Invalidates an existing email invitation. The invitation will still show up in the inbox it has been delivered to, but activating it to join the workspace won't work. This endpoint may only be called by workspace administrators.
@@ -7984,8 +7896,8 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.conversational_ai.get_agent( - agent_id="21m00Tcm4TlvDq8ikWAM", +client.workspace.delete_existing_invitation( + email="john.doe@testmail.com", ) ``` @@ -8002,7 +7914,7 @@ client.conversational_ai.get_agent(
-**agent_id:** `str` — The id of an agent. This is returned on agent creation. +**email:** `str` — The email of the customer
@@ -8022,7 +7934,7 @@ client.conversational_ai.get_agent(
-
client.conversational_ai.delete_agent(...) +
client.workspace.update_member(...)
@@ -8034,7 +7946,7 @@ client.conversational_ai.get_agent(
-Delete an agent +Updates attributes of a workspace member. Apart from the email identifier, all parameters will remain unchanged unless specified. This endpoint may only be called by workspace administrators.
@@ -8054,8 +7966,8 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.conversational_ai.delete_agent( - agent_id="21m00Tcm4TlvDq8ikWAM", +client.workspace.update_member( + email="email", ) ``` @@ -8072,7 +7984,23 @@ client.conversational_ai.delete_agent(
-**agent_id:** `str` — The id of an agent. This is returned on agent creation. +**email:** `str` — Email of the target user. + +
+
+ +
+
+ +**is_locked:** `typing.Optional[bool]` — Whether to lock or unlock the user account. + +
+
+ +
+
+ +**workspace_role:** `typing.Optional[BodyUpdateMemberV1WorkspaceMembersPostWorkspaceRole]` — Role dictating permissions in the workspace.
@@ -8092,7 +8020,8 @@ client.conversational_ai.delete_agent(
-
client.conversational_ai.update_agent(...) +## SpeechToText +
client.speech_to_text.convert(...)
@@ -8104,7 +8033,7 @@ client.conversational_ai.delete_agent(
-Patches an Agent settings +Transcribe an audio or video file.
@@ -8124,8 +8053,8 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.conversational_ai.update_agent( - agent_id="21m00Tcm4TlvDq8ikWAM", +client.speech_to_text.convert( + model_id="model_id", ) ``` @@ -8142,7 +8071,7 @@ client.conversational_ai.update_agent(
-**agent_id:** `str` — The id of an agent. This is returned on agent creation. +**model_id:** `str` — The ID of the model to use for transcription, currently only 'scribe_v1' is available.
@@ -8150,7 +8079,9 @@ client.conversational_ai.update_agent(
-**use_tool_ids:** `typing.Optional[bool]` — Use tool ids instead of tools specs from request payload. +**file:** `from __future__ import annotations + +core.File` — See core.File for more documentation
@@ -8158,7 +8089,7 @@ client.conversational_ai.update_agent(
-**conversation_config:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Conversation configuration for an agent
+**enable_logging:** `typing.Optional[bool]` — When enable_logging is set to false, zero retention mode will be used for the request. This means history features are unavailable for this request, including request stitching. Zero retention mode may only be used by enterprise customers.
@@ -8166,7 +8097,7 @@ client.conversational_ai.update_agent(
-**platform_settings:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Platform settings for the agent are all settings that aren't related to the conversation orchestration and content. +**language_code:** `typing.Optional[str]` — An ISO-639-1 or ISO-639-3 language_code corresponding to the language of the audio file. Can sometimes improve transcription performance if known beforehand. Defaults to null, in this case the language is predicted automatically.
@@ -8174,11 +8105,7 @@ client.conversational_ai.update_agent(
-**secrets:** `typing.Optional[ - typing.Sequence[ - BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem - ] -]` — A list of secrets for the agent. Can be used to add new secrets or update and delete the existing ones +**tag_audio_events:** `typing.Optional[bool]` — Whether to tag audio events like (laughter), (footsteps), etc. in the transcription.
@@ -8186,7 +8113,23 @@ client.conversational_ai.update_agent(
-**name:** `typing.Optional[str]` — A name to make the agent easier to find
+**num_speakers:** `typing.Optional[int]` — The maximum number of speakers talking in the uploaded file. Can help with predicting who speaks when. The maximum number of speakers that can be predicted is 32. Defaults to null; in this case the number of speakers is set to the maximum value the model supports.
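A sketch combining these options with the `timestamps_granularity` and `diarize` parameters described below (the local file path is hypothetical; any binary file object satisfies `core.File`):

```python
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
with open("meeting.mp3", "rb") as audio:  # hypothetical file
    transcript = client.speech_to_text.convert(
        model_id="scribe_v1",
        file=audio,
        language_code="en",             # optional; omit to auto-detect
        num_speakers=2,                 # optional upper bound, max 32
        timestamps_granularity="word",
        diarize=True,
    )
```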
+
+ +
+
+ +**timestamps_granularity:** `typing.Optional[SpeechToTextConvertRequestTimestampsGranularity]` — The granularity of the timestamps in the transcription. 'word' provides word-level timestamps and 'character' provides character-level timestamps per word. + +
+
+ +
+
+ +**diarize:** `typing.Optional[bool]` — Whether to annotate which speaker is currently talking in the uploaded file.
@@ -8206,7 +8149,8 @@ client.conversational_ai.update_agent(
-
client.conversational_ai.get_agent_widget(...) +## ConversationalAi +
client.conversational_ai.get_signed_url(...)
@@ -8218,7 +8162,7 @@ client.conversational_ai.update_agent(
-Retrieve the widget configuration for an agent
+Get a signed url to start a conversation with an agent that requires authorization
@@ -8238,7 +8182,7 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.conversational_ai.get_agent_widget( +client.conversational_ai.get_signed_url( agent_id="21m00Tcm4TlvDq8ikWAM", ) @@ -8256,15 +8200,7 @@ client.conversational_ai.get_agent_widget(
-**agent_id:** `str` — The id of an agent. This is returned on agent creation. - -
-
- -
-
- -**conversation_signature:** `typing.Optional[str]` — An expiring token that enables a conversation to start. These can be generated for an agent using the /v1/convai/conversation/get_signed_url endpoint +**agent_id:** `str` — The id of the agent you're taking the action on.
@@ -8284,7 +8220,7 @@ client.conversational_ai.get_agent_widget(
-
client.conversational_ai.get_agent_link(...) +
client.conversational_ai.create_agent(...)
@@ -8296,7 +8232,7 @@ client.conversational_ai.get_agent_widget(
-Get the current link used to share the agent with others +Create an agent from a config object
@@ -8311,13 +8247,13 @@ Get the current link used to share the agent with others
```python -from elevenlabs import ElevenLabs +from elevenlabs import ConversationalConfigApiModel, ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.conversational_ai.get_agent_link( - agent_id="21m00Tcm4TlvDq8ikWAM", +client.conversational_ai.create_agent( + conversation_config=ConversationalConfigApiModel(), ) ``` @@ -8334,7 +8270,31 @@ client.conversational_ai.get_agent_link(
-**agent_id:** `str` — The id of an agent. This is returned on agent creation. +**conversation_config:** `ConversationalConfigApiModel` — Conversation configuration for an agent + +
+
+ +
+
+ +**use_tool_ids:** `typing.Optional[bool]` — Use tool ids instead of tools specs from request payload. + +
+
+ +
+
+ +**platform_settings:** `typing.Optional[AgentPlatformSettingsRequestModel]` — Platform settings for the agent are all settings that aren't related to the conversation orchestration and content. + +
+
+ +
+
+ +**name:** `typing.Optional[str]` — A name to make the agent easier to find
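
As a minimal sketch combining these parameters, an agent can be created from a default config plus a display name; the `agent_id` attribute on the response is an assumption based on `CreateAgentResponseModel`.

```python
from elevenlabs import ConversationalConfigApiModel, ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)

# Default conversation config plus a human-readable name; platform_settings
# and use_tool_ids are omitted here and fall back to their defaults.
response = client.conversational_ai.create_agent(
    conversation_config=ConversationalConfigApiModel(),
    name="Support agent",
)
print(response.agent_id)  # assumed response attribute
```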
@@ -8354,7 +8314,7 @@ client.conversational_ai.get_agent_link(
-
client.conversational_ai.post_agent_avatar(...) +
client.conversational_ai.get_agent(...)
@@ -8366,7 +8326,7 @@ client.conversational_ai.get_agent_link(
-Sets the avatar for an agent displayed in the widget +Retrieve config for an agent
@@ -8386,7 +8346,7 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.conversational_ai.post_agent_avatar( +client.conversational_ai.get_agent( agent_id="21m00Tcm4TlvDq8ikWAM", ) @@ -8412,16 +8372,6 @@ client.conversational_ai.post_agent_avatar(
-**avatar_file:** `from __future__ import annotations - -core.File` — See core.File for more documentation - -
-
- -
-
- **request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -8434,7 +8384,7 @@ core.File` — See core.File for more documentation
-
client.conversational_ai.add_agent_secret(...) +
client.conversational_ai.delete_agent(...)
@@ -8446,7 +8396,7 @@ core.File` — See core.File for more documentation
-Uploads a file or reference a webpage for the agent to use as part of it's knowledge base +Delete an agent
@@ -8466,10 +8416,8 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.conversational_ai.add_agent_secret( +client.conversational_ai.delete_agent( agent_id="21m00Tcm4TlvDq8ikWAM", - name="MY API KEY", - secret_value="sk_api_12354abc", ) ``` @@ -8494,22 +8442,6 @@ client.conversational_ai.add_agent_secret(
-**name:** `str` — A name to help identify a particular agent secret - -
-
- -
-
- -**secret_value:** `str` — A value to be encrypted and used by the agent - -
-
- -
-
- **request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -8522,7 +8454,7 @@ client.conversational_ai.add_agent_secret(
-
client.conversational_ai.get_agents(...) +
client.conversational_ai.update_agent(...)
@@ -8534,7 +8466,7 @@ client.conversational_ai.add_agent_secret(
-Returns a page of your agents and their metadata. +Patches an agent's settings
@@ -8554,7 +8486,9 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.conversational_ai.get_agents() +client.conversational_ai.update_agent( + agent_id="21m00Tcm4TlvDq8ikWAM", +) ``` @@ -8570,7 +8504,7 @@ client.conversational_ai.get_agents()
-**cursor:** `typing.Optional[str]` — Used for fetching next page. Cursor is returned in the response. +**agent_id:** `str` — The id of an agent. This is returned on agent creation.
@@ -8578,7 +8512,7 @@ client.conversational_ai.get_agents()
-**page_size:** `typing.Optional[int]` — How many Agents to return at maximum. Can not exceed 100, defaults to 30. +**use_tool_ids:** `typing.Optional[bool]` — Use tool ids instead of tools specs from request payload.
@@ -8586,7 +8520,23 @@ client.conversational_ai.get_agents()
-**search:** `typing.Optional[str]` — Search by agents name. +**conversation_config:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Conversation configuration for an agent + +
+
+ +
+
+ +**platform_settings:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Platform settings for the agent are all settings that aren't related to the conversation orchestration and content. + +
+
+ +
+
+ +**name:** `typing.Optional[str]` — A name to make the agent easier to find
@@ -8606,7 +8556,7 @@ client.conversational_ai.get_agents()
-
client.conversational_ai.get_conversations(...) +
client.conversational_ai.get_agent_widget(...)
@@ -8618,7 +8568,7 @@ client.conversational_ai.get_agents()
-Get all conversations of agents that user owns. With option to restrict to a specific agent. +Retrieve the widget configuration for an agent
@@ -8638,7 +8588,7 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.conversational_ai.get_conversations( +client.conversational_ai.get_agent_widget( agent_id="21m00Tcm4TlvDq8ikWAM", ) @@ -8656,23 +8606,7 @@ client.conversational_ai.get_conversations(
-**cursor:** `typing.Optional[str]` — Used for fetching next page. Cursor is returned in the response. - -
-
- -
-
- -**agent_id:** `typing.Optional[str]` — The id of the agent you're taking the action on. - -
-
- -
-
- -**call_successful:** `typing.Optional[EvaluationSuccessResult]` — The result of the success evaluation +**agent_id:** `str` — The id of an agent. This is returned on agent creation.
@@ -8680,7 +8614,7 @@ client.conversational_ai.get_conversations(
-**page_size:** `typing.Optional[int]` — How many conversations to return at maximum. Can not exceed 100, defaults to 30. +**conversation_signature:** `typing.Optional[str]` — An expiring token that enables a conversation to start. These can be generated for an agent using the /v1/convai/conversation/get_signed_url endpoint
@@ -8700,7 +8634,7 @@ client.conversational_ai.get_conversations(
-
client.conversational_ai.get_conversation(...) +
client.conversational_ai.get_agent_link(...)
@@ -8712,7 +8646,7 @@ client.conversational_ai.get_conversations(
-Get the details of a particular conversation +Get the current link used to share the agent with others
@@ -8732,8 +8666,8 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.conversational_ai.get_conversation( - conversation_id="21m00Tcm4TlvDq8ikWAM", +client.conversational_ai.get_agent_link( + agent_id="21m00Tcm4TlvDq8ikWAM", ) ``` @@ -8750,7 +8684,7 @@ client.conversational_ai.get_conversation(
-**conversation_id:** `str` — The id of the conversation you're taking the action on. +**agent_id:** `str` — The id of an agent. This is returned on agent creation.
@@ -8770,7 +8704,7 @@ client.conversational_ai.get_conversation(
-
client.conversational_ai.delete_conversation(...) +
client.conversational_ai.post_agent_avatar(...)
@@ -8782,7 +8716,7 @@ client.conversational_ai.get_conversation(
-Delete a particular conversation +Sets the avatar for an agent displayed in the widget
@@ -8802,8 +8736,8 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.conversational_ai.delete_conversation( - conversation_id="21m00Tcm4TlvDq8ikWAM", +client.conversational_ai.post_agent_avatar( + agent_id="21m00Tcm4TlvDq8ikWAM", ) ``` @@ -8820,7 +8754,17 @@ client.conversational_ai.delete_conversation(
-**conversation_id:** `str` — The id of the conversation you're taking the action on. +**agent_id:** `str` — The id of an agent. This is returned on agent creation. + +
+
+ +
+
+ +**avatar_file:** `from __future__ import annotations + +core.File` — See core.File for more documentation
@@ -8840,7 +8784,7 @@ client.conversational_ai.delete_conversation(
-
client.conversational_ai.get_conversation_audio(...) +
client.conversational_ai.add_agent_secret(...)
@@ -8852,7 +8796,7 @@ client.conversational_ai.delete_conversation(
-Get the audio recording of a particular conversation +Uploads a file or references a webpage for the agent to use as part of its knowledge base
@@ -8872,8 +8816,10 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.conversational_ai.get_conversation_audio( - conversation_id="21m00Tcm4TlvDq8ikWAM", +client.conversational_ai.add_agent_secret( + agent_id="21m00Tcm4TlvDq8ikWAM", + name="MY API KEY", + secret_value="sk_api_12354abc", ) ``` @@ -8890,7 +8836,23 @@ client.conversational_ai.get_conversation_audio(
-**conversation_id:** `str` — The id of the conversation you're taking the action on. +**agent_id:** `str` — The id of an agent. This is returned on agent creation. + +
+
+ +
+
+ +**name:** `str` — A name to help identify a particular agent secret + +
+
+ +
+
+ +**secret_value:** `str` — A value to be encrypted and used by the agent
@@ -8910,7 +8872,7 @@ client.conversational_ai.get_conversation_audio(
-
client.conversational_ai.post_conversation_feedback(...) +
client.conversational_ai.get_agents(...)
@@ -8922,7 +8884,7 @@ client.conversational_ai.get_conversation_audio(
-Send the feedback for the given conversation +Returns a page of your agents and their metadata.
@@ -8942,10 +8904,7 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.conversational_ai.post_conversation_feedback( - conversation_id="21m00Tcm4TlvDq8ikWAM", - feedback="like", -) +client.conversational_ai.get_agents() ``` @@ -8961,7 +8920,7 @@ client.conversational_ai.post_conversation_feedback(
-**conversation_id:** `str` — The id of the conversation you're taking the action on. +**cursor:** `typing.Optional[str]` — Used for fetching next page. Cursor is returned in the response.
@@ -8969,7 +8928,15 @@ client.conversational_ai.post_conversation_feedback(
-**feedback:** `UserFeedbackScore` — Either 'like' or 'dislike' to indicate the feedback for the conversation. +**page_size:** `typing.Optional[int]` — How many Agents to return at maximum. Can not exceed 100, defaults to 30. + +
+
+ +
+
+ +**search:** `typing.Optional[str]` — Search agents by name.
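
Because the endpoint is cursor-paginated, walking all agents looks roughly like the sketch below; the `agents` and `next_cursor` attribute names on the page model are assumptions.

```python
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)

cursor = None
while True:
    page = client.conversational_ai.get_agents(
        cursor=cursor,
        page_size=100,     # maximum allowed page size
        search="support",  # optional name filter
    )
    for agent in page.agents:  # assumed attribute name
        print(agent.agent_id)
    cursor = page.next_cursor  # assumed attribute name
    if not cursor:
        break
```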
@@ -8989,7 +8956,7 @@ client.conversational_ai.post_conversation_feedback(
-
client.conversational_ai.create_phone_number(...) +
client.conversational_ai.get_conversations(...)
@@ -9001,7 +8968,7 @@ client.conversational_ai.post_conversation_feedback(
-Import Phone Number from Twilio configuration +Get all conversations of agents that the user owns, with the option to restrict to a specific agent.
@@ -9021,11 +8988,8 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.conversational_ai.create_phone_number( - phone_number="phone_number", - label="label", - sid="sid", - token="token", +client.conversational_ai.get_conversations( + agent_id="21m00Tcm4TlvDq8ikWAM", ) ``` @@ -9042,7 +9006,7 @@ client.conversational_ai.create_phone_number(
-**phone_number:** `str` — Phone number +**cursor:** `typing.Optional[str]` — Used for fetching next page. Cursor is returned in the response.
@@ -9050,7 +9014,7 @@ client.conversational_ai.create_phone_number(
-**label:** `str` — Label for the phone number +**agent_id:** `typing.Optional[str]` — The id of the agent you're taking the action on.
@@ -9058,7 +9022,7 @@ client.conversational_ai.create_phone_number(
-**sid:** `str` — Twilio Account SID +**call_successful:** `typing.Optional[EvaluationSuccessResult]` — The result of the success evaluation
@@ -9066,7 +9030,7 @@ client.conversational_ai.create_phone_number(
-**token:** `str` — Twilio Token +**page_size:** `typing.Optional[int]` — How many conversations to return at maximum. Can not exceed 100, defaults to 30.
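
For example, to list only successful calls for one agent, a sketch (the "success" literal is an assumption about `EvaluationSuccessResult`; IDs are placeholders):

```python
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)

page = client.conversational_ai.get_conversations(
    agent_id="21m00Tcm4TlvDq8ikWAM",
    call_successful="success",  # assumed EvaluationSuccessResult literal
    page_size=100,              # maximum allowed
)
```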
@@ -9086,7 +9050,7 @@ client.conversational_ai.create_phone_number(
-
client.conversational_ai.get_phone_number(...) +
client.conversational_ai.get_conversation(...)
@@ -9098,7 +9062,7 @@ client.conversational_ai.create_phone_number(
-Retrieve Phone Number details by ID +Get the details of a particular conversation
@@ -9118,8 +9082,8 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.conversational_ai.get_phone_number( - phone_number_id="TeaqRRdTcIfIu2i7BYfT", +client.conversational_ai.get_conversation( + conversation_id="21m00Tcm4TlvDq8ikWAM", ) ``` @@ -9136,7 +9100,7 @@ client.conversational_ai.get_phone_number(
-**phone_number_id:** `str` — The id of an agent. This is returned on agent creation. +**conversation_id:** `str` — The id of the conversation you're taking the action on.
@@ -9156,7 +9120,7 @@ client.conversational_ai.get_phone_number(
-
client.conversational_ai.delete_phone_number(...) +
client.conversational_ai.delete_conversation(...)
@@ -9168,7 +9132,7 @@ client.conversational_ai.get_phone_number(
-Delete Phone Number by ID +Delete a particular conversation
@@ -9188,8 +9152,8 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.conversational_ai.delete_phone_number( - phone_number_id="TeaqRRdTcIfIu2i7BYfT", +client.conversational_ai.delete_conversation( + conversation_id="21m00Tcm4TlvDq8ikWAM", ) ``` @@ -9206,7 +9170,7 @@ client.conversational_ai.delete_phone_number(
-**phone_number_id:** `str` — The id of an agent. This is returned on agent creation. +**conversation_id:** `str` — The id of the conversation you're taking the action on.
@@ -9226,7 +9190,7 @@ client.conversational_ai.delete_phone_number(
-
client.conversational_ai.update_phone_number(...) +
client.conversational_ai.get_conversation_audio(...)
@@ -9238,7 +9202,7 @@ client.conversational_ai.delete_phone_number(
-Update Phone Number details by ID +Get the audio recording of a particular conversation
@@ -9258,8 +9222,8 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.conversational_ai.update_phone_number( - phone_number_id="TeaqRRdTcIfIu2i7BYfT", +client.conversational_ai.get_conversation_audio( + conversation_id="21m00Tcm4TlvDq8ikWAM", ) ``` @@ -9276,15 +9240,7 @@ client.conversational_ai.update_phone_number(
-**phone_number_id:** `str` — The id of an agent. This is returned on agent creation. - -
-
- -
-
- -**agent_id:** `typing.Optional[str]` +**conversation_id:** `str` — The id of the conversation you're taking the action on.
@@ -9304,7 +9260,7 @@ client.conversational_ai.update_phone_number(
-
client.conversational_ai.get_phone_numbers() +
client.conversational_ai.post_conversation_feedback(...)
@@ -9316,7 +9272,7 @@ client.conversational_ai.update_phone_number(
-Retrieve all Phone Numbers +Send the feedback for the given conversation
@@ -9336,7 +9292,10 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.conversational_ai.get_phone_numbers() +client.conversational_ai.post_conversation_feedback( + conversation_id="21m00Tcm4TlvDq8ikWAM", + feedback="like", +) ``` @@ -9352,6 +9311,22 @@ client.conversational_ai.get_phone_numbers()
+**conversation_id:** `str` — The id of the conversation you're taking the action on. + +
+
+ +
+
+ +**feedback:** `UserFeedbackScore` — Either 'like' or 'dislike' to indicate the feedback for the conversation. + +
+
+ +
+
+ **request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -9364,7 +9339,7 @@ client.conversational_ai.get_phone_numbers()
-
client.conversational_ai.get_knowledge_base_list(...) +
client.conversational_ai.create_phone_number(...)
@@ -9376,7 +9351,7 @@ client.conversational_ai.get_phone_numbers()
-Get a list of available knowledge base documents +Import Phone Number from Twilio configuration
@@ -9396,7 +9371,12 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.conversational_ai.get_knowledge_base_list() +client.conversational_ai.create_phone_number( + phone_number="phone_number", + label="label", + sid="sid", + token="token", +) ``` @@ -9412,15 +9392,7 @@ client.conversational_ai.get_knowledge_base_list()
-**cursor:** `typing.Optional[str]` — Used for fetching next page. Cursor is returned in the response. - -
-
- -
-
- -**page_size:** `typing.Optional[int]` — How many documents to return at maximum. Can not exceed 100, defaults to 30. +**phone_number:** `str` — Phone number
@@ -9428,7 +9400,7 @@ client.conversational_ai.get_knowledge_base_list()
-**search:** `typing.Optional[str]` — If specified, the endpoint returns only such knowledge base documents whose names start with this string. +**label:** `str` — Label for the phone number
@@ -9436,7 +9408,7 @@ client.conversational_ai.get_knowledge_base_list()
-**show_only_owned_documents:** `typing.Optional[bool]` — If set to true, the endpoint will return only documents owned by you (and not shared from somebody else). +**sid:** `str` — Twilio Account SID
@@ -9444,7 +9416,7 @@ client.conversational_ai.get_knowledge_base_list()
-**use_typesense:** `typing.Optional[bool]` — If set to true, the endpoint will use typesense DB to search for the documents). +**token:** `str` — Twilio Auth Token
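
A sketch with illustrative Twilio values; the number, SID, and token below are placeholders, not working credentials.

```python
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)

response = client.conversational_ai.create_phone_number(
    phone_number="+15551234567",  # E.164-formatted placeholder
    label="Support line",
    sid="ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",  # placeholder Twilio Account SID
    token="your_twilio_auth_token",            # placeholder Twilio Auth Token
)
```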
@@ -9464,7 +9436,7 @@ client.conversational_ai.get_knowledge_base_list()
-
client.conversational_ai.add_to_knowledge_base(...) +
client.conversational_ai.get_phone_number(...)
@@ -9476,7 +9448,7 @@ client.conversational_ai.get_knowledge_base_list()
-Uploads a file or reference a webpage to use as part of the shared knowledge base +Retrieve Phone Number details by ID
@@ -9496,7 +9468,9 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.conversational_ai.add_to_knowledge_base() +client.conversational_ai.get_phone_number( + phone_number_id="TeaqRRdTcIfIu2i7BYfT", +) ``` @@ -9512,17 +9486,77 @@ client.conversational_ai.add_to_knowledge_base()
-**url:** `typing.Optional[str]` — URL to a page of documentation that the agent will have access to in order to interact with users. +**phone_number_id:** `str` — The id of the phone number. This is returned on phone number creation. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+ +
+ + + + +
+ +
client.conversational_ai.delete_phone_number(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Delete Phone Number by ID +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from elevenlabs import ElevenLabs + +client = ElevenLabs( + api_key="YOUR_API_KEY", +) +client.conversational_ai.delete_phone_number( + phone_number_id="TeaqRRdTcIfIu2i7BYfT", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters
-**file:** `from __future__ import annotations +
+
-typing.Optional[core.File]` — See core.File for more documentation +**phone_number_id:** `str` — The id of the phone number. This is returned on phone number creation.
@@ -9542,7 +9576,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-
client.conversational_ai.get_knowledge_base_document_by_id(...) +
client.conversational_ai.update_phone_number(...)
@@ -9554,7 +9588,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-Get details about a specific documentation making up the agent's knowledge base +Update Phone Number details by ID
@@ -9574,8 +9608,8 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.conversational_ai.get_knowledge_base_document_by_id( - documentation_id="21m00Tcm4TlvDq8ikWAM", +client.conversational_ai.update_phone_number( + phone_number_id="TeaqRRdTcIfIu2i7BYfT", ) ``` @@ -9592,7 +9626,15 @@ client.conversational_ai.get_knowledge_base_document_by_id(
-**documentation_id:** `str` — The id of a document from the knowledge base. This is returned on document addition. +**phone_number_id:** `str` — The id of the phone number. This is returned on phone number creation. + +
+
+ +
+
+ +**agent_id:** `typing.Optional[str]`
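
The optional `agent_id` is how a phone number gets pointed at an agent. A sketch, with placeholder IDs:

```python
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)

# Route calls on this number to the given agent.
client.conversational_ai.update_phone_number(
    phone_number_id="TeaqRRdTcIfIu2i7BYfT",
    agent_id="21m00Tcm4TlvDq8ikWAM",
)
```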
@@ -9612,7 +9654,7 @@ client.conversational_ai.get_knowledge_base_document_by_id(
-
client.conversational_ai.delete_knowledge_base_document(...) +
client.conversational_ai.get_phone_numbers()
@@ -9624,7 +9666,7 @@ client.conversational_ai.get_knowledge_base_document_by_id(
-Delete a document from the knowledge base +Retrieve all Phone Numbers
@@ -9644,9 +9686,7 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.conversational_ai.delete_knowledge_base_document( - documentation_id="21m00Tcm4TlvDq8ikWAM", -) +client.conversational_ai.get_phone_numbers() ``` @@ -9662,14 +9702,6 @@ client.conversational_ai.delete_knowledge_base_document(
-**documentation_id:** `str` — The id of a document from the knowledge base. This is returned on document addition. - -
-
- -
-
- **request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -9682,7 +9714,7 @@ client.conversational_ai.delete_knowledge_base_document(
-
client.conversational_ai.get_dependent_agents(...) +
client.conversational_ai.get_knowledge_base_list(...)
@@ -9694,7 +9726,7 @@ client.conversational_ai.delete_knowledge_base_document(
-Get a list of agents depending on this knowledge base document +Get a list of available knowledge base documents
@@ -9714,9 +9746,7 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.conversational_ai.get_dependent_agents( - documentation_id="21m00Tcm4TlvDq8ikWAM", -) +client.conversational_ai.get_knowledge_base_list() ``` @@ -9732,7 +9762,7 @@ client.conversational_ai.get_dependent_agents(
-**documentation_id:** `str` — The id of a document from the knowledge base. This is returned on document addition. +**cursor:** `typing.Optional[str]` — Used for fetching next page. Cursor is returned in the response.
@@ -9740,7 +9770,7 @@ client.conversational_ai.get_dependent_agents(
-**cursor:** `typing.Optional[str]` — Used for fetching next page. Cursor is returned in the response. +**page_size:** `typing.Optional[int]` — How many documents to return at maximum. Can not exceed 100, defaults to 30.
@@ -9748,7 +9778,23 @@ client.conversational_ai.get_dependent_agents(
-**page_size:** `typing.Optional[int]` — How many documents to return at maximum. Can not exceed 100, defaults to 30. +**search:** `typing.Optional[str]` — If specified, the endpoint returns only those knowledge base documents whose names start with this string. + +
+
+ +
+
+ +**show_only_owned_documents:** `typing.Optional[bool]` — If set to true, the endpoint will return only documents owned by you (and not shared with you by somebody else). + +
+
+ +
+
+ +**use_typesense:** `typing.Optional[bool]` — If set to true, the endpoint will use the Typesense DB to search for the documents.
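
Combining the filters above, a sketch that lists only documents you own whose names start with a given prefix:

```python
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)

page = client.conversational_ai.get_knowledge_base_list(
    search="faq",                    # name-prefix filter
    show_only_owned_documents=True,  # exclude documents shared with you
    page_size=50,
)
```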
@@ -9768,7 +9814,7 @@ client.conversational_ai.get_dependent_agents(
-
client.conversational_ai.get_tools() +
client.conversational_ai.add_to_knowledge_base(...)
@@ -9780,7 +9826,7 @@ client.conversational_ai.get_dependent_agents(
-Get all available tools available in the workspace. +Uploads a file or references a webpage to use as part of the shared knowledge base
@@ -9800,7 +9846,7 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.conversational_ai.get_tools() +client.conversational_ai.add_to_knowledge_base() ``` @@ -9816,6 +9862,32 @@ client.conversational_ai.get_tools()
+**name:** `typing.Optional[str]` — A custom, human-readable name for the document. + +
+
+ +
+
+ +**url:** `typing.Optional[str]` — URL to a page of documentation that the agent will have access to in order to interact with users. + +
+
+ +
+
+ +**file:** `from __future__ import annotations + +typing.Optional[core.File]` — See core.File for more documentation + +
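
In practice a call supplies one source, either `url` or `file`. A sketch of both variants; the URL and file path are placeholders, and passing an open binary handle as `file` is an assumption about `core.File`.

```python
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)

# Variant 1: reference a webpage by URL.
client.conversational_ai.add_to_knowledge_base(
    name="Product FAQ",
    url="https://example.com/faq",
)

# Variant 2: upload a local file (placeholder path).
with open("handbook.pdf", "rb") as f:
    client.conversational_ai.add_to_knowledge_base(
        name="Employee handbook",
        file=f,
    )
```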
+
+ +
+
+ **request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -9828,7 +9900,7 @@ client.conversational_ai.get_tools()
-
client.conversational_ai.add_tool(...) +
client.conversational_ai.rag_index_status(...)
@@ -9840,7 +9912,7 @@ client.conversational_ai.get_tools()
-Add a new tool to the available tools in the workspace. +In case the document is not RAG indexed, it triggers a RAG indexing task; otherwise it returns the current status.
@@ -9855,26 +9927,14 @@ Add a new tool to the available tools in the workspace.
```python -from elevenlabs import ( - ElevenLabs, - ToolRequestModel, - ToolRequestModelToolConfig_Webhook, - WebhookToolApiSchemaConfig, -) +from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.conversational_ai.add_tool( - request=ToolRequestModel( - tool_config=ToolRequestModelToolConfig_Webhook( - name="name", - description="description", - api_schema=WebhookToolApiSchemaConfig( - url="url", - ), - ), - ), +client.conversational_ai.rag_index_status( + documentation_id="21m00Tcm4TlvDq8ikWAM", + model="e5_mistral_7b_instruct", ) ``` @@ -9891,7 +9951,23 @@ client.conversational_ai.add_tool(
-**request:** `ToolRequestModel` +**documentation_id:** `str` — The id of a document from the knowledge base. This is returned on document addition. + +
+
+ +
+
+ +**model:** `EmbeddingModelEnum` + +
+
+ +
+
+ +**force_reindex:** `typing.Optional[bool]` — If the document is already indexed and you want to reindex it, set this param to true.
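
To rebuild an existing index, the same call from the usage snippet takes `force_reindex=True`:

```python
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)

status = client.conversational_ai.rag_index_status(
    documentation_id="21m00Tcm4TlvDq8ikWAM",
    model="e5_mistral_7b_instruct",
    force_reindex=True,  # reindex even if the document is already indexed
)
```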
@@ -9911,7 +9987,7 @@ client.conversational_ai.add_tool(
-
client.conversational_ai.get_tool(...) +
client.conversational_ai.get_knowledge_base_document_by_id(...)
@@ -9923,7 +9999,7 @@ client.conversational_ai.add_tool(
-Get tool that is available in the workspace. +Get details about a specific document making up the agent's knowledge base
@@ -9943,8 +10019,8 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.conversational_ai.get_tool( - tool_id="tool_id", +client.conversational_ai.get_knowledge_base_document_by_id( + documentation_id="21m00Tcm4TlvDq8ikWAM", ) ``` @@ -9961,7 +10037,7 @@ client.conversational_ai.get_tool(
-**tool_id:** `str` — ID of the requested tool. +**documentation_id:** `str` — The id of a document from the knowledge base. This is returned on document addition.
@@ -9981,7 +10057,7 @@ client.conversational_ai.get_tool(
-
client.conversational_ai.remove_tool(...) +
client.conversational_ai.delete_knowledge_base_document(...)
@@ -9993,7 +10069,7 @@ client.conversational_ai.get_tool(
-Delete tool from the workspace. +Delete a document from the knowledge base
@@ -10013,8 +10089,8 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.conversational_ai.remove_tool( - tool_id="tool_id", +client.conversational_ai.delete_knowledge_base_document( + documentation_id="21m00Tcm4TlvDq8ikWAM", ) ``` @@ -10031,7 +10107,7 @@ client.conversational_ai.remove_tool(
-**tool_id:** `str` — ID of the requested tool. +**documentation_id:** `str` — The id of a document from the knowledge base. This is returned on document addition.
@@ -10051,7 +10127,7 @@ client.conversational_ai.remove_tool(
-
client.conversational_ai.update_tool(...) +
client.conversational_ai.get_dependent_agents(...)
@@ -10063,7 +10139,7 @@ client.conversational_ai.remove_tool(
-Update tool that is available in the workspace. +Get a list of agents depending on this knowledge base document
@@ -10078,27 +10154,13 @@ Update tool that is available in the workspace.
```python -from elevenlabs import ( - ElevenLabs, - ToolRequestModel, - ToolRequestModelToolConfig_Webhook, - WebhookToolApiSchemaConfig, -) +from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.conversational_ai.update_tool( - tool_id="tool_id", - request=ToolRequestModel( - tool_config=ToolRequestModelToolConfig_Webhook( - name="name", - description="description", - api_schema=WebhookToolApiSchemaConfig( - url="url", - ), - ), - ), +client.conversational_ai.get_dependent_agents( + documentation_id="21m00Tcm4TlvDq8ikWAM", ) ``` @@ -10115,7 +10177,15 @@ client.conversational_ai.update_tool(
-**tool_id:** `str` — ID of the requested tool. +**documentation_id:** `str` — The id of a document from the knowledge base. This is returned on document addition. + +
+
+ +
+
+ +**cursor:** `typing.Optional[str]` — Used for fetching next page. Cursor is returned in the response.
@@ -10123,7 +10193,7 @@ client.conversational_ai.update_tool(
-**request:** `ToolRequestModel` +**page_size:** `typing.Optional[int]` — How many documents to return at maximum. Can not exceed 100, defaults to 30.
@@ -10231,21 +10301,11 @@ Update Convai settings for the workspace ```python from elevenlabs import ElevenLabs -from elevenlabs.conversational_ai import ( - PatchConvaiSettingsRequestSecretsItem_New, -) client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.conversational_ai.update_settings( - secrets=[ - PatchConvaiSettingsRequestSecretsItem_New( - name="name", - value="value", - ) - ], -) +client.conversational_ai.update_settings() ```
@@ -10261,14 +10321,6 @@ client.conversational_ai.update_settings(
-**secrets:** `typing.Sequence[PatchConvaiSettingsRequestSecretsItem]` - -
-
- -
-
- **conversation_initiation_client_data_webhook:** `typing.Optional[ConversationInitiationClientDataWebhook]`
@@ -10309,7 +10361,7 @@ client.conversational_ai.update_settings(
-Get all secrets for the workspace +Get all workspace secrets for the user
@@ -10432,6 +10484,76 @@ client.conversational_ai.create_secret(
+ +
+
+ +
client.conversational_ai.delete_secret(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Delete a workspace secret if it's not in use +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from elevenlabs import ElevenLabs + +client = ElevenLabs( + api_key="YOUR_API_KEY", +) +client.conversational_ai.delete_secret( + secret_id="secret_id", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**secret_id:** `str` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ +
@@ -10739,8 +10861,8 @@ ultra lossless - ultra quality output format, 705.6kbps with 44.1kHz sample rate This parameter controls text normalization with four modes: 'auto', 'on', 'apply_english' and 'off'. - When set to 'auto', the system will automatically decide whether to apply text normalization - (e.g., spelling out numbers). With 'on', text normalization will always be applied, while + When set to 'auto', the system will automatically decide whether to apply text normalization + (e.g., spelling out numbers). With 'on', text normalization will always be applied, while with 'off', it will be skipped. 'apply_english' is the same as 'on' but will assume that text is in English. @@ -10860,7 +10982,7 @@ client.studio.projects.get(
-Updates Studio project metadata. +Updates the specified Studio project by setting the values of the parameters passed.
@@ -11225,7 +11347,7 @@ client.studio.projects.convert(
-Gets the snapshots of a Studio project. +Retrieves a list of snapshots for a Studio project.
@@ -11283,7 +11405,7 @@ client.studio.projects.get_snapshots(
-
client.studio.projects.stream_audio(...) +
client.studio.projects.get_project_snapshot(...)
@@ -11295,7 +11417,7 @@ client.studio.projects.get_snapshots(
-Stream the audio from a Studio project snapshot. +Returns the project snapshot.
@@ -11315,7 +11437,7 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.studio.projects.stream_audio( +client.studio.projects.get_project_snapshot( project_id="21m00Tcm4TlvDq8ikWAM", project_snapshot_id="21m00Tcm4TlvDq8ikWAM", ) @@ -11350,14 +11472,6 @@ client.studio.projects.stream_audio(
-**convert_to_mpeg:** `typing.Optional[bool]` — Whether to convert the audio to mpeg format. - -
-
- -
-
- **request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -11370,7 +11484,7 @@ client.studio.projects.stream_audio(
-
client.studio.projects.stream_archive(...) +
client.studio.projects.stream_audio(...)
@@ -11382,7 +11496,7 @@ client.studio.projects.stream_audio(
-Returns a compressed archive of the Studio project's audio. +Stream the audio from a Studio project snapshot.
@@ -11402,7 +11516,7 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.studio.projects.stream_archive( +client.studio.projects.stream_audio( project_id="21m00Tcm4TlvDq8ikWAM", project_snapshot_id="21m00Tcm4TlvDq8ikWAM", ) @@ -11437,6 +11551,14 @@ client.studio.projects.stream_archive(
+**convert_to_mpeg:** `typing.Optional[bool]` — Whether to convert the audio to mpeg format. + +
+
+ +
+
+ **request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -12043,7 +12165,7 @@ client.studio.chapters.convert(
-Gets information about all the snapshots of a chapter, each snapshot corresponds can be downloaded as audio. Whenever a chapter is converted a snapshot will be automatically created. +Gets information about all the snapshots of a chapter. Each snapshot can be downloaded as audio. Whenever a chapter is converted, a snapshot will automatically be created.
@@ -12110,7 +12232,7 @@ client.studio.chapters.get_all_snapshots(
-
client.studio.chapters.stream_snapshot(...) +
client.studio.chapters.get_chapter_snapshot(...)
@@ -12122,7 +12244,7 @@ client.studio.chapters.get_all_snapshots(
-Stream the audio from a chapter snapshot. Use `GET /v1/studio/projects/{project_id}/chapters/{chapter_id}/snapshots` to return the snapshots of a chapter. +Returns the chapter snapshot.
@@ -12142,7 +12264,7 @@ from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) -client.studio.chapters.stream_snapshot( +client.studio.chapters.get_chapter_snapshot( project_id="21m00Tcm4TlvDq8ikWAM", chapter_id="21m00Tcm4TlvDq8ikWAM", chapter_snapshot_id="21m00Tcm4TlvDq8ikWAM", @@ -12186,14 +12308,6 @@ client.studio.chapters.stream_snapshot(
-**convert_to_mpeg:** `typing.Optional[bool]` — Whether to convert the audio to mpeg format. - -
-
- -
-
- **request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
diff --git a/src/elevenlabs/__init__.py b/src/elevenlabs/__init__.py index 5d7545f3..ca3f011c 100644 --- a/src/elevenlabs/__init__.py +++ b/src/elevenlabs/__init__.py @@ -8,12 +8,15 @@ AddProjectResponseModel, AddPronunciationDictionaryResponseModel, AddPronunciationDictionaryRulesResponseModel, + AddSharingVoiceRequest, AddVoiceIvcResponseModel, AddVoiceResponseModel, + AddWorkspaceGroupMemberResponseModel, + AddWorkspaceInviteResponseModel, Age, AgentBan, AgentCallLimits, - AgentConfig, + AgentConfigApiModel, AgentConfigOverride, AgentConfigOverrideConfig, AgentMetadataResponseModel, @@ -48,22 +51,22 @@ ChapterContentParagraphTtsNodeInputModel, ChapterContentResponseModel, ChapterResponse, + ChapterSnapshotExtendedResponseModel, ChapterSnapshotResponse, ChapterSnapshotsResponse, ChapterState, ChapterStatisticsResponse, ChapterWithContentResponseModel, ChapterWithContentResponseModelState, + CharacterAlignmentModel, CharacterAlignmentResponseModel, ClientEvent, ClientToolConfig, - ConvAiNewSecretConfig, ConvAiSecretLocator, - ConvAiStoredSecretConfig, ConvAiStoredSecretDependencies, - ConvAiStoredSecretDependenciesAgentsItem, - ConvAiStoredSecretDependenciesAgentsItem_Available, - ConvAiStoredSecretDependenciesAgentsItem_Unknown, + ConvAiStoredSecretDependenciesAgentToolsItem, + ConvAiStoredSecretDependenciesAgentToolsItem_Available, + ConvAiStoredSecretDependenciesAgentToolsItem_Unknown, ConvAiStoredSecretDependenciesToolsItem, ConvAiStoredSecretDependenciesToolsItem_Available, ConvAiStoredSecretDependenciesToolsItem_Unknown, @@ -92,27 +95,48 @@ ConversationSummaryResponseModelStatus, ConversationTokenDbModel, ConversationTokenPurpose, - ConversationalConfig, + ConversationalConfigApiModel, + ConvertChapterResponseModel, + ConvertProjectResponseModel, CreateAgentResponseModel, + CreateAudioNativeProjectRequest, CreatePhoneNumberResponseModel, + CreatePronunciationDictionaryResponseModel, Currency, CustomLlm, DataCollectionResultCommonModel, + DeleteChapterResponseModel, + DeleteDubbingResponseModel, + DeleteProjectResponseModel, DeleteSampleResponseModel, + DeleteVoiceResponseModel, + DeleteWorkspaceGroupMemberResponseModel, + DeleteWorkspaceInviteResponseModel, DependentAvailableAgentIdentifier, DependentAvailableAgentIdentifierAccessLevel, + DependentAvailableAgentToolIdentifier, + DependentAvailableAgentToolIdentifierAccessLevel, DependentAvailableToolIdentifier, DependentAvailableToolIdentifierAccessLevel, + DependentPhoneNumberIdentifier, DependentUnknownAgentIdentifier, + DependentUnknownAgentToolIdentifier, DependentUnknownToolIdentifier, DoDubbingResponse, + DocumentUsageModeEnum, + DubbedSegment, DubbingMediaMetadata, + DubbingMediaReference, DubbingMetadataResponse, + DubbingResource, DynamicVariablesConfig, DynamicVariablesConfigDynamicVariablePlaceholdersValue, EditChapterResponseModel, EditProjectResponseModel, + EditVoiceResponseModel, + EditVoiceSettingsResponseModel, EmbedVariant, + EmbeddingModelEnum, EvaluationSettings, EvaluationSuccessResult, ExtendedSubscriptionResponseModelBillingPeriod, @@ -128,7 +152,7 @@ GetAgentsPageResponseModel, GetAudioNativeProjectSettingsResponseModel, GetChaptersResponse, - GetConvaiSettingsResponseModel, + GetConvAiSettingsResponseModel, GetConversationResponseModel, GetConversationResponseModelStatus, GetConversationsPageResponseModel, @@ -160,8 +184,10 @@ HttpValidationError, ImageAvatar, Invoice, + KnowledgeBaseDocumentMetadataResponseModel, KnowledgeBaseLocator, KnowledgeBaseLocatorType, + LanguageAddedResponse, LanguagePreset, 
LanguagePresetTranslation, LanguageResponse, @@ -182,7 +208,6 @@ ObjectJsonSchemaPropertyPropertiesValue, OrbAvatar, OutputFormat, - PaginatedListedReviewTaskInstanceModel, PhoneNumberAgentInfo, PodcastBulletinMode, PodcastBulletinModeData, @@ -203,11 +228,14 @@ ProjectExtendedResponseModelApplyTextNormalization, ProjectExtendedResponseModelFiction, ProjectExtendedResponseModelQualityPreset, + ProjectExtendedResponseModelSourceType, ProjectExtendedResponseModelTargetAudience, ProjectResponse, ProjectResponseModelAccessLevel, ProjectResponseModelFiction, + ProjectResponseModelSourceType, ProjectResponseModelTargetAudience, + ProjectSnapshotExtendedResponseModel, ProjectSnapshotResponse, ProjectSnapshotUploadResponseModel, ProjectSnapshotUploadResponseModelStatus, @@ -227,22 +255,29 @@ PronunciationDictionaryVersionResponseModel, PydanticPronunciationDictionaryVersionLocator, QueryParamsJsonSchema, - QuoteRequestModel, - QuoteResponseModel, + RagConfig, + RagIndexResponseModel, + RagIndexStatus, ReaderResourceResponseModel, ReaderResourceResponseModelResourceType, RecordingResponse, RemovePronunciationDictionaryRulesResponseModel, ResourceAccessInfo, ResourceAccessInfoRole, - ReviewState, ReviewStatus, - ReviewTaskInstanceResponseModel, SafetyCommonModel, SafetyEvaluation, SafetyResponseModel, SafetyRule, SecretDependencyType, + SegmentCreateResponse, + SegmentDeleteResponse, + SegmentDubResponse, + SegmentTranscriptionResponse, + SegmentTranslationResponse, + SegmentUpdateResponse, + SpeakerSegment, + SpeakerTrack, SpeechHistoryItemResponse, SpeechHistoryItemResponseModelSource, SpeechHistoryItemResponseModelVoiceCategory, @@ -259,26 +294,8 @@ SubscriptionStatus, SubscriptionUsageResponseModel, SystemToolConfig, - TagKind, - TagModel, - TaskInstanceEventKind, - TaskInstanceEventResponseModel, TelephonyProvider, TextToSpeechAsStreamRequest, - ToolRequestModel, - ToolRequestModelToolConfig, - ToolRequestModelToolConfig_Client, - ToolRequestModelToolConfig_System, - ToolRequestModelToolConfig_Webhook, - ToolResponseModel, - ToolResponseModelDependentAgentsItem, - ToolResponseModelDependentAgentsItem_Available, - ToolResponseModelDependentAgentsItem_Unknown, - ToolResponseModelToolConfig, - ToolResponseModelToolConfig_Client, - ToolResponseModelToolConfig_System, - ToolResponseModelToolConfig_Webhook, - ToolsResponseModel, TtsConversationalConfig, TtsConversationalConfigOverride, TtsConversationalConfigOverrideConfig, @@ -287,6 +304,7 @@ TtsOutputFormat, TurnConfig, TurnMode, + UpdateWorkspaceMemberResponseModel, UrlAvatar, UsageCharactersResponseModel, User, @@ -328,7 +346,7 @@ WidgetFeedbackMode, WorkspaceGroupByNameResponseModel, ) -from .errors import UnprocessableEntityError +from .errors import ForbiddenError, NotFoundError, TooEarlyError, UnprocessableEntityError from . 
import ( audio_isolation, audio_native, @@ -352,14 +370,6 @@ workspace, ) from .client import AsyncElevenLabs, ElevenLabs -from .conversational_ai import ( - BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem, - BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_New, - BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_Stored, - PatchConvaiSettingsRequestSecretsItem, - PatchConvaiSettingsRequestSecretsItem_New, - PatchConvaiSettingsRequestSecretsItem_Stored, -) from .dubbing import DubbingGetTranscriptForDubRequestFormatType from .environment import ElevenLabsEnvironment from .history import HistoryGetAllRequestSource @@ -396,6 +406,7 @@ BodyCreatePodcastV1StudioPodcastsPostSourceItem_Text, BodyCreatePodcastV1StudioPodcastsPostSourceItem_Url, ) +from .text_to_sound_effects import TextToSoundEffectsConvertRequestOutputFormat from .text_to_speech import ( BodyTextToSpeechStreamingV1TextToSpeechVoiceIdStreamPostApplyTextNormalization, BodyTextToSpeechStreamingWithTimestampsV1TextToSpeechVoiceIdStreamWithTimestampsPostApplyTextNormalization, @@ -404,6 +415,7 @@ ) from .text_to_voice import TextToVoiceCreatePreviewsRequestOutputFormat from .version import __version__ +from .voices import VoicesGetSharedRequestCategory from .workspace import BodyUpdateMemberV1WorkspaceMembersPostWorkspaceRole __all__ = [ @@ -417,12 +429,15 @@ "AddProjectV1ProjectsAddPostRequestTargetAudience", "AddPronunciationDictionaryResponseModel", "AddPronunciationDictionaryRulesResponseModel", + "AddSharingVoiceRequest", "AddVoiceIvcResponseModel", "AddVoiceResponseModel", + "AddWorkspaceGroupMemberResponseModel", + "AddWorkspaceInviteResponseModel", "Age", "AgentBan", "AgentCallLimits", - "AgentConfig", + "AgentConfigApiModel", "AgentConfigOverride", "AgentConfigOverrideConfig", "AgentMetadataResponseModel", @@ -464,9 +479,6 @@ "BodyCreatePodcastV1StudioPodcastsPostSourceItem", "BodyCreatePodcastV1StudioPodcastsPostSourceItem_Text", "BodyCreatePodcastV1StudioPodcastsPostSourceItem_Url", - "BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem", - "BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_New", - "BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_Stored", "BodyTextToSpeechStreamingV1TextToSpeechVoiceIdStreamPostApplyTextNormalization", "BodyTextToSpeechStreamingWithTimestampsV1TextToSpeechVoiceIdStreamWithTimestampsPostApplyTextNormalization", "BodyTextToSpeechV1TextToSpeechVoiceIdPostApplyTextNormalization", @@ -484,22 +496,22 @@ "ChapterContentParagraphTtsNodeInputModel", "ChapterContentResponseModel", "ChapterResponse", + "ChapterSnapshotExtendedResponseModel", "ChapterSnapshotResponse", "ChapterSnapshotsResponse", "ChapterState", "ChapterStatisticsResponse", "ChapterWithContentResponseModel", "ChapterWithContentResponseModelState", + "CharacterAlignmentModel", "CharacterAlignmentResponseModel", "ClientEvent", "ClientToolConfig", - "ConvAiNewSecretConfig", "ConvAiSecretLocator", - "ConvAiStoredSecretConfig", "ConvAiStoredSecretDependencies", - "ConvAiStoredSecretDependenciesAgentsItem", - "ConvAiStoredSecretDependenciesAgentsItem_Available", - "ConvAiStoredSecretDependenciesAgentsItem_Unknown", + "ConvAiStoredSecretDependenciesAgentToolsItem", + "ConvAiStoredSecretDependenciesAgentToolsItem_Available", + "ConvAiStoredSecretDependenciesAgentToolsItem_Unknown", "ConvAiStoredSecretDependenciesToolsItem", "ConvAiStoredSecretDependenciesToolsItem_Available", "ConvAiStoredSecretDependenciesToolsItem_Unknown", @@ -528,30 +540,51 @@ 
"ConversationSummaryResponseModelStatus", "ConversationTokenDbModel", "ConversationTokenPurpose", - "ConversationalConfig", + "ConversationalConfigApiModel", + "ConvertChapterResponseModel", + "ConvertProjectResponseModel", "CreateAgentResponseModel", + "CreateAudioNativeProjectRequest", "CreatePhoneNumberResponseModel", + "CreatePronunciationDictionaryResponseModel", "Currency", "CustomLlm", "DataCollectionResultCommonModel", + "DeleteChapterResponseModel", + "DeleteDubbingResponseModel", + "DeleteProjectResponseModel", "DeleteSampleResponseModel", + "DeleteVoiceResponseModel", + "DeleteWorkspaceGroupMemberResponseModel", + "DeleteWorkspaceInviteResponseModel", "DependentAvailableAgentIdentifier", "DependentAvailableAgentIdentifierAccessLevel", + "DependentAvailableAgentToolIdentifier", + "DependentAvailableAgentToolIdentifierAccessLevel", "DependentAvailableToolIdentifier", "DependentAvailableToolIdentifierAccessLevel", + "DependentPhoneNumberIdentifier", "DependentUnknownAgentIdentifier", + "DependentUnknownAgentToolIdentifier", "DependentUnknownToolIdentifier", "DoDubbingResponse", + "DocumentUsageModeEnum", + "DubbedSegment", "DubbingGetTranscriptForDubRequestFormatType", "DubbingMediaMetadata", + "DubbingMediaReference", "DubbingMetadataResponse", + "DubbingResource", "DynamicVariablesConfig", "DynamicVariablesConfigDynamicVariablePlaceholdersValue", "EditChapterResponseModel", "EditProjectResponseModel", + "EditVoiceResponseModel", + "EditVoiceSettingsResponseModel", "ElevenLabs", "ElevenLabsEnvironment", "EmbedVariant", + "EmbeddingModelEnum", "EvaluationSettings", "EvaluationSuccessResult", "ExtendedSubscriptionResponseModelBillingPeriod", @@ -560,6 +593,7 @@ "FeedbackItem", "FineTuningResponse", "FineTuningResponseModelStateValue", + "ForbiddenError", "Gender", "GetAgentEmbedResponseModel", "GetAgentLinkResponseModel", @@ -567,7 +601,7 @@ "GetAgentsPageResponseModel", "GetAudioNativeProjectSettingsResponseModel", "GetChaptersResponse", - "GetConvaiSettingsResponseModel", + "GetConvAiSettingsResponseModel", "GetConversationResponseModel", "GetConversationResponseModelStatus", "GetConversationsPageResponseModel", @@ -600,8 +634,10 @@ "HttpValidationError", "ImageAvatar", "Invoice", + "KnowledgeBaseDocumentMetadataResponseModel", "KnowledgeBaseLocator", "KnowledgeBaseLocatorType", + "LanguageAddedResponse", "LanguagePreset", "LanguagePresetTranslation", "LanguageResponse", @@ -618,14 +654,11 @@ "ModerationStatusResponseModel", "ModerationStatusResponseModelSafetyStatus", "ModerationStatusResponseModelWarningStatus", + "NotFoundError", "ObjectJsonSchemaProperty", "ObjectJsonSchemaPropertyPropertiesValue", "OrbAvatar", "OutputFormat", - "PaginatedListedReviewTaskInstanceModel", - "PatchConvaiSettingsRequestSecretsItem", - "PatchConvaiSettingsRequestSecretsItem_New", - "PatchConvaiSettingsRequestSecretsItem_Stored", "PhoneNumberAgentInfo", "PodcastBulletinMode", "PodcastBulletinModeData", @@ -646,11 +679,14 @@ "ProjectExtendedResponseModelApplyTextNormalization", "ProjectExtendedResponseModelFiction", "ProjectExtendedResponseModelQualityPreset", + "ProjectExtendedResponseModelSourceType", "ProjectExtendedResponseModelTargetAudience", "ProjectResponse", "ProjectResponseModelAccessLevel", "ProjectResponseModelFiction", + "ProjectResponseModelSourceType", "ProjectResponseModelTargetAudience", + "ProjectSnapshotExtendedResponseModel", "ProjectSnapshotResponse", "ProjectSnapshotUploadResponseModel", "ProjectSnapshotUploadResponseModelStatus", @@ -674,22 +710,29 @@ 
"PronunciationDictionaryVersionResponseModel", "PydanticPronunciationDictionaryVersionLocator", "QueryParamsJsonSchema", - "QuoteRequestModel", - "QuoteResponseModel", + "RagConfig", + "RagIndexResponseModel", + "RagIndexStatus", "ReaderResourceResponseModel", "ReaderResourceResponseModelResourceType", "RecordingResponse", "RemovePronunciationDictionaryRulesResponseModel", "ResourceAccessInfo", "ResourceAccessInfoRole", - "ReviewState", "ReviewStatus", - "ReviewTaskInstanceResponseModel", "SafetyCommonModel", "SafetyEvaluation", "SafetyResponseModel", "SafetyRule", "SecretDependencyType", + "SegmentCreateResponse", + "SegmentDeleteResponse", + "SegmentDubResponse", + "SegmentTranscriptionResponse", + "SegmentTranslationResponse", + "SegmentUpdateResponse", + "SpeakerSegment", + "SpeakerTrack", "SpeechHistoryItemResponse", "SpeechHistoryItemResponseModelSource", "SpeechHistoryItemResponseModelVoiceCategory", @@ -707,27 +750,11 @@ "SubscriptionStatus", "SubscriptionUsageResponseModel", "SystemToolConfig", - "TagKind", - "TagModel", - "TaskInstanceEventKind", - "TaskInstanceEventResponseModel", "TelephonyProvider", + "TextToSoundEffectsConvertRequestOutputFormat", "TextToSpeechAsStreamRequest", "TextToVoiceCreatePreviewsRequestOutputFormat", - "ToolRequestModel", - "ToolRequestModelToolConfig", - "ToolRequestModelToolConfig_Client", - "ToolRequestModelToolConfig_System", - "ToolRequestModelToolConfig_Webhook", - "ToolResponseModel", - "ToolResponseModelDependentAgentsItem", - "ToolResponseModelDependentAgentsItem_Available", - "ToolResponseModelDependentAgentsItem_Unknown", - "ToolResponseModelToolConfig", - "ToolResponseModelToolConfig_Client", - "ToolResponseModelToolConfig_System", - "ToolResponseModelToolConfig_Webhook", - "ToolsResponseModel", + "TooEarlyError", "TtsConversationalConfig", "TtsConversationalConfigOverride", "TtsConversationalConfigOverrideConfig", @@ -737,6 +764,7 @@ "TurnConfig", "TurnMode", "UnprocessableEntityError", + "UpdateWorkspaceMemberResponseModel", "UrlAvatar", "UsageCharactersResponseModel", "User", @@ -760,6 +788,7 @@ "VoiceSharingResponseModelCategory", "VoiceSharingState", "VoiceVerificationResponse", + "VoicesGetSharedRequestCategory", "WebhookToolApiSchemaConfig", "WebhookToolApiSchemaConfigMethod", "WebhookToolApiSchemaConfigRequestHeadersValue", diff --git a/src/elevenlabs/audio_native/client.py b/src/elevenlabs/audio_native/client.py index 807f683e..1e8eeb0c 100644 --- a/src/elevenlabs/audio_native/client.py +++ b/src/elevenlabs/audio_native/client.py @@ -41,7 +41,7 @@ def create( request_options: typing.Optional[RequestOptions] = None, ) -> AudioNativeCreateProjectResponseModel: """ - Creates AudioNative enabled project, optionally starts conversion and returns project id and embeddable html snippet. + Creates Audio Native enabled project, optionally starts conversion and returns project ID and embeddable HTML snippet. Parameters ---------- @@ -310,7 +310,7 @@ async def create( request_options: typing.Optional[RequestOptions] = None, ) -> AudioNativeCreateProjectResponseModel: """ - Creates AudioNative enabled project, optionally starts conversion and returns project id and embeddable html snippet. + Creates Audio Native enabled project, optionally starts conversion and returns project ID and embeddable HTML snippet. 
Parameters ---------- diff --git a/src/elevenlabs/base_client.py b/src/elevenlabs/base_client.py index 7b7a469b..14263765 100644 --- a/src/elevenlabs/base_client.py +++ b/src/elevenlabs/base_client.py @@ -25,18 +25,6 @@ from .workspace.client import WorkspaceClient from .speech_to_text.client import SpeechToTextClient from .conversational_ai.client import ConversationalAiClient -from .core.request_options import RequestOptions -from .core.jsonable_encoder import jsonable_encoder -from .core.unchecked_base_model import construct_type -from .errors.unprocessable_entity_error import UnprocessableEntityError -from .types.http_validation_error import HttpValidationError -from json.decoder import JSONDecodeError -from .core.api_error import ApiError -from .types.tag_model import TagModel -from .core.serialization import convert_and_respect_annotation_metadata -from .types.paginated_listed_review_task_instance_model import PaginatedListedReviewTaskInstanceModel -from .types.quote_request_model import QuoteRequestModel -from .types.quote_response_model import QuoteResponseModel from .core.client_wrapper import AsyncClientWrapper from .history.client import AsyncHistoryClient from .text_to_sound_effects.client import AsyncTextToSoundEffectsClient @@ -59,9 +47,6 @@ from .speech_to_text.client import AsyncSpeechToTextClient from .conversational_ai.client import AsyncConversationalAiClient -# this is used as the default value for optional parameters -OMIT = typing.cast(typing.Any, ...) - class BaseElevenLabs: """ @@ -142,354 +127,6 @@ def __init__( self.speech_to_text = SpeechToTextClient(client_wrapper=self._client_wrapper) self.conversational_ai = ConversationalAiClient(client_wrapper=self._client_wrapper) - def claim_a_task_v_1_speech_to_text_reviews_tasks_task_id_claim_post( - self, task_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> typing.Optional[typing.Any]: - """ - Parameters - ---------- - task_id : str - The ID task to claim. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - typing.Optional[typing.Any] - Successful Response - - Examples - -------- - from elevenlabs import ElevenLabs - - client = ElevenLabs( - api_key="YOUR_API_KEY", - ) - client.claim_a_task_v_1_speech_to_text_reviews_tasks_task_id_claim_post( - task_id="task_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - f"v1/speech-to-text/reviews/tasks/{jsonable_encoder(task_id)}/claim", - method="POST", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - typing.Optional[typing.Any], - construct_type( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def submit_a_completed_task_v_1_speech_to_text_reviews_producers_user_id_tasks_task_id_submit_put( - self, user_id: str, task_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> typing.Optional[typing.Any]: - """ - Parameters - ---------- - user_id : str - - task_id : str - The ID task review to claim. 
- - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - typing.Optional[typing.Any] - Successful Response - - Examples - -------- - from elevenlabs import ElevenLabs - - client = ElevenLabs( - api_key="YOUR_API_KEY", - ) - client.submit_a_completed_task_v_1_speech_to_text_reviews_producers_user_id_tasks_task_id_submit_put( - user_id="user_id", - task_id="task_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - f"v1/speech-to-text/reviews/producers/{jsonable_encoder(user_id)}/tasks/{jsonable_encoder(task_id)}/submit", - method="PUT", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - typing.Optional[typing.Any], - construct_type( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def list_unclaimed_reviews_v_1_speech_to_text_reviews_tasks_post( - self, - *, - tags: typing.Sequence[typing.Sequence[TagModel]], - page_size: typing.Optional[int] = None, - cursor: typing.Optional[str] = None, - unclaimed_only: typing.Optional[bool] = OMIT, - include_instances: typing.Optional[bool] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[typing.Any]: - """ - Parameters - ---------- - tags : typing.Sequence[typing.Sequence[TagModel]] - - page_size : typing.Optional[int] - The number of tasks to return per page. - - cursor : typing.Optional[str] - Cursor for pagination, using the cursor from the previous page. - - unclaimed_only : typing.Optional[bool] - - include_instances : typing.Optional[bool] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - typing.Optional[typing.Any] - Successful Response - - Examples - -------- - from elevenlabs import ElevenLabs, TagModel - - client = ElevenLabs( - api_key="YOUR_API_KEY", - ) - client.list_unclaimed_reviews_v_1_speech_to_text_reviews_tasks_post( - tags=[ - [ - TagModel( - kind="lang", - value="value", - ) - ] - ], - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v1/speech-to-text/reviews/tasks", - method="POST", - params={ - "page_size": page_size, - "cursor": cursor, - }, - json={ - "tags": convert_and_respect_annotation_metadata( - object_=tags, annotation=typing.Sequence[typing.Sequence[TagModel]], direction="write" - ), - "unclaimed_only": unclaimed_only, - "include_instances": include_instances, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - typing.Optional[typing.Any], - construct_type( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def list_tasks_instances_for_a_user_v_1_speech_to_text_reviews_producers_user_id_tasks_get( - self, - user_id: str, - *, - page_size: typing.Optional[int] = None, - cursor: typing.Optional[str] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> PaginatedListedReviewTaskInstanceModel: - """ - Parameters - ---------- - user_id : str - - page_size : typing.Optional[int] - The number of tasks to return per page. - - cursor : typing.Optional[str] - Cursor for pagination, using the cursor from the previous page. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - PaginatedListedReviewTaskInstanceModel - Successful Response - - Examples - -------- - from elevenlabs import ElevenLabs - - client = ElevenLabs( - api_key="YOUR_API_KEY", - ) - client.list_tasks_instances_for_a_user_v_1_speech_to_text_reviews_producers_user_id_tasks_get( - user_id="user_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - f"v1/speech-to-text/reviews/producers/{jsonable_encoder(user_id)}/tasks", - method="GET", - params={ - "page_size": page_size, - "cursor": cursor, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - PaginatedListedReviewTaskInstanceModel, - construct_type( - type_=PaginatedListedReviewTaskInstanceModel, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def compute_a_quote_for_a_asr_transcription_review_task_v_1_speech_to_text_reviews_get_quote_post( - self, *, request: QuoteRequestModel, request_options: typing.Optional[RequestOptions] = None - ) -> QuoteResponseModel: - """ - Parameters - ---------- - request : QuoteRequestModel - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - QuoteResponseModel - Successful Response - - Examples - -------- - from elevenlabs import ElevenLabs, QuoteRequestModel - - client = ElevenLabs( - api_key="YOUR_API_KEY", - ) - client.compute_a_quote_for_a_asr_transcription_review_task_v_1_speech_to_text_reviews_get_quote_post( - request=QuoteRequestModel( - content_hash="content_hash", - duration_s=1.1, - speaker_count=1, - language="language", - ), - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v1/speech-to-text/reviews/get-quote", - method="POST", - json=convert_and_respect_annotation_metadata( - object_=request, annotation=QuoteRequestModel, direction="write" - ), - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - QuoteResponseModel, - construct_type( - type_=QuoteResponseModel, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - class AsyncBaseElevenLabs: """ @@ -570,394 +207,6 @@ def __init__( self.speech_to_text = AsyncSpeechToTextClient(client_wrapper=self._client_wrapper) self.conversational_ai = AsyncConversationalAiClient(client_wrapper=self._client_wrapper) - async def claim_a_task_v_1_speech_to_text_reviews_tasks_task_id_claim_post( - self, task_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> typing.Optional[typing.Any]: - """ - Parameters - ---------- - task_id : str - The ID task to claim. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - typing.Optional[typing.Any] - Successful Response - - Examples - -------- - import asyncio - - from elevenlabs import AsyncElevenLabs - - client = AsyncElevenLabs( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.claim_a_task_v_1_speech_to_text_reviews_tasks_task_id_claim_post( - task_id="task_id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - f"v1/speech-to-text/reviews/tasks/{jsonable_encoder(task_id)}/claim", - method="POST", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - typing.Optional[typing.Any], - construct_type( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def submit_a_completed_task_v_1_speech_to_text_reviews_producers_user_id_tasks_task_id_submit_put( - self, user_id: str, task_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> typing.Optional[typing.Any]: - """ - Parameters - ---------- - user_id : str - - task_id : str - The ID task review to claim. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - typing.Optional[typing.Any] - Successful Response - - Examples - -------- - import asyncio - - from elevenlabs import AsyncElevenLabs - - client = AsyncElevenLabs( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.submit_a_completed_task_v_1_speech_to_text_reviews_producers_user_id_tasks_task_id_submit_put( - user_id="user_id", - task_id="task_id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - f"v1/speech-to-text/reviews/producers/{jsonable_encoder(user_id)}/tasks/{jsonable_encoder(task_id)}/submit", - method="PUT", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - typing.Optional[typing.Any], - construct_type( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def list_unclaimed_reviews_v_1_speech_to_text_reviews_tasks_post( - self, - *, - tags: typing.Sequence[typing.Sequence[TagModel]], - page_size: typing.Optional[int] = None, - cursor: typing.Optional[str] = None, - unclaimed_only: typing.Optional[bool] = OMIT, - include_instances: typing.Optional[bool] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[typing.Any]: - """ - Parameters - ---------- - tags : typing.Sequence[typing.Sequence[TagModel]] - - page_size : typing.Optional[int] - The number of tasks to return per page. 
- - cursor : typing.Optional[str] - Cursor for pagination, using the cursor from the previous page. - - unclaimed_only : typing.Optional[bool] - - include_instances : typing.Optional[bool] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - typing.Optional[typing.Any] - Successful Response - - Examples - -------- - import asyncio - - from elevenlabs import AsyncElevenLabs, TagModel - - client = AsyncElevenLabs( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.list_unclaimed_reviews_v_1_speech_to_text_reviews_tasks_post( - tags=[ - [ - TagModel( - kind="lang", - value="value", - ) - ] - ], - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v1/speech-to-text/reviews/tasks", - method="POST", - params={ - "page_size": page_size, - "cursor": cursor, - }, - json={ - "tags": convert_and_respect_annotation_metadata( - object_=tags, annotation=typing.Sequence[typing.Sequence[TagModel]], direction="write" - ), - "unclaimed_only": unclaimed_only, - "include_instances": include_instances, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - typing.Optional[typing.Any], - construct_type( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def list_tasks_instances_for_a_user_v_1_speech_to_text_reviews_producers_user_id_tasks_get( - self, - user_id: str, - *, - page_size: typing.Optional[int] = None, - cursor: typing.Optional[str] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> PaginatedListedReviewTaskInstanceModel: - """ - Parameters - ---------- - user_id : str - - page_size : typing.Optional[int] - The number of tasks to return per page. - - cursor : typing.Optional[str] - Cursor for pagination, using the cursor from the previous page. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - PaginatedListedReviewTaskInstanceModel - Successful Response - - Examples - -------- - import asyncio - - from elevenlabs import AsyncElevenLabs - - client = AsyncElevenLabs( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.list_tasks_instances_for_a_user_v_1_speech_to_text_reviews_producers_user_id_tasks_get( - user_id="user_id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - f"v1/speech-to-text/reviews/producers/{jsonable_encoder(user_id)}/tasks", - method="GET", - params={ - "page_size": page_size, - "cursor": cursor, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - PaginatedListedReviewTaskInstanceModel, - construct_type( - type_=PaginatedListedReviewTaskInstanceModel, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def compute_a_quote_for_a_asr_transcription_review_task_v_1_speech_to_text_reviews_get_quote_post( - self, *, request: QuoteRequestModel, request_options: typing.Optional[RequestOptions] = None - ) -> QuoteResponseModel: - """ - Parameters - ---------- - request : QuoteRequestModel - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - QuoteResponseModel - Successful Response - - Examples - -------- - import asyncio - - from elevenlabs import AsyncElevenLabs, QuoteRequestModel - - client = AsyncElevenLabs( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.compute_a_quote_for_a_asr_transcription_review_task_v_1_speech_to_text_reviews_get_quote_post( - request=QuoteRequestModel( - content_hash="content_hash", - duration_s=1.1, - speaker_count=1, - language="language", - ), - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v1/speech-to-text/reviews/get-quote", - method="POST", - json=convert_and_respect_annotation_metadata( - object_=request, annotation=QuoteRequestModel, direction="write" - ), - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - QuoteResponseModel, - construct_type( - type_=QuoteResponseModel, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - def _get_base_url(*, base_url: typing.Optional[str] = None, environment: ElevenLabsEnvironment) -> str: if base_url is not None: diff --git a/src/elevenlabs/conversational_ai/__init__.py b/src/elevenlabs/conversational_ai/__init__.py index 3f8438dc..f3ea2659 100644 --- a/src/elevenlabs/conversational_ai/__init__.py +++ b/src/elevenlabs/conversational_ai/__init__.py @@ -1,19 +1,2 @@ # This file was 
auto-generated by Fern from our API Definition. -from .types import ( - BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem, - BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_New, - BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_Stored, - PatchConvaiSettingsRequestSecretsItem, - PatchConvaiSettingsRequestSecretsItem_New, - PatchConvaiSettingsRequestSecretsItem_Stored, -) - -__all__ = [ - "BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem", - "BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_New", - "BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_Stored", - "PatchConvaiSettingsRequestSecretsItem", - "PatchConvaiSettingsRequestSecretsItem_New", - "PatchConvaiSettingsRequestSecretsItem_Stored", -] diff --git a/src/elevenlabs/conversational_ai/client.py b/src/elevenlabs/conversational_ai/client.py index 9b12641f..d60fd683 100644 --- a/src/elevenlabs/conversational_ai/client.py +++ b/src/elevenlabs/conversational_ai/client.py @@ -9,15 +9,12 @@ from ..types.http_validation_error import HttpValidationError from json.decoder import JSONDecodeError from ..core.api_error import ApiError -from ..types.conversational_config import ConversationalConfig +from ..types.conversational_config_api_model import ConversationalConfigApiModel from ..types.agent_platform_settings_request_model import AgentPlatformSettingsRequestModel from ..types.create_agent_response_model import CreateAgentResponseModel from ..core.serialization import convert_and_respect_annotation_metadata from ..types.get_agent_response_model import GetAgentResponseModel from ..core.jsonable_encoder import jsonable_encoder -from .types.body_patches_an_agent_settings_v_1_convai_agents_agent_id_patch_secrets_item import ( - BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem, -) from ..types.get_agent_embed_response_model import GetAgentEmbedResponseModel from ..types.get_agent_link_response_model import GetAgentLinkResponseModel from .. 
import core @@ -32,13 +29,11 @@ from ..types.get_phone_number_response_model import GetPhoneNumberResponseModel from ..types.get_knowledge_base_list_response_model import GetKnowledgeBaseListResponseModel from ..types.add_knowledge_base_response_model import AddKnowledgeBaseResponseModel +from ..types.embedding_model_enum import EmbeddingModelEnum +from ..types.rag_index_response_model import RagIndexResponseModel from ..types.get_knowledge_base_response_model import GetKnowledgeBaseResponseModel from ..types.get_knowledge_base_dependent_agents_response_model import GetKnowledgeBaseDependentAgentsResponseModel -from ..types.tools_response_model import ToolsResponseModel -from ..types.tool_request_model import ToolRequestModel -from ..types.tool_response_model import ToolResponseModel -from ..types.get_convai_settings_response_model import GetConvaiSettingsResponseModel -from .types.patch_convai_settings_request_secrets_item import PatchConvaiSettingsRequestSecretsItem +from ..types.get_conv_ai_settings_response_model import GetConvAiSettingsResponseModel from ..types.conversation_initiation_client_data_webhook import ConversationInitiationClientDataWebhook from ..types.conv_ai_webhooks import ConvAiWebhooks from ..types.get_workspace_secrets_response_model import GetWorkspaceSecretsResponseModel @@ -118,7 +113,7 @@ def get_signed_url( def create_agent( self, *, - conversation_config: ConversationalConfig, + conversation_config: ConversationalConfigApiModel, use_tool_ids: typing.Optional[bool] = None, platform_settings: typing.Optional[AgentPlatformSettingsRequestModel] = OMIT, name: typing.Optional[str] = OMIT, @@ -129,7 +124,7 @@ def create_agent( Parameters ---------- - conversation_config : ConversationalConfig + conversation_config : ConversationalConfigApiModel Conversation configuration for an agent use_tool_ids : typing.Optional[bool] @@ -151,13 +146,13 @@ def create_agent( Examples -------- - from elevenlabs import ConversationalConfig, ElevenLabs + from elevenlabs import ConversationalConfigApiModel, ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) client.conversational_ai.create_agent( - conversation_config=ConversationalConfig(), + conversation_config=ConversationalConfigApiModel(), ) """ _response = self._client_wrapper.httpx_client.request( @@ -168,7 +163,7 @@ def create_agent( }, json={ "conversation_config": convert_and_respect_annotation_metadata( - object_=conversation_config, annotation=ConversationalConfig, direction="write" + object_=conversation_config, annotation=ConversationalConfigApiModel, direction="write" ), "platform_settings": convert_and_respect_annotation_metadata( object_=platform_settings, annotation=AgentPlatformSettingsRequestModel, direction="write" @@ -330,9 +325,6 @@ def update_agent( use_tool_ids: typing.Optional[bool] = None, conversation_config: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, platform_settings: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - secrets: typing.Optional[ - typing.Sequence[BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem] - ] = OMIT, name: typing.Optional[str] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> GetAgentResponseModel: @@ -353,9 +345,6 @@ def update_agent( platform_settings : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Platform settings for the agent are all settings that aren't related to the conversation orchestration and content. 
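# --- Editorial sketch (not part of the generated diff) ----------------------
# This patch renames ConversationalConfig to ConversationalConfigApiModel in
# create_agent. A minimal creation call against the new name; "support-agent"
# is a made-up example value, and the empty config mirrors the generated
# example in the docstring above.
from elevenlabs import ConversationalConfigApiModel, ElevenLabs

client = ElevenLabs(api_key="YOUR_API_KEY")
agent = client.conversational_ai.create_agent(
    conversation_config=ConversationalConfigApiModel(),  # empty config, as in the generated example
    name="support-agent",
)
# -----------------------------------------------------------------------------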
- secrets : typing.Optional[typing.Sequence[BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem]] - A list of secrets for the agent. Can be used to add new secrets or update and delete the existing ones - name : typing.Optional[str] A name to make the agent easier to find @@ -387,11 +376,6 @@ def update_agent( json={ "conversation_config": conversation_config, "platform_settings": platform_settings, - "secrets": convert_and_respect_annotation_metadata( - object_=secrets, - annotation=typing.Sequence[BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem], - direction="write", - ), "name": name, }, headers={ @@ -1115,7 +1099,7 @@ def create_phone_number( Twilio Account SID token : str - Twilio Token + Twilio Auth Token request_options : typing.Optional[RequestOptions] Request-specific configuration. @@ -1509,6 +1493,7 @@ def get_knowledge_base_list( def add_to_knowledge_base( self, *, + name: typing.Optional[str] = OMIT, url: typing.Optional[str] = OMIT, file: typing.Optional[core.File] = OMIT, request_options: typing.Optional[RequestOptions] = None, @@ -1518,6 +1503,9 @@ def add_to_knowledge_base( Parameters ---------- + name : typing.Optional[str] + A custom, human-readable name for the document. + url : typing.Optional[str] URL to a page of documentation that the agent will have access to in order to interact with users. @@ -1545,6 +1533,7 @@ def add_to_knowledge_base( "v1/convai/knowledge-base", method="POST", data={ + "name": name, "url": url, }, files={ @@ -1577,6 +1566,86 @@ def add_to_knowledge_base( raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) + def rag_index_status( + self, + documentation_id: str, + *, + model: EmbeddingModelEnum, + force_reindex: typing.Optional[bool] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> RagIndexResponseModel: + """ + In case the document is not RAG indexed, it triggers rag indexing task, otherwise it just returns the current status. + + Parameters + ---------- + documentation_id : str + The id of a document from the knowledge base. This is returned on document addition. + + model : EmbeddingModelEnum + + force_reindex : typing.Optional[bool] + In case the document is indexed and for some reason you want to reindex it, set this param as true. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
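# --- Editorial sketch (not part of the generated diff) ----------------------
# The patch adds an optional `name` to add_to_knowledge_base; the document is
# sourced from either `url` or `file`. The URL below is a placeholder.
from elevenlabs import ElevenLabs

client = ElevenLabs(api_key="YOUR_API_KEY")
doc = client.conversational_ai.add_to_knowledge_base(
    name="Product FAQ",                    # new: custom, human-readable name
    url="https://example.com/docs/faq",    # page the agent may consult
)
# The response carries the new document's id (see AddKnowledgeBaseResponseModel),
# which rag_index_status below expects as `documentation_id`.
# -----------------------------------------------------------------------------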
+ + Returns + ------- + RagIndexResponseModel + Successful Response + + Examples + -------- + from elevenlabs import ElevenLabs + + client = ElevenLabs( + api_key="YOUR_API_KEY", + ) + client.conversational_ai.rag_index_status( + documentation_id="21m00Tcm4TlvDq8ikWAM", + model="e5_mistral_7b_instruct", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"v1/convai/knowledge-base/{jsonable_encoder(documentation_id)}/rag-index", + method="POST", + params={ + "force_reindex": force_reindex, + }, + json={ + "model": model, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + RagIndexResponseModel, + construct_type( + type_=RagIndexResponseModel, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + def get_knowledge_base_document_by_id( self, documentation_id: str, *, request_options: typing.Optional[RequestOptions] = None ) -> GetKnowledgeBaseResponseModel: @@ -1769,9 +1838,11 @@ def get_dependent_agents( raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def get_tools(self, *, request_options: typing.Optional[RequestOptions] = None) -> ToolsResponseModel: + def get_settings( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> GetConvAiSettingsResponseModel: """ - Get all available tools available in the workspace. 
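# --- Editorial sketch (not part of the generated diff) ----------------------
# rag_index_status is new in this patch: it triggers RAG indexing for a
# knowledge-base document if it is not indexed yet, otherwise it reports the
# current status. The id and model mirror the generated example values.
from elevenlabs import ElevenLabs

client = ElevenLabs(api_key="YOUR_API_KEY")
status = client.conversational_ai.rag_index_status(
    documentation_id="21m00Tcm4TlvDq8ikWAM",  # returned on document addition
    model="e5_mistral_7b_instruct",           # an EmbeddingModelEnum value
)
# Re-run with force_reindex=True to rebuild an already-indexed document:
client.conversational_ai.rag_index_status(
    documentation_id="21m00Tcm4TlvDq8ikWAM",
    model="e5_mistral_7b_instruct",
    force_reindex=True,
)
# -----------------------------------------------------------------------------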
+ Retrieve Convai settings for the workspace Parameters ---------- @@ -1780,7 +1851,7 @@ def get_tools(self, *, request_options: typing.Optional[RequestOptions] = None) Returns ------- - ToolsResponseModel + GetConvAiSettingsResponseModel Successful Response Examples @@ -1790,19 +1861,19 @@ def get_tools(self, *, request_options: typing.Optional[RequestOptions] = None) client = ElevenLabs( api_key="YOUR_API_KEY", ) - client.conversational_ai.get_tools() + client.conversational_ai.get_settings() """ _response = self._client_wrapper.httpx_client.request( - "v1/convai/tools", + "v1/convai/settings", method="GET", request_options=request_options, ) try: if 200 <= _response.status_code < 300: return typing.cast( - ToolsResponseModel, + GetConvAiSettingsResponseModel, construct_type( - type_=ToolsResponseModel, # type: ignore + type_=GetConvAiSettingsResponseModel, # type: ignore object_=_response.json(), ), ) @@ -1821,63 +1892,64 @@ def get_tools(self, *, request_options: typing.Optional[RequestOptions] = None) raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def add_tool( - self, *, request: ToolRequestModel, request_options: typing.Optional[RequestOptions] = None - ) -> ToolResponseModel: + def update_settings( + self, + *, + conversation_initiation_client_data_webhook: typing.Optional[ConversationInitiationClientDataWebhook] = OMIT, + webhooks: typing.Optional[ConvAiWebhooks] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> GetConvAiSettingsResponseModel: """ - Add a new tool to the available tools in the workspace. + Update Convai settings for the workspace Parameters ---------- - request : ToolRequestModel + conversation_initiation_client_data_webhook : typing.Optional[ConversationInitiationClientDataWebhook] + + webhooks : typing.Optional[ConvAiWebhooks] request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - ToolResponseModel + GetConvAiSettingsResponseModel Successful Response Examples -------- - from elevenlabs import ( - ElevenLabs, - ToolRequestModel, - ToolRequestModelToolConfig_Webhook, - WebhookToolApiSchemaConfig, - ) + from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) - client.conversational_ai.add_tool( - request=ToolRequestModel( - tool_config=ToolRequestModelToolConfig_Webhook( - name="name", - description="description", - api_schema=WebhookToolApiSchemaConfig( - url="url", - ), - ), - ), - ) + client.conversational_ai.update_settings() """ _response = self._client_wrapper.httpx_client.request( - "v1/convai/tools", - method="POST", - json=convert_and_respect_annotation_metadata( - object_=request, annotation=ToolRequestModel, direction="write" - ), + "v1/convai/settings", + method="PATCH", + json={ + "conversation_initiation_client_data_webhook": convert_and_respect_annotation_metadata( + object_=conversation_initiation_client_data_webhook, + annotation=ConversationInitiationClientDataWebhook, + direction="write", + ), + "webhooks": convert_and_respect_annotation_metadata( + object_=webhooks, annotation=ConvAiWebhooks, direction="write" + ), + }, + headers={ + "content-type": "application/json", + }, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - ToolResponseModel, + GetConvAiSettingsResponseModel, construct_type( - type_=ToolResponseModel, # type: ignore + type_=GetConvAiSettingsResponseModel, # type: ignore object_=_response.json(), ), ) @@ -1896,21 +1968,20 @@ def add_tool( raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def get_tool(self, tool_id: str, *, request_options: typing.Optional[RequestOptions] = None) -> ToolResponseModel: + def get_secrets( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> GetWorkspaceSecretsResponseModel: """ - Get tool that is available in the workspace. + Get all workspace secrets for the user Parameters ---------- - tool_id : str - ID of the requested tool. - request_options : typing.Optional[RequestOptions] Request-specific configuration. 
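# --- Editorial sketch (not part of the generated diff) ----------------------
# This patch replaces the workspace tool endpoints with ConvAI settings
# endpoints. Both update_settings parameters are optional webhook configs;
# calling it with no arguments, as in the generated example, sends an empty
# PATCH.
from elevenlabs import ElevenLabs

client = ElevenLabs(api_key="YOUR_API_KEY")
settings = client.conversational_ai.get_settings()    # GET   v1/convai/settings
updated = client.conversational_ai.update_settings()  # PATCH v1/convai/settings
# Pass conversation_initiation_client_data_webhook= and/or webhooks= to change
# the workspace webhook configuration.
# -----------------------------------------------------------------------------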
Returns ------- - ToolResponseModel + GetWorkspaceSecretsResponseModel Successful Response Examples @@ -1920,21 +1991,19 @@ def get_tool(self, tool_id: str, *, request_options: typing.Optional[RequestOpti client = ElevenLabs( api_key="YOUR_API_KEY", ) - client.conversational_ai.get_tool( - tool_id="tool_id", - ) + client.conversational_ai.get_secrets() """ _response = self._client_wrapper.httpx_client.request( - f"v1/convai/tools/{jsonable_encoder(tool_id)}", + "v1/convai/secrets", method="GET", request_options=request_options, ) try: if 200 <= _response.status_code < 300: return typing.cast( - ToolResponseModel, + GetWorkspaceSecretsResponseModel, construct_type( - type_=ToolResponseModel, # type: ignore + type_=GetWorkspaceSecretsResponseModel, # type: ignore object_=_response.json(), ), ) @@ -1953,23 +2022,24 @@ def get_tool(self, tool_id: str, *, request_options: typing.Optional[RequestOpti raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def remove_tool( - self, tool_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> typing.Optional[typing.Any]: + def create_secret( + self, *, name: str, value: str, request_options: typing.Optional[RequestOptions] = None + ) -> PostWorkspaceSecretResponseModel: """ - Delete tool from the workspace. + Create a new secret for the workspace Parameters ---------- - tool_id : str - ID of the requested tool. + name : str + + value : str request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - typing.Optional[typing.Any] + PostWorkspaceSecretResponseModel Successful Response Examples @@ -1979,21 +2049,31 @@ def remove_tool( client = ElevenLabs( api_key="YOUR_API_KEY", ) - client.conversational_ai.remove_tool( - tool_id="tool_id", + client.conversational_ai.create_secret( + name="name", + value="value", ) """ _response = self._client_wrapper.httpx_client.request( - f"v1/convai/tools/{jsonable_encoder(tool_id)}", - method="DELETE", + "v1/convai/secrets", + method="POST", + json={ + "name": name, + "value": value, + "type": "new", + }, + headers={ + "content-type": "application/json", + }, request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - typing.Optional[typing.Any], + PostWorkspaceSecretResponseModel, construct_type( - type_=typing.Optional[typing.Any], # type: ignore + type_=PostWorkspaceSecretResponseModel, # type: ignore object_=_response.json(), ), ) @@ -2012,70 +2092,40 @@ def remove_tool( raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def update_tool( - self, tool_id: str, *, request: ToolRequestModel, request_options: typing.Optional[RequestOptions] = None - ) -> ToolResponseModel: + def delete_secret(self, secret_id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None: """ - Update tool that is available in the workspace. + Delete a workspace secret if it's not in use Parameters ---------- - tool_id : str - ID of the requested tool. - - request : ToolRequestModel + secret_id : str request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - ToolResponseModel - Successful Response + None Examples -------- - from elevenlabs import ( - ElevenLabs, - ToolRequestModel, - ToolRequestModelToolConfig_Webhook, - WebhookToolApiSchemaConfig, - ) + from elevenlabs import ElevenLabs client = ElevenLabs( api_key="YOUR_API_KEY", ) - client.conversational_ai.update_tool( - tool_id="tool_id", - request=ToolRequestModel( - tool_config=ToolRequestModelToolConfig_Webhook( - name="name", - description="description", - api_schema=WebhookToolApiSchemaConfig( - url="url", - ), - ), - ), + client.conversational_ai.delete_secret( + secret_id="secret_id", ) """ _response = self._client_wrapper.httpx_client.request( - f"v1/convai/tools/{jsonable_encoder(tool_id)}", - method="PATCH", - json=convert_and_respect_annotation_metadata( - object_=request, annotation=ToolRequestModel, direction="write" - ), + f"v1/convai/secrets/{jsonable_encoder(secret_id)}", + method="DELETE", request_options=request_options, - omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return typing.cast( - ToolResponseModel, - construct_type( - type_=ToolResponseModel, # type: ignore - object_=_response.json(), - ), - ) + return if _response.status_code == 422: raise UnprocessableEntityError( typing.cast( @@ -2091,42 +2141,63 @@ def update_tool( raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def get_settings( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> GetConvaiSettingsResponseModel: + +class AsyncConversationalAiClient: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._client_wrapper = client_wrapper + + async def get_signed_url( + self, *, agent_id: str, request_options: typing.Optional[RequestOptions] = None + ) -> ConversationSignedUrlResponseModel: """ - Retrieve Convai settings for the workspace + Get a signed url to start a conversation with an agent with an agent that requires authorization Parameters ---------- + agent_id : str + The id of the agent you're taking the action on. + request_options : typing.Optional[RequestOptions] Request-specific configuration. 
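# --- Editorial sketch (not part of the generated diff) ----------------------
# Workspace secret lifecycle using the endpoints this patch adds. The values
# are placeholders; the id for delete_secret comes from the create/list
# responses in practice.
from elevenlabs import ElevenLabs

client = ElevenLabs(api_key="YOUR_API_KEY")
client.conversational_ai.create_secret(name="twilio-auth-token", value="s3cr3t")
secrets = client.conversational_ai.get_secrets()  # all workspace secrets
client.conversational_ai.delete_secret(secret_id="secret_id")  # returns None
# Per the docstring, deletion succeeds only if the secret is not in use.
# -----------------------------------------------------------------------------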
Returns ------- - GetConvaiSettingsResponseModel + ConversationSignedUrlResponseModel Successful Response Examples -------- - from elevenlabs import ElevenLabs + import asyncio - client = ElevenLabs( + from elevenlabs import AsyncElevenLabs + + client = AsyncElevenLabs( api_key="YOUR_API_KEY", ) - client.conversational_ai.get_settings() + + + async def main() -> None: + await client.conversational_ai.get_signed_url( + agent_id="21m00Tcm4TlvDq8ikWAM", + ) + + + asyncio.run(main()) """ - _response = self._client_wrapper.httpx_client.request( - "v1/convai/settings", + _response = await self._client_wrapper.httpx_client.request( + "v1/convai/conversation/get_signed_url", method="GET", + params={ + "agent_id": agent_id, + }, request_options=request_options, ) try: if 200 <= _response.status_code < 300: return typing.cast( - GetConvaiSettingsResponseModel, + ConversationSignedUrlResponseModel, construct_type( - type_=GetConvaiSettingsResponseModel, # type: ignore + type_=ConversationSignedUrlResponseModel, # type: ignore object_=_response.json(), ), ) @@ -2145,69 +2216,73 @@ def get_settings( raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def update_settings( + async def create_agent( self, *, - secrets: typing.Sequence[PatchConvaiSettingsRequestSecretsItem], - conversation_initiation_client_data_webhook: typing.Optional[ConversationInitiationClientDataWebhook] = OMIT, - webhooks: typing.Optional[ConvAiWebhooks] = OMIT, + conversation_config: ConversationalConfigApiModel, + use_tool_ids: typing.Optional[bool] = None, + platform_settings: typing.Optional[AgentPlatformSettingsRequestModel] = OMIT, + name: typing.Optional[str] = OMIT, request_options: typing.Optional[RequestOptions] = None, - ) -> GetConvaiSettingsResponseModel: + ) -> CreateAgentResponseModel: """ - Update Convai settings for the workspace + Create an agent from a config object Parameters ---------- - secrets : typing.Sequence[PatchConvaiSettingsRequestSecretsItem] + conversation_config : ConversationalConfigApiModel + Conversation configuration for an agent - conversation_initiation_client_data_webhook : typing.Optional[ConversationInitiationClientDataWebhook] + use_tool_ids : typing.Optional[bool] + Use tool ids instead of tools specs from request payload. - webhooks : typing.Optional[ConvAiWebhooks] + platform_settings : typing.Optional[AgentPlatformSettingsRequestModel] + Platform settings for the agent are all settings that aren't related to the conversation orchestration and content. + + name : typing.Optional[str] + A name to make the agent easier to find request_options : typing.Optional[RequestOptions] Request-specific configuration. 
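# --- Editorial sketch (not part of the generated diff) ----------------------
# Every generated method raises UnprocessableEntityError on HTTP 422 and
# ApiError otherwise; in Fern-generated clients UnprocessableEntityError
# subclasses ApiError, so one except clause covers both. The public import
# path below is an assumption based on the generated core module.
import asyncio

from elevenlabs import AsyncElevenLabs
from elevenlabs.core.api_error import ApiError  # assumed public path

client = AsyncElevenLabs(api_key="YOUR_API_KEY")


async def main() -> None:
    try:
        await client.conversational_ai.get_signed_url(agent_id="21m00Tcm4TlvDq8ikWAM")
    except ApiError as err:
        print(err.status_code, err.body)  # attributes per the raise sites above


asyncio.run(main())
# -----------------------------------------------------------------------------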
Returns ------- - GetConvaiSettingsResponseModel + CreateAgentResponseModel Successful Response Examples -------- - from elevenlabs import ElevenLabs - from elevenlabs.conversational_ai import ( - PatchConvaiSettingsRequestSecretsItem_New, - ) + import asyncio - client = ElevenLabs( + from elevenlabs import AsyncElevenLabs, ConversationalConfigApiModel + + client = AsyncElevenLabs( api_key="YOUR_API_KEY", ) - client.conversational_ai.update_settings( - secrets=[ - PatchConvaiSettingsRequestSecretsItem_New( - name="name", - value="value", - ) - ], - ) + + + async def main() -> None: + await client.conversational_ai.create_agent( + conversation_config=ConversationalConfigApiModel(), + ) + + + asyncio.run(main()) """ - _response = self._client_wrapper.httpx_client.request( - "v1/convai/settings", - method="PATCH", + _response = await self._client_wrapper.httpx_client.request( + "v1/convai/agents/create", + method="POST", + params={ + "use_tool_ids": use_tool_ids, + }, json={ - "conversation_initiation_client_data_webhook": convert_and_respect_annotation_metadata( - object_=conversation_initiation_client_data_webhook, - annotation=ConversationInitiationClientDataWebhook, - direction="write", + "conversation_config": convert_and_respect_annotation_metadata( + object_=conversation_config, annotation=ConversationalConfigApiModel, direction="write" ), - "webhooks": convert_and_respect_annotation_metadata( - object_=webhooks, annotation=ConvAiWebhooks, direction="write" - ), - "secrets": convert_and_respect_annotation_metadata( - object_=secrets, - annotation=typing.Sequence[PatchConvaiSettingsRequestSecretsItem], - direction="write", + "platform_settings": convert_and_respect_annotation_metadata( + object_=platform_settings, annotation=AgentPlatformSettingsRequestModel, direction="write" ), + "name": name, }, headers={ "content-type": "application/json", @@ -2218,9 +2293,9 @@ def update_settings( try: if 200 <= _response.status_code < 300: return typing.cast( - GetConvaiSettingsResponseModel, + CreateAgentResponseModel, construct_type( - type_=GetConvaiSettingsResponseModel, # type: ignore + type_=CreateAgentResponseModel, # type: ignore object_=_response.json(), ), ) @@ -2239,112 +2314,55 @@ def update_settings( raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def get_secrets( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> GetWorkspaceSecretsResponseModel: + async def get_agent( + self, agent_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> GetAgentResponseModel: """ - Get all secrets for the workspace + Retrieve config for an agent Parameters ---------- + agent_id : str + The id of an agent. This is returned on agent creation. + request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - GetWorkspaceSecretsResponseModel + GetAgentResponseModel Successful Response Examples -------- - from elevenlabs import ElevenLabs + import asyncio - client = ElevenLabs( + from elevenlabs import AsyncElevenLabs + + client = AsyncElevenLabs( api_key="YOUR_API_KEY", ) - client.conversational_ai.get_secrets() - """ - _response = self._client_wrapper.httpx_client.request( - "v1/convai/secrets", - method="GET", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - GetWorkspaceSecretsResponseModel, - construct_type( - type_=GetWorkspaceSecretsResponseModel, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def create_secret( - self, *, name: str, value: str, request_options: typing.Optional[RequestOptions] = None - ) -> PostWorkspaceSecretResponseModel: - """ - Create a new secret for the workspace - - Parameters - ---------- - name : str - - value : str - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - Returns - ------- - PostWorkspaceSecretResponseModel - Successful Response + async def main() -> None: + await client.conversational_ai.get_agent( + agent_id="21m00Tcm4TlvDq8ikWAM", + ) - Examples - -------- - from elevenlabs import ElevenLabs - client = ElevenLabs( - api_key="YOUR_API_KEY", - ) - client.conversational_ai.create_secret( - name="name", - value="value", - ) + asyncio.run(main()) """ - _response = self._client_wrapper.httpx_client.request( - "v1/convai/secrets", - method="POST", - json={ - "name": name, - "value": value, - "type": "new", - }, - headers={ - "content-type": "application/json", - }, + _response = await self._client_wrapper.httpx_client.request( + f"v1/convai/agents/{jsonable_encoder(agent_id)}", + method="GET", request_options=request_options, - omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - PostWorkspaceSecretResponseModel, + GetAgentResponseModel, construct_type( - type_=PostWorkspaceSecretResponseModel, # type: ignore + type_=GetAgentResponseModel, # type: ignore object_=_response.json(), ), ) @@ -2363,28 +2381,23 @@ def create_secret( raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - -class AsyncConversationalAiClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def get_signed_url( - self, *, agent_id: str, request_options: typing.Optional[RequestOptions] = None - ) -> ConversationSignedUrlResponseModel: + async def delete_agent( + self, agent_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.Optional[typing.Any]: """ - Get a signed url to start a conversation with an agent with an agent that requires authorization + Delete an agent Parameters ---------- agent_id : str - The id of the agent you're taking the action on. + The id of an agent. This is returned on agent creation. request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - ConversationSignedUrlResponseModel + typing.Optional[typing.Any] Successful Response Examples @@ -2399,7 +2412,7 @@ async def get_signed_url( async def main() -> None: - await client.conversational_ai.get_signed_url( + await client.conversational_ai.delete_agent( agent_id="21m00Tcm4TlvDq8ikWAM", ) @@ -2407,19 +2420,16 @@ async def main() -> None: asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v1/convai/conversation/get_signed_url", - method="GET", - params={ - "agent_id": agent_id, - }, + f"v1/convai/agents/{jsonable_encoder(agent_id)}", + method="DELETE", request_options=request_options, ) try: if 200 <= _response.status_code < 300: return typing.cast( - ConversationSignedUrlResponseModel, + typing.Optional[typing.Any], construct_type( - type_=ConversationSignedUrlResponseModel, # type: ignore + type_=typing.Optional[typing.Any], # type: ignore object_=_response.json(), ), ) @@ -2438,27 +2448,31 @@ async def main() -> None: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def create_agent( + async def update_agent( self, + agent_id: str, *, - conversation_config: ConversationalConfig, use_tool_ids: typing.Optional[bool] = None, - platform_settings: typing.Optional[AgentPlatformSettingsRequestModel] = OMIT, + conversation_config: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + platform_settings: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, name: typing.Optional[str] = OMIT, request_options: typing.Optional[RequestOptions] = None, - ) -> CreateAgentResponseModel: + ) -> GetAgentResponseModel: """ - Create an agent from a config object + Patches an Agent settings Parameters ---------- - conversation_config : ConversationalConfig - Conversation configuration for an agent + agent_id : str + The id of an agent. This is returned on agent creation. use_tool_ids : typing.Optional[bool] Use tool ids instead of tools specs from request payload. - platform_settings : typing.Optional[AgentPlatformSettingsRequestModel] + conversation_config : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Conversation configuration for an agent + + platform_settings : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Platform settings for the agent are all settings that aren't related to the conversation orchestration and content. 
name : typing.Optional[str] @@ -2469,14 +2483,14 @@ async def create_agent( Returns ------- - CreateAgentResponseModel + GetAgentResponseModel Successful Response Examples -------- import asyncio - from elevenlabs import AsyncElevenLabs, ConversationalConfig + from elevenlabs import AsyncElevenLabs client = AsyncElevenLabs( api_key="YOUR_API_KEY", @@ -2484,26 +2498,22 @@ async def create_agent( async def main() -> None: - await client.conversational_ai.create_agent( - conversation_config=ConversationalConfig(), + await client.conversational_ai.update_agent( + agent_id="21m00Tcm4TlvDq8ikWAM", ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v1/convai/agents/create", - method="POST", + f"v1/convai/agents/{jsonable_encoder(agent_id)}", + method="PATCH", params={ "use_tool_ids": use_tool_ids, }, json={ - "conversation_config": convert_and_respect_annotation_metadata( - object_=conversation_config, annotation=ConversationalConfig, direction="write" - ), - "platform_settings": convert_and_respect_annotation_metadata( - object_=platform_settings, annotation=AgentPlatformSettingsRequestModel, direction="write" - ), + "conversation_config": conversation_config, + "platform_settings": platform_settings, "name": name, }, headers={ @@ -2515,9 +2525,9 @@ async def main() -> None: try: if 200 <= _response.status_code < 300: return typing.cast( - CreateAgentResponseModel, + GetAgentResponseModel, construct_type( - type_=CreateAgentResponseModel, # type: ignore + type_=GetAgentResponseModel, # type: ignore object_=_response.json(), ), ) @@ -2536,23 +2546,30 @@ async def main() -> None: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def get_agent( - self, agent_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> GetAgentResponseModel: + async def get_agent_widget( + self, + agent_id: str, + *, + conversation_signature: typing.Optional[str] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> GetAgentEmbedResponseModel: """ - Retrieve config for an agent + Retrieve the widget configuration for an agent Parameters ---------- agent_id : str The id of an agent. This is returned on agent creation. + conversation_signature : typing.Optional[str] + An expiring token that enables a conversation to start. These can be generated for an agent using the /v1/convai/conversation/get_signed_url endpoint + request_options : typing.Optional[RequestOptions] Request-specific configuration. 
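# --- Editorial sketch (not part of the generated diff) ----------------------
# After this patch, update_agent no longer takes a `secrets` argument, and
# conversation_config / platform_settings are free-form dicts; fields left at
# the OMIT sentinel are not serialized, so a partial PATCH only sends what you
# set. The name value is a made-up example.
import asyncio

from elevenlabs import AsyncElevenLabs

client = AsyncElevenLabs(api_key="YOUR_API_KEY")


async def main() -> None:
    await client.conversational_ai.update_agent(
        agent_id="21m00Tcm4TlvDq8ikWAM",
        name="renamed-agent",  # omitted fields are left unchanged
    )


asyncio.run(main())
# -----------------------------------------------------------------------------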
Returns ------- - GetAgentResponseModel + GetAgentEmbedResponseModel Successful Response Examples @@ -2567,7 +2584,7 @@ async def get_agent( async def main() -> None: - await client.conversational_ai.get_agent( + await client.conversational_ai.get_agent_widget( agent_id="21m00Tcm4TlvDq8ikWAM", ) @@ -2575,16 +2592,19 @@ async def main() -> None: asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - f"v1/convai/agents/{jsonable_encoder(agent_id)}", + f"v1/convai/agents/{jsonable_encoder(agent_id)}/widget", method="GET", + params={ + "conversation_signature": conversation_signature, + }, request_options=request_options, ) try: if 200 <= _response.status_code < 300: return typing.cast( - GetAgentResponseModel, + GetAgentEmbedResponseModel, construct_type( - type_=GetAgentResponseModel, # type: ignore + type_=GetAgentEmbedResponseModel, # type: ignore object_=_response.json(), ), ) @@ -2603,11 +2623,11 @@ async def main() -> None: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def delete_agent( + async def get_agent_link( self, agent_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> typing.Optional[typing.Any]: + ) -> GetAgentLinkResponseModel: """ - Delete an agent + Get the current link used to share the agent with others Parameters ---------- @@ -2619,7 +2639,7 @@ async def delete_agent( Returns ------- - typing.Optional[typing.Any] + GetAgentLinkResponseModel Successful Response Examples @@ -2634,7 +2654,7 @@ async def delete_agent( async def main() -> None: - await client.conversational_ai.delete_agent( + await client.conversational_ai.get_agent_link( agent_id="21m00Tcm4TlvDq8ikWAM", ) @@ -2642,16 +2662,16 @@ async def main() -> None: asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - f"v1/convai/agents/{jsonable_encoder(agent_id)}", - method="DELETE", + f"v1/convai/agents/{jsonable_encoder(agent_id)}/link", + method="GET", request_options=request_options, ) try: if 200 <= _response.status_code < 300: return typing.cast( - typing.Optional[typing.Any], + GetAgentLinkResponseModel, construct_type( - type_=typing.Optional[typing.Any], # type: ignore + type_=GetAgentLinkResponseModel, # type: ignore object_=_response.json(), ), ) @@ -2670,48 +2690,26 @@ async def main() -> None: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def update_agent( - self, - agent_id: str, - *, - use_tool_ids: typing.Optional[bool] = None, - conversation_config: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - platform_settings: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - secrets: typing.Optional[ - typing.Sequence[BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem] - ] = OMIT, - name: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> GetAgentResponseModel: + async def post_agent_avatar( + self, agent_id: str, *, avatar_file: core.File, request_options: typing.Optional[RequestOptions] = None + ) -> PostAgentAvatarResponseModel: """ - Patches an Agent settings + Sets the avatar for an agent displayed in the widget Parameters ---------- agent_id : str The id of an agent. This is returned on agent creation. - use_tool_ids : typing.Optional[bool] - Use tool ids instead of tools specs from request payload. 
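# --- Editorial sketch (not part of the generated diff) ----------------------
# get_agent_widget accepts an optional conversation_signature, the expiring
# token produced by the get_signed_url endpoint. The field holding that token
# on ConversationSignedUrlResponseModel is not shown in this patch, so a
# placeholder string stands in for it below.
import asyncio

from elevenlabs import AsyncElevenLabs

client = AsyncElevenLabs(api_key="YOUR_API_KEY")


async def main() -> None:
    signed = await client.conversational_ai.get_signed_url(
        agent_id="21m00Tcm4TlvDq8ikWAM",
    )
    await client.conversational_ai.get_agent_widget(
        agent_id="21m00Tcm4TlvDq8ikWAM",
        conversation_signature="SIGNED_TOKEN",  # take this from `signed`
    )


asyncio.run(main())
# -----------------------------------------------------------------------------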
- - conversation_config : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Conversation configuration for an agent - - platform_settings : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Platform settings for the agent are all settings that aren't related to the conversation orchestration and content. - - secrets : typing.Optional[typing.Sequence[BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem]] - A list of secrets for the agent. Can be used to add new secrets or update and delete the existing ones - - name : typing.Optional[str] - A name to make the agent easier to find + avatar_file : core.File + See core.File for more documentation request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - GetAgentResponseModel + PostAgentAvatarResponseModel Successful Response Examples @@ -2726,238 +2724,7 @@ async def update_agent( async def main() -> None: - await client.conversational_ai.update_agent( - agent_id="21m00Tcm4TlvDq8ikWAM", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - f"v1/convai/agents/{jsonable_encoder(agent_id)}", - method="PATCH", - params={ - "use_tool_ids": use_tool_ids, - }, - json={ - "conversation_config": conversation_config, - "platform_settings": platform_settings, - "secrets": convert_and_respect_annotation_metadata( - object_=secrets, - annotation=typing.Sequence[BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem], - direction="write", - ), - "name": name, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - GetAgentResponseModel, - construct_type( - type_=GetAgentResponseModel, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def get_agent_widget( - self, - agent_id: str, - *, - conversation_signature: typing.Optional[str] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> GetAgentEmbedResponseModel: - """ - Retrieve the widget configuration for an agent - - Parameters - ---------- - agent_id : str - The id of an agent. This is returned on agent creation. - - conversation_signature : typing.Optional[str] - An expiring token that enables a conversation to start. These can be generated for an agent using the /v1/convai/conversation/get_signed_url endpoint - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - GetAgentEmbedResponseModel - Successful Response - - Examples - -------- - import asyncio - - from elevenlabs import AsyncElevenLabs - - client = AsyncElevenLabs( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.conversational_ai.get_agent_widget( - agent_id="21m00Tcm4TlvDq8ikWAM", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - f"v1/convai/agents/{jsonable_encoder(agent_id)}/widget", - method="GET", - params={ - "conversation_signature": conversation_signature, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - GetAgentEmbedResponseModel, - construct_type( - type_=GetAgentEmbedResponseModel, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def get_agent_link( - self, agent_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> GetAgentLinkResponseModel: - """ - Get the current link used to share the agent with others - - Parameters - ---------- - agent_id : str - The id of an agent. This is returned on agent creation. - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - GetAgentLinkResponseModel - Successful Response - - Examples - -------- - import asyncio - - from elevenlabs import AsyncElevenLabs - - client = AsyncElevenLabs( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.conversational_ai.get_agent_link( - agent_id="21m00Tcm4TlvDq8ikWAM", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - f"v1/convai/agents/{jsonable_encoder(agent_id)}/link", - method="GET", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - GetAgentLinkResponseModel, - construct_type( - type_=GetAgentLinkResponseModel, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def post_agent_avatar( - self, agent_id: str, *, avatar_file: core.File, request_options: typing.Optional[RequestOptions] = None - ) -> PostAgentAvatarResponseModel: - """ - Sets the avatar for an agent displayed in the widget - - Parameters - ---------- - agent_id : str - The id of an agent. This is returned on agent creation. - - avatar_file : core.File - See core.File for more documentation - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - PostAgentAvatarResponseModel - Successful Response - - Examples - -------- - import asyncio - - from elevenlabs import AsyncElevenLabs - - client = AsyncElevenLabs( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.conversational_ai.post_agent_avatar( + await client.conversational_ai.post_agent_avatar( agent_id="21m00Tcm4TlvDq8ikWAM", ) @@ -3550,7 +3317,7 @@ async def create_phone_number( Twilio Account SID token : str - Twilio Token + Twilio Auth Token request_options : typing.Optional[RequestOptions] Request-specific configuration. @@ -3762,329 +3529,23 @@ async def update_phone_number( *, agent_id: typing.Optional[str] = OMIT, request_options: typing.Optional[RequestOptions] = None, - ) -> GetPhoneNumberResponseModel: - """ - Update Phone Number details by ID - - Parameters - ---------- - phone_number_id : str - The id of an agent. This is returned on agent creation. - - agent_id : typing.Optional[str] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - GetPhoneNumberResponseModel - Successful Response - - Examples - -------- - import asyncio - - from elevenlabs import AsyncElevenLabs - - client = AsyncElevenLabs( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.conversational_ai.update_phone_number( - phone_number_id="TeaqRRdTcIfIu2i7BYfT", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - f"v1/convai/phone-numbers/{jsonable_encoder(phone_number_id)}", - method="PATCH", - json={ - "agent_id": agent_id, - }, - headers={ - "content-type": "application/json", - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - GetPhoneNumberResponseModel, - construct_type( - type_=GetPhoneNumberResponseModel, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def get_phone_numbers( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> typing.List[GetPhoneNumberResponseModel]: - """ - Retrieve all Phone Numbers - - Parameters - ---------- - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
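# --- Editorial sketch (not part of the generated diff) ----------------------
# Uploading a widget avatar. Assumes core.File accepts a binary file-like
# object, as is usual for Fern-generated clients; "avatar.png" is a
# placeholder path.
import asyncio

from elevenlabs import AsyncElevenLabs

client = AsyncElevenLabs(api_key="YOUR_API_KEY")


async def main() -> None:
    with open("avatar.png", "rb") as avatar:
        await client.conversational_ai.post_agent_avatar(
            agent_id="21m00Tcm4TlvDq8ikWAM",
            avatar_file=avatar,
        )


asyncio.run(main())
# -----------------------------------------------------------------------------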
- - Returns - ------- - typing.List[GetPhoneNumberResponseModel] - Successful Response - - Examples - -------- - import asyncio - - from elevenlabs import AsyncElevenLabs - - client = AsyncElevenLabs( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.conversational_ai.get_phone_numbers() - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v1/convai/phone-numbers/", - method="GET", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - typing.List[GetPhoneNumberResponseModel], - construct_type( - type_=typing.List[GetPhoneNumberResponseModel], # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def get_knowledge_base_list( - self, - *, - cursor: typing.Optional[str] = None, - page_size: typing.Optional[int] = None, - search: typing.Optional[str] = None, - show_only_owned_documents: typing.Optional[bool] = None, - use_typesense: typing.Optional[bool] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> GetKnowledgeBaseListResponseModel: - """ - Get a list of available knowledge base documents - - Parameters - ---------- - cursor : typing.Optional[str] - Used for fetching next page. Cursor is returned in the response. - - page_size : typing.Optional[int] - How many documents to return at maximum. Can not exceed 100, defaults to 30. - - search : typing.Optional[str] - If specified, the endpoint returns only such knowledge base documents whose names start with this string. - - show_only_owned_documents : typing.Optional[bool] - If set to true, the endpoint will return only documents owned by you (and not shared from somebody else). - - use_typesense : typing.Optional[bool] - If set to true, the endpoint will use typesense DB to search for the documents). - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - GetKnowledgeBaseListResponseModel - Successful Response - - Examples - -------- - import asyncio - - from elevenlabs import AsyncElevenLabs - - client = AsyncElevenLabs( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.conversational_ai.get_knowledge_base_list() - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v1/convai/knowledge-base", - method="GET", - params={ - "cursor": cursor, - "page_size": page_size, - "search": search, - "show_only_owned_documents": show_only_owned_documents, - "use_typesense": use_typesense, - }, - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - GetKnowledgeBaseListResponseModel, - construct_type( - type_=GetKnowledgeBaseListResponseModel, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def add_to_knowledge_base( - self, - *, - url: typing.Optional[str] = OMIT, - file: typing.Optional[core.File] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> AddKnowledgeBaseResponseModel: - """ - Uploads a file or reference a webpage to use as part of the shared knowledge base - - Parameters - ---------- - url : typing.Optional[str] - URL to a page of documentation that the agent will have access to in order to interact with users. - - file : typing.Optional[core.File] - See core.File for more documentation - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - AddKnowledgeBaseResponseModel - Successful Response - - Examples - -------- - import asyncio - - from elevenlabs import AsyncElevenLabs - - client = AsyncElevenLabs( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.conversational_ai.add_to_knowledge_base() - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v1/convai/knowledge-base", - method="POST", - data={ - "url": url, - }, - files={ - "file": file, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - AddKnowledgeBaseResponseModel, - construct_type( - type_=AddKnowledgeBaseResponseModel, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def get_knowledge_base_document_by_id( - self, documentation_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> GetKnowledgeBaseResponseModel: + ) -> GetPhoneNumberResponseModel: """ - Get details about a specific documentation making up the agent's knowledge base + Update Phone Number details by ID Parameters ---------- - documentation_id : str - The id of a document from the knowledge base. This is returned on document addition. + phone_number_id : str + The id of an agent. This is returned on agent creation. + + agent_id : typing.Optional[str] request_options : typing.Optional[RequestOptions] Request-specific configuration. 
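A minimal sketch of the PATCH described above, pointing an imported number at an agent; both IDs are the placeholders used elsewhere in these docstrings:

```python
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)

# Route calls on this number to the given agent.
client.conversational_ai.update_phone_number(
    phone_number_id="TeaqRRdTcIfIu2i7BYfT",
    agent_id="21m00Tcm4TlvDq8ikWAM",
)
```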
Returns ------- - GetKnowledgeBaseResponseModel + GetPhoneNumberResponseModel Successful Response Examples @@ -4099,24 +3560,31 @@ async def get_knowledge_base_document_by_id( async def main() -> None: - await client.conversational_ai.get_knowledge_base_document_by_id( - documentation_id="21m00Tcm4TlvDq8ikWAM", + await client.conversational_ai.update_phone_number( + phone_number_id="TeaqRRdTcIfIu2i7BYfT", ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - f"v1/convai/knowledge-base/{jsonable_encoder(documentation_id)}", - method="GET", + f"v1/convai/phone-numbers/{jsonable_encoder(phone_number_id)}", + method="PATCH", + json={ + "agent_id": agent_id, + }, + headers={ + "content-type": "application/json", + }, request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - GetKnowledgeBaseResponseModel, + GetPhoneNumberResponseModel, construct_type( - type_=GetKnowledgeBaseResponseModel, # type: ignore + type_=GetPhoneNumberResponseModel, # type: ignore object_=_response.json(), ), ) @@ -4135,23 +3603,20 @@ async def main() -> None: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def delete_knowledge_base_document( - self, documentation_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> typing.Optional[typing.Any]: + async def get_phone_numbers( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.List[GetPhoneNumberResponseModel]: """ - Delete a document from the knowledge base + Retrieve all Phone Numbers Parameters ---------- - documentation_id : str - The id of a document from the knowledge base. This is returned on document addition. - request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - typing.Optional[typing.Any] + typing.List[GetPhoneNumberResponseModel] Successful Response Examples @@ -4166,24 +3631,22 @@ async def delete_knowledge_base_document( async def main() -> None: - await client.conversational_ai.delete_knowledge_base_document( - documentation_id="21m00Tcm4TlvDq8ikWAM", - ) + await client.conversational_ai.get_phone_numbers() asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - f"v1/convai/knowledge-base/{jsonable_encoder(documentation_id)}", - method="DELETE", + "v1/convai/phone-numbers/", + method="GET", request_options=request_options, ) try: if 200 <= _response.status_code < 300: return typing.cast( - typing.Optional[typing.Any], + typing.List[GetPhoneNumberResponseModel], construct_type( - type_=typing.Optional[typing.Any], # type: ignore + type_=typing.List[GetPhoneNumberResponseModel], # type: ignore object_=_response.json(), ), ) @@ -4202,34 +3665,42 @@ async def main() -> None: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def get_dependent_agents( + async def get_knowledge_base_list( self, - documentation_id: str, *, cursor: typing.Optional[str] = None, page_size: typing.Optional[int] = None, + search: typing.Optional[str] = None, + show_only_owned_documents: typing.Optional[bool] = None, + use_typesense: typing.Optional[bool] = None, request_options: typing.Optional[RequestOptions] = None, - ) -> GetKnowledgeBaseDependentAgentsResponseModel: + ) -> GetKnowledgeBaseListResponseModel: """ - Get a list of agents depending on this knowledge base document + Get a list of available knowledge base documents Parameters ---------- - documentation_id : str - The id of a document from the knowledge base. This is returned on document addition. - cursor : typing.Optional[str] Used for fetching next page. Cursor is returned in the response. page_size : typing.Optional[int] How many documents to return at maximum. Can not exceed 100, defaults to 30. + search : typing.Optional[str] + If specified, the endpoint returns only such knowledge base documents whose names start with this string. + + show_only_owned_documents : typing.Optional[bool] + If set to true, the endpoint will return only documents owned by you (and not shared from somebody else). + + use_typesense : typing.Optional[bool] + If set to true, the endpoint will use typesense DB to search for the documents). + request_options : typing.Optional[RequestOptions] Request-specific configuration. 
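Since `cursor` and `page_size` exist for paging, a minimal pagination sketch may help; note that `documents`, `has_more` and `next_cursor` are assumed field names on `GetKnowledgeBaseListResponseModel`, so verify them against the generated model:

```python
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)

cursor = None
while True:
    page = client.conversational_ai.get_knowledge_base_list(
        cursor=cursor,
        page_size=100,  # the endpoint caps this at 100
    )
    # `documents`, `has_more` and `next_cursor` are assumed field names.
    for document in page.documents:
        print(document)
    if not page.has_more:
        break
    cursor = page.next_cursor
```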
Returns ------- - GetKnowledgeBaseDependentAgentsResponseModel + GetKnowledgeBaseListResponseModel Successful Response Examples @@ -4244,28 +3715,29 @@ async def get_dependent_agents( async def main() -> None: - await client.conversational_ai.get_dependent_agents( - documentation_id="21m00Tcm4TlvDq8ikWAM", - ) + await client.conversational_ai.get_knowledge_base_list() asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - f"v1/convai/knowledge-base/{jsonable_encoder(documentation_id)}/dependent-agents", + "v1/convai/knowledge-base", method="GET", params={ "cursor": cursor, "page_size": page_size, + "search": search, + "show_only_owned_documents": show_only_owned_documents, + "use_typesense": use_typesense, }, request_options=request_options, ) try: if 200 <= _response.status_code < 300: return typing.cast( - GetKnowledgeBaseDependentAgentsResponseModel, + GetKnowledgeBaseListResponseModel, construct_type( - type_=GetKnowledgeBaseDependentAgentsResponseModel, # type: ignore + type_=GetKnowledgeBaseListResponseModel, # type: ignore object_=_response.json(), ), ) @@ -4284,18 +3756,34 @@ async def main() -> None: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def get_tools(self, *, request_options: typing.Optional[RequestOptions] = None) -> ToolsResponseModel: + async def add_to_knowledge_base( + self, + *, + name: typing.Optional[str] = OMIT, + url: typing.Optional[str] = OMIT, + file: typing.Optional[core.File] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> AddKnowledgeBaseResponseModel: """ - Get all available tools available in the workspace. + Uploads a file or reference a webpage to use as part of the shared knowledge base Parameters ---------- + name : typing.Optional[str] + A custom, human-readable name for the document. + + url : typing.Optional[str] + URL to a page of documentation that the agent will have access to in order to interact with users. + + file : typing.Optional[core.File] + See core.File for more documentation + request_options : typing.Optional[RequestOptions] Request-specific configuration. 
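Both ingestion paths, including the new `name` parameter, in one minimal sketch; the name, URL and file path are placeholders:

```python
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)

# Reference a webpage by URL under a readable name.
client.conversational_ai.add_to_knowledge_base(
    name="Product FAQ",
    url="https://example.com/faq",
)

# Or upload a local file instead (placeholder path).
with open("handbook.pdf", "rb") as handbook:
    client.conversational_ai.add_to_knowledge_base(
        name="Employee Handbook",
        file=handbook,
    )
```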
Returns ------- - ToolsResponseModel + AddKnowledgeBaseResponseModel Successful Response Examples @@ -4310,22 +3798,30 @@ async def get_tools(self, *, request_options: typing.Optional[RequestOptions] = async def main() -> None: - await client.conversational_ai.get_tools() + await client.conversational_ai.add_to_knowledge_base() asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v1/convai/tools", - method="GET", + "v1/convai/knowledge-base", + method="POST", + data={ + "name": name, + "url": url, + }, + files={ + "file": file, + }, request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - ToolsResponseModel, + AddKnowledgeBaseResponseModel, construct_type( - type_=ToolsResponseModel, # type: ignore + type_=AddKnowledgeBaseResponseModel, # type: ignore object_=_response.json(), ), ) @@ -4344,34 +3840,40 @@ async def main() -> None: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def add_tool( - self, *, request: ToolRequestModel, request_options: typing.Optional[RequestOptions] = None - ) -> ToolResponseModel: + async def rag_index_status( + self, + documentation_id: str, + *, + model: EmbeddingModelEnum, + force_reindex: typing.Optional[bool] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> RagIndexResponseModel: """ - Add a new tool to the available tools in the workspace. + In case the document is not RAG indexed, it triggers rag indexing task, otherwise it just returns the current status. Parameters ---------- - request : ToolRequestModel + documentation_id : str + The id of a document from the knowledge base. This is returned on document addition. + + model : EmbeddingModelEnum + + force_reindex : typing.Optional[bool] + In case the document is indexed and for some reason you want to reindex it, set this param as true. request_options : typing.Optional[RequestOptions] Request-specific configuration. 
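Because this endpoint both triggers and reports indexing, a small polling loop is the natural usage; `status` and its in-progress values are assumptions about `RagIndexResponseModel`, not confirmed fields:

```python
import time

from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)

# Repeated calls return the current status instead of re-triggering the task.
while True:
    index = client.conversational_ai.rag_index_status(
        documentation_id="21m00Tcm4TlvDq8ikWAM",
        model="e5_mistral_7b_instruct",
    )
    # `status` and the "created"/"processing" values are assumed details.
    if index.status not in ("created", "processing"):
        break
    time.sleep(5)
```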
Returns ------- - ToolResponseModel + RagIndexResponseModel Successful Response Examples -------- import asyncio - from elevenlabs import ( - AsyncElevenLabs, - ToolRequestModel, - ToolRequestModelToolConfig_Webhook, - WebhookToolApiSchemaConfig, - ) + from elevenlabs import AsyncElevenLabs client = AsyncElevenLabs( api_key="YOUR_API_KEY", @@ -4379,36 +3881,35 @@ async def add_tool( async def main() -> None: - await client.conversational_ai.add_tool( - request=ToolRequestModel( - tool_config=ToolRequestModelToolConfig_Webhook( - name="name", - description="description", - api_schema=WebhookToolApiSchemaConfig( - url="url", - ), - ), - ), + await client.conversational_ai.rag_index_status( + documentation_id="21m00Tcm4TlvDq8ikWAM", + model="e5_mistral_7b_instruct", ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v1/convai/tools", + f"v1/convai/knowledge-base/{jsonable_encoder(documentation_id)}/rag-index", method="POST", - json=convert_and_respect_annotation_metadata( - object_=request, annotation=ToolRequestModel, direction="write" - ), + params={ + "force_reindex": force_reindex, + }, + json={ + "model": model, + }, + headers={ + "content-type": "application/json", + }, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - ToolResponseModel, + RagIndexResponseModel, construct_type( - type_=ToolResponseModel, # type: ignore + type_=RagIndexResponseModel, # type: ignore object_=_response.json(), ), ) @@ -4427,23 +3928,23 @@ async def main() -> None: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def get_tool( - self, tool_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> ToolResponseModel: + async def get_knowledge_base_document_by_id( + self, documentation_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> GetKnowledgeBaseResponseModel: """ - Get tool that is available in the workspace. + Get details about a specific documentation making up the agent's knowledge base Parameters ---------- - tool_id : str - ID of the requested tool. + documentation_id : str + The id of a document from the knowledge base. This is returned on document addition. request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - ToolResponseModel + GetKnowledgeBaseResponseModel Successful Response Examples @@ -4458,24 +3959,24 @@ async def get_tool( async def main() -> None: - await client.conversational_ai.get_tool( - tool_id="tool_id", + await client.conversational_ai.get_knowledge_base_document_by_id( + documentation_id="21m00Tcm4TlvDq8ikWAM", ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - f"v1/convai/tools/{jsonable_encoder(tool_id)}", + f"v1/convai/knowledge-base/{jsonable_encoder(documentation_id)}", method="GET", request_options=request_options, ) try: if 200 <= _response.status_code < 300: return typing.cast( - ToolResponseModel, + GetKnowledgeBaseResponseModel, construct_type( - type_=ToolResponseModel, # type: ignore + type_=GetKnowledgeBaseResponseModel, # type: ignore object_=_response.json(), ), ) @@ -4494,16 +3995,16 @@ async def main() -> None: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def remove_tool( - self, tool_id: str, *, request_options: typing.Optional[RequestOptions] = None + async def delete_knowledge_base_document( + self, documentation_id: str, *, request_options: typing.Optional[RequestOptions] = None ) -> typing.Optional[typing.Any]: """ - Delete tool from the workspace. + Delete a document from the knowledge base Parameters ---------- - tool_id : str - ID of the requested tool. + documentation_id : str + The id of a document from the knowledge base. This is returned on document addition. request_options : typing.Optional[RequestOptions] Request-specific configuration. @@ -4525,15 +4026,15 @@ async def remove_tool( async def main() -> None: - await client.conversational_ai.remove_tool( - tool_id="tool_id", + await client.conversational_ai.delete_knowledge_base_document( + documentation_id="21m00Tcm4TlvDq8ikWAM", ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - f"v1/convai/tools/{jsonable_encoder(tool_id)}", + f"v1/convai/knowledge-base/{jsonable_encoder(documentation_id)}", method="DELETE", request_options=request_options, ) @@ -4561,37 +4062,41 @@ async def main() -> None: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def update_tool( - self, tool_id: str, *, request: ToolRequestModel, request_options: typing.Optional[RequestOptions] = None - ) -> ToolResponseModel: + async def get_dependent_agents( + self, + documentation_id: str, + *, + cursor: typing.Optional[str] = None, + page_size: typing.Optional[int] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> GetKnowledgeBaseDependentAgentsResponseModel: """ - Update tool that is available in the workspace. + Get a list of agents depending on this knowledge base document Parameters ---------- - tool_id : str - ID of the requested tool. + documentation_id : str + The id of a document from the knowledge base. This is returned on document addition. - request : ToolRequestModel + cursor : typing.Optional[str] + Used for fetching next page. Cursor is returned in the response. + + page_size : typing.Optional[int] + How many documents to return at maximum. Can not exceed 100, defaults to 30. request_options : typing.Optional[RequestOptions] Request-specific configuration. 
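A minimal guard built from this endpoint: check for dependents before deleting a document; `agents` is an assumed field name on `GetKnowledgeBaseDependentAgentsResponseModel`:

```python
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)

documentation_id = "21m00Tcm4TlvDq8ikWAM"

# `agents` is an assumed field name on the dependent-agents response.
dependents = client.conversational_ai.get_dependent_agents(
    documentation_id=documentation_id,
)
if not dependents.agents:
    client.conversational_ai.delete_knowledge_base_document(
        documentation_id=documentation_id,
    )
```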
Returns ------- - ToolResponseModel + GetKnowledgeBaseDependentAgentsResponseModel Successful Response Examples -------- import asyncio - from elevenlabs import ( - AsyncElevenLabs, - ToolRequestModel, - ToolRequestModelToolConfig_Webhook, - WebhookToolApiSchemaConfig, - ) + from elevenlabs import AsyncElevenLabs client = AsyncElevenLabs( api_key="YOUR_API_KEY", @@ -4599,37 +4104,28 @@ async def update_tool( async def main() -> None: - await client.conversational_ai.update_tool( - tool_id="tool_id", - request=ToolRequestModel( - tool_config=ToolRequestModelToolConfig_Webhook( - name="name", - description="description", - api_schema=WebhookToolApiSchemaConfig( - url="url", - ), - ), - ), + await client.conversational_ai.get_dependent_agents( + documentation_id="21m00Tcm4TlvDq8ikWAM", ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - f"v1/convai/tools/{jsonable_encoder(tool_id)}", - method="PATCH", - json=convert_and_respect_annotation_metadata( - object_=request, annotation=ToolRequestModel, direction="write" - ), + f"v1/convai/knowledge-base/{jsonable_encoder(documentation_id)}/dependent-agents", + method="GET", + params={ + "cursor": cursor, + "page_size": page_size, + }, request_options=request_options, - omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - ToolResponseModel, + GetKnowledgeBaseDependentAgentsResponseModel, construct_type( - type_=ToolResponseModel, # type: ignore + type_=GetKnowledgeBaseDependentAgentsResponseModel, # type: ignore object_=_response.json(), ), ) @@ -4650,7 +4146,7 @@ async def main() -> None: async def get_settings( self, *, request_options: typing.Optional[RequestOptions] = None - ) -> GetConvaiSettingsResponseModel: + ) -> GetConvAiSettingsResponseModel: """ Retrieve Convai settings for the workspace @@ -4661,7 +4157,7 @@ async def get_settings( Returns ------- - GetConvaiSettingsResponseModel + GetConvAiSettingsResponseModel Successful Response Examples @@ -4689,9 +4185,9 @@ async def main() -> None: try: if 200 <= _response.status_code < 300: return typing.cast( - GetConvaiSettingsResponseModel, + GetConvAiSettingsResponseModel, construct_type( - type_=GetConvaiSettingsResponseModel, # type: ignore + type_=GetConvAiSettingsResponseModel, # type: ignore object_=_response.json(), ), ) @@ -4713,18 +4209,15 @@ async def main() -> None: async def update_settings( self, *, - secrets: typing.Sequence[PatchConvaiSettingsRequestSecretsItem], conversation_initiation_client_data_webhook: typing.Optional[ConversationInitiationClientDataWebhook] = OMIT, webhooks: typing.Optional[ConvAiWebhooks] = OMIT, request_options: typing.Optional[RequestOptions] = None, - ) -> GetConvaiSettingsResponseModel: + ) -> GetConvAiSettingsResponseModel: """ Update Convai settings for the workspace Parameters ---------- - secrets : typing.Sequence[PatchConvaiSettingsRequestSecretsItem] - conversation_initiation_client_data_webhook : typing.Optional[ConversationInitiationClientDataWebhook] webhooks : typing.Optional[ConvAiWebhooks] @@ -4734,7 +4227,7 @@ async def update_settings( Returns ------- - GetConvaiSettingsResponseModel + GetConvAiSettingsResponseModel Successful Response Examples @@ -4742,9 +4235,6 @@ async def update_settings( import asyncio from elevenlabs import AsyncElevenLabs - from elevenlabs.conversational_ai import ( - PatchConvaiSettingsRequestSecretsItem_New, - ) client = AsyncElevenLabs( api_key="YOUR_API_KEY", @@ -4752,14 +4242,7 @@ async def update_settings( async def main() -> None: - await 
client.conversational_ai.update_settings( - secrets=[ - PatchConvaiSettingsRequestSecretsItem_New( - name="name", - value="value", - ) - ], - ) + await client.conversational_ai.update_settings() asyncio.run(main()) @@ -4776,11 +4259,6 @@ async def main() -> None: "webhooks": convert_and_respect_annotation_metadata( object_=webhooks, annotation=ConvAiWebhooks, direction="write" ), - "secrets": convert_and_respect_annotation_metadata( - object_=secrets, - annotation=typing.Sequence[PatchConvaiSettingsRequestSecretsItem], - direction="write", - ), }, headers={ "content-type": "application/json", @@ -4791,9 +4269,9 @@ async def main() -> None: try: if 200 <= _response.status_code < 300: return typing.cast( - GetConvaiSettingsResponseModel, + GetConvAiSettingsResponseModel, construct_type( - type_=GetConvaiSettingsResponseModel, # type: ignore + type_=GetConvAiSettingsResponseModel, # type: ignore object_=_response.json(), ), ) @@ -4816,7 +4294,7 @@ async def get_secrets( self, *, request_options: typing.Optional[RequestOptions] = None ) -> GetWorkspaceSecretsResponseModel: """ - Get all secrets for the workspace + Get all workspace secrets for the user Parameters ---------- @@ -4951,3 +4429,60 @@ async def main() -> None: except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) + + async def delete_secret(self, secret_id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None: + """ + Delete a workspace secret if it's not in use + + Parameters + ---------- + secret_id : str + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + None + + Examples + -------- + import asyncio + + from elevenlabs import AsyncElevenLabs + + client = AsyncElevenLabs( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.conversational_ai.delete_secret( + secret_id="secret_id", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"v1/convai/secrets/{jsonable_encoder(secret_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/elevenlabs/conversational_ai/types/__init__.py b/src/elevenlabs/conversational_ai/types/__init__.py deleted file mode 100644 index 48f22f3a..00000000 --- a/src/elevenlabs/conversational_ai/types/__init__.py +++ /dev/null @@ -1,21 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
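With the `secrets` payload gone from `update_settings`, secret lifecycle management runs through the dedicated endpoints instead; a minimal sketch, where `secrets`, `name` and `secret_id` are assumed field names on `GetWorkspaceSecretsResponseModel`:

```python
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)

# List workspace secrets, then delete one that is no longer referenced.
# `secrets`, `name` and `secret_id` are assumed field names; the secret
# name below is a placeholder.
workspace_secrets = client.conversational_ai.get_secrets()
for secret in workspace_secrets.secrets:
    if secret.name == "legacy_webhook_token":
        client.conversational_ai.delete_secret(secret_id=secret.secret_id)
```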
- -from .body_patches_an_agent_settings_v_1_convai_agents_agent_id_patch_secrets_item import ( - BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem, - BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_New, - BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_Stored, -) -from .patch_convai_settings_request_secrets_item import ( - PatchConvaiSettingsRequestSecretsItem, - PatchConvaiSettingsRequestSecretsItem_New, - PatchConvaiSettingsRequestSecretsItem_Stored, -) - -__all__ = [ - "BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem", - "BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_New", - "BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_Stored", - "PatchConvaiSettingsRequestSecretsItem", - "PatchConvaiSettingsRequestSecretsItem_New", - "PatchConvaiSettingsRequestSecretsItem_Stored", -] diff --git a/src/elevenlabs/conversational_ai/types/body_patches_an_agent_settings_v_1_convai_agents_agent_id_patch_secrets_item.py b/src/elevenlabs/conversational_ai/types/body_patches_an_agent_settings_v_1_convai_agents_agent_id_patch_secrets_item.py deleted file mode 100644 index ffcbbd74..00000000 --- a/src/elevenlabs/conversational_ai/types/body_patches_an_agent_settings_v_1_convai_agents_agent_id_patch_secrets_item.py +++ /dev/null @@ -1,48 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from __future__ import annotations -from ...core.unchecked_base_model import UncheckedBaseModel -import typing -from ...core.pydantic_utilities import IS_PYDANTIC_V2 -import pydantic -import typing_extensions -from ...core.unchecked_base_model import UnionMetadata - - -class BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_New(UncheckedBaseModel): - type: typing.Literal["new"] = "new" - name: str - value: str - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow - - -class BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_Stored(UncheckedBaseModel): - type: typing.Literal["stored"] = "stored" - secret_id: str - name: str - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow - - -BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem = typing_extensions.Annotated[ - typing.Union[ - BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_New, - BodyPatchesAnAgentSettingsV1ConvaiAgentsAgentIdPatchSecretsItem_Stored, - ], - UnionMetadata(discriminant="type"), -] diff --git a/src/elevenlabs/conversational_ai/types/patch_convai_settings_request_secrets_item.py b/src/elevenlabs/conversational_ai/types/patch_convai_settings_request_secrets_item.py deleted file mode 100644 index e5465b26..00000000 --- a/src/elevenlabs/conversational_ai/types/patch_convai_settings_request_secrets_item.py +++ /dev/null @@ -1,45 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -from __future__ import annotations -from ...core.unchecked_base_model import UncheckedBaseModel -import typing -from ...core.pydantic_utilities import IS_PYDANTIC_V2 -import pydantic -import typing_extensions -from ...core.unchecked_base_model import UnionMetadata - - -class PatchConvaiSettingsRequestSecretsItem_New(UncheckedBaseModel): - type: typing.Literal["new"] = "new" - name: str - value: str - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow - - -class PatchConvaiSettingsRequestSecretsItem_Stored(UncheckedBaseModel): - type: typing.Literal["stored"] = "stored" - secret_id: str - name: str - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow - - -PatchConvaiSettingsRequestSecretsItem = typing_extensions.Annotated[ - typing.Union[PatchConvaiSettingsRequestSecretsItem_New, PatchConvaiSettingsRequestSecretsItem_Stored], - UnionMetadata(discriminant="type"), -] diff --git a/src/elevenlabs/core/client_wrapper.py b/src/elevenlabs/core/client_wrapper.py index f879b9f0..a3425fa2 100644 --- a/src/elevenlabs/core/client_wrapper.py +++ b/src/elevenlabs/core/client_wrapper.py @@ -16,7 +16,7 @@ def get_headers(self) -> typing.Dict[str, str]: headers: typing.Dict[str, str] = { "X-Fern-Language": "Python", "X-Fern-SDK-Name": "elevenlabs", - "X-Fern-SDK-Version": "1.52.0", + "X-Fern-SDK-Version": "1.53.0", } if self._api_key is not None: headers["xi-api-key"] = self._api_key diff --git a/src/elevenlabs/dubbing/client.py b/src/elevenlabs/dubbing/client.py index 97be0722..1defd413 100644 --- a/src/elevenlabs/dubbing/client.py +++ b/src/elevenlabs/dubbing/client.py @@ -2,16 +2,28 @@ import typing from ..core.client_wrapper import SyncClientWrapper -from .. import core from ..core.request_options import RequestOptions -from ..types.do_dubbing_response import DoDubbingResponse +from ..types.dubbing_resource import DubbingResource +from ..core.jsonable_encoder import jsonable_encoder from ..core.unchecked_base_model import construct_type from ..errors.unprocessable_entity_error import UnprocessableEntityError from ..types.http_validation_error import HttpValidationError from json.decoder import JSONDecodeError from ..core.api_error import ApiError +from ..types.language_added_response import LanguageAddedResponse +from ..types.segment_create_response import SegmentCreateResponse +from ..types.segment_update_response import SegmentUpdateResponse +from ..types.segment_delete_response import SegmentDeleteResponse +from ..types.segment_transcription_response import SegmentTranscriptionResponse +from ..types.segment_translation_response import SegmentTranslationResponse +from ..types.segment_dub_response import SegmentDubResponse +from .. 
import core +from ..types.do_dubbing_response import DoDubbingResponse from ..types.dubbing_metadata_response import DubbingMetadataResponse -from ..core.jsonable_encoder import jsonable_encoder +from ..types.delete_dubbing_response_model import DeleteDubbingResponseModel +from ..errors.forbidden_error import ForbiddenError +from ..errors.not_found_error import NotFoundError +from ..errors.too_early_error import TooEarlyError from .types.dubbing_get_transcript_for_dub_request_format_type import DubbingGetTranscriptForDubRequestFormatType from ..core.client_wrapper import AsyncClientWrapper @@ -23,70 +35,259 @@ class DubbingClient: def __init__(self, *, client_wrapper: SyncClientWrapper): self._client_wrapper = client_wrapper - def dub_a_video_or_an_audio_file( + def get_dubbing_resource( + self, dubbing_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> DubbingResource: + """ + Given a dubbing ID generated from the '/v1/dubbing' endpoint with studio enabled, returns the dubbing resource. + + Parameters + ---------- + dubbing_id : str + ID of the dubbing project. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + DubbingResource + Successful Response + + Examples + -------- + from elevenlabs import ElevenLabs + + client = ElevenLabs( + api_key="YOUR_API_KEY", + ) + client.dubbing.get_dubbing_resource( + dubbing_id="dubbing_id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"v1/dubbing/resource/{jsonable_encoder(dubbing_id)}", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + DubbingResource, + construct_type( + type_=DubbingResource, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def add_language_to_resource( + self, dubbing_id: str, *, language: str, request_options: typing.Optional[RequestOptions] = None + ) -> LanguageAddedResponse: + """ + Adds the given ElevenLab Turbo V2/V2.5 language code to the resource. Does not automatically generate transcripts/translations/audio. + + Parameters + ---------- + dubbing_id : str + ID of the dubbing project. + + language : str + The Target language. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
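A minimal sketch of registering a new target language on a studio resource; per the description above, this only adds the language, with transcripts, translations and audio generated separately:

```python
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)

# Register Spanish on the resource; no audio is generated yet.
client.dubbing.add_language_to_resource(
    dubbing_id="dubbing_id",
    language="es",
)
```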
+ + Returns + ------- + LanguageAddedResponse + Successful Response + + Examples + -------- + from elevenlabs import ElevenLabs + + client = ElevenLabs( + api_key="YOUR_API_KEY", + ) + client.dubbing.add_language_to_resource( + dubbing_id="dubbing_id", + language="language", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"v1/dubbing/resource/{jsonable_encoder(dubbing_id)}/language", + method="POST", + json={ + "language": language, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + LanguageAddedResponse, + construct_type( + type_=LanguageAddedResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def create_segment_for_speaker( self, + dubbing_id: str, + speaker_id: str, *, - target_lang: str, - file: typing.Optional[core.File] = OMIT, - name: typing.Optional[str] = OMIT, - source_url: typing.Optional[str] = OMIT, - source_lang: typing.Optional[str] = OMIT, - num_speakers: typing.Optional[int] = OMIT, - watermark: typing.Optional[bool] = OMIT, - start_time: typing.Optional[int] = OMIT, - end_time: typing.Optional[int] = OMIT, - highest_resolution: typing.Optional[bool] = OMIT, - drop_background_audio: typing.Optional[bool] = OMIT, - use_profanity_filter: typing.Optional[bool] = OMIT, + start_time: float, + end_time: float, + text: typing.Optional[str] = OMIT, request_options: typing.Optional[RequestOptions] = None, - ) -> DoDubbingResponse: + ) -> SegmentCreateResponse: """ - Dubs provided audio or video file into given language. + Creates a new segment in dubbing resource with a start and end time for the speaker in every available language. Does not automatically generate transcripts/translations/audio. Parameters ---------- - target_lang : str - The Target language to dub the content into. + dubbing_id : str + ID of the dubbing project. - file : typing.Optional[core.File] - See core.File for more documentation + speaker_id : str + ID of the speaker. - name : typing.Optional[str] - Name of the dubbing project. + start_time : float - source_url : typing.Optional[str] - URL of the source video/audio file. + end_time : float - source_lang : typing.Optional[str] - Source language. + text : typing.Optional[str] - num_speakers : typing.Optional[int] - Number of speakers to use for the dubbing. Set to 0 to automatically detect the number of speakers + request_options : typing.Optional[RequestOptions] + Request-specific configuration. - watermark : typing.Optional[bool] - Whether to apply watermark to the output video. + Returns + ------- + SegmentCreateResponse + Successful Response - start_time : typing.Optional[int] - Start time of the source video/audio file. + Examples + -------- + from elevenlabs import ElevenLabs - end_time : typing.Optional[int] - End time of the source video/audio file. 
+ client = ElevenLabs( + api_key="YOUR_API_KEY", + ) + client.dubbing.create_segment_for_speaker( + dubbing_id="dubbing_id", + speaker_id="speaker_id", + start_time=1.1, + end_time=1.1, + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"v1/dubbing/resource/{jsonable_encoder(dubbing_id)}/speaker/{jsonable_encoder(speaker_id)}/segment", + method="POST", + json={ + "start_time": start_time, + "end_time": end_time, + "text": text, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + SegmentCreateResponse, + construct_type( + type_=SegmentCreateResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) - highest_resolution : typing.Optional[bool] - Whether to use the highest resolution available. + def update_segment_language( + self, + dubbing_id: str, + segment_id: str, + language: str, + *, + start_time: typing.Optional[float] = OMIT, + end_time: typing.Optional[float] = OMIT, + text: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> SegmentUpdateResponse: + """ + Modifies a single segment with new text and/or start/end times. Will update the values for only a specific language of a segment. Does not automatically regenerate the dub. - drop_background_audio : typing.Optional[bool] - An advanced setting. Whether to drop background audio from the final dub. This can improve dub quality where it's known that audio shouldn't have a background track such as for speeches or monologues. + Parameters + ---------- + dubbing_id : str + ID of the dubbing project. - use_profanity_filter : typing.Optional[bool] - [BETA] Whether transcripts should have profanities censored with the words '[censored]' + segment_id : str + ID of the segment + + language : str + ID of the language. + + start_time : typing.Optional[float] + + end_time : typing.Optional[float] + + text : typing.Optional[str] request_options : typing.Optional[RequestOptions] Request-specific configuration. 
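Taken together with `create_segment_for_speaker`, a minimal segment-editing sketch; IDs are placeholders and times are in seconds. Neither call regenerates audio, which stays with `dub_segments`:

```python
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)

# Carve out a new segment for a speaker (times in seconds), then correct
# the Spanish text of an existing segment; all IDs here are placeholders.
client.dubbing.create_segment_for_speaker(
    dubbing_id="dubbing_id",
    speaker_id="speaker_id",
    start_time=12.0,
    end_time=13.5,
)
client.dubbing.update_segment_language(
    dubbing_id="dubbing_id",
    segment_id="segment_id",
    language="es",
    text="Hola, bienvenidos.",
)
```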
Returns ------- - DoDubbingResponse + SegmentUpdateResponse Successful Response Examples @@ -96,28 +297,1091 @@ def dub_a_video_or_an_audio_file( client = ElevenLabs( api_key="YOUR_API_KEY", ) - client.dubbing.dub_a_video_or_an_audio_file( - target_lang="target_lang", - ) + client.dubbing.update_segment_language( + dubbing_id="dubbing_id", + segment_id="segment_id", + language="language", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"v1/dubbing/resource/{jsonable_encoder(dubbing_id)}/segment/{jsonable_encoder(segment_id)}/{jsonable_encoder(language)}", + method="PATCH", + json={ + "start_time": start_time, + "end_time": end_time, + "text": text, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + SegmentUpdateResponse, + construct_type( + type_=SegmentUpdateResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def delete_segment( + self, dubbing_id: str, segment_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> SegmentDeleteResponse: + """ + Deletes a single segment from the dubbing. + + Parameters + ---------- + dubbing_id : str + ID of the dubbing project. + + segment_id : str + ID of the segment + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + SegmentDeleteResponse + Successful Response + + Examples + -------- + from elevenlabs import ElevenLabs + + client = ElevenLabs( + api_key="YOUR_API_KEY", + ) + client.dubbing.delete_segment( + dubbing_id="dubbing_id", + segment_id="segment_id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"v1/dubbing/resource/{jsonable_encoder(dubbing_id)}/segment/{jsonable_encoder(segment_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + SegmentDeleteResponse, + construct_type( + type_=SegmentDeleteResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def transcribe_segments( + self, + dubbing_id: str, + *, + segments: typing.Sequence[str], + request_options: typing.Optional[RequestOptions] = None, + ) -> SegmentTranscriptionResponse: + """ + Regenerate the transcriptions for the specified segments. Does not automatically regenerate translations or dubs. + + Parameters + ---------- + dubbing_id : str + ID of the dubbing project. + + segments : typing.Sequence[str] + Transcribe this specific list of segments. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
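A minimal regeneration cascade over edited segments, relying on the back-filling behavior each docstring describes (translate fills missing transcripts, dub fills missing transcripts and translations):

```python
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)

dubbing_id = "dubbing_id"
edited = ["segment_id"]  # placeholder segment IDs

# Transcribe first, then translate, then regenerate the dubbed audio.
client.dubbing.transcribe_segments(dubbing_id, segments=edited)
client.dubbing.translate_segments(dubbing_id, segments=edited, languages=["es"])
client.dubbing.dub_segments(dubbing_id, segments=edited, languages=["es"])
```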
+ + Returns + ------- + SegmentTranscriptionResponse + Successful Response + + Examples + -------- + from elevenlabs import ElevenLabs + + client = ElevenLabs( + api_key="YOUR_API_KEY", + ) + client.dubbing.transcribe_segments( + dubbing_id="dubbing_id", + segments=["segments"], + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"v1/dubbing/resource/{jsonable_encoder(dubbing_id)}/transcribe", + method="POST", + json={ + "segments": segments, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + SegmentTranscriptionResponse, + construct_type( + type_=SegmentTranscriptionResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def translate_segments( + self, + dubbing_id: str, + *, + segments: typing.Sequence[str], + languages: typing.Sequence[str], + request_options: typing.Optional[RequestOptions] = None, + ) -> SegmentTranslationResponse: + """ + Regenerate the translations for either the entire resource or the specified segments/languages. Will automatically transcribe missing transcriptions. Will not automatically regenerate the dubs. + + Parameters + ---------- + dubbing_id : str + ID of the dubbing project. + + segments : typing.Sequence[str] + Translate only this list of segments. + + languages : typing.Sequence[str] + Translate only these languages for each segment. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + SegmentTranslationResponse + Successful Response + + Examples + -------- + from elevenlabs import ElevenLabs + + client = ElevenLabs( + api_key="YOUR_API_KEY", + ) + client.dubbing.translate_segments( + dubbing_id="dubbing_id", + segments=["segments"], + languages=["languages"], + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"v1/dubbing/resource/{jsonable_encoder(dubbing_id)}/translate", + method="POST", + json={ + "segments": segments, + "languages": languages, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + SegmentTranslationResponse, + construct_type( + type_=SegmentTranslationResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def dub_segments( + self, + dubbing_id: str, + *, + segments: typing.Sequence[str], + languages: typing.Sequence[str], + request_options: typing.Optional[RequestOptions] = None, + ) -> SegmentDubResponse: + """ + Regenerate the dubs for either the entire resource or the specified segments/languages. 
Will automatically transcribe and translate any missing transcriptions and translations. + + Parameters + ---------- + dubbing_id : str + ID of the dubbing project. + + segments : typing.Sequence[str] + Dub only this list of segments. + + languages : typing.Sequence[str] + Dub only these languages for each segment. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + SegmentDubResponse + Successful Response + + Examples + -------- + from elevenlabs import ElevenLabs + + client = ElevenLabs( + api_key="YOUR_API_KEY", + ) + client.dubbing.dub_segments( + dubbing_id="dubbing_id", + segments=["segments"], + languages=["languages"], + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"v1/dubbing/resource/{jsonable_encoder(dubbing_id)}/dub", + method="POST", + json={ + "segments": segments, + "languages": languages, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + SegmentDubResponse, + construct_type( + type_=SegmentDubResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def dub_a_video_or_an_audio_file( + self, + *, + target_lang: str, + file: typing.Optional[core.File] = OMIT, + name: typing.Optional[str] = OMIT, + source_url: typing.Optional[str] = OMIT, + source_lang: typing.Optional[str] = OMIT, + num_speakers: typing.Optional[int] = OMIT, + watermark: typing.Optional[bool] = OMIT, + start_time: typing.Optional[int] = OMIT, + end_time: typing.Optional[int] = OMIT, + highest_resolution: typing.Optional[bool] = OMIT, + drop_background_audio: typing.Optional[bool] = OMIT, + use_profanity_filter: typing.Optional[bool] = OMIT, + dubbing_studio: typing.Optional[bool] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> DoDubbingResponse: + """ + Dubs a provided audio or video file into given language. + + Parameters + ---------- + target_lang : str + The Target language to dub the content into. + + file : typing.Optional[core.File] + See core.File for more documentation + + name : typing.Optional[str] + Name of the dubbing project. + + source_url : typing.Optional[str] + URL of the source video/audio file. + + source_lang : typing.Optional[str] + Source language. + + num_speakers : typing.Optional[int] + Number of speakers to use for the dubbing. Set to 0 to automatically detect the number of speakers + + watermark : typing.Optional[bool] + Whether to apply watermark to the output video. + + start_time : typing.Optional[int] + Start time of the source video/audio file. + + end_time : typing.Optional[int] + End time of the source video/audio file. + + highest_resolution : typing.Optional[bool] + Whether to use the highest resolution available. + + drop_background_audio : typing.Optional[bool] + An advanced setting. Whether to drop background audio from the final dub. This can improve dub quality where it's known that audio shouldn't have a background track such as for speeches or monologues. 
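A minimal sketch of starting a studio-editable dub with the new flag; the input path is a placeholder, and the `dubbing_id` on `DoDubbingResponse` is what the `/v1/dubbing/resource/...` endpoints expect:

```python
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)

# Placeholder input file; dubbing_studio=True keeps the project editable
# through the /v1/dubbing/resource/... endpoints.
with open("interview.mp4", "rb") as source:
    dub = client.dubbing.dub_a_video_or_an_audio_file(
        file=source,
        target_lang="es",
        num_speakers=0,  # 0 = auto-detect speakers
        dubbing_studio=True,
    )
print(dub.dubbing_id)
```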
+ + use_profanity_filter : typing.Optional[bool] + [BETA] Whether transcripts should have profanities censored with the words '[censored]' + + dubbing_studio : typing.Optional[bool] + Whether to prepare dub for edits in dubbing studio or edits as a dubbing resource. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + DoDubbingResponse + Successful Response + + Examples + -------- + from elevenlabs import ElevenLabs + + client = ElevenLabs( + api_key="YOUR_API_KEY", + ) + client.dubbing.dub_a_video_or_an_audio_file( + target_lang="target_lang", + ) + """ + _response = self._client_wrapper.httpx_client.request( + "v1/dubbing", + method="POST", + data={ + "name": name, + "source_url": source_url, + "source_lang": source_lang, + "target_lang": target_lang, + "num_speakers": num_speakers, + "watermark": watermark, + "start_time": start_time, + "end_time": end_time, + "highest_resolution": highest_resolution, + "drop_background_audio": drop_background_audio, + "use_profanity_filter": use_profanity_filter, + "dubbing_studio": dubbing_studio, + }, + files={ + "file": file, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + DoDubbingResponse, + construct_type( + type_=DoDubbingResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def get_dubbing_project_metadata( + self, dubbing_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> DubbingMetadataResponse: + """ + Returns metadata about a dubbing project, including whether it's still in progress or not + + Parameters + ---------- + dubbing_id : str + ID of the dubbing project. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + DubbingMetadataResponse + Successful Response + + Examples + -------- + from elevenlabs import ElevenLabs + + client = ElevenLabs( + api_key="YOUR_API_KEY", + ) + client.dubbing.get_dubbing_project_metadata( + dubbing_id="dubbing_id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"v1/dubbing/{jsonable_encoder(dubbing_id)}", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + DubbingMetadataResponse, + construct_type( + type_=DubbingMetadataResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def delete_dubbing_project( + self, dubbing_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> DeleteDubbingResponseModel: + """ + Deletes a dubbing project. + + Parameters + ---------- + dubbing_id : str + ID of the dubbing project. 
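Since dubs finish asynchronously, metadata polling is the usual pattern before downloading or deleting a project; `status` and the "dubbing" in-progress value are assumptions about `DubbingMetadataResponse`:

```python
import time

from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)

# Wait until the project leaves the in-progress state.
# `status` and the "dubbing" value are assumed details of the model.
while True:
    metadata = client.dubbing.get_dubbing_project_metadata(
        dubbing_id="dubbing_id",
    )
    if metadata.status != "dubbing":
        break
    time.sleep(10)
```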
+ + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + DeleteDubbingResponseModel + Successful Response + + Examples + -------- + from elevenlabs import ElevenLabs + + client = ElevenLabs( + api_key="YOUR_API_KEY", + ) + client.dubbing.delete_dubbing_project( + dubbing_id="dubbing_id", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"v1/dubbing/{jsonable_encoder(dubbing_id)}", + method="DELETE", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + DeleteDubbingResponseModel, + construct_type( + type_=DeleteDubbingResponseModel, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def get_dubbed_file( + self, dubbing_id: str, language_code: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.Iterator[bytes]: + """ + Returns dubbed file as a streamed file. Videos will be returned in MP4 format and audio only dubs will be returned in MP3. + + Parameters + ---------- + dubbing_id : str + ID of the dubbing project. + + language_code : str + ID of the language. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response. + + Yields + ------ + typing.Iterator[bytes] + The dubbed audio or video file + """ + with self._client_wrapper.httpx_client.stream( + f"v1/dubbing/{jsonable_encoder(dubbing_id)}/audio/{jsonable_encoder(language_code)}", + method="GET", + request_options=request_options, + ) as _response: + try: + if 200 <= _response.status_code < 300: + _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024 + for _chunk in _response.iter_bytes(chunk_size=_chunk_size): + yield _chunk + return + _response.read() + if _response.status_code == 403: + raise ForbiddenError( + typing.cast( + typing.Optional[typing.Any], + construct_type( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 404: + raise NotFoundError( + typing.cast( + typing.Optional[typing.Any], + construct_type( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 425: + raise TooEarlyError( + typing.cast( + typing.Optional[typing.Any], + construct_type( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def get_transcript_for_dub( + self, + dubbing_id: str, + language_code: str, + *, + format_type: typing.Optional[DubbingGetTranscriptForDubRequestFormatType] = None, + request_options: 
typing.Optional[RequestOptions] = None, + ) -> str: + """ + Returns transcript for the dub as an SRT or WEBVTT file. + + Parameters + ---------- + dubbing_id : str + ID of the dubbing project. + + language_code : str + ID of the language. + + format_type : typing.Optional[DubbingGetTranscriptForDubRequestFormatType] + Format to use for the subtitle file, either 'srt' or 'webvtt' + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + str + Successful Response + + Examples + -------- + from elevenlabs import ElevenLabs + + client = ElevenLabs( + api_key="YOUR_API_KEY", + ) + client.dubbing.get_transcript_for_dub( + dubbing_id="dubbing_id", + language_code="language_code", + ) + """ + _response = self._client_wrapper.httpx_client.request( + f"v1/dubbing/{jsonable_encoder(dubbing_id)}/transcript/{jsonable_encoder(language_code)}", + method="GET", + params={ + "format_type": format_type, + }, + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + str, + construct_type( + type_=str, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 403: + raise ForbiddenError( + typing.cast( + typing.Optional[typing.Any], + construct_type( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 404: + raise NotFoundError( + typing.cast( + typing.Optional[typing.Any], + construct_type( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 425: + raise TooEarlyError( + typing.cast( + typing.Optional[typing.Any], + construct_type( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + +class AsyncDubbingClient: + def __init__(self, *, client_wrapper: AsyncClientWrapper): + self._client_wrapper = client_wrapper + + async def get_dubbing_resource( + self, dubbing_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> DubbingResource: + """ + Given a dubbing ID generated from the '/v1/dubbing' endpoint with studio enabled, returns the dubbing resource. + + Parameters + ---------- + dubbing_id : str + ID of the dubbing project. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+
+        Returns
+        -------
+        DubbingResource
+            Successful Response
+
+        Examples
+        --------
+        import asyncio
+
+        from elevenlabs import AsyncElevenLabs
+
+        client = AsyncElevenLabs(
+            api_key="YOUR_API_KEY",
+        )
+
+
+        async def main() -> None:
+            await client.dubbing.get_dubbing_resource(
+                dubbing_id="dubbing_id",
+            )
+
+
+        asyncio.run(main())
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            f"v1/dubbing/resource/{jsonable_encoder(dubbing_id)}",
+            method="GET",
+            request_options=request_options,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return typing.cast(
+                    DubbingResource,
+                    construct_type(
+                        type_=DubbingResource,  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    typing.cast(
+                        HttpValidationError,
+                        construct_type(
+                            type_=HttpValidationError,  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def add_language_to_resource(
+        self, dubbing_id: str, *, language: str, request_options: typing.Optional[RequestOptions] = None
+    ) -> LanguageAddedResponse:
+        """
+        Adds the given ElevenLabs Turbo V2/V2.5 language code to the resource. Does not automatically generate transcripts/translations/audio.
+
+        Parameters
+        ----------
+        dubbing_id : str
+            ID of the dubbing project.
+
+        language : str
+            The target language.
+
+        request_options : typing.Optional[RequestOptions]
+            Request-specific configuration.
+
+        Returns
+        -------
+        LanguageAddedResponse
+            Successful Response
+
+        Examples
+        --------
+        import asyncio
+
+        from elevenlabs import AsyncElevenLabs
+
+        client = AsyncElevenLabs(
+            api_key="YOUR_API_KEY",
+        )
+
+
+        async def main() -> None:
+            await client.dubbing.add_language_to_resource(
+                dubbing_id="dubbing_id",
+                language="language",
+            )
+
+
+        asyncio.run(main())
+        """
+        _response = await self._client_wrapper.httpx_client.request(
+            f"v1/dubbing/resource/{jsonable_encoder(dubbing_id)}/language",
+            method="POST",
+            json={
+                "language": language,
+            },
+            headers={
+                "content-type": "application/json",
+            },
+            request_options=request_options,
+            omit=OMIT,
+        )
+        try:
+            if 200 <= _response.status_code < 300:
+                return typing.cast(
+                    LanguageAddedResponse,
+                    construct_type(
+                        type_=LanguageAddedResponse,  # type: ignore
+                        object_=_response.json(),
+                    ),
+                )
+            if _response.status_code == 422:
+                raise UnprocessableEntityError(
+                    typing.cast(
+                        HttpValidationError,
+                        construct_type(
+                            type_=HttpValidationError,  # type: ignore
+                            object_=_response.json(),
+                        ),
+                    )
+                )
+            _response_json = _response.json()
+        except JSONDecodeError:
+            raise ApiError(status_code=_response.status_code, body=_response.text)
+        raise ApiError(status_code=_response.status_code, body=_response_json)
+
+    async def create_segment_for_speaker(
+        self,
+        dubbing_id: str,
+        speaker_id: str,
+        *,
+        start_time: float,
+        end_time: float,
+        text: typing.Optional[str] = OMIT,
+        request_options: typing.Optional[RequestOptions] = None,
+    ) -> SegmentCreateResponse:
+        """
+        Creates a new segment in the dubbing resource with a start and end time for the speaker in every available language. Does not automatically generate transcripts/translations/audio.
+
+        Parameters
+        ----------
+        dubbing_id : str
+            ID of the dubbing project.
+
+        speaker_id : str
+            ID of the speaker.
+ + start_time : float + + end_time : float + + text : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + SegmentCreateResponse + Successful Response + + Examples + -------- + import asyncio + + from elevenlabs import AsyncElevenLabs + + client = AsyncElevenLabs( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.dubbing.create_segment_for_speaker( + dubbing_id="dubbing_id", + speaker_id="speaker_id", + start_time=1.1, + end_time=1.1, + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + f"v1/dubbing/resource/{jsonable_encoder(dubbing_id)}/speaker/{jsonable_encoder(speaker_id)}/segment", + method="POST", + json={ + "start_time": start_time, + "end_time": end_time, + "text": text, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + SegmentCreateResponse, + construct_type( + type_=SegmentCreateResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def update_segment_language( + self, + dubbing_id: str, + segment_id: str, + language: str, + *, + start_time: typing.Optional[float] = OMIT, + end_time: typing.Optional[float] = OMIT, + text: typing.Optional[str] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> SegmentUpdateResponse: + """ + Modifies a single segment with new text and/or start/end times. Will update the values for only a specific language of a segment. Does not automatically regenerate the dub. + + Parameters + ---------- + dubbing_id : str + ID of the dubbing project. + + segment_id : str + ID of the segment + + language : str + ID of the language. + + start_time : typing.Optional[float] + + end_time : typing.Optional[float] + + text : typing.Optional[str] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
+ + Returns + ------- + SegmentUpdateResponse + Successful Response + + Examples + -------- + import asyncio + + from elevenlabs import AsyncElevenLabs + + client = AsyncElevenLabs( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.dubbing.update_segment_language( + dubbing_id="dubbing_id", + segment_id="segment_id", + language="language", + ) + + + asyncio.run(main()) """ - _response = self._client_wrapper.httpx_client.request( - "v1/dubbing", - method="POST", - data={ - "name": name, - "source_url": source_url, - "source_lang": source_lang, - "target_lang": target_lang, - "num_speakers": num_speakers, - "watermark": watermark, + _response = await self._client_wrapper.httpx_client.request( + f"v1/dubbing/resource/{jsonable_encoder(dubbing_id)}/segment/{jsonable_encoder(segment_id)}/{jsonable_encoder(language)}", + method="PATCH", + json={ "start_time": start_time, "end_time": end_time, - "highest_resolution": highest_resolution, - "drop_background_audio": drop_background_audio, - "use_profanity_filter": use_profanity_filter, + "text": text, }, - files={ - "file": file, + headers={ + "content-type": "application/json", }, request_options=request_options, omit=OMIT, @@ -125,9 +1389,9 @@ def dub_a_video_or_an_audio_file( try: if 200 <= _response.status_code < 300: return typing.cast( - DoDubbingResponse, + SegmentUpdateResponse, construct_type( - type_=DoDubbingResponse, # type: ignore + type_=SegmentUpdateResponse, # type: ignore object_=_response.json(), ), ) @@ -146,47 +1410,59 @@ def dub_a_video_or_an_audio_file( raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def get_dubbing_project_metadata( - self, dubbing_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> DubbingMetadataResponse: + async def delete_segment( + self, dubbing_id: str, segment_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> SegmentDeleteResponse: """ - Returns metadata about a dubbing project, including whether it's still in progress or not + Deletes a single segment from the dubbing. Parameters ---------- dubbing_id : str ID of the dubbing project. + segment_id : str + ID of the segment + request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - DubbingMetadataResponse + SegmentDeleteResponse Successful Response Examples -------- - from elevenlabs import ElevenLabs + import asyncio - client = ElevenLabs( + from elevenlabs import AsyncElevenLabs + + client = AsyncElevenLabs( api_key="YOUR_API_KEY", ) - client.dubbing.get_dubbing_project_metadata( - dubbing_id="dubbing_id", - ) + + + async def main() -> None: + await client.dubbing.delete_segment( + dubbing_id="dubbing_id", + segment_id="segment_id", + ) + + + asyncio.run(main()) """ - _response = self._client_wrapper.httpx_client.request( - f"v1/dubbing/{jsonable_encoder(dubbing_id)}", - method="GET", + _response = await self._client_wrapper.httpx_client.request( + f"v1/dubbing/resource/{jsonable_encoder(dubbing_id)}/segment/{jsonable_encoder(segment_id)}", + method="DELETE", request_options=request_options, ) try: if 200 <= _response.status_code < 300: return typing.cast( - DubbingMetadataResponse, + SegmentDeleteResponse, construct_type( - type_=DubbingMetadataResponse, # type: ignore + type_=SegmentDeleteResponse, # type: ignore object_=_response.json(), ), ) @@ -205,47 +1481,70 @@ def get_dubbing_project_metadata( raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def delete_dubbing_project( - self, dubbing_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> typing.Optional[typing.Any]: + async def transcribe_segments( + self, + dubbing_id: str, + *, + segments: typing.Sequence[str], + request_options: typing.Optional[RequestOptions] = None, + ) -> SegmentTranscriptionResponse: """ - Deletes a dubbing project. + Regenerate the transcriptions for the specified segments. Does not automatically regenerate translations or dubs. Parameters ---------- dubbing_id : str ID of the dubbing project. + segments : typing.Sequence[str] + Transcribe this specific list of segments. + request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - typing.Optional[typing.Any] + SegmentTranscriptionResponse Successful Response Examples -------- - from elevenlabs import ElevenLabs + import asyncio - client = ElevenLabs( + from elevenlabs import AsyncElevenLabs + + client = AsyncElevenLabs( api_key="YOUR_API_KEY", ) - client.dubbing.delete_dubbing_project( - dubbing_id="dubbing_id", - ) + + + async def main() -> None: + await client.dubbing.transcribe_segments( + dubbing_id="dubbing_id", + segments=["segments"], + ) + + + asyncio.run(main()) """ - _response = self._client_wrapper.httpx_client.request( - f"v1/dubbing/{jsonable_encoder(dubbing_id)}", - method="DELETE", + _response = await self._client_wrapper.httpx_client.request( + f"v1/dubbing/resource/{jsonable_encoder(dubbing_id)}/transcribe", + method="POST", + json={ + "segments": segments, + }, + headers={ + "content-type": "application/json", + }, request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - typing.Optional[typing.Any], + SegmentTranscriptionResponse, construct_type( - type_=typing.Optional[typing.Any], # type: ignore + type_=SegmentTranscriptionResponse, # type: ignore object_=_response.json(), ), ) @@ -264,111 +1563,164 @@ def delete_dubbing_project( raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def get_dubbed_file( - self, dubbing_id: str, language_code: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> typing.Iterator[bytes]: + async def translate_segments( + self, + dubbing_id: str, + *, + segments: typing.Sequence[str], + languages: typing.Sequence[str], + request_options: typing.Optional[RequestOptions] = None, + ) -> SegmentTranslationResponse: """ - Returns dubbed file as a streamed file. Videos will be returned in MP4 format and audio only dubs will be returned in MP3. + Regenerate the translations for either the entire resource or the specified segments/languages. Will automatically transcribe missing transcriptions. Will not automatically regenerate the dubs. Parameters ---------- dubbing_id : str ID of the dubbing project. - language_code : str - ID of the language. + segments : typing.Sequence[str] + Translate only this list of segments. + + languages : typing.Sequence[str] + Translate only these languages for each segment. request_options : typing.Optional[RequestOptions] - Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response. + Request-specific configuration. 
- Yields - ------ - typing.Iterator[bytes] + Returns + ------- + SegmentTranslationResponse Successful Response + + Examples + -------- + import asyncio + + from elevenlabs import AsyncElevenLabs + + client = AsyncElevenLabs( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.dubbing.translate_segments( + dubbing_id="dubbing_id", + segments=["segments"], + languages=["languages"], + ) + + + asyncio.run(main()) """ - with self._client_wrapper.httpx_client.stream( - f"v1/dubbing/{jsonable_encoder(dubbing_id)}/audio/{jsonable_encoder(language_code)}", - method="GET", + _response = await self._client_wrapper.httpx_client.request( + f"v1/dubbing/resource/{jsonable_encoder(dubbing_id)}/translate", + method="POST", + json={ + "segments": segments, + "languages": languages, + }, + headers={ + "content-type": "application/json", + }, request_options=request_options, - ) as _response: - try: - if 200 <= _response.status_code < 300: - _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024 - for _chunk in _response.iter_bytes(chunk_size=_chunk_size): - yield _chunk - return - _response.read() - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + SegmentTranslationResponse, + construct_type( + type_=SegmentTranslationResponse, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) - def get_transcript_for_dub( + async def dub_segments( self, dubbing_id: str, - language_code: str, *, - format_type: typing.Optional[DubbingGetTranscriptForDubRequestFormatType] = None, + segments: typing.Sequence[str], + languages: typing.Sequence[str], request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[typing.Any]: + ) -> SegmentDubResponse: """ - Returns transcript for the dub as an SRT file. + Regenerate the dubs for either the entire resource or the specified segments/languages. Will automatically transcribe and translate any missing transcriptions and translations. Parameters ---------- dubbing_id : str ID of the dubbing project. - language_code : str - ID of the language. + segments : typing.Sequence[str] + Dub only this list of segments. - format_type : typing.Optional[DubbingGetTranscriptForDubRequestFormatType] - Format to use for the subtitle file, either 'srt' or 'webvtt' + languages : typing.Sequence[str] + Dub only these languages for each segment. request_options : typing.Optional[RequestOptions] Request-specific configuration. 
         Returns
         -------
-        typing.Optional[typing.Any]
+        SegmentDubResponse
             Successful Response

         Examples
         --------
-        from elevenlabs import ElevenLabs
+        import asyncio

-        client = ElevenLabs(
+        from elevenlabs import AsyncElevenLabs
+
+        client = AsyncElevenLabs(
             api_key="YOUR_API_KEY",
         )
-        client.dubbing.get_transcript_for_dub(
-            dubbing_id="dubbing_id",
-            language_code="language_code",
-        )
+
+
+        async def main() -> None:
+            await client.dubbing.dub_segments(
+                dubbing_id="dubbing_id",
+                segments=["segments"],
+                languages=["languages"],
+            )
+
+
+        asyncio.run(main())
         """
-        _response = self._client_wrapper.httpx_client.request(
-            f"v1/dubbing/{jsonable_encoder(dubbing_id)}/transcript/{jsonable_encoder(language_code)}",
-            method="GET",
-            params={
-                "format_type": format_type,
+        _response = await self._client_wrapper.httpx_client.request(
+            f"v1/dubbing/resource/{jsonable_encoder(dubbing_id)}/dub",
+            method="POST",
+            json={
+                "segments": segments,
+                "languages": languages,
+            },
+            headers={
+                "content-type": "application/json",
             },
             request_options=request_options,
+            omit=OMIT,
         )
         try:
             if 200 <= _response.status_code < 300:
                 return typing.cast(
-                    typing.Optional[typing.Any],
+                    SegmentDubResponse,
                     construct_type(
-                        type_=typing.Optional[typing.Any],  # type: ignore
+                        type_=SegmentDubResponse,  # type: ignore
                         object_=_response.json(),
                     ),
                 )
@@ -387,11 +1739,6 @@ def get_transcript_for_dub(
             raise ApiError(status_code=_response.status_code, body=_response.text)
         raise ApiError(status_code=_response.status_code, body=_response_json)

-
-class AsyncDubbingClient:
-    def __init__(self, *, client_wrapper: AsyncClientWrapper):
-        self._client_wrapper = client_wrapper
-
     async def dub_a_video_or_an_audio_file(
         self,
         *,
@@ -407,10 +1754,11 @@ async def dub_a_video_or_an_audio_file(
         highest_resolution: typing.Optional[bool] = OMIT,
         drop_background_audio: typing.Optional[bool] = OMIT,
         use_profanity_filter: typing.Optional[bool] = OMIT,
+        dubbing_studio: typing.Optional[bool] = OMIT,
         request_options: typing.Optional[RequestOptions] = None,
     ) -> DoDubbingResponse:
         """
-        Dubs provided audio or video file into given language.
+        Dubs a provided audio or video file into the given language.

         Parameters
         ----------
@@ -450,6 +1798,9 @@ async def dub_a_video_or_an_audio_file(
         use_profanity_filter : typing.Optional[bool]
             [BETA] Whether transcripts should have profanities censored with the words '[censored]'

+        dubbing_studio : typing.Optional[bool]
+            Whether to prepare dub for edits in dubbing studio or edits as a dubbing resource.
+
         request_options : typing.Optional[RequestOptions]
             Request-specific configuration.

@@ -492,6 +1843,7 @@ async def main() -> None:
                 "highest_resolution": highest_resolution,
                 "drop_background_audio": drop_background_audio,
                 "use_profanity_filter": use_profanity_filter,
+                "dubbing_studio": dubbing_studio,
             },
             files={
                 "file": file,
             },
@@ -592,7 +1944,7 @@ async def main() -> None:

     async def delete_dubbing_project(
         self, dubbing_id: str, *, request_options: typing.Optional[RequestOptions] = None
-    ) -> typing.Optional[typing.Any]:
+    ) -> DeleteDubbingResponseModel:
         """
         Deletes a dubbing project.
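The segment endpoints above compose into an edit-and-regenerate loop: correct a segment's text in one language, then re-dub just that segment. A minimal async sketch against the signatures documented in this diff; the `dubbing_id`, `segment_id`, and `es` values are placeholders:

```python
import asyncio

from elevenlabs import AsyncElevenLabs

client = AsyncElevenLabs(
    api_key="YOUR_API_KEY",
)


async def main() -> None:
    # Inspect the editable resource (requires a project created with
    # dubbing_studio=True, per dub_a_video_or_an_audio_file above).
    await client.dubbing.get_dubbing_resource(
        dubbing_id="dubbing_id",
    )

    # Correct the Spanish text of one segment; start/end times are left unchanged.
    await client.dubbing.update_segment_language(
        dubbing_id="dubbing_id",
        segment_id="segment_id",
        language="es",
        text="Texto corregido.",
    )

    # Re-dub only that segment in Spanish; missing transcriptions and
    # translations are generated automatically, per the docstring above.
    await client.dubbing.dub_segments(
        dubbing_id="dubbing_id",
        segments=["segment_id"],
        languages=["es"],
    )


asyncio.run(main())
```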
@@ -606,7 +1958,7 @@ async def delete_dubbing_project( Returns ------- - typing.Optional[typing.Any] + DeleteDubbingResponseModel Successful Response Examples @@ -636,9 +1988,9 @@ async def main() -> None: try: if 200 <= _response.status_code < 300: return typing.cast( - typing.Optional[typing.Any], + DeleteDubbingResponseModel, construct_type( - type_=typing.Optional[typing.Any], # type: ignore + type_=DeleteDubbingResponseModel, # type: ignore object_=_response.json(), ), ) @@ -677,7 +2029,7 @@ async def get_dubbed_file( Yields ------ typing.AsyncIterator[bytes] - Successful Response + The dubbed audio or video file """ async with self._client_wrapper.httpx_client.stream( f"v1/dubbing/{jsonable_encoder(dubbing_id)}/audio/{jsonable_encoder(language_code)}", @@ -691,6 +2043,26 @@ async def get_dubbed_file( yield _chunk return await _response.aread() + if _response.status_code == 403: + raise ForbiddenError( + typing.cast( + typing.Optional[typing.Any], + construct_type( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 404: + raise NotFoundError( + typing.cast( + typing.Optional[typing.Any], + construct_type( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) if _response.status_code == 422: raise UnprocessableEntityError( typing.cast( @@ -701,6 +2073,16 @@ async def get_dubbed_file( ), ) ) + if _response.status_code == 425: + raise TooEarlyError( + typing.cast( + typing.Optional[typing.Any], + construct_type( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) @@ -713,9 +2095,9 @@ async def get_transcript_for_dub( *, format_type: typing.Optional[DubbingGetTranscriptForDubRequestFormatType] = None, request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[typing.Any]: + ) -> str: """ - Returns transcript for the dub as an SRT file. + Returns transcript for the dub as an SRT or WEBVTT file. 
Parameters ---------- @@ -733,7 +2115,7 @@ async def get_transcript_for_dub( Returns ------- - typing.Optional[typing.Any] + str Successful Response Examples @@ -767,12 +2149,32 @@ async def main() -> None: try: if 200 <= _response.status_code < 300: return typing.cast( - typing.Optional[typing.Any], + str, construct_type( - type_=typing.Optional[typing.Any], # type: ignore + type_=str, # type: ignore object_=_response.json(), ), ) + if _response.status_code == 403: + raise ForbiddenError( + typing.cast( + typing.Optional[typing.Any], + construct_type( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 404: + raise NotFoundError( + typing.cast( + typing.Optional[typing.Any], + construct_type( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) if _response.status_code == 422: raise UnprocessableEntityError( typing.cast( @@ -783,6 +2185,16 @@ async def main() -> None: ), ) ) + if _response.status_code == 425: + raise TooEarlyError( + typing.cast( + typing.Optional[typing.Any], + construct_type( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) diff --git a/src/elevenlabs/errors/__init__.py b/src/elevenlabs/errors/__init__.py index cb64e066..2289b828 100644 --- a/src/elevenlabs/errors/__init__.py +++ b/src/elevenlabs/errors/__init__.py @@ -1,5 +1,8 @@ # This file was auto-generated by Fern from our API Definition. +from .forbidden_error import ForbiddenError +from .not_found_error import NotFoundError +from .too_early_error import TooEarlyError from .unprocessable_entity_error import UnprocessableEntityError -__all__ = ["UnprocessableEntityError"] +__all__ = ["ForbiddenError", "NotFoundError", "TooEarlyError", "UnprocessableEntityError"] diff --git a/src/elevenlabs/errors/forbidden_error.py b/src/elevenlabs/errors/forbidden_error.py new file mode 100644 index 00000000..d17eb4b9 --- /dev/null +++ b/src/elevenlabs/errors/forbidden_error.py @@ -0,0 +1,9 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.api_error import ApiError +import typing + + +class ForbiddenError(ApiError): + def __init__(self, body: typing.Optional[typing.Any]): + super().__init__(status_code=403, body=body) diff --git a/src/elevenlabs/errors/not_found_error.py b/src/elevenlabs/errors/not_found_error.py new file mode 100644 index 00000000..a1235b87 --- /dev/null +++ b/src/elevenlabs/errors/not_found_error.py @@ -0,0 +1,9 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.api_error import ApiError +import typing + + +class NotFoundError(ApiError): + def __init__(self, body: typing.Optional[typing.Any]): + super().__init__(status_code=404, body=body) diff --git a/src/elevenlabs/errors/too_early_error.py b/src/elevenlabs/errors/too_early_error.py new file mode 100644 index 00000000..33796003 --- /dev/null +++ b/src/elevenlabs/errors/too_early_error.py @@ -0,0 +1,9 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.api_error import ApiError +import typing + + +class TooEarlyError(ApiError): + def __init__(self, body: typing.Optional[typing.Any]): + super().__init__(status_code=425, body=body) diff --git a/src/elevenlabs/projects/client.py b/src/elevenlabs/projects/client.py index 21a7f4b6..754bb8f8 100644 --- a/src/elevenlabs/projects/client.py +++ b/src/elevenlabs/projects/client.py @@ -75,7 +75,7 @@ def create_podcast( The ID of the model to be used for this Studio project, you can query GET /v1/models to list all available models. mode : BodyCreatePodcastV1ProjectsPodcastCreatePostMode - The type of podcast to generate + The type of podcast to generate. Can be 'conversation', an interaction between two voices, or 'bulletin', a monologue. source : BodyCreatePodcastV1ProjectsPodcastCreatePostSource The source content for the Podcast. @@ -130,12 +130,12 @@ def create_podcast( model_id="21m00Tcm4TlvDq8ikWAM", mode=BodyCreatePodcastV1ProjectsPodcastCreatePostMode_Conversation( conversation=PodcastConversationModeData( - host_voice_id="host_voice_id", - guest_voice_id="guest_voice_id", + host_voice_id="aw1NgEzBg83R7vgmiJt6", + guest_voice_id="aw1NgEzBg83R7vgmiJt7", ), ), source=PodcastTextSource( - text="text", + text="This is a test podcast.", ), ) """ @@ -1394,7 +1394,7 @@ def list_chapter_snapshots( self, project_id: str, chapter_id: str, *, request_options: typing.Optional[RequestOptions] = None ) -> ChapterSnapshotsResponse: """ - Gets information about all the snapshots of a chapter, each snapshot corresponds can be downloaded as audio. Whenever a chapter is converted a snapshot will be automatically created. + Gets information about all the snapshots of a chapter. Each snapshot can be downloaded as audio. Whenever a chapter is converted a snapshot will automatically be created. Parameters ---------- @@ -1644,7 +1644,7 @@ async def create_podcast( The ID of the model to be used for this Studio project, you can query GET /v1/models to list all available models. mode : BodyCreatePodcastV1ProjectsPodcastCreatePostMode - The type of podcast to generate + The type of podcast to generate. Can be 'conversation', an interaction between two voices, or 'bulletin', a monologue. source : BodyCreatePodcastV1ProjectsPodcastCreatePostSource The source content for the Podcast. @@ -1704,12 +1704,12 @@ async def main() -> None: model_id="21m00Tcm4TlvDq8ikWAM", mode=BodyCreatePodcastV1ProjectsPodcastCreatePostMode_Conversation( conversation=PodcastConversationModeData( - host_voice_id="host_voice_id", - guest_voice_id="guest_voice_id", + host_voice_id="aw1NgEzBg83R7vgmiJt6", + guest_voice_id="aw1NgEzBg83R7vgmiJt7", ), ), source=PodcastTextSource( - text="text", + text="This is a test podcast.", ), ) @@ -3091,7 +3091,7 @@ async def list_chapter_snapshots( self, project_id: str, chapter_id: str, *, request_options: typing.Optional[RequestOptions] = None ) -> ChapterSnapshotsResponse: """ - Gets information about all the snapshots of a chapter, each snapshot corresponds can be downloaded as audio. Whenever a chapter is converted a snapshot will be automatically created. + Gets information about all the snapshots of a chapter. Each snapshot can be downloaded as audio. Whenever a chapter is converted a snapshot will automatically be created. 
Parameters ---------- diff --git a/src/elevenlabs/projects/types/body_create_podcast_v_1_projects_podcast_create_post_mode.py b/src/elevenlabs/projects/types/body_create_podcast_v_1_projects_podcast_create_post_mode.py index 8ade8e84..85cddc92 100644 --- a/src/elevenlabs/projects/types/body_create_podcast_v_1_projects_podcast_create_post_mode.py +++ b/src/elevenlabs/projects/types/body_create_podcast_v_1_projects_podcast_create_post_mode.py @@ -13,7 +13,7 @@ class BodyCreatePodcastV1ProjectsPodcastCreatePostMode_Conversation(UncheckedBaseModel): """ - The type of podcast to generate + The type of podcast to generate. Can be 'conversation', an interaction between two voices, or 'bulletin', a monologue. """ type: typing.Literal["conversation"] = "conversation" @@ -31,7 +31,7 @@ class Config: class BodyCreatePodcastV1ProjectsPodcastCreatePostMode_Bulletin(UncheckedBaseModel): """ - The type of podcast to generate + The type of podcast to generate. Can be 'conversation', an interaction between two voices, or 'bulletin', a monologue. """ type: typing.Literal["bulletin"] = "bulletin" diff --git a/src/elevenlabs/pronunciation_dictionary/client.py b/src/elevenlabs/pronunciation_dictionary/client.py index 44ac4d8a..7d7b16f9 100644 --- a/src/elevenlabs/pronunciation_dictionary/client.py +++ b/src/elevenlabs/pronunciation_dictionary/client.py @@ -56,7 +56,7 @@ def add_from_file( A description of the pronunciation dictionary, used for identification only. workspace_access : typing.Optional[PronunciationDictionaryAddFromFileRequestWorkspaceAccess] - Should be one of 'editor' or 'viewer'. If not provided, defaults to no access. + Should be one of 'admin', 'editor' or 'viewer'. If not provided, defaults to no access. request_options : typing.Optional[RequestOptions] Request-specific configuration. @@ -157,8 +157,8 @@ def add_rules( pronunciation_dictionary_id="21m00Tcm4TlvDq8ikWAM", rules=[ PronunciationDictionaryRule_Alias( - string_to_replace="string_to_replace", - alias="alias", + string_to_replace="Thailand", + alias="tie-land", ) ], ) @@ -277,9 +277,9 @@ def remove_rules( def download( self, dictionary_id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> str: + ) -> typing.Iterator[bytes]: """ - Get PLS file with a pronunciation dictionary version rules + Get a PLS file with a pronunciation dictionary version rules Parameters ---------- @@ -290,12 +290,12 @@ def download( The id of the version of the pronunciation dictionary request_options : typing.Optional[RequestOptions] - Request-specific configuration. + Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response. 
- Returns - ------- - str - Successful Response + Yields + ------ + typing.Iterator[bytes] + The PLS file containing pronunciation dictionary rules Examples -------- @@ -309,28 +309,32 @@ def download( version_id="KZFyRUq3R6kaqhKI146w", ) """ - _response = self._client_wrapper.httpx_client.request( + with self._client_wrapper.httpx_client.stream( f"v1/pronunciation-dictionaries/{jsonable_encoder(dictionary_id)}/{jsonable_encoder(version_id)}/download", method="GET", request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return _response.text # type: ignore - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - construct_type( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), + ) as _response: + try: + if 200 <= _response.status_code < 300: + _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024 + for _chunk in _response.iter_bytes(chunk_size=_chunk_size): + yield _chunk + return + _response.read() + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) def get( self, pronunciation_dictionary_id: str, *, request_options: typing.Optional[RequestOptions] = None @@ -490,7 +494,7 @@ async def add_from_file( A description of the pronunciation dictionary, used for identification only. workspace_access : typing.Optional[PronunciationDictionaryAddFromFileRequestWorkspaceAccess] - Should be one of 'editor' or 'viewer'. If not provided, defaults to no access. + Should be one of 'admin', 'editor' or 'viewer'. If not provided, defaults to no access. request_options : typing.Optional[RequestOptions] Request-specific configuration. @@ -604,8 +608,8 @@ async def main() -> None: pronunciation_dictionary_id="21m00Tcm4TlvDq8ikWAM", rules=[ PronunciationDictionaryRule_Alias( - string_to_replace="string_to_replace", - alias="alias", + string_to_replace="Thailand", + alias="tie-land", ) ], ) @@ -735,9 +739,9 @@ async def main() -> None: async def download( self, dictionary_id: str, version_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> str: + ) -> typing.AsyncIterator[bytes]: """ - Get PLS file with a pronunciation dictionary version rules + Get a PLS file with a pronunciation dictionary version rules Parameters ---------- @@ -748,12 +752,12 @@ async def download( The id of the version of the pronunciation dictionary request_options : typing.Optional[RequestOptions] - Request-specific configuration. + Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response. 
-        Returns
-        -------
-        str
-            Successful Response
+        Yields
+        ------
+        typing.AsyncIterator[bytes]
+            The PLS file containing pronunciation dictionary rules

         Examples
         --------
@@ -775,28 +779,32 @@ async def main() -> None:

         asyncio.run(main())
         """
-        _response = await self._client_wrapper.httpx_client.request(
+        async with self._client_wrapper.httpx_client.stream(
             f"v1/pronunciation-dictionaries/{jsonable_encoder(dictionary_id)}/{jsonable_encoder(version_id)}/download",
             method="GET",
             request_options=request_options,
-        )
-        try:
-            if 200 <= _response.status_code < 300:
-                return _response.text  # type: ignore
-            if _response.status_code == 422:
-                raise UnprocessableEntityError(
-                    typing.cast(
-                        HttpValidationError,
-                        construct_type(
-                            type_=HttpValidationError,  # type: ignore
-                            object_=_response.json(),
-                        ),
+        ) as _response:
+            try:
+                if 200 <= _response.status_code < 300:
+                    _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024
+                    async for _chunk in _response.aiter_bytes(chunk_size=_chunk_size):
+                        yield _chunk
+                    return
+                await _response.aread()
+                if _response.status_code == 422:
+                    raise UnprocessableEntityError(
+                        typing.cast(
+                            HttpValidationError,
+                            construct_type(
+                                type_=HttpValidationError,  # type: ignore
+                                object_=_response.json(),
+                            ),
+                        )
                     )
-                )
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
+                _response_json = _response.json()
+            except JSONDecodeError:
+                raise ApiError(status_code=_response.status_code, body=_response.text)
+            raise ApiError(status_code=_response.status_code, body=_response_json)

     async def get(
         self, pronunciation_dictionary_id: str, *, request_options: typing.Optional[RequestOptions] = None
diff --git a/src/elevenlabs/speech_to_speech/client.py b/src/elevenlabs/speech_to_speech/client.py
index 207f087f..86400804 100644
--- a/src/elevenlabs/speech_to_speech/client.py
+++ b/src/elevenlabs/speech_to_speech/client.py
@@ -66,13 +66,13 @@ def convert(
             Identifier of the model that will be used, you can query them using GET /v1/models. The model needs to have support for speech to speech, you can check this using the can_do_voice_conversion property.

         voice_settings : typing.Optional[str]
-            Voice settings overriding stored setttings for the given voice. They are applied only on the given request. Needs to be send as a JSON encoded string.
+            Voice settings overriding stored settings for the given voice. They are applied only on the given request. Needs to be sent as a JSON encoded string.

         seed : typing.Optional[int]
             If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be integer between 0 and 4294967295.

         remove_background_noise : typing.Optional[bool]
-            If set will remove the background noise from your audio input using our audio isolation model. Only applies to Voice Changer.
+            If set, will remove the background noise from your audio input using our audio isolation model. Only applies to Voice Changer.

         request_options : typing.Optional[RequestOptions]
             Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
@@ -182,13 +182,13 @@ def convert_as_stream(
             Identifier of the model that will be used, you can query them using GET /v1/models. The model needs to have support for speech to speech, you can check this using the can_do_voice_conversion property.

         voice_settings : typing.Optional[str]
-            Voice settings overriding stored setttings for the given voice. They are applied only on the given request. Needs to be send as a JSON encoded string.
+            Voice settings overriding stored settings for the given voice. They are applied only on the given request. Needs to be sent as a JSON encoded string.

         seed : typing.Optional[int]
             If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be integer between 0 and 4294967295.

         remove_background_noise : typing.Optional[bool]
-            If set will remove the background noise from your audio input using our audio isolation model. Only applies to Voice Changer.
+            If set, will remove the background noise from your audio input using our audio isolation model. Only applies to Voice Changer.

         request_options : typing.Optional[RequestOptions]
             Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
@@ -303,13 +303,13 @@ async def convert(
             Identifier of the model that will be used, you can query them using GET /v1/models. The model needs to have support for speech to speech, you can check this using the can_do_voice_conversion property.

         voice_settings : typing.Optional[str]
-            Voice settings overriding stored setttings for the given voice. They are applied only on the given request. Needs to be send as a JSON encoded string.
+            Voice settings overriding stored settings for the given voice. They are applied only on the given request. Needs to be sent as a JSON encoded string.

         seed : typing.Optional[int]
             If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be integer between 0 and 4294967295.

         remove_background_noise : typing.Optional[bool]
-            If set will remove the background noise from your audio input using our audio isolation model. Only applies to Voice Changer.
+            If set, will remove the background noise from your audio input using our audio isolation model. Only applies to Voice Changer.

         request_options : typing.Optional[RequestOptions]
             Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response.
@@ -427,13 +427,13 @@ async def convert_as_stream(
             Identifier of the model that will be used, you can query them using GET /v1/models. The model needs to have support for speech to speech, you can check this using the can_do_voice_conversion property.

         voice_settings : typing.Optional[str]
-            Voice settings overriding stored setttings for the given voice. They are applied only on the given request. Needs to be send as a JSON encoded string.
+            Voice settings overriding stored settings for the given voice. They are applied only on the given request. Needs to be sent as a JSON encoded string.

         seed : typing.Optional[int]
             If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed. Must be integer between 0 and 4294967295.
remove_background_noise : typing.Optional[bool] - If set will remove the background noise from your audio input using our audio isolation model. Only applies to Voice Changer. + If set, will remove the background noise from your audio input using our audio isolation model. Only applies to Voice Changer. request_options : typing.Optional[RequestOptions] Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response. diff --git a/src/elevenlabs/speech_to_text/client.py b/src/elevenlabs/speech_to_text/client.py index 674f31d1..ee54d860 100644 --- a/src/elevenlabs/speech_to_text/client.py +++ b/src/elevenlabs/speech_to_text/client.py @@ -26,6 +26,7 @@ def convert( *, model_id: str, file: core.File, + enable_logging: typing.Optional[bool] = None, language_code: typing.Optional[str] = OMIT, tag_audio_events: typing.Optional[bool] = OMIT, num_speakers: typing.Optional[int] = OMIT, @@ -44,6 +45,9 @@ def convert( file : core.File See core.File for more documentation + enable_logging : typing.Optional[bool] + When enable_logging is set to false zero retention mode will be used for the request. This will mean history features are unavailable for this request, including request stitching. Zero retention mode may only be used by enterprise customers. + language_code : typing.Optional[str] An ISO-639-1 or ISO-639-3 language_code corresponding to the language of the audio file. Can sometimes improve transcription performance if known beforehand. Defaults to null, in this case the language is predicted automatically. @@ -57,7 +61,7 @@ def convert( The granularity of the timestamps in the transcription. 'word' provides word-level timestamps and 'character' provides character-level timestamps per word. diarize : typing.Optional[bool] - Whether to annotate which speaker is currently talking in the uploaded file. Enabling this will limit the maximum duration of your inputs to 8 minutes. + Whether to annotate which speaker is currently talking in the uploaded file. request_options : typing.Optional[RequestOptions] Request-specific configuration. @@ -81,6 +85,9 @@ def convert( _response = self._client_wrapper.httpx_client.request( "v1/speech-to-text", method="POST", + params={ + "enable_logging": enable_logging, + }, data={ "model_id": model_id, "language_code": language_code, @@ -129,6 +136,7 @@ async def convert( *, model_id: str, file: core.File, + enable_logging: typing.Optional[bool] = None, language_code: typing.Optional[str] = OMIT, tag_audio_events: typing.Optional[bool] = OMIT, num_speakers: typing.Optional[int] = OMIT, @@ -147,6 +155,9 @@ async def convert( file : core.File See core.File for more documentation + enable_logging : typing.Optional[bool] + When enable_logging is set to false zero retention mode will be used for the request. This will mean history features are unavailable for this request, including request stitching. Zero retention mode may only be used by enterprise customers. + language_code : typing.Optional[str] An ISO-639-1 or ISO-639-3 language_code corresponding to the language of the audio file. Can sometimes improve transcription performance if known beforehand. Defaults to null, in this case the language is predicted automatically. @@ -160,7 +171,7 @@ async def convert( The granularity of the timestamps in the transcription. 'word' provides word-level timestamps and 'character' provides character-level timestamps per word. 
diarize : typing.Optional[bool] - Whether to annotate which speaker is currently talking in the uploaded file. Enabling this will limit the maximum duration of your inputs to 8 minutes. + Whether to annotate which speaker is currently talking in the uploaded file. request_options : typing.Optional[RequestOptions] Request-specific configuration. @@ -192,6 +203,9 @@ async def main() -> None: _response = await self._client_wrapper.httpx_client.request( "v1/speech-to-text", method="POST", + params={ + "enable_logging": enable_logging, + }, data={ "model_id": model_id, "language_code": language_code, diff --git a/src/elevenlabs/speech_to_text/types/speech_to_text_convert_request_timestamps_granularity.py b/src/elevenlabs/speech_to_text/types/speech_to_text_convert_request_timestamps_granularity.py index 18d12c43..f01a8f83 100644 --- a/src/elevenlabs/speech_to_text/types/speech_to_text_convert_request_timestamps_granularity.py +++ b/src/elevenlabs/speech_to_text/types/speech_to_text_convert_request_timestamps_granularity.py @@ -2,4 +2,4 @@ import typing -SpeechToTextConvertRequestTimestampsGranularity = typing.Union[typing.Literal["word", "character"], typing.Any] +SpeechToTextConvertRequestTimestampsGranularity = typing.Union[typing.Literal["none", "word", "character"], typing.Any] diff --git a/src/elevenlabs/studio/chapters/client.py b/src/elevenlabs/studio/chapters/client.py index 14c0e512..61b042eb 100644 --- a/src/elevenlabs/studio/chapters/client.py +++ b/src/elevenlabs/studio/chapters/client.py @@ -15,7 +15,10 @@ from ...types.chapter_content_input_model import ChapterContentInputModel from ...types.edit_chapter_response_model import EditChapterResponseModel from ...core.serialization import convert_and_respect_annotation_metadata +from ...types.delete_chapter_response_model import DeleteChapterResponseModel +from ...types.convert_chapter_response_model import ConvertChapterResponseModel from ...types.chapter_snapshots_response import ChapterSnapshotsResponse +from ...types.chapter_snapshot_extended_response_model import ChapterSnapshotExtendedResponseModel from ...core.client_wrapper import AsyncClientWrapper # this is used as the default value for optional parameters @@ -314,7 +317,7 @@ def edit( def delete( self, project_id: str, chapter_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> typing.Optional[typing.Any]: + ) -> DeleteChapterResponseModel: """ Deletes a chapter. @@ -331,7 +334,7 @@ def delete( Returns ------- - typing.Optional[typing.Any] + DeleteChapterResponseModel Successful Response Examples @@ -354,9 +357,9 @@ def delete( try: if 200 <= _response.status_code < 300: return typing.cast( - typing.Optional[typing.Any], + DeleteChapterResponseModel, construct_type( - type_=typing.Optional[typing.Any], # type: ignore + type_=DeleteChapterResponseModel, # type: ignore object_=_response.json(), ), ) @@ -377,7 +380,7 @@ def delete( def convert( self, project_id: str, chapter_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> typing.Optional[typing.Any]: + ) -> ConvertChapterResponseModel: """ Starts conversion of a specific chapter. 
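For reference, the new `enable_logging` flag on `speech_to_text.convert` above travels as a query parameter, separate from the multipart form fields. A minimal sketch of a zero-retention transcription call; the audio filename is a placeholder and the model ID is an assumption (query GET /v1/models for real values):

```python
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)

# Zero-retention transcription (enterprise only): with enable_logging=False,
# history features such as request stitching are unavailable for this request.
with open("meeting.mp3", "rb") as audio:
    transcript = client.speech_to_text.convert(
        model_id="scribe_v1",  # assumed model ID
        file=audio,
        enable_logging=False,  # sent as a query parameter, per the hunk above
        diarize=True,  # no longer limited to 8-minute inputs, per this diff
    )
```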
@@ -394,7 +397,7 @@ def convert( Returns ------- - typing.Optional[typing.Any] + ConvertChapterResponseModel Successful Response Examples @@ -417,9 +420,9 @@ def convert( try: if 200 <= _response.status_code < 300: return typing.cast( - typing.Optional[typing.Any], + ConvertChapterResponseModel, construct_type( - type_=typing.Optional[typing.Any], # type: ignore + type_=ConvertChapterResponseModel, # type: ignore object_=_response.json(), ), ) @@ -442,7 +445,7 @@ def get_all_snapshots( self, project_id: str, chapter_id: str, *, request_options: typing.Optional[RequestOptions] = None ) -> ChapterSnapshotsResponse: """ - Gets information about all the snapshots of a chapter, each snapshot corresponds can be downloaded as audio. Whenever a chapter is converted a snapshot will be automatically created. + Gets information about all the snapshots of a chapter. Each snapshot can be downloaded as audio. Whenever a chapter is converted a snapshot will automatically be created. Parameters ---------- @@ -501,17 +504,16 @@ def get_all_snapshots( raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def stream_snapshot( + def get_chapter_snapshot( self, project_id: str, chapter_id: str, chapter_snapshot_id: str, *, - convert_to_mpeg: typing.Optional[bool] = OMIT, request_options: typing.Optional[RequestOptions] = None, - ) -> None: + ) -> ChapterSnapshotExtendedResponseModel: """ - Stream the audio from a chapter snapshot. Use `GET /v1/studio/projects/{project_id}/chapters/{chapter_id}/snapshots` to return the snapshots of a chapter. + Returns the chapter snapshot. Parameters ---------- @@ -524,15 +526,13 @@ def stream_snapshot( chapter_snapshot_id : str The ID of the chapter snapshot. - convert_to_mpeg : typing.Optional[bool] - Whether to convert the audio to mpeg format. - request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - None + ChapterSnapshotExtendedResponseModel + Successful Response Examples -------- @@ -541,27 +541,26 @@ def stream_snapshot( client = ElevenLabs( api_key="YOUR_API_KEY", ) - client.studio.chapters.stream_snapshot( + client.studio.chapters.get_chapter_snapshot( project_id="21m00Tcm4TlvDq8ikWAM", chapter_id="21m00Tcm4TlvDq8ikWAM", chapter_snapshot_id="21m00Tcm4TlvDq8ikWAM", ) """ _response = self._client_wrapper.httpx_client.request( - f"v1/studio/projects/{jsonable_encoder(project_id)}/chapters/{jsonable_encoder(chapter_id)}/snapshots/{jsonable_encoder(chapter_snapshot_id)}/stream", - method="POST", - json={ - "convert_to_mpeg": convert_to_mpeg, - }, - headers={ - "content-type": "application/json", - }, + f"v1/studio/projects/{jsonable_encoder(project_id)}/chapters/{jsonable_encoder(chapter_id)}/snapshots/{jsonable_encoder(chapter_snapshot_id)}", + method="GET", request_options=request_options, - omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return + return typing.cast( + ChapterSnapshotExtendedResponseModel, + construct_type( + type_=ChapterSnapshotExtendedResponseModel, # type: ignore + object_=_response.json(), + ), + ) if _response.status_code == 422: raise UnprocessableEntityError( typing.cast( @@ -577,6 +576,74 @@ def stream_snapshot( raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) + def stream_snapshot( + self, + project_id: str, + chapter_id: str, + chapter_snapshot_id: str, + *, + convert_to_mpeg: typing.Optional[bool] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.Iterator[bytes]: + """ + Stream the audio from a chapter snapshot. Use `GET /v1/studio/projects/{project_id}/chapters/{chapter_id}/snapshots` to return the snapshots of a chapter. + + Parameters + ---------- + project_id : str + The ID of the Studio project. + + chapter_id : str + The ID of the chapter. + + chapter_snapshot_id : str + The ID of the chapter snapshot. + + convert_to_mpeg : typing.Optional[bool] + Whether to convert the audio to mpeg format. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response. 
+ + Yields + ------ + typing.Iterator[bytes] + Streaming audio data + """ + with self._client_wrapper.httpx_client.stream( + f"v1/studio/projects/{jsonable_encoder(project_id)}/chapters/{jsonable_encoder(chapter_id)}/snapshots/{jsonable_encoder(chapter_snapshot_id)}/stream", + method="POST", + json={ + "convert_to_mpeg": convert_to_mpeg, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) as _response: + try: + if 200 <= _response.status_code < 300: + _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024 + for _chunk in _response.iter_bytes(chunk_size=_chunk_size): + yield _chunk + return + _response.read() + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + class AsyncChaptersClient: def __init__(self, *, client_wrapper: AsyncClientWrapper): @@ -902,7 +969,7 @@ async def main() -> None: async def delete( self, project_id: str, chapter_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> typing.Optional[typing.Any]: + ) -> DeleteChapterResponseModel: """ Deletes a chapter. @@ -919,7 +986,7 @@ async def delete( Returns ------- - typing.Optional[typing.Any] + DeleteChapterResponseModel Successful Response Examples @@ -950,9 +1017,9 @@ async def main() -> None: try: if 200 <= _response.status_code < 300: return typing.cast( - typing.Optional[typing.Any], + DeleteChapterResponseModel, construct_type( - type_=typing.Optional[typing.Any], # type: ignore + type_=DeleteChapterResponseModel, # type: ignore object_=_response.json(), ), ) @@ -973,7 +1040,7 @@ async def main() -> None: async def convert( self, project_id: str, chapter_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> typing.Optional[typing.Any]: + ) -> ConvertChapterResponseModel: """ Starts conversion of a specific chapter. @@ -990,7 +1057,7 @@ async def convert( Returns ------- - typing.Optional[typing.Any] + ConvertChapterResponseModel Successful Response Examples @@ -1021,9 +1088,9 @@ async def main() -> None: try: if 200 <= _response.status_code < 300: return typing.cast( - typing.Optional[typing.Any], + ConvertChapterResponseModel, construct_type( - type_=typing.Optional[typing.Any], # type: ignore + type_=ConvertChapterResponseModel, # type: ignore object_=_response.json(), ), ) @@ -1046,7 +1113,7 @@ async def get_all_snapshots( self, project_id: str, chapter_id: str, *, request_options: typing.Optional[RequestOptions] = None ) -> ChapterSnapshotsResponse: """ - Gets information about all the snapshots of a chapter, each snapshot corresponds can be downloaded as audio. Whenever a chapter is converted a snapshot will be automatically created. + Gets information about all the snapshots of a chapter. Each snapshot can be downloaded as audio. Whenever a chapter is converted a snapshot will automatically be created. 
Parameters ---------- @@ -1113,17 +1180,16 @@ async def main() -> None: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def stream_snapshot( + async def get_chapter_snapshot( self, project_id: str, chapter_id: str, chapter_snapshot_id: str, *, - convert_to_mpeg: typing.Optional[bool] = OMIT, request_options: typing.Optional[RequestOptions] = None, - ) -> None: + ) -> ChapterSnapshotExtendedResponseModel: """ - Stream the audio from a chapter snapshot. Use `GET /v1/studio/projects/{project_id}/chapters/{chapter_id}/snapshots` to return the snapshots of a chapter. + Returns the chapter snapshot. Parameters ---------- @@ -1136,15 +1202,13 @@ async def stream_snapshot( chapter_snapshot_id : str The ID of the chapter snapshot. - convert_to_mpeg : typing.Optional[bool] - Whether to convert the audio to mpeg format. - request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - None + ChapterSnapshotExtendedResponseModel + Successful Response Examples -------- @@ -1158,7 +1222,7 @@ async def stream_snapshot( async def main() -> None: - await client.studio.chapters.stream_snapshot( + await client.studio.chapters.get_chapter_snapshot( project_id="21m00Tcm4TlvDq8ikWAM", chapter_id="21m00Tcm4TlvDq8ikWAM", chapter_snapshot_id="21m00Tcm4TlvDq8ikWAM", @@ -1168,20 +1232,19 @@ async def main() -> None: asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - f"v1/studio/projects/{jsonable_encoder(project_id)}/chapters/{jsonable_encoder(chapter_id)}/snapshots/{jsonable_encoder(chapter_snapshot_id)}/stream", - method="POST", - json={ - "convert_to_mpeg": convert_to_mpeg, - }, - headers={ - "content-type": "application/json", - }, + f"v1/studio/projects/{jsonable_encoder(project_id)}/chapters/{jsonable_encoder(chapter_id)}/snapshots/{jsonable_encoder(chapter_snapshot_id)}", + method="GET", request_options=request_options, - omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return + return typing.cast( + ChapterSnapshotExtendedResponseModel, + construct_type( + type_=ChapterSnapshotExtendedResponseModel, # type: ignore + object_=_response.json(), + ), + ) if _response.status_code == 422: raise UnprocessableEntityError( typing.cast( @@ -1196,3 +1259,71 @@ async def main() -> None: except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) + + async def stream_snapshot( + self, + project_id: str, + chapter_id: str, + chapter_snapshot_id: str, + *, + convert_to_mpeg: typing.Optional[bool] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> typing.AsyncIterator[bytes]: + """ + Stream the audio from a chapter snapshot. Use `GET /v1/studio/projects/{project_id}/chapters/{chapter_id}/snapshots` to return the snapshots of a chapter. + + Parameters + ---------- + project_id : str + The ID of the Studio project. + + chapter_id : str + The ID of the chapter. + + chapter_snapshot_id : str + The ID of the chapter snapshot. + + convert_to_mpeg : typing.Optional[bool] + Whether to convert the audio to mpeg format. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response. 
+ + Yields + ------ + typing.AsyncIterator[bytes] + Streaming audio data + """ + async with self._client_wrapper.httpx_client.stream( + f"v1/studio/projects/{jsonable_encoder(project_id)}/chapters/{jsonable_encoder(chapter_id)}/snapshots/{jsonable_encoder(chapter_snapshot_id)}/stream", + method="POST", + json={ + "convert_to_mpeg": convert_to_mpeg, + }, + headers={ + "content-type": "application/json", + }, + request_options=request_options, + omit=OMIT, + ) as _response: + try: + if 200 <= _response.status_code < 300: + _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024 + async for _chunk in _response.aiter_bytes(chunk_size=_chunk_size): + yield _chunk + return + await _response.aread() + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/elevenlabs/studio/client.py b/src/elevenlabs/studio/client.py index 7086fbe6..3ed7a862 100644 --- a/src/elevenlabs/studio/client.py +++ b/src/elevenlabs/studio/client.py @@ -56,7 +56,7 @@ def create_podcast( The ID of the model to be used for this Studio project, you can query GET /v1/models to list all available models. mode : BodyCreatePodcastV1StudioPodcastsPostMode - The type of podcast to generate + The type of podcast to generate. Can be 'conversation', an interaction between two voices, or 'bulletin', a monologue. source : BodyCreatePodcastV1StudioPodcastsPostSource The source content for the Podcast. @@ -111,12 +111,12 @@ def create_podcast( model_id="21m00Tcm4TlvDq8ikWAM", mode=BodyCreatePodcastV1StudioPodcastsPostMode_Conversation( conversation=PodcastConversationModeData( - host_voice_id="host_voice_id", - guest_voice_id="guest_voice_id", + host_voice_id="aw1NgEzBg83R7vgmiJt6", + guest_voice_id="aw1NgEzBg83R7vgmiJt7", ), ), source=PodcastTextSource( - text="text", + text="This is a test podcast.", ), ) """ @@ -196,7 +196,7 @@ async def create_podcast( The ID of the model to be used for this Studio project, you can query GET /v1/models to list all available models. mode : BodyCreatePodcastV1StudioPodcastsPostMode - The type of podcast to generate + The type of podcast to generate. Can be 'conversation', an interaction between two voices, or 'bulletin', a monologue. source : BodyCreatePodcastV1StudioPodcastsPostSource The source content for the Podcast. 
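The `create_podcast` docstring above now spells out the two mode variants. The diff's own examples exercise the 'conversation' branch; a hedged sketch of the 'bulletin' (monologue) branch, assuming the union member `BodyCreatePodcastV1StudioPodcastsPostMode_Bulletin` wraps a `PodcastBulletinModeData` that takes only a host voice:

```python
from elevenlabs import ElevenLabs, PodcastBulletinModeData, PodcastTextSource
from elevenlabs.studio import BodyCreatePodcastV1StudioPodcastsPostMode_Bulletin

client = ElevenLabs(api_key="YOUR_API_KEY")

# A bulletin is a single-voice monologue, so no guest voice is supplied.
client.studio.create_podcast(
    model_id="21m00Tcm4TlvDq8ikWAM",
    mode=BodyCreatePodcastV1StudioPodcastsPostMode_Bulletin(
        bulletin=PodcastBulletinModeData(
            host_voice_id="aw1NgEzBg83R7vgmiJt6",  # assumed single-host field
        ),
    ),
    source=PodcastTextSource(
        text="This is a test podcast.",
    ),
)
```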
@@ -256,12 +256,12 @@ async def main() -> None: model_id="21m00Tcm4TlvDq8ikWAM", mode=BodyCreatePodcastV1StudioPodcastsPostMode_Conversation( conversation=PodcastConversationModeData( - host_voice_id="host_voice_id", - guest_voice_id="guest_voice_id", + host_voice_id="aw1NgEzBg83R7vgmiJt6", + guest_voice_id="aw1NgEzBg83R7vgmiJt7", ), ), source=PodcastTextSource( - text="text", + text="This is a test podcast.", ), ) diff --git a/src/elevenlabs/studio/projects/client.py b/src/elevenlabs/studio/projects/client.py index b0a4d936..60ffe3fb 100644 --- a/src/elevenlabs/studio/projects/client.py +++ b/src/elevenlabs/studio/projects/client.py @@ -17,8 +17,12 @@ from ...types.project_extended_response_model import ProjectExtendedResponseModel from ...core.jsonable_encoder import jsonable_encoder from ...types.edit_project_response_model import EditProjectResponseModel +from ...types.delete_project_response_model import DeleteProjectResponseModel +from ...types.convert_project_response_model import ConvertProjectResponseModel from ...types.project_snapshots_response import ProjectSnapshotsResponse +from ...types.project_snapshot_extended_response_model import ProjectSnapshotExtendedResponseModel from ...types.pronunciation_dictionary_version_locator import PronunciationDictionaryVersionLocator +from ...types.create_pronunciation_dictionary_response_model import CreatePronunciationDictionaryResponseModel from ...core.serialization import convert_and_respect_annotation_metadata from ...core.client_wrapper import AsyncClientWrapper @@ -356,7 +360,7 @@ def update_metadata( request_options: typing.Optional[RequestOptions] = None, ) -> EditProjectResponseModel: """ - Updates Studio project metadata. + Updates the specified Studio project by setting the values of the parameters passed. Parameters ---------- @@ -450,7 +454,7 @@ def update_metadata( def delete( self, project_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> typing.Optional[typing.Any]: + ) -> DeleteProjectResponseModel: """ Deletes a Studio project. @@ -464,7 +468,7 @@ def delete( Returns ------- - typing.Optional[typing.Any] + DeleteProjectResponseModel Successful Response Examples @@ -486,9 +490,9 @@ def delete( try: if 200 <= _response.status_code < 300: return typing.cast( - typing.Optional[typing.Any], + DeleteProjectResponseModel, construct_type( - type_=typing.Optional[typing.Any], # type: ignore + type_=DeleteProjectResponseModel, # type: ignore object_=_response.json(), ), ) @@ -591,7 +595,7 @@ def update_content( def convert( self, project_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> typing.Optional[typing.Any]: + ) -> ConvertProjectResponseModel: """ Starts conversion of a Studio project and all of its chapters. @@ -605,7 +609,7 @@ def convert( Returns ------- - typing.Optional[typing.Any] + ConvertProjectResponseModel Successful Response Examples @@ -627,9 +631,9 @@ def convert( try: if 200 <= _response.status_code < 300: return typing.cast( - typing.Optional[typing.Any], + ConvertProjectResponseModel, construct_type( - type_=typing.Optional[typing.Any], # type: ignore + type_=ConvertProjectResponseModel, # type: ignore object_=_response.json(), ), ) @@ -652,7 +656,7 @@ def get_snapshots( self, project_id: str, *, request_options: typing.Optional[RequestOptions] = None ) -> ProjectSnapshotsResponse: """ - Gets the snapshots of a Studio project. + Retrieves a list of snapshots for a Studio project. 
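A short sketch chaining the two project-snapshot reads described above, assuming `ProjectSnapshotsResponse` exposes a `snapshots` list whose items carry a `project_snapshot_id`:

```python
from elevenlabs import ElevenLabs

client = ElevenLabs(api_key="YOUR_API_KEY")

# List the project's snapshots, then fetch the extended record for the first one.
snapshots = client.studio.projects.get_snapshots(project_id="21m00Tcm4TlvDq8ikWAM")
first_id = snapshots.snapshots[0].project_snapshot_id  # assumed field names

snapshot = client.studio.projects.get_project_snapshot(
    project_id="21m00Tcm4TlvDq8ikWAM",
    project_snapshot_id=first_id,
)
```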
Parameters ---------- @@ -707,16 +711,11 @@ def get_snapshots( raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def stream_audio( - self, - project_id: str, - project_snapshot_id: str, - *, - convert_to_mpeg: typing.Optional[bool] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> None: + def get_project_snapshot( + self, project_id: str, project_snapshot_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> ProjectSnapshotExtendedResponseModel: """ - Stream the audio from a Studio project snapshot. + Returns the project snapshot. Parameters ---------- @@ -726,15 +725,13 @@ def stream_audio( project_snapshot_id : str The ID of the Studio project snapshot. - convert_to_mpeg : typing.Optional[bool] - Whether to convert the audio to mpeg format. - request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - None + ProjectSnapshotExtendedResponseModel + Successful Response Examples -------- @@ -743,26 +740,25 @@ def stream_audio( client = ElevenLabs( api_key="YOUR_API_KEY", ) - client.studio.projects.stream_audio( + client.studio.projects.get_project_snapshot( project_id="21m00Tcm4TlvDq8ikWAM", project_snapshot_id="21m00Tcm4TlvDq8ikWAM", ) """ _response = self._client_wrapper.httpx_client.request( - f"v1/studio/projects/{jsonable_encoder(project_id)}/snapshots/{jsonable_encoder(project_snapshot_id)}/stream", - method="POST", - json={ - "convert_to_mpeg": convert_to_mpeg, - }, - headers={ - "content-type": "application/json", - }, + f"v1/studio/projects/{jsonable_encoder(project_id)}/snapshots/{jsonable_encoder(project_snapshot_id)}", + method="GET", request_options=request_options, - omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return + return typing.cast( + ProjectSnapshotExtendedResponseModel, + construct_type( + type_=ProjectSnapshotExtendedResponseModel, # type: ignore + object_=_response.json(), + ), + ) if _response.status_code == 422: raise UnprocessableEntityError( typing.cast( @@ -778,11 +774,16 @@ def stream_audio( raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def stream_archive( - self, project_id: str, project_snapshot_id: str, *, request_options: typing.Optional[RequestOptions] = None + def stream_audio( + self, + project_id: str, + project_snapshot_id: str, + *, + convert_to_mpeg: typing.Optional[bool] = OMIT, + request_options: typing.Optional[RequestOptions] = None, ) -> None: """ - Returns a compressed archive of the Studio project's audio. + Stream the audio from a Studio project snapshot. Parameters ---------- @@ -792,6 +793,9 @@ def stream_archive( project_snapshot_id : str The ID of the Studio project snapshot. + convert_to_mpeg : typing.Optional[bool] + Whether to convert the audio to mpeg format. + request_options : typing.Optional[RequestOptions] Request-specific configuration. 
@@ -806,15 +810,22 @@ def stream_archive( client = ElevenLabs( api_key="YOUR_API_KEY", ) - client.studio.projects.stream_archive( + client.studio.projects.stream_audio( project_id="21m00Tcm4TlvDq8ikWAM", project_snapshot_id="21m00Tcm4TlvDq8ikWAM", ) """ _response = self._client_wrapper.httpx_client.request( - f"v1/studio/projects/{jsonable_encoder(project_id)}/snapshots/{jsonable_encoder(project_snapshot_id)}/archive", + f"v1/studio/projects/{jsonable_encoder(project_id)}/snapshots/{jsonable_encoder(project_snapshot_id)}/stream", method="POST", + json={ + "convert_to_mpeg": convert_to_mpeg, + }, + headers={ + "content-type": "application/json", + }, request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: @@ -834,6 +845,55 @@ def stream_archive( raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) + def stream_archive( + self, project_id: str, project_snapshot_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.Iterator[bytes]: + """ + Returns a compressed archive of the Studio project's audio. + + Parameters + ---------- + project_id : str + The ID of the Studio project. + + project_snapshot_id : str + The ID of the Studio project snapshot. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response. + + Yields + ------ + typing.Iterator[bytes] + Streaming archive data + """ + with self._client_wrapper.httpx_client.stream( + f"v1/studio/projects/{jsonable_encoder(project_id)}/snapshots/{jsonable_encoder(project_snapshot_id)}/archive", + method="POST", + request_options=request_options, + ) as _response: + try: + if 200 <= _response.status_code < 300: + _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024 + for _chunk in _response.iter_bytes(chunk_size=_chunk_size): + yield _chunk + return + _response.read() + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + def update_pronunciation_dictionaries( self, project_id: str, @@ -841,7 +901,7 @@ def update_pronunciation_dictionaries( pronunciation_dictionary_locators: typing.Sequence[PronunciationDictionaryVersionLocator], invalidate_affected_text: typing.Optional[bool] = OMIT, request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[typing.Any]: + ) -> CreatePronunciationDictionaryResponseModel: """ Create a set of pronunciation dictionaries acting on a project. This will automatically mark text within this project as requiring reconverting where the new dictionary would apply or the old one no longer does. 
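For the locator parameter described above, a minimal usage sketch; the IDs are placeholders, and the locator's `pronunciation_dictionary_id`/`version_id` fields follow the `(id, version_id)` pairing the docstring names:

```python
from elevenlabs import ElevenLabs, PronunciationDictionaryVersionLocator

client = ElevenLabs(api_key="YOUR_API_KEY")

# Attach one dictionary version to the project; text the dictionary touches
# is automatically marked as needing reconversion.
client.studio.projects.update_pronunciation_dictionaries(
    project_id="21m00Tcm4TlvDq8ikWAM",
    pronunciation_dictionary_locators=[
        PronunciationDictionaryVersionLocator(
            pronunciation_dictionary_id="PD_ID_PLACEHOLDER",
            version_id="VERSION_ID_PLACEHOLDER",
        )
    ],
    invalidate_affected_text=True,
)
```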
@@ -861,7 +921,7 @@ def update_pronunciation_dictionaries( Returns ------- - typing.Optional[typing.Any] + CreatePronunciationDictionaryResponseModel Successful Response Examples @@ -901,9 +961,9 @@ def update_pronunciation_dictionaries( try: if 200 <= _response.status_code < 300: return typing.cast( - typing.Optional[typing.Any], + CreatePronunciationDictionaryResponseModel, construct_type( - type_=typing.Optional[typing.Any], # type: ignore + type_=CreatePronunciationDictionaryResponseModel, # type: ignore object_=_response.json(), ), ) @@ -1277,7 +1337,7 @@ async def update_metadata( request_options: typing.Optional[RequestOptions] = None, ) -> EditProjectResponseModel: """ - Updates Studio project metadata. + Updates the specified Studio project by setting the values of the parameters passed. Parameters ---------- @@ -1379,7 +1439,7 @@ async def main() -> None: async def delete( self, project_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> typing.Optional[typing.Any]: + ) -> DeleteProjectResponseModel: """ Deletes a Studio project. @@ -1393,7 +1453,7 @@ async def delete( Returns ------- - typing.Optional[typing.Any] + DeleteProjectResponseModel Successful Response Examples @@ -1423,9 +1483,9 @@ async def main() -> None: try: if 200 <= _response.status_code < 300: return typing.cast( - typing.Optional[typing.Any], + DeleteProjectResponseModel, construct_type( - type_=typing.Optional[typing.Any], # type: ignore + type_=DeleteProjectResponseModel, # type: ignore object_=_response.json(), ), ) @@ -1536,7 +1596,7 @@ async def main() -> None: async def convert( self, project_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> typing.Optional[typing.Any]: + ) -> ConvertProjectResponseModel: """ Starts conversion of a Studio project and all of its chapters. @@ -1550,7 +1610,7 @@ async def convert( Returns ------- - typing.Optional[typing.Any] + ConvertProjectResponseModel Successful Response Examples @@ -1580,9 +1640,9 @@ async def main() -> None: try: if 200 <= _response.status_code < 300: return typing.cast( - typing.Optional[typing.Any], + ConvertProjectResponseModel, construct_type( - type_=typing.Optional[typing.Any], # type: ignore + type_=ConvertProjectResponseModel, # type: ignore object_=_response.json(), ), ) @@ -1605,7 +1665,7 @@ async def get_snapshots( self, project_id: str, *, request_options: typing.Optional[RequestOptions] = None ) -> ProjectSnapshotsResponse: """ - Gets the snapshots of a Studio project. + Retrieves a list of snapshots for a Studio project. Parameters ---------- @@ -1668,16 +1728,11 @@ async def main() -> None: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def stream_audio( - self, - project_id: str, - project_snapshot_id: str, - *, - convert_to_mpeg: typing.Optional[bool] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> None: + async def get_project_snapshot( + self, project_id: str, project_snapshot_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> ProjectSnapshotExtendedResponseModel: """ - Stream the audio from a Studio project snapshot. + Returns the project snapshot. Parameters ---------- @@ -1687,15 +1742,13 @@ async def stream_audio( project_snapshot_id : str The ID of the Studio project snapshot. - convert_to_mpeg : typing.Optional[bool] - Whether to convert the audio to mpeg format. 
- request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - None + ProjectSnapshotExtendedResponseModel + Successful Response Examples -------- @@ -1709,7 +1762,7 @@ async def stream_audio( async def main() -> None: - await client.studio.projects.stream_audio( + await client.studio.projects.get_project_snapshot( project_id="21m00Tcm4TlvDq8ikWAM", project_snapshot_id="21m00Tcm4TlvDq8ikWAM", ) @@ -1718,20 +1771,19 @@ async def main() -> None: asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - f"v1/studio/projects/{jsonable_encoder(project_id)}/snapshots/{jsonable_encoder(project_snapshot_id)}/stream", - method="POST", - json={ - "convert_to_mpeg": convert_to_mpeg, - }, - headers={ - "content-type": "application/json", - }, + f"v1/studio/projects/{jsonable_encoder(project_id)}/snapshots/{jsonable_encoder(project_snapshot_id)}", + method="GET", request_options=request_options, - omit=OMIT, ) try: if 200 <= _response.status_code < 300: - return + return typing.cast( + ProjectSnapshotExtendedResponseModel, + construct_type( + type_=ProjectSnapshotExtendedResponseModel, # type: ignore + object_=_response.json(), + ), + ) if _response.status_code == 422: raise UnprocessableEntityError( typing.cast( @@ -1747,11 +1799,16 @@ async def main() -> None: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def stream_archive( - self, project_id: str, project_snapshot_id: str, *, request_options: typing.Optional[RequestOptions] = None + async def stream_audio( + self, + project_id: str, + project_snapshot_id: str, + *, + convert_to_mpeg: typing.Optional[bool] = OMIT, + request_options: typing.Optional[RequestOptions] = None, ) -> None: """ - Returns a compressed archive of the Studio project's audio. + Stream the audio from a Studio project snapshot. Parameters ---------- @@ -1761,6 +1818,9 @@ async def stream_archive( project_snapshot_id : str The ID of the Studio project snapshot. + convert_to_mpeg : typing.Optional[bool] + Whether to convert the audio to mpeg format. + request_options : typing.Optional[RequestOptions] Request-specific configuration. @@ -1780,7 +1840,7 @@ async def stream_archive( async def main() -> None: - await client.studio.projects.stream_archive( + await client.studio.projects.stream_audio( project_id="21m00Tcm4TlvDq8ikWAM", project_snapshot_id="21m00Tcm4TlvDq8ikWAM", ) @@ -1789,9 +1849,16 @@ async def main() -> None: asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - f"v1/studio/projects/{jsonable_encoder(project_id)}/snapshots/{jsonable_encoder(project_snapshot_id)}/archive", + f"v1/studio/projects/{jsonable_encoder(project_id)}/snapshots/{jsonable_encoder(project_snapshot_id)}/stream", method="POST", + json={ + "convert_to_mpeg": convert_to_mpeg, + }, + headers={ + "content-type": "application/json", + }, request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: @@ -1811,6 +1878,55 @@ async def main() -> None: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) + async def stream_archive( + self, project_id: str, project_snapshot_id: str, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.AsyncIterator[bytes]: + """ + Returns a compressed archive of the Studio project's audio. 
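Because the new async `stream_archive` yields bytes instead of returning `None`, downloading the archive is a matter of iterating the chunks. A sketch (the `.zip` extension is an assumption about the archive format, which the endpoint does not specify):

```python
import asyncio

from elevenlabs import AsyncElevenLabs

client = AsyncElevenLabs(api_key="YOUR_API_KEY")


async def main() -> None:
    # Write the compressed audio archive to disk as it arrives.
    with open("project_audio.zip", "wb") as f:
        async for chunk in client.studio.projects.stream_archive(
            project_id="21m00Tcm4TlvDq8ikWAM",
            project_snapshot_id="21m00Tcm4TlvDq8ikWAM",
        ):
            f.write(chunk)


asyncio.run(main())
```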
+ + Parameters + ---------- + project_id : str + The ID of the Studio project. + + project_snapshot_id : str + The ID of the Studio project snapshot. + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. You can pass in configuration such as `chunk_size`, and more to customize the request and response. + + Yields + ------ + typing.AsyncIterator[bytes] + Streaming archive data + """ + async with self._client_wrapper.httpx_client.stream( + f"v1/studio/projects/{jsonable_encoder(project_id)}/snapshots/{jsonable_encoder(project_snapshot_id)}/archive", + method="POST", + request_options=request_options, + ) as _response: + try: + if 200 <= _response.status_code < 300: + _chunk_size = request_options.get("chunk_size", 1024) if request_options is not None else 1024 + async for _chunk in _response.aiter_bytes(chunk_size=_chunk_size): + yield _chunk + return + await _response.aread() + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + construct_type( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + async def update_pronunciation_dictionaries( self, project_id: str, @@ -1818,7 +1934,7 @@ async def update_pronunciation_dictionaries( pronunciation_dictionary_locators: typing.Sequence[PronunciationDictionaryVersionLocator], invalidate_affected_text: typing.Optional[bool] = OMIT, request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[typing.Any]: + ) -> CreatePronunciationDictionaryResponseModel: """ Create a set of pronunciation dictionaries acting on a project. This will automatically mark text within this project as requiring reconverting where the new dictionary would apply or the old one no longer does. @@ -1838,7 +1954,7 @@ async def update_pronunciation_dictionaries( Returns ------- - typing.Optional[typing.Any] + CreatePronunciationDictionaryResponseModel Successful Response Examples @@ -1886,9 +2002,9 @@ async def main() -> None: try: if 200 <= _response.status_code < 300: return typing.cast( - typing.Optional[typing.Any], + CreatePronunciationDictionaryResponseModel, construct_type( - type_=typing.Optional[typing.Any], # type: ignore + type_=CreatePronunciationDictionaryResponseModel, # type: ignore object_=_response.json(), ), ) diff --git a/src/elevenlabs/studio/types/body_create_podcast_v_1_studio_podcasts_post_mode.py b/src/elevenlabs/studio/types/body_create_podcast_v_1_studio_podcasts_post_mode.py index fa826714..cccf67ab 100644 --- a/src/elevenlabs/studio/types/body_create_podcast_v_1_studio_podcasts_post_mode.py +++ b/src/elevenlabs/studio/types/body_create_podcast_v_1_studio_podcasts_post_mode.py @@ -13,7 +13,7 @@ class BodyCreatePodcastV1StudioPodcastsPostMode_Conversation(UncheckedBaseModel): """ - The type of podcast to generate + The type of podcast to generate. Can be 'conversation', an interaction between two voices, or 'bulletin', a monologue. """ type: typing.Literal["conversation"] = "conversation" @@ -31,7 +31,7 @@ class Config: class BodyCreatePodcastV1StudioPodcastsPostMode_Bulletin(UncheckedBaseModel): """ - The type of podcast to generate + The type of podcast to generate. Can be 'conversation', an interaction between two voices, or 'bulletin', a monologue. 
""" type: typing.Literal["bulletin"] = "bulletin" diff --git a/src/elevenlabs/text_to_sound_effects/__init__.py b/src/elevenlabs/text_to_sound_effects/__init__.py index f3ea2659..7995a106 100644 --- a/src/elevenlabs/text_to_sound_effects/__init__.py +++ b/src/elevenlabs/text_to_sound_effects/__init__.py @@ -1,2 +1,5 @@ # This file was auto-generated by Fern from our API Definition. +from .types import TextToSoundEffectsConvertRequestOutputFormat + +__all__ = ["TextToSoundEffectsConvertRequestOutputFormat"] diff --git a/src/elevenlabs/text_to_sound_effects/client.py b/src/elevenlabs/text_to_sound_effects/client.py index f562fef6..3520edf5 100644 --- a/src/elevenlabs/text_to_sound_effects/client.py +++ b/src/elevenlabs/text_to_sound_effects/client.py @@ -2,6 +2,7 @@ import typing from ..core.client_wrapper import SyncClientWrapper +from .types.text_to_sound_effects_convert_request_output_format import TextToSoundEffectsConvertRequestOutputFormat from ..core.request_options import RequestOptions from ..errors.unprocessable_entity_error import UnprocessableEntityError from ..types.http_validation_error import HttpValidationError @@ -22,18 +23,22 @@ def convert( self, *, text: str, + output_format: typing.Optional[TextToSoundEffectsConvertRequestOutputFormat] = None, duration_seconds: typing.Optional[float] = OMIT, prompt_influence: typing.Optional[float] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> typing.Iterator[bytes]: """ - Converts a text of your choice into sound + Turn text into sound effects for your videos, voice-overs or video games using the most advanced sound effects model in the world. Parameters ---------- text : str The text that will get converted into a sound effect. + output_format : typing.Optional[TextToSoundEffectsConvertRequestOutputFormat] + Output format of the generated audio. Formatted as codec_sample_rate_bitrate. So an mp3 with 22.05kHz sample rate at 32kbs is represented as mp3_22050_32. MP3 with 192kbps bitrate requires you to be subscribed to Creator tier or above. PCM with 44.1kHz sample rate requires you to be subscribed to Pro tier or above. Note that the μ-law format (sometimes written mu-law, often approximated as u-law) is commonly used for Twilio audio inputs. + duration_seconds : typing.Optional[float] The duration of the sound which will be generated in seconds. Must be at least 0.5 and at most 22. If set to None we will guess the optimal duration using the prompt. Defaults to None. @@ -46,7 +51,7 @@ def convert( Yields ------ typing.Iterator[bytes] - Successful Response + The generated sound effect as an MP3 file Examples -------- @@ -62,6 +67,9 @@ def convert( with self._client_wrapper.httpx_client.stream( "v1/sound-generation", method="POST", + params={ + "output_format": output_format, + }, json={ "text": text, "duration_seconds": duration_seconds, @@ -104,18 +112,22 @@ async def convert( self, *, text: str, + output_format: typing.Optional[TextToSoundEffectsConvertRequestOutputFormat] = None, duration_seconds: typing.Optional[float] = OMIT, prompt_influence: typing.Optional[float] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> typing.AsyncIterator[bytes]: """ - Converts a text of your choice into sound + Turn text into sound effects for your videos, voice-overs or video games using the most advanced sound effects model in the world. Parameters ---------- text : str The text that will get converted into a sound effect. 
+ output_format : typing.Optional[TextToSoundEffectsConvertRequestOutputFormat] + Output format of the generated audio. Formatted as codec_sample_rate_bitrate. So an mp3 with 22.05kHz sample rate at 32kbps is represented as mp3_22050_32. MP3 with 192kbps bitrate requires you to be subscribed to Creator tier or above. PCM with 44.1kHz sample rate requires you to be subscribed to Pro tier or above. Note that the μ-law format (sometimes written mu-law, often approximated as u-law) is commonly used for Twilio audio inputs. + duration_seconds : typing.Optional[float] The duration of the sound which will be generated in seconds. Must be at least 0.5 and at most 22. If set to None we will guess the optimal duration using the prompt. Defaults to None. @@ -128,7 +140,7 @@ async def convert( Yields ------ typing.AsyncIterator[bytes] - Successful Response + The generated sound effect as an MP3 file Examples -------- @@ -152,6 +164,9 @@ async def main() -> None: async with self._client_wrapper.httpx_client.stream( "v1/sound-generation", method="POST", + params={ + "output_format": output_format, + }, json={ "text": text, "duration_seconds": duration_seconds, diff --git a/src/elevenlabs/text_to_sound_effects/types/__init__.py b/src/elevenlabs/text_to_sound_effects/types/__init__.py new file mode 100644 index 00000000..206b36de --- /dev/null +++ b/src/elevenlabs/text_to_sound_effects/types/__init__.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +from .text_to_sound_effects_convert_request_output_format import TextToSoundEffectsConvertRequestOutputFormat + +__all__ = ["TextToSoundEffectsConvertRequestOutputFormat"] diff --git a/src/elevenlabs/text_to_sound_effects/types/text_to_sound_effects_convert_request_output_format.py b/src/elevenlabs/text_to_sound_effects/types/text_to_sound_effects_convert_request_output_format.py new file mode 100644 index 00000000..a2324132 --- /dev/null +++ b/src/elevenlabs/text_to_sound_effects/types/text_to_sound_effects_convert_request_output_format.py @@ -0,0 +1,21 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +TextToSoundEffectsConvertRequestOutputFormat = typing.Union[ + typing.Literal[ + "mp3_22050_32", + "mp3_44100_32", + "mp3_44100_64", + "mp3_44100_96", + "mp3_44100_128", + "mp3_44100_192", + "pcm_8000", + "pcm_16000", + "pcm_22050", + "pcm_24000", + "pcm_44100", + "ulaw_8000", + ], + typing.Any, ] diff --git a/src/elevenlabs/text_to_speech/client.py b/src/elevenlabs/text_to_speech/client.py index 78116583..87eca942 100644 --- a/src/elevenlabs/text_to_speech/client.py +++ b/src/elevenlabs/text_to_speech/client.py @@ -97,7 +97,7 @@ def convert( Language code (ISO 639-1) used to enforce a language for the model. Currently only Turbo v2.5 and Flash v2.5 support language enforcement. For other models, an error will be returned if language code is provided. voice_settings : typing.Optional[VoiceSettings] - Voice settings overriding stored setttings for the given voice. They are applied only on the given request. + Voice settings overriding stored settings for the given voice. They are applied only on the given request. pronunciation_dictionary_locators : typing.Optional[typing.Sequence[PronunciationDictionaryVersionLocator]] A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order.
You may have up to 3 locators per request @@ -260,7 +260,7 @@ def convert_with_timestamps( Language code (ISO 639-1) used to enforce a language for the model. Currently only Turbo v2.5 and Flash v2.5 support language enforcement. For other models, an error will be returned if language code is provided. voice_settings : typing.Optional[VoiceSettings] - Voice settings overriding stored setttings for the given voice. They are applied only on the given request. + Voice settings overriding stored settings for the given voice. They are applied only on the given request. pronunciation_dictionary_locators : typing.Optional[typing.Sequence[PronunciationDictionaryVersionLocator]] A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order. You may have up to 3 locators per request @@ -423,7 +423,7 @@ def convert_as_stream( Language code (ISO 639-1) used to enforce a language for the model. Currently only Turbo v2.5 and Flash v2.5 support language enforcement. For other models, an error will be returned if language code is provided. voice_settings : typing.Optional[VoiceSettings] - Voice settings overriding stored setttings for the given voice. They are applied only on the given request. + Voice settings overriding stored settings for the given voice. They are applied only on the given request. pronunciation_dictionary_locators : typing.Optional[typing.Sequence[PronunciationDictionaryVersionLocator]] A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order. You may have up to 3 locators per request @@ -586,7 +586,7 @@ def stream_with_timestamps( Language code (ISO 639-1) used to enforce a language for the model. Currently only Turbo v2.5 and Flash v2.5 support language enforcement. For other models, an error will be returned if language code is provided. voice_settings : typing.Optional[VoiceSettings] - Voice settings overriding stored setttings for the given voice. They are applied only on the given request. + Voice settings overriding stored settings for the given voice. They are applied only on the given request. pronunciation_dictionary_locators : typing.Optional[typing.Sequence[PronunciationDictionaryVersionLocator]] A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order. You may have up to 3 locators per request @@ -766,7 +766,7 @@ async def convert( Language code (ISO 639-1) used to enforce a language for the model. Currently only Turbo v2.5 and Flash v2.5 support language enforcement. For other models, an error will be returned if language code is provided. voice_settings : typing.Optional[VoiceSettings] - Voice settings overriding stored setttings for the given voice. They are applied only on the given request. + Voice settings overriding stored settings for the given voice. They are applied only on the given request. pronunciation_dictionary_locators : typing.Optional[typing.Sequence[PronunciationDictionaryVersionLocator]] A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order. You may have up to 3 locators per request @@ -937,7 +937,7 @@ async def convert_with_timestamps( Language code (ISO 639-1) used to enforce a language for the model. Currently only Turbo v2.5 and Flash v2.5 support language enforcement. For other models, an error will be returned if language code is provided. 
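The corrected `voice_settings` docstring describes a per-request override that is never persisted; a sketch, assuming `VoiceSettings` still exposes the `stability`/`similarity_boost` knobs it has had in earlier releases:

```python
from elevenlabs import ElevenLabs, VoiceSettings

client = ElevenLabs(api_key="YOUR_API_KEY")

# Override the voice's stored settings for this request only.
audio = client.text_to_speech.convert(
    voice_id="21m00Tcm4TlvDq8ikWAM",
    text="These settings apply to this request alone.",
    voice_settings=VoiceSettings(stability=0.4, similarity_boost=0.8),
)
with open("speech.mp3", "wb") as f:
    for chunk in audio:
        f.write(chunk)
```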
voice_settings : typing.Optional[VoiceSettings] - Voice settings overriding stored setttings for the given voice. They are applied only on the given request. + Voice settings overriding stored settings for the given voice. They are applied only on the given request. pronunciation_dictionary_locators : typing.Optional[typing.Sequence[PronunciationDictionaryVersionLocator]] A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order. You may have up to 3 locators per request @@ -1108,7 +1108,7 @@ async def convert_as_stream( Language code (ISO 639-1) used to enforce a language for the model. Currently only Turbo v2.5 and Flash v2.5 support language enforcement. For other models, an error will be returned if language code is provided. voice_settings : typing.Optional[VoiceSettings] - Voice settings overriding stored setttings for the given voice. They are applied only on the given request. + Voice settings overriding stored settings for the given voice. They are applied only on the given request. pronunciation_dictionary_locators : typing.Optional[typing.Sequence[PronunciationDictionaryVersionLocator]] A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order. You may have up to 3 locators per request @@ -1279,7 +1279,7 @@ async def stream_with_timestamps( Language code (ISO 639-1) used to enforce a language for the model. Currently only Turbo v2.5 and Flash v2.5 support language enforcement. For other models, an error will be returned if language code is provided. voice_settings : typing.Optional[VoiceSettings] - Voice settings overriding stored setttings for the given voice. They are applied only on the given request. + Voice settings overriding stored settings for the given voice. They are applied only on the given request. pronunciation_dictionary_locators : typing.Optional[typing.Sequence[PronunciationDictionaryVersionLocator]] A list of pronunciation dictionary locators (id, version_id) to be applied to the text. They will be applied in order. You may have up to 3 locators per request diff --git a/src/elevenlabs/text_to_voice/client.py b/src/elevenlabs/text_to_voice/client.py index ce03c69f..c5240469 100644 --- a/src/elevenlabs/text_to_voice/client.py +++ b/src/elevenlabs/text_to_voice/client.py @@ -42,18 +42,7 @@ def create_previews( Text to generate, text length has to be between 100 and 1000. output_format : typing.Optional[TextToVoiceCreatePreviewsRequestOutputFormat] - Output format of the generated audio. Must be one of: - mp3_22050_32 - output format, mp3 with 22.05kHz sample rate at 32kbps. - mp3_44100_32 - output format, mp3 with 44.1kHz sample rate at 32kbps. - mp3_44100_64 - output format, mp3 with 44.1kHz sample rate at 64kbps. - mp3_44100_96 - output format, mp3 with 44.1kHz sample rate at 96kbps. - mp3_44100_128 - default output format, mp3 with 44.1kHz sample rate at 128kbps. - mp3_44100_192 - output format, mp3 with 44.1kHz sample rate at 192kbps. Requires you to be subscribed to Creator tier or above. - pcm_16000 - PCM format (S16LE) with 16kHz sample rate. - pcm_22050 - PCM format (S16LE) with 22.05kHz sample rate. - pcm_24000 - PCM format (S16LE) with 24kHz sample rate. - pcm_44100 - PCM format (S16LE) with 44.1kHz sample rate. Requires you to be subscribed to Pro tier or above. - ulaw_8000 - μ-law format (sometimes written mu-law, often approximated as u-law) with 8kHz sample rate. Note that this format is commonly used for Twilio audio inputs. 
+ Output format of the generated audio. Formatted as codec_sample_rate_bitrate. So an mp3 with 22.05kHz sample rate at 32kbps is represented as mp3_22050_32. MP3 with 192kbps bitrate requires you to be subscribed to Creator tier or above. PCM with 44.1kHz sample rate requires you to be subscribed to Pro tier or above. Note that the μ-law format (sometimes written mu-law, often approximated as u-law) is commonly used for Twilio audio inputs. auto_generate_text : typing.Optional[bool] Whether to automatically generate a text suitable for the voice description. @@ -74,7 +63,7 @@ api_key="YOUR_API_KEY", ) client.text_to_voice.create_previews( - voice_description="A sassy little squeaky mouse", + voice_description="A sassy squeaky mouse", text="Every act of kindness, no matter how small, carries value and can make a difference, as no gesture of goodwill is ever wasted.", ) """ @@ -130,7 +119,7 @@ def create_voice_from_preview( request_options: typing.Optional[RequestOptions] = None, ) -> Voice: """ - Create a voice from previously generated voice preview. This endpoint should be called after you fetched a generated_voice_id using /v1/text-to-voice/create-previews. + Create a voice from a previously generated voice preview. This endpoint should be called after you fetched a generated_voice_id using POST /v1/text-to-voice/create-previews. Parameters ---------- voice_name : str Name to use for the created voice. voice_description : str Description to use for the created voice. generated_voice_id : str - The generated_voice_id to create, call POST /v1/voice-generation/generate-voice and fetch the generated_voice_id from the response header if don't have one yet. + The generated_voice_id to create, call POST /v1/text-to-voice/create-previews and fetch the generated_voice_id from the response header if you don't have one yet. labels : typing.Optional[typing.Dict[str, str]] Optional, metadata to add to the created voice. Defaults to None. @@ -165,8 +154,8 @@ api_key="YOUR_API_KEY", ) client.text_to_voice.create_voice_from_preview( - voice_name="Little squeaky mouse", - voice_description="A sassy little squeaky mouse", + voice_name="Sassy squeaky mouse", + voice_description="A sassy squeaky mouse", generated_voice_id="37HceQefKmEi3bGovXjL", ) """ @@ -236,18 +225,7 @@ async def create_previews( Text to generate, text length has to be between 100 and 1000. output_format : typing.Optional[TextToVoiceCreatePreviewsRequestOutputFormat] - Output format of the generated audio. Must be one of: - mp3_22050_32 - output format, mp3 with 22.05kHz sample rate at 32kbps. - mp3_44100_32 - output format, mp3 with 44.1kHz sample rate at 32kbps. - mp3_44100_64 - output format, mp3 with 44.1kHz sample rate at 64kbps. - mp3_44100_96 - output format, mp3 with 44.1kHz sample rate at 96kbps. - mp3_44100_128 - default output format, mp3 with 44.1kHz sample rate at 128kbps. - mp3_44100_192 - output format, mp3 with 44.1kHz sample rate at 192kbps. Requires you to be subscribed to Creator tier or above. - pcm_16000 - PCM format (S16LE) with 16kHz sample rate. - pcm_22050 - PCM format (S16LE) with 22.05kHz sample rate. - pcm_24000 - PCM format (S16LE) with 24kHz sample rate. - pcm_44100 - PCM format (S16LE) with 44.1kHz sample rate. Requires you to be subscribed to Pro tier or above. - ulaw_8000 - μ-law format (sometimes written mu-law, often approximated as u-law) with 8kHz sample rate. Note that this format is commonly used for Twilio audio inputs. + Output format of the generated audio.
Formatted as codec_sample_rate_bitrate. So an mp3 with 22.05kHz sample rate at 32kbps is represented as mp3_22050_32. MP3 with 192kbps bitrate requires you to be subscribed to Creator tier or above. PCM with 44.1kHz sample rate requires you to be subscribed to Pro tier or above. Note that the μ-law format (sometimes written mu-law, often approximated as u-law) is commonly used for Twilio audio inputs. auto_generate_text : typing.Optional[bool] Whether to automatically generate a text suitable for the voice description. @@ -273,7 +251,7 @@ async def main() -> None: await client.text_to_voice.create_previews( - voice_description="A sassy little squeaky mouse", + voice_description="A sassy squeaky mouse", text="Every act of kindness, no matter how small, carries value and can make a difference, as no gesture of goodwill is ever wasted.", ) @@ -332,7 +310,7 @@ async def create_voice_from_preview( request_options: typing.Optional[RequestOptions] = None, ) -> Voice: """ - Create a voice from previously generated voice preview. This endpoint should be called after you fetched a generated_voice_id using /v1/text-to-voice/create-previews. + Create a voice from a previously generated voice preview. This endpoint should be called after you fetched a generated_voice_id using POST /v1/text-to-voice/create-previews. Parameters ---------- voice_name : str Name to use for the created voice. voice_description : str Description to use for the created voice. generated_voice_id : str - The generated_voice_id to create, call POST /v1/voice-generation/generate-voice and fetch the generated_voice_id from the response header if don't have one yet. + The generated_voice_id to create, call POST /v1/text-to-voice/create-previews and fetch the generated_voice_id from the response header if you don't have one yet. labels : typing.Optional[typing.Dict[str, str]] Optional, metadata to add to the created voice. Defaults to None.
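The corrected cross-references above describe a two-step flow: generate previews, then promote one to a permanent voice. A sketch of that flow, assuming the previews response exposes a `previews` list whose items carry a `generated_voice_id`:

```python
from elevenlabs import ElevenLabs

client = ElevenLabs(api_key="YOUR_API_KEY")

# Step 1: generate candidate previews for the description.
previews = client.text_to_voice.create_previews(
    voice_description="A sassy squeaky mouse",
    text="Every act of kindness, no matter how small, carries value and can make a difference, as no gesture of goodwill is ever wasted.",
)
generated_voice_id = previews.previews[0].generated_voice_id  # assumed response shape

# Step 2: turn the chosen preview into a saved voice.
voice = client.text_to_voice.create_voice_from_preview(
    voice_name="Sassy squeaky mouse",
    voice_description="A sassy squeaky mouse",
    generated_voice_id=generated_voice_id,
)
print(voice.voice_id)
```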
@@ -372,8 +350,8 @@ async def create_voice_from_preview( async def main() -> None: await client.text_to_voice.create_voice_from_preview( - voice_name="Little squeaky mouse", - voice_description="A sassy little squeaky mouse", + voice_name="Sassy squeaky mouse", + voice_description="A sassy squeaky mouse", generated_voice_id="37HceQefKmEi3bGovXjL", ) diff --git a/src/elevenlabs/types/__init__.py b/src/elevenlabs/types/__init__.py index 818d153c..b23b9488 100644 --- a/src/elevenlabs/types/__init__.py +++ b/src/elevenlabs/types/__init__.py @@ -7,12 +7,15 @@ from .add_project_response_model import AddProjectResponseModel from .add_pronunciation_dictionary_response_model import AddPronunciationDictionaryResponseModel from .add_pronunciation_dictionary_rules_response_model import AddPronunciationDictionaryRulesResponseModel +from .add_sharing_voice_request import AddSharingVoiceRequest from .add_voice_ivc_response_model import AddVoiceIvcResponseModel from .add_voice_response_model import AddVoiceResponseModel +from .add_workspace_group_member_response_model import AddWorkspaceGroupMemberResponseModel +from .add_workspace_invite_response_model import AddWorkspaceInviteResponseModel from .age import Age from .agent_ban import AgentBan from .agent_call_limits import AgentCallLimits -from .agent_config import AgentConfig +from .agent_config_api_model import AgentConfigApiModel from .agent_config_override import AgentConfigOverride from .agent_config_override_config import AgentConfigOverrideConfig from .agent_metadata_response_model import AgentMetadataResponseModel @@ -53,23 +56,23 @@ from .chapter_content_paragraph_tts_node_input_model import ChapterContentParagraphTtsNodeInputModel from .chapter_content_response_model import ChapterContentResponseModel from .chapter_response import ChapterResponse +from .chapter_snapshot_extended_response_model import ChapterSnapshotExtendedResponseModel from .chapter_snapshot_response import ChapterSnapshotResponse from .chapter_snapshots_response import ChapterSnapshotsResponse from .chapter_state import ChapterState from .chapter_statistics_response import ChapterStatisticsResponse from .chapter_with_content_response_model import ChapterWithContentResponseModel from .chapter_with_content_response_model_state import ChapterWithContentResponseModelState +from .character_alignment_model import CharacterAlignmentModel from .character_alignment_response_model import CharacterAlignmentResponseModel from .client_event import ClientEvent from .client_tool_config import ClientToolConfig -from .conv_ai_new_secret_config import ConvAiNewSecretConfig from .conv_ai_secret_locator import ConvAiSecretLocator -from .conv_ai_stored_secret_config import ConvAiStoredSecretConfig from .conv_ai_stored_secret_dependencies import ConvAiStoredSecretDependencies -from .conv_ai_stored_secret_dependencies_agents_item import ( - ConvAiStoredSecretDependenciesAgentsItem, - ConvAiStoredSecretDependenciesAgentsItem_Available, - ConvAiStoredSecretDependenciesAgentsItem_Unknown, +from .conv_ai_stored_secret_dependencies_agent_tools_item import ( + ConvAiStoredSecretDependenciesAgentToolsItem, + ConvAiStoredSecretDependenciesAgentToolsItem_Available, + ConvAiStoredSecretDependenciesAgentToolsItem_Unknown, ) from .conv_ai_stored_secret_dependencies_tools_item import ( ConvAiStoredSecretDependenciesToolsItem, @@ -107,29 +110,50 @@ from .conversation_summary_response_model_status import ConversationSummaryResponseModelStatus from .conversation_token_db_model import ConversationTokenDbModel from 
.conversation_token_purpose import ConversationTokenPurpose -from .conversational_config import ConversationalConfig +from .conversational_config_api_model import ConversationalConfigApiModel +from .convert_chapter_response_model import ConvertChapterResponseModel +from .convert_project_response_model import ConvertProjectResponseModel from .create_agent_response_model import CreateAgentResponseModel +from .create_audio_native_project_request import CreateAudioNativeProjectRequest from .create_phone_number_response_model import CreatePhoneNumberResponseModel +from .create_pronunciation_dictionary_response_model import CreatePronunciationDictionaryResponseModel from .currency import Currency from .custom_llm import CustomLlm from .data_collection_result_common_model import DataCollectionResultCommonModel +from .delete_chapter_response_model import DeleteChapterResponseModel +from .delete_dubbing_response_model import DeleteDubbingResponseModel +from .delete_project_response_model import DeleteProjectResponseModel from .delete_sample_response_model import DeleteSampleResponseModel +from .delete_voice_response_model import DeleteVoiceResponseModel +from .delete_workspace_group_member_response_model import DeleteWorkspaceGroupMemberResponseModel +from .delete_workspace_invite_response_model import DeleteWorkspaceInviteResponseModel from .dependent_available_agent_identifier import DependentAvailableAgentIdentifier from .dependent_available_agent_identifier_access_level import DependentAvailableAgentIdentifierAccessLevel +from .dependent_available_agent_tool_identifier import DependentAvailableAgentToolIdentifier +from .dependent_available_agent_tool_identifier_access_level import DependentAvailableAgentToolIdentifierAccessLevel from .dependent_available_tool_identifier import DependentAvailableToolIdentifier from .dependent_available_tool_identifier_access_level import DependentAvailableToolIdentifierAccessLevel +from .dependent_phone_number_identifier import DependentPhoneNumberIdentifier from .dependent_unknown_agent_identifier import DependentUnknownAgentIdentifier +from .dependent_unknown_agent_tool_identifier import DependentUnknownAgentToolIdentifier from .dependent_unknown_tool_identifier import DependentUnknownToolIdentifier from .do_dubbing_response import DoDubbingResponse +from .document_usage_mode_enum import DocumentUsageModeEnum +from .dubbed_segment import DubbedSegment from .dubbing_media_metadata import DubbingMediaMetadata +from .dubbing_media_reference import DubbingMediaReference from .dubbing_metadata_response import DubbingMetadataResponse +from .dubbing_resource import DubbingResource from .dynamic_variables_config import DynamicVariablesConfig from .dynamic_variables_config_dynamic_variable_placeholders_value import ( DynamicVariablesConfigDynamicVariablePlaceholdersValue, ) from .edit_chapter_response_model import EditChapterResponseModel from .edit_project_response_model import EditProjectResponseModel +from .edit_voice_response_model import EditVoiceResponseModel +from .edit_voice_settings_response_model import EditVoiceSettingsResponseModel from .embed_variant import EmbedVariant +from .embedding_model_enum import EmbeddingModelEnum from .evaluation_settings import EvaluationSettings from .evaluation_success_result import EvaluationSuccessResult from .extended_subscription_response_model_billing_period import ExtendedSubscriptionResponseModelBillingPeriod @@ -147,7 +171,7 @@ from .get_agents_page_response_model import GetAgentsPageResponseModel from 
.get_audio_native_project_settings_response_model import GetAudioNativeProjectSettingsResponseModel
 from .get_chapters_response import GetChaptersResponse
-from .get_convai_settings_response_model import GetConvaiSettingsResponseModel
+from .get_conv_ai_settings_response_model import GetConvAiSettingsResponseModel
 from .get_conversation_response_model import GetConversationResponseModel
 from .get_conversation_response_model_status import GetConversationResponseModelStatus
 from .get_conversations_page_response_model import GetConversationsPageResponseModel
@@ -183,8 +207,10 @@
 from .http_validation_error import HttpValidationError
 from .image_avatar import ImageAvatar
 from .invoice import Invoice
+from .knowledge_base_document_metadata_response_model import KnowledgeBaseDocumentMetadataResponseModel
 from .knowledge_base_locator import KnowledgeBaseLocator
 from .knowledge_base_locator_type import KnowledgeBaseLocatorType
+from .language_added_response import LanguageAddedResponse
 from .language_preset import LanguagePreset
 from .language_preset_translation import LanguagePresetTranslation
 from .language_response import LanguageResponse
@@ -205,7 +231,6 @@
 from .object_json_schema_property_properties_value import ObjectJsonSchemaPropertyPropertiesValue
 from .orb_avatar import OrbAvatar
 from .output_format import OutputFormat
-from .paginated_listed_review_task_instance_model import PaginatedListedReviewTaskInstanceModel
 from .phone_number_agent_info import PhoneNumberAgentInfo
 from .podcast_bulletin_mode import PodcastBulletinMode
 from .podcast_bulletin_mode_data import PodcastBulletinModeData
@@ -226,11 +251,14 @@
 from .project_extended_response_model_apply_text_normalization import ProjectExtendedResponseModelApplyTextNormalization
 from .project_extended_response_model_fiction import ProjectExtendedResponseModelFiction
 from .project_extended_response_model_quality_preset import ProjectExtendedResponseModelQualityPreset
+from .project_extended_response_model_source_type import ProjectExtendedResponseModelSourceType
 from .project_extended_response_model_target_audience import ProjectExtendedResponseModelTargetAudience
 from .project_response import ProjectResponse
 from .project_response_model_access_level import ProjectResponseModelAccessLevel
 from .project_response_model_fiction import ProjectResponseModelFiction
+from .project_response_model_source_type import ProjectResponseModelSourceType
 from .project_response_model_target_audience import ProjectResponseModelTargetAudience
+from .project_snapshot_extended_response_model import ProjectSnapshotExtendedResponseModel
 from .project_snapshot_response import ProjectSnapshotResponse
 from .project_snapshot_upload_response_model import ProjectSnapshotUploadResponseModel
 from .project_snapshot_upload_response_model_status import ProjectSnapshotUploadResponseModelStatus
@@ -252,22 +280,29 @@
 from .pronunciation_dictionary_version_response_model import PronunciationDictionaryVersionResponseModel
 from .pydantic_pronunciation_dictionary_version_locator import PydanticPronunciationDictionaryVersionLocator
 from .query_params_json_schema import QueryParamsJsonSchema
-from .quote_request_model import QuoteRequestModel
-from .quote_response_model import QuoteResponseModel
+from .rag_config import RagConfig
+from .rag_index_response_model import RagIndexResponseModel
+from .rag_index_status import RagIndexStatus
 from .reader_resource_response_model import ReaderResourceResponseModel
 from .reader_resource_response_model_resource_type import ReaderResourceResponseModelResourceType
 from .recording_response import RecordingResponse
 from .remove_pronunciation_dictionary_rules_response_model import RemovePronunciationDictionaryRulesResponseModel
 from .resource_access_info import ResourceAccessInfo
 from .resource_access_info_role import ResourceAccessInfoRole
-from .review_state import ReviewState
 from .review_status import ReviewStatus
-from .review_task_instance_response_model import ReviewTaskInstanceResponseModel
 from .safety_common_model import SafetyCommonModel
 from .safety_evaluation import SafetyEvaluation
 from .safety_response_model import SafetyResponseModel
 from .safety_rule import SafetyRule
 from .secret_dependency_type import SecretDependencyType
+from .segment_create_response import SegmentCreateResponse
+from .segment_delete_response import SegmentDeleteResponse
+from .segment_dub_response import SegmentDubResponse
+from .segment_transcription_response import SegmentTranscriptionResponse
+from .segment_translation_response import SegmentTranslationResponse
+from .segment_update_response import SegmentUpdateResponse
+from .speaker_segment import SpeakerSegment
+from .speaker_track import SpeakerTrack
 from .speech_history_item_response import SpeechHistoryItemResponse
 from .speech_history_item_response_model_source import SpeechHistoryItemResponseModelSource
 from .speech_history_item_response_model_voice_category import SpeechHistoryItemResponseModelVoiceCategory
@@ -284,32 +319,8 @@
 from .subscription_status import SubscriptionStatus
 from .subscription_usage_response_model import SubscriptionUsageResponseModel
 from .system_tool_config import SystemToolConfig
-from .tag_kind import TagKind
-from .tag_model import TagModel
-from .task_instance_event_kind import TaskInstanceEventKind
-from .task_instance_event_response_model import TaskInstanceEventResponseModel
 from .telephony_provider import TelephonyProvider
 from .text_to_speech_as_stream_request import TextToSpeechAsStreamRequest
-from .tool_request_model import ToolRequestModel
-from .tool_request_model_tool_config import (
-    ToolRequestModelToolConfig,
-    ToolRequestModelToolConfig_Client,
-    ToolRequestModelToolConfig_System,
-    ToolRequestModelToolConfig_Webhook,
-)
-from .tool_response_model import ToolResponseModel
-from .tool_response_model_dependent_agents_item import (
-    ToolResponseModelDependentAgentsItem,
-    ToolResponseModelDependentAgentsItem_Available,
-    ToolResponseModelDependentAgentsItem_Unknown,
-)
-from .tool_response_model_tool_config import (
-    ToolResponseModelToolConfig,
-    ToolResponseModelToolConfig_Client,
-    ToolResponseModelToolConfig_System,
-    ToolResponseModelToolConfig_Webhook,
-)
-from .tools_response_model import ToolsResponseModel
 from .tts_conversational_config import TtsConversationalConfig
 from .tts_conversational_config_override import TtsConversationalConfigOverride
 from .tts_conversational_config_override_config import TtsConversationalConfigOverrideConfig
@@ -318,6 +329,7 @@
 from .tts_output_format import TtsOutputFormat
 from .turn_config import TurnConfig
 from .turn_mode import TurnMode
+from .update_workspace_member_response_model import UpdateWorkspaceMemberResponseModel
 from .url_avatar import UrlAvatar
 from .usage_characters_response_model import UsageCharactersResponseModel
 from .user import User
@@ -371,12 +383,15 @@
     "AddProjectResponseModel",
     "AddPronunciationDictionaryResponseModel",
     "AddPronunciationDictionaryRulesResponseModel",
+    "AddSharingVoiceRequest",
     "AddVoiceIvcResponseModel",
     "AddVoiceResponseModel",
+    "AddWorkspaceGroupMemberResponseModel",
+    "AddWorkspaceInviteResponseModel",
     "Age",
     "AgentBan",
     "AgentCallLimits",
-    "AgentConfig",
+    "AgentConfigApiModel",
     "AgentConfigOverride",
     "AgentConfigOverrideConfig",
     "AgentMetadataResponseModel",
@@ -411,22 +426,22 @@
     "ChapterContentParagraphTtsNodeInputModel",
     "ChapterContentResponseModel",
     "ChapterResponse",
+    "ChapterSnapshotExtendedResponseModel",
     "ChapterSnapshotResponse",
     "ChapterSnapshotsResponse",
     "ChapterState",
     "ChapterStatisticsResponse",
     "ChapterWithContentResponseModel",
     "ChapterWithContentResponseModelState",
+    "CharacterAlignmentModel",
     "CharacterAlignmentResponseModel",
     "ClientEvent",
     "ClientToolConfig",
-    "ConvAiNewSecretConfig",
     "ConvAiSecretLocator",
-    "ConvAiStoredSecretConfig",
     "ConvAiStoredSecretDependencies",
-    "ConvAiStoredSecretDependenciesAgentsItem",
-    "ConvAiStoredSecretDependenciesAgentsItem_Available",
-    "ConvAiStoredSecretDependenciesAgentsItem_Unknown",
+    "ConvAiStoredSecretDependenciesAgentToolsItem",
+    "ConvAiStoredSecretDependenciesAgentToolsItem_Available",
+    "ConvAiStoredSecretDependenciesAgentToolsItem_Unknown",
     "ConvAiStoredSecretDependenciesToolsItem",
     "ConvAiStoredSecretDependenciesToolsItem_Available",
     "ConvAiStoredSecretDependenciesToolsItem_Unknown",
@@ -455,27 +470,48 @@
     "ConversationSummaryResponseModelStatus",
     "ConversationTokenDbModel",
     "ConversationTokenPurpose",
-    "ConversationalConfig",
+    "ConversationalConfigApiModel",
+    "ConvertChapterResponseModel",
+    "ConvertProjectResponseModel",
     "CreateAgentResponseModel",
+    "CreateAudioNativeProjectRequest",
     "CreatePhoneNumberResponseModel",
+    "CreatePronunciationDictionaryResponseModel",
     "Currency",
     "CustomLlm",
     "DataCollectionResultCommonModel",
+    "DeleteChapterResponseModel",
+    "DeleteDubbingResponseModel",
+    "DeleteProjectResponseModel",
     "DeleteSampleResponseModel",
+    "DeleteVoiceResponseModel",
+    "DeleteWorkspaceGroupMemberResponseModel",
+    "DeleteWorkspaceInviteResponseModel",
     "DependentAvailableAgentIdentifier",
     "DependentAvailableAgentIdentifierAccessLevel",
+    "DependentAvailableAgentToolIdentifier",
+    "DependentAvailableAgentToolIdentifierAccessLevel",
     "DependentAvailableToolIdentifier",
     "DependentAvailableToolIdentifierAccessLevel",
+    "DependentPhoneNumberIdentifier",
     "DependentUnknownAgentIdentifier",
+    "DependentUnknownAgentToolIdentifier",
     "DependentUnknownToolIdentifier",
     "DoDubbingResponse",
+    "DocumentUsageModeEnum",
+    "DubbedSegment",
     "DubbingMediaMetadata",
+    "DubbingMediaReference",
     "DubbingMetadataResponse",
+    "DubbingResource",
     "DynamicVariablesConfig",
     "DynamicVariablesConfigDynamicVariablePlaceholdersValue",
     "EditChapterResponseModel",
     "EditProjectResponseModel",
+    "EditVoiceResponseModel",
+    "EditVoiceSettingsResponseModel",
     "EmbedVariant",
+    "EmbeddingModelEnum",
     "EvaluationSettings",
     "EvaluationSuccessResult",
     "ExtendedSubscriptionResponseModelBillingPeriod",
@@ -491,7 +527,7 @@
     "GetAgentsPageResponseModel",
     "GetAudioNativeProjectSettingsResponseModel",
     "GetChaptersResponse",
-    "GetConvaiSettingsResponseModel",
+    "GetConvAiSettingsResponseModel",
     "GetConversationResponseModel",
     "GetConversationResponseModelStatus",
     "GetConversationsPageResponseModel",
@@ -523,8 +559,10 @@
     "HttpValidationError",
     "ImageAvatar",
     "Invoice",
+    "KnowledgeBaseDocumentMetadataResponseModel",
     "KnowledgeBaseLocator",
     "KnowledgeBaseLocatorType",
+    "LanguageAddedResponse",
     "LanguagePreset",
     "LanguagePresetTranslation",
     "LanguageResponse",
@@ -545,7 +583,6 @@
     "ObjectJsonSchemaPropertyPropertiesValue",
     "OrbAvatar",
     "OutputFormat",
-    "PaginatedListedReviewTaskInstanceModel",
     "PhoneNumberAgentInfo",
     "PodcastBulletinMode",
     "PodcastBulletinModeData",
@@ -566,11 +603,14 @@
     "ProjectExtendedResponseModelApplyTextNormalization",
     "ProjectExtendedResponseModelFiction",
     "ProjectExtendedResponseModelQualityPreset",
+    "ProjectExtendedResponseModelSourceType",
     "ProjectExtendedResponseModelTargetAudience",
     "ProjectResponse",
     "ProjectResponseModelAccessLevel",
     "ProjectResponseModelFiction",
+    "ProjectResponseModelSourceType",
     "ProjectResponseModelTargetAudience",
+    "ProjectSnapshotExtendedResponseModel",
     "ProjectSnapshotResponse",
     "ProjectSnapshotUploadResponseModel",
     "ProjectSnapshotUploadResponseModelStatus",
@@ -590,22 +630,29 @@
     "PronunciationDictionaryVersionResponseModel",
     "PydanticPronunciationDictionaryVersionLocator",
     "QueryParamsJsonSchema",
-    "QuoteRequestModel",
-    "QuoteResponseModel",
+    "RagConfig",
+    "RagIndexResponseModel",
+    "RagIndexStatus",
     "ReaderResourceResponseModel",
     "ReaderResourceResponseModelResourceType",
     "RecordingResponse",
     "RemovePronunciationDictionaryRulesResponseModel",
     "ResourceAccessInfo",
     "ResourceAccessInfoRole",
-    "ReviewState",
     "ReviewStatus",
-    "ReviewTaskInstanceResponseModel",
     "SafetyCommonModel",
     "SafetyEvaluation",
     "SafetyResponseModel",
     "SafetyRule",
     "SecretDependencyType",
+    "SegmentCreateResponse",
+    "SegmentDeleteResponse",
+    "SegmentDubResponse",
+    "SegmentTranscriptionResponse",
+    "SegmentTranslationResponse",
+    "SegmentUpdateResponse",
+    "SpeakerSegment",
+    "SpeakerTrack",
     "SpeechHistoryItemResponse",
     "SpeechHistoryItemResponseModelSource",
     "SpeechHistoryItemResponseModelVoiceCategory",
@@ -622,26 +669,8 @@
     "SubscriptionStatus",
     "SubscriptionUsageResponseModel",
     "SystemToolConfig",
-    "TagKind",
-    "TagModel",
-    "TaskInstanceEventKind",
-    "TaskInstanceEventResponseModel",
     "TelephonyProvider",
     "TextToSpeechAsStreamRequest",
-    "ToolRequestModel",
-    "ToolRequestModelToolConfig",
-    "ToolRequestModelToolConfig_Client",
-    "ToolRequestModelToolConfig_System",
-    "ToolRequestModelToolConfig_Webhook",
-    "ToolResponseModel",
-    "ToolResponseModelDependentAgentsItem",
-    "ToolResponseModelDependentAgentsItem_Available",
-    "ToolResponseModelDependentAgentsItem_Unknown",
-    "ToolResponseModelToolConfig",
-    "ToolResponseModelToolConfig_Client",
-    "ToolResponseModelToolConfig_System",
-    "ToolResponseModelToolConfig_Webhook",
-    "ToolsResponseModel",
     "TtsConversationalConfig",
     "TtsConversationalConfigOverride",
     "TtsConversationalConfigOverrideConfig",
@@ -650,6 +679,7 @@
     "TtsOutputFormat",
     "TurnConfig",
     "TurnMode",
+    "UpdateWorkspaceMemberResponseModel",
     "UrlAvatar",
     "UsageCharactersResponseModel",
     "User",
diff --git a/src/elevenlabs/types/add_knowledge_base_response_model.py b/src/elevenlabs/types/add_knowledge_base_response_model.py
index e9105cb4..23827910 100644
--- a/src/elevenlabs/types/add_knowledge_base_response_model.py
+++ b/src/elevenlabs/types/add_knowledge_base_response_model.py
@@ -8,6 +8,7 @@
 
 class AddKnowledgeBaseResponseModel(UncheckedBaseModel):
     id: str
+    prompt_injectable: bool
 
     if IS_PYDANTIC_V2:
         model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
diff --git a/src/elevenlabs/types/add_pronunciation_dictionary_response_model.py b/src/elevenlabs/types/add_pronunciation_dictionary_response_model.py
index ef0d7f00..cd013cca 100644
--- a/src/elevenlabs/types/add_pronunciation_dictionary_response_model.py
+++ b/src/elevenlabs/types/add_pronunciation_dictionary_response_model.py
@@ -1,18 +1,41 @@
 # This file was auto-generated by Fern from our API Definition.
 
 from ..core.unchecked_base_model import UncheckedBaseModel
+import pydantic
 import typing
 from ..core.pydantic_utilities import IS_PYDANTIC_V2
-import pydantic
 
 
 class AddPronunciationDictionaryResponseModel(UncheckedBaseModel):
-    id: str
-    name: str
-    created_by: str
-    creation_time_unix: int
-    version_id: str
-    description: typing.Optional[str] = None
+    id: str = pydantic.Field()
+    """
+    The ID of the created pronunciation dictionary.
+    """
+
+    name: str = pydantic.Field()
+    """
+    The name of the created pronunciation dictionary.
+    """
+
+    created_by: str = pydantic.Field()
+    """
+    The user ID of the creator of the pronunciation dictionary.
+    """
+
+    creation_time_unix: int = pydantic.Field()
+    """
+    The creation time of the pronunciation dictionary in Unix timestamp.
+    """
+
+    version_id: str = pydantic.Field()
+    """
+    The ID of the created pronunciation dictionary version.
+    """
+
+    description: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    The description of the pronunciation dictionary.
+    """
 
     if IS_PYDANTIC_V2:
         model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
diff --git a/src/elevenlabs/types/add_pronunciation_dictionary_rules_response_model.py b/src/elevenlabs/types/add_pronunciation_dictionary_rules_response_model.py
index bcd7ce8c..7eece657 100644
--- a/src/elevenlabs/types/add_pronunciation_dictionary_rules_response_model.py
+++ b/src/elevenlabs/types/add_pronunciation_dictionary_rules_response_model.py
@@ -1,14 +1,21 @@
 # This file was auto-generated by Fern from our API Definition.
 
 from ..core.unchecked_base_model import UncheckedBaseModel
+import pydantic
 from ..core.pydantic_utilities import IS_PYDANTIC_V2
 import typing
-import pydantic
 
 
 class AddPronunciationDictionaryRulesResponseModel(UncheckedBaseModel):
-    id: str
-    version_id: str
+    id: str = pydantic.Field()
+    """
+    The ID of the pronunciation dictionary.
+    """
+
+    version_id: str = pydantic.Field()
+    """
+    The version ID of the pronunciation dictionary.
+    """
 
     if IS_PYDANTIC_V2:
         model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
diff --git a/src/elevenlabs/types/tag_kind.py b/src/elevenlabs/types/add_sharing_voice_request.py
similarity index 52%
rename from src/elevenlabs/types/tag_kind.py
rename to src/elevenlabs/types/add_sharing_voice_request.py
index f5c47a0a..f6a04a45 100644
--- a/src/elevenlabs/types/tag_kind.py
+++ b/src/elevenlabs/types/add_sharing_voice_request.py
@@ -2,4 +2,4 @@
 
 import typing
 
-TagKind = typing.Union[typing.Literal["lang", "job_type"], typing.Any]
+AddSharingVoiceRequest = typing.Optional[typing.Any]
diff --git a/src/elevenlabs/types/add_voice_ivc_response_model.py b/src/elevenlabs/types/add_voice_ivc_response_model.py
index 0c85c00e..f84d7dfa 100644
--- a/src/elevenlabs/types/add_voice_ivc_response_model.py
+++ b/src/elevenlabs/types/add_voice_ivc_response_model.py
@@ -1,14 +1,21 @@
 # This file was auto-generated by Fern from our API Definition.
 
 from ..core.unchecked_base_model import UncheckedBaseModel
+import pydantic
 from ..core.pydantic_utilities import IS_PYDANTIC_V2
 import typing
-import pydantic
 
 
 class AddVoiceIvcResponseModel(UncheckedBaseModel):
-    voice_id: str
-    requires_verification: bool
+    voice_id: str = pydantic.Field()
+    """
+    The ID of the newly created voice.
+ """ + + requires_verification: bool = pydantic.Field() + """ + Whether the voice requires verification + """ if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/elevenlabs/types/add_voice_response_model.py b/src/elevenlabs/types/add_voice_response_model.py index 8a285419..f914dd61 100644 --- a/src/elevenlabs/types/add_voice_response_model.py +++ b/src/elevenlabs/types/add_voice_response_model.py @@ -1,13 +1,16 @@ # This file was auto-generated by Fern from our API Definition. from ..core.unchecked_base_model import UncheckedBaseModel +import pydantic from ..core.pydantic_utilities import IS_PYDANTIC_V2 import typing -import pydantic class AddVoiceResponseModel(UncheckedBaseModel): - voice_id: str + voice_id: str = pydantic.Field() + """ + The ID of the voice. + """ if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/elevenlabs/types/add_workspace_group_member_response_model.py b/src/elevenlabs/types/add_workspace_group_member_response_model.py new file mode 100644 index 00000000..55e2cc2b --- /dev/null +++ b/src/elevenlabs/types/add_workspace_group_member_response_model.py @@ -0,0 +1,22 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing + + +class AddWorkspaceGroupMemberResponseModel(UncheckedBaseModel): + status: str = pydantic.Field() + """ + The status of the workspace group member addition request. If the request was successful, the status will be 'ok'. Otherwise an error message with status 500 will be returned. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/add_workspace_invite_response_model.py b/src/elevenlabs/types/add_workspace_invite_response_model.py new file mode 100644 index 00000000..1d3749f5 --- /dev/null +++ b/src/elevenlabs/types/add_workspace_invite_response_model.py @@ -0,0 +1,22 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing + + +class AddWorkspaceInviteResponseModel(UncheckedBaseModel): + status: str = pydantic.Field() + """ + The status of the workspace invite request. If the request was successful, the status will be 'ok'. Otherwise an error message with status 500 will be returned. 
+ """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/agent_config.py b/src/elevenlabs/types/agent_config_api_model.py similarity index 82% rename from src/elevenlabs/types/agent_config.py rename to src/elevenlabs/types/agent_config_api_model.py index 36ffac18..9a1651c2 100644 --- a/src/elevenlabs/types/agent_config.py +++ b/src/elevenlabs/types/agent_config_api_model.py @@ -5,18 +5,18 @@ from .array_json_schema_property import ArrayJsonSchemaProperty from .object_json_schema_property import ObjectJsonSchemaProperty import typing -from .prompt_agent import PromptAgent from .dynamic_variables_config import DynamicVariablesConfig +from .prompt_agent import PromptAgent from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic from ..core.pydantic_utilities import update_forward_refs -class AgentConfig(UncheckedBaseModel): - prompt: typing.Optional[PromptAgent] = None +class AgentConfigApiModel(UncheckedBaseModel): first_message: typing.Optional[str] = None language: typing.Optional[str] = None dynamic_variables: typing.Optional[DynamicVariablesConfig] = None + prompt: typing.Optional[PromptAgent] = None if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 @@ -28,5 +28,5 @@ class Config: extra = pydantic.Extra.allow -update_forward_refs(ArrayJsonSchemaProperty, AgentConfig=AgentConfig) -update_forward_refs(ObjectJsonSchemaProperty, AgentConfig=AgentConfig) +update_forward_refs(ArrayJsonSchemaProperty, AgentConfigApiModel=AgentConfigApiModel) +update_forward_refs(ObjectJsonSchemaProperty, AgentConfigApiModel=AgentConfigApiModel) diff --git a/src/elevenlabs/types/audio_native_create_project_response_model.py b/src/elevenlabs/types/audio_native_create_project_response_model.py index 5919e3b3..acf5484c 100644 --- a/src/elevenlabs/types/audio_native_create_project_response_model.py +++ b/src/elevenlabs/types/audio_native_create_project_response_model.py @@ -1,15 +1,26 @@ # This file was auto-generated by Fern from our API Definition. from ..core.unchecked_base_model import UncheckedBaseModel +import pydantic from ..core.pydantic_utilities import IS_PYDANTIC_V2 import typing -import pydantic class AudioNativeCreateProjectResponseModel(UncheckedBaseModel): - project_id: str - converting: bool - html_snippet: str + project_id: str = pydantic.Field() + """ + The ID of the created Audio Native project. + """ + + converting: bool = pydantic.Field() + """ + Whether the project is currently being converted. + """ + + html_snippet: str = pydantic.Field() + """ + The HTML snippet to embed the Audio Native player. + """ if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/elevenlabs/types/audio_native_edit_content_response_model.py b/src/elevenlabs/types/audio_native_edit_content_response_model.py index c66254e3..4dd1f574 100644 --- a/src/elevenlabs/types/audio_native_edit_content_response_model.py +++ b/src/elevenlabs/types/audio_native_edit_content_response_model.py @@ -1,16 +1,31 @@ # This file was auto-generated by Fern from our API Definition. 
 
 from ..core.unchecked_base_model import UncheckedBaseModel
+import pydantic
 from ..core.pydantic_utilities import IS_PYDANTIC_V2
 import typing
-import pydantic
 
 
 class AudioNativeEditContentResponseModel(UncheckedBaseModel):
-    project_id: str
-    converting: bool
-    publishing: bool
-    html_snippet: str
+    project_id: str = pydantic.Field()
+    """
+    The ID of the project.
+    """
+
+    converting: bool = pydantic.Field()
+    """
+    Whether the project is currently being converted.
+    """
+
+    publishing: bool = pydantic.Field()
+    """
+    Whether the project is currently being published.
+    """
+
+    html_snippet: str = pydantic.Field()
+    """
+    The HTML snippet to embed the Audio Native player.
+    """
 
     if IS_PYDANTIC_V2:
         model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
diff --git a/src/elevenlabs/types/audio_native_project_settings_response_model.py b/src/elevenlabs/types/audio_native_project_settings_response_model.py
index 0b6c5742..e805ca9c 100644
--- a/src/elevenlabs/types/audio_native_project_settings_response_model.py
+++ b/src/elevenlabs/types/audio_native_project_settings_response_model.py
@@ -1,21 +1,56 @@
 # This file was auto-generated by Fern from our API Definition.
 
 from ..core.unchecked_base_model import UncheckedBaseModel
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-import typing
 import pydantic
+import typing
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
 
 
 class AudioNativeProjectSettingsResponseModel(UncheckedBaseModel):
-    title: str
-    image: str
-    author: str
-    small: bool
-    text_color: str
-    background_color: str
-    sessionization: int
-    audio_path: str
-    audio_url: str
+    title: str = pydantic.Field()
+    """
+    The title of the project.
+    """
+
+    image: str = pydantic.Field()
+    """
+    The image of the project.
+    """
+
+    author: str = pydantic.Field()
+    """
+    The author of the project.
+    """
+
+    small: bool = pydantic.Field()
+    """
+    Whether the project is small.
+    """
+
+    text_color: str = pydantic.Field()
+    """
+    The text color of the project.
+    """
+
+    background_color: str = pydantic.Field()
+    """
+    The background color of the project.
+    """
+
+    sessionization: int = pydantic.Field()
+    """
+    The sessionization of the project. Specifies for how many minutes to persist the session across page reloads.
+    """
+
+    audio_path: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    The path of the audio file.
+    """
+
+    audio_url: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    The URL of the audio file.
+    """
 
     if IS_PYDANTIC_V2:
         model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
diff --git a/src/elevenlabs/types/body_add_to_knowledge_base_v_1_convai_add_to_knowledge_base_post.py b/src/elevenlabs/types/body_add_to_knowledge_base_v_1_convai_add_to_knowledge_base_post.py
index 112273c1..134ef767 100644
--- a/src/elevenlabs/types/body_add_to_knowledge_base_v_1_convai_add_to_knowledge_base_post.py
+++ b/src/elevenlabs/types/body_add_to_knowledge_base_v_1_convai_add_to_knowledge_base_post.py
@@ -7,6 +7,11 @@
 
 class BodyAddToKnowledgeBaseV1ConvaiAddToKnowledgeBasePost(UncheckedBaseModel):
+    name: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    A custom, human-readable name for the document.
+    """
+
     url: typing.Optional[str] = pydantic.Field(default=None)
     """
     URL to a page of documentation that the agent will have access to in order to interact with users.
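Editor's note: this request body (and the agent-scoped variant in the next file) gains an optional `name` field for giving a knowledge-base document a human-readable label. A minimal usage sketch — assuming the generated client exposes these body fields as keyword arguments on `conversational_ai.add_to_knowledge_base` (the method name is inferred from the route, not confirmed by this diff):

```python
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)
# `name` is the new optional label for the document; omitting it keeps
# the previous behavior. `url` works exactly as before.
client.conversational_ai.add_to_knowledge_base(
    name="Support FAQ",
    url="https://example.com/faq",
)
```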
diff --git a/src/elevenlabs/types/body_add_to_knowledge_base_v_1_convai_agents_agent_id_add_to_knowledge_base_post.py b/src/elevenlabs/types/body_add_to_knowledge_base_v_1_convai_agents_agent_id_add_to_knowledge_base_post.py
index cb3ef4bc..592599e4 100644
--- a/src/elevenlabs/types/body_add_to_knowledge_base_v_1_convai_agents_agent_id_add_to_knowledge_base_post.py
+++ b/src/elevenlabs/types/body_add_to_knowledge_base_v_1_convai_agents_agent_id_add_to_knowledge_base_post.py
@@ -7,6 +7,11 @@
 
 class BodyAddToKnowledgeBaseV1ConvaiAgentsAgentIdAddToKnowledgeBasePost(UncheckedBaseModel):
+    name: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    A custom, human-readable name for the document.
+    """
+
     url: typing.Optional[str] = pydantic.Field(default=None)
     """
     URL to a page of documentation that the agent will have access to in order to interact with users.
diff --git a/src/elevenlabs/types/chapter_response.py b/src/elevenlabs/types/chapter_response.py
index 192804d6..8410cb8e 100644
--- a/src/elevenlabs/types/chapter_response.py
+++ b/src/elevenlabs/types/chapter_response.py
@@ -1,22 +1,53 @@
 # This file was auto-generated by Fern from our API Definition.
 
 from ..core.unchecked_base_model import UncheckedBaseModel
+import pydantic
 import typing
 from .chapter_state import ChapterState
 from .chapter_statistics_response import ChapterStatisticsResponse
 from ..core.pydantic_utilities import IS_PYDANTIC_V2
-import pydantic
 
 
 class ChapterResponse(UncheckedBaseModel):
-    chapter_id: str
-    name: str
-    last_conversion_date_unix: typing.Optional[int] = None
-    conversion_progress: typing.Optional[float] = None
-    can_be_downloaded: bool
-    state: ChapterState
-    statistics: typing.Optional[ChapterStatisticsResponse] = None
-    last_conversion_error: typing.Optional[str] = None
+    chapter_id: str = pydantic.Field()
+    """
+    The ID of the chapter.
+    """
+
+    name: str = pydantic.Field()
+    """
+    The name of the chapter.
+    """
+
+    last_conversion_date_unix: typing.Optional[int] = pydantic.Field(default=None)
+    """
+    The last conversion date of the chapter.
+    """
+
+    conversion_progress: typing.Optional[float] = pydantic.Field(default=None)
+    """
+    The conversion progress of the chapter.
+    """
+
+    can_be_downloaded: bool = pydantic.Field()
+    """
+    Whether the chapter can be downloaded.
+    """
+
+    state: ChapterState = pydantic.Field()
+    """
+    The state of the chapter.
+    """
+
+    statistics: typing.Optional[ChapterStatisticsResponse] = pydantic.Field(default=None)
+    """
+    The statistics of the chapter.
+    """
+
+    last_conversion_error: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    The last conversion error of the chapter.
+    """
 
     if IS_PYDANTIC_V2:
         model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
diff --git a/src/elevenlabs/types/chapter_snapshot_extended_response_model.py b/src/elevenlabs/types/chapter_snapshot_extended_response_model.py
new file mode 100644
index 00000000..4abc2a3e
--- /dev/null
+++ b/src/elevenlabs/types/chapter_snapshot_extended_response_model.py
@@ -0,0 +1,45 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import pydantic
+import typing
+from .character_alignment_model import CharacterAlignmentModel
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class ChapterSnapshotExtendedResponseModel(UncheckedBaseModel):
+    chapter_snapshot_id: str = pydantic.Field()
+    """
+    The ID of the chapter snapshot.
+ """ + + project_id: str = pydantic.Field() + """ + The ID of the project. + """ + + chapter_id: str = pydantic.Field() + """ + The ID of the chapter. + """ + + created_at_unix: int = pydantic.Field() + """ + The creation date of the chapter snapshot. + """ + + name: str = pydantic.Field() + """ + The name of the chapter snapshot. + """ + + character_alignments: typing.List[CharacterAlignmentModel] + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/chapter_snapshot_response.py b/src/elevenlabs/types/chapter_snapshot_response.py index c316c2e1..bdfaf456 100644 --- a/src/elevenlabs/types/chapter_snapshot_response.py +++ b/src/elevenlabs/types/chapter_snapshot_response.py @@ -1,17 +1,36 @@ # This file was auto-generated by Fern from our API Definition. from ..core.unchecked_base_model import UncheckedBaseModel +import pydantic from ..core.pydantic_utilities import IS_PYDANTIC_V2 import typing -import pydantic class ChapterSnapshotResponse(UncheckedBaseModel): - chapter_snapshot_id: str - project_id: str - chapter_id: str - created_at_unix: int - name: str + chapter_snapshot_id: str = pydantic.Field() + """ + The ID of the chapter snapshot. + """ + + project_id: str = pydantic.Field() + """ + The ID of the project. + """ + + chapter_id: str = pydantic.Field() + """ + The ID of the chapter. + """ + + created_at_unix: int = pydantic.Field() + """ + The creation date of the chapter snapshot. + """ + + name: str = pydantic.Field() + """ + The name of the chapter snapshot. + """ if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/elevenlabs/types/chapter_snapshots_response.py b/src/elevenlabs/types/chapter_snapshots_response.py index a40294c9..2bc9bf85 100644 --- a/src/elevenlabs/types/chapter_snapshots_response.py +++ b/src/elevenlabs/types/chapter_snapshots_response.py @@ -3,12 +3,15 @@ from ..core.unchecked_base_model import UncheckedBaseModel import typing from .chapter_snapshot_response import ChapterSnapshotResponse -from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 class ChapterSnapshotsResponse(UncheckedBaseModel): - snapshots: typing.List[ChapterSnapshotResponse] + snapshots: typing.List[ChapterSnapshotResponse] = pydantic.Field() + """ + List of chapter snapshots. + """ if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/elevenlabs/types/chapter_statistics_response.py b/src/elevenlabs/types/chapter_statistics_response.py index 15f5f127..5cc5c049 100644 --- a/src/elevenlabs/types/chapter_statistics_response.py +++ b/src/elevenlabs/types/chapter_statistics_response.py @@ -1,16 +1,31 @@ # This file was auto-generated by Fern from our API Definition. from ..core.unchecked_base_model import UncheckedBaseModel +import pydantic from ..core.pydantic_utilities import IS_PYDANTIC_V2 import typing -import pydantic class ChapterStatisticsResponse(UncheckedBaseModel): - characters_unconverted: int - characters_converted: int - paragraphs_converted: int - paragraphs_unconverted: int + characters_unconverted: int = pydantic.Field() + """ + The number of unconverted characters. 
+ """ + + characters_converted: int = pydantic.Field() + """ + The number of converted characters. + """ + + paragraphs_converted: int = pydantic.Field() + """ + The number of converted paragraphs. + """ + + paragraphs_unconverted: int = pydantic.Field() + """ + The number of unconverted paragraphs. + """ if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/elevenlabs/types/chapter_with_content_response_model.py b/src/elevenlabs/types/chapter_with_content_response_model.py index b3fd5ff0..22ba60a2 100644 --- a/src/elevenlabs/types/chapter_with_content_response_model.py +++ b/src/elevenlabs/types/chapter_with_content_response_model.py @@ -1,23 +1,55 @@ # This file was auto-generated by Fern from our API Definition. from ..core.unchecked_base_model import UncheckedBaseModel +import pydantic import typing from .chapter_with_content_response_model_state import ChapterWithContentResponseModelState from .chapter_statistics_response import ChapterStatisticsResponse from .chapter_content_response_model import ChapterContentResponseModel from ..core.pydantic_utilities import IS_PYDANTIC_V2 -import pydantic class ChapterWithContentResponseModel(UncheckedBaseModel): - chapter_id: str - name: str - last_conversion_date_unix: typing.Optional[int] = None - conversion_progress: typing.Optional[float] = None - can_be_downloaded: bool - state: ChapterWithContentResponseModelState - statistics: typing.Optional[ChapterStatisticsResponse] = None - last_conversion_error: typing.Optional[str] = None + chapter_id: str = pydantic.Field() + """ + The ID of the chapter. + """ + + name: str = pydantic.Field() + """ + The name of the chapter. + """ + + last_conversion_date_unix: typing.Optional[int] = pydantic.Field(default=None) + """ + The last conversion date of the chapter. + """ + + conversion_progress: typing.Optional[float] = pydantic.Field(default=None) + """ + The conversion progress of the chapter. + """ + + can_be_downloaded: bool = pydantic.Field() + """ + Whether the chapter can be downloaded. + """ + + state: ChapterWithContentResponseModelState = pydantic.Field() + """ + The state of the chapter. + """ + + statistics: typing.Optional[ChapterStatisticsResponse] = pydantic.Field(default=None) + """ + The statistics of the chapter. + """ + + last_conversion_error: typing.Optional[str] = pydantic.Field(default=None) + """ + The last conversion error of the chapter. + """ + content: ChapterContentResponseModel if IS_PYDANTIC_V2: diff --git a/src/elevenlabs/types/character_alignment_model.py b/src/elevenlabs/types/character_alignment_model.py new file mode 100644 index 00000000..00fb275d --- /dev/null +++ b/src/elevenlabs/types/character_alignment_model.py @@ -0,0 +1,21 @@ +# This file was auto-generated by Fern from our API Definition. 
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import typing
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class CharacterAlignmentModel(UncheckedBaseModel):
+    characters: typing.List[str]
+    character_start_times_seconds: typing.List[float]
+    character_end_times_seconds: typing.List[float]
+
+    if IS_PYDANTIC_V2:
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
+    else:
+
+        class Config:
+            frozen = True
+            smart_union = True
+            extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/client_tool_config.py b/src/elevenlabs/types/client_tool_config.py
index 72762e08..2c02824d 100644
--- a/src/elevenlabs/types/client_tool_config.py
+++ b/src/elevenlabs/types/client_tool_config.py
@@ -5,6 +5,7 @@
 from .array_json_schema_property import ArrayJsonSchemaProperty
 from .object_json_schema_property import ObjectJsonSchemaProperty
 import typing
+from .dynamic_variables_config import DynamicVariablesConfig
 from ..core.pydantic_utilities import IS_PYDANTIC_V2
 import pydantic
 from ..core.pydantic_utilities import update_forward_refs
@@ -20,6 +21,7 @@ class ClientToolConfig(UncheckedBaseModel):
     parameters: typing.Optional[ObjectJsonSchemaProperty] = None
     expects_response: typing.Optional[bool] = None
     response_timeout_secs: typing.Optional[int] = None
+    dynamic_variables: typing.Optional[DynamicVariablesConfig] = None
 
     if IS_PYDANTIC_V2:
         model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
diff --git a/src/elevenlabs/types/conv_ai_stored_secret_dependencies.py b/src/elevenlabs/types/conv_ai_stored_secret_dependencies.py
index 500d6af1..42edcd13 100644
--- a/src/elevenlabs/types/conv_ai_stored_secret_dependencies.py
+++ b/src/elevenlabs/types/conv_ai_stored_secret_dependencies.py
@@ -3,16 +3,18 @@
 from ..core.unchecked_base_model import UncheckedBaseModel
 import typing
 from .conv_ai_stored_secret_dependencies_tools_item import ConvAiStoredSecretDependenciesToolsItem
-from .conv_ai_stored_secret_dependencies_agents_item import ConvAiStoredSecretDependenciesAgentsItem
+from .conv_ai_stored_secret_dependencies_agent_tools_item import ConvAiStoredSecretDependenciesAgentToolsItem
 from .secret_dependency_type import SecretDependencyType
+from .dependent_phone_number_identifier import DependentPhoneNumberIdentifier
 from ..core.pydantic_utilities import IS_PYDANTIC_V2
 import pydantic
 
 
 class ConvAiStoredSecretDependencies(UncheckedBaseModel):
     tools: typing.List[ConvAiStoredSecretDependenciesToolsItem]
-    agents: typing.List[ConvAiStoredSecretDependenciesAgentsItem]
+    agent_tools: typing.List[ConvAiStoredSecretDependenciesAgentToolsItem]
     others: typing.List[SecretDependencyType]
+    phone_numbers: typing.Optional[typing.List[DependentPhoneNumberIdentifier]] = None
 
     if IS_PYDANTIC_V2:
         model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
diff --git a/src/elevenlabs/types/conv_ai_stored_secret_dependencies_agents_item.py b/src/elevenlabs/types/conv_ai_stored_secret_dependencies_agent_tools_item.py
similarity index 63%
rename from src/elevenlabs/types/conv_ai_stored_secret_dependencies_agents_item.py
rename to src/elevenlabs/types/conv_ai_stored_secret_dependencies_agent_tools_item.py
index 136a828b..423caca1 100644
--- a/src/elevenlabs/types/conv_ai_stored_secret_dependencies_agents_item.py
+++ b/src/elevenlabs/types/conv_ai_stored_secret_dependencies_agent_tools_item.py
@@ -3,19 +3,20 @@
 from __future__ import annotations
 from ..core.unchecked_base_model import UncheckedBaseModel
 import typing
-from .dependent_available_agent_identifier_access_level import DependentAvailableAgentIdentifierAccessLevel
+from .dependent_available_agent_tool_identifier_access_level import DependentAvailableAgentToolIdentifierAccessLevel
 from ..core.pydantic_utilities import IS_PYDANTIC_V2
 import pydantic
 import typing_extensions
 from ..core.unchecked_base_model import UnionMetadata
 
 
-class ConvAiStoredSecretDependenciesAgentsItem_Available(UncheckedBaseModel):
+class ConvAiStoredSecretDependenciesAgentToolsItem_Available(UncheckedBaseModel):
     type: typing.Literal["available"] = "available"
-    id: str
-    name: str
+    agent_id: str
+    agent_name: str
+    used_by: typing.List[str]
     created_at_unix_secs: int
-    access_level: DependentAvailableAgentIdentifierAccessLevel
+    access_level: DependentAvailableAgentToolIdentifierAccessLevel
 
     if IS_PYDANTIC_V2:
         model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
@@ -27,7 +28,7 @@ class Config:
             extra = pydantic.Extra.allow
 
 
-class ConvAiStoredSecretDependenciesAgentsItem_Unknown(UncheckedBaseModel):
+class ConvAiStoredSecretDependenciesAgentToolsItem_Unknown(UncheckedBaseModel):
     type: typing.Literal["unknown"] = "unknown"
 
     if IS_PYDANTIC_V2:
@@ -40,7 +41,9 @@ class Config:
             extra = pydantic.Extra.allow
 
 
-ConvAiStoredSecretDependenciesAgentsItem = typing_extensions.Annotated[
-    typing.Union[ConvAiStoredSecretDependenciesAgentsItem_Available, ConvAiStoredSecretDependenciesAgentsItem_Unknown],
+ConvAiStoredSecretDependenciesAgentToolsItem = typing_extensions.Annotated[
+    typing.Union[
+        ConvAiStoredSecretDependenciesAgentToolsItem_Available, ConvAiStoredSecretDependenciesAgentToolsItem_Unknown
+    ],
     UnionMetadata(discriminant="type"),
 ]
diff --git a/src/elevenlabs/types/conversational_config.py b/src/elevenlabs/types/conversational_config_api_model.py
similarity index 85%
rename from src/elevenlabs/types/conversational_config.py
rename to src/elevenlabs/types/conversational_config_api_model.py
index 9e1932cf..52a7e88f 100644
--- a/src/elevenlabs/types/conversational_config.py
+++ b/src/elevenlabs/types/conversational_config_api_model.py
@@ -5,24 +5,24 @@
 from .array_json_schema_property import ArrayJsonSchemaProperty
 from .object_json_schema_property import ObjectJsonSchemaProperty
 import typing
-from .agent_config import AgentConfig
 from .asr_conversational_config import AsrConversationalConfig
 from .turn_config import TurnConfig
 from .tts_conversational_config import TtsConversationalConfig
 from .conversation_config import ConversationConfig
 from .language_preset import LanguagePreset
+from .agent_config_api_model import AgentConfigApiModel
 from ..core.pydantic_utilities import IS_PYDANTIC_V2
 import pydantic
 from ..core.pydantic_utilities import update_forward_refs
 
 
-class ConversationalConfig(UncheckedBaseModel):
-    agent: typing.Optional[AgentConfig] = None
+class ConversationalConfigApiModel(UncheckedBaseModel):
     asr: typing.Optional[AsrConversationalConfig] = None
     turn: typing.Optional[TurnConfig] = None
     tts: typing.Optional[TtsConversationalConfig] = None
     conversation: typing.Optional[ConversationConfig] = None
     language_presets: typing.Optional[typing.Dict[str, LanguagePreset]] = None
+    agent: typing.Optional[AgentConfigApiModel] = None
 
     if IS_PYDANTIC_V2:
         model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
@@ -34,5 +34,5 @@ class Config:
             extra = pydantic.Extra.allow
 
 
-update_forward_refs(ArrayJsonSchemaProperty, ConversationalConfig=ConversationalConfig)
-update_forward_refs(ObjectJsonSchemaProperty, ConversationalConfig=ConversationalConfig)
+update_forward_refs(ArrayJsonSchemaProperty, ConversationalConfigApiModel=ConversationalConfigApiModel)
+update_forward_refs(ObjectJsonSchemaProperty, ConversationalConfigApiModel=ConversationalConfigApiModel)
diff --git a/src/elevenlabs/types/convert_chapter_response_model.py b/src/elevenlabs/types/convert_chapter_response_model.py
new file mode 100644
index 00000000..13b81bdf
--- /dev/null
+++ b/src/elevenlabs/types/convert_chapter_response_model.py
@@ -0,0 +1,22 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
+
+
+class ConvertChapterResponseModel(UncheckedBaseModel):
+    status: str = pydantic.Field()
+    """
+    The status of the studio chapter conversion request. If the request was successful, the status will be 'ok'. Otherwise an error message with status 500 will be returned.
+    """
+
+    if IS_PYDANTIC_V2:
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
+    else:
+
+        class Config:
+            frozen = True
+            smart_union = True
+            extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/convert_project_response_model.py b/src/elevenlabs/types/convert_project_response_model.py
new file mode 100644
index 00000000..36b2f1ef
--- /dev/null
+++ b/src/elevenlabs/types/convert_project_response_model.py
@@ -0,0 +1,22 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
+
+
+class ConvertProjectResponseModel(UncheckedBaseModel):
+    status: str = pydantic.Field()
+    """
+    The status of the studio project conversion request. If the request was successful, the status will be 'ok'. Otherwise an error message with status 500 will be returned.
+    """
+
+    if IS_PYDANTIC_V2:
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
+    else:
+
+        class Config:
+            frozen = True
+            smart_union = True
+            extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/create_audio_native_project_request.py b/src/elevenlabs/types/create_audio_native_project_request.py
new file mode 100644
index 00000000..8932716a
--- /dev/null
+++ b/src/elevenlabs/types/create_audio_native_project_request.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+CreateAudioNativeProjectRequest = typing.Optional[typing.Any]
diff --git a/src/elevenlabs/types/create_pronunciation_dictionary_response_model.py b/src/elevenlabs/types/create_pronunciation_dictionary_response_model.py
new file mode 100644
index 00000000..d9dc1f41
--- /dev/null
+++ b/src/elevenlabs/types/create_pronunciation_dictionary_response_model.py
@@ -0,0 +1,22 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
+
+
+class CreatePronunciationDictionaryResponseModel(UncheckedBaseModel):
+    status: str = pydantic.Field()
+    """
+    The status of the create pronunciation dictionary request. If the request was successful, the status will be 'ok'. Otherwise an error message with status 500 will be returned.
+    """
+
+    if IS_PYDANTIC_V2:
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
+    else:
+
+        class Config:
+            frozen = True
+            smart_union = True
+            extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/delete_chapter_response_model.py b/src/elevenlabs/types/delete_chapter_response_model.py
new file mode 100644
index 00000000..2213b8c5
--- /dev/null
+++ b/src/elevenlabs/types/delete_chapter_response_model.py
@@ -0,0 +1,22 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
+
+
+class DeleteChapterResponseModel(UncheckedBaseModel):
+    status: str = pydantic.Field()
+    """
+    The status of the studio chapter deletion request. If the request was successful, the status will be 'ok'. Otherwise an error message with status 500 will be returned.
+    """
+
+    if IS_PYDANTIC_V2:
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
+    else:
+
+        class Config:
+            frozen = True
+            smart_union = True
+            extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/delete_dubbing_response_model.py b/src/elevenlabs/types/delete_dubbing_response_model.py
new file mode 100644
index 00000000..8b372990
--- /dev/null
+++ b/src/elevenlabs/types/delete_dubbing_response_model.py
@@ -0,0 +1,22 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
+
+
+class DeleteDubbingResponseModel(UncheckedBaseModel):
+    status: str = pydantic.Field()
+    """
+    The status of the dubbing project. If the request was successful, the status will be 'ok'. Otherwise an error message with status 500 will be returned.
+    """
+
+    if IS_PYDANTIC_V2:
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
+    else:
+
+        class Config:
+            frozen = True
+            smart_union = True
+            extra = pydantic.Extra.allow
diff --git a/src/elevenlabs/types/delete_project_response_model.py b/src/elevenlabs/types/delete_project_response_model.py
new file mode 100644
index 00000000..dcf7402a
--- /dev/null
+++ b/src/elevenlabs/types/delete_project_response_model.py
@@ -0,0 +1,22 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.unchecked_base_model import UncheckedBaseModel
+import pydantic
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+import typing
+
+
+class DeleteProjectResponseModel(UncheckedBaseModel):
+    status: str = pydantic.Field()
+    """
+    The status of the studio project deletion request. If the request was successful, the status will be 'ok'. Otherwise an error message with status 500 will be returned.
+ """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/paginated_listed_review_task_instance_model.py b/src/elevenlabs/types/delete_voice_response_model.py similarity index 66% rename from src/elevenlabs/types/paginated_listed_review_task_instance_model.py rename to src/elevenlabs/types/delete_voice_response_model.py index 0586fbb1..d81e628b 100644 --- a/src/elevenlabs/types/paginated_listed_review_task_instance_model.py +++ b/src/elevenlabs/types/delete_voice_response_model.py @@ -1,15 +1,16 @@ # This file was auto-generated by Fern from our API Definition. from ..core.unchecked_base_model import UncheckedBaseModel -import typing -from .review_task_instance_response_model import ReviewTaskInstanceResponseModel -from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing -class PaginatedListedReviewTaskInstanceModel(UncheckedBaseModel): - review_tasks: typing.List[ReviewTaskInstanceResponseModel] - cursor: typing.Optional[str] = None +class DeleteVoiceResponseModel(UncheckedBaseModel): + status: str = pydantic.Field() + """ + The status of the voice deletion. If the request was successful, the status will be 'ok'. Otherwise an error message with status 500 will be returned. + """ if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/elevenlabs/types/delete_workspace_group_member_response_model.py b/src/elevenlabs/types/delete_workspace_group_member_response_model.py new file mode 100644 index 00000000..40b01066 --- /dev/null +++ b/src/elevenlabs/types/delete_workspace_group_member_response_model.py @@ -0,0 +1,22 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing + + +class DeleteWorkspaceGroupMemberResponseModel(UncheckedBaseModel): + status: str = pydantic.Field() + """ + The status of the workspace group member deletion request. If the request was successful, the status will be 'ok'. Otherwise an error message with status 500 will be returned. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/delete_workspace_invite_response_model.py b/src/elevenlabs/types/delete_workspace_invite_response_model.py new file mode 100644 index 00000000..a16c7cb6 --- /dev/null +++ b/src/elevenlabs/types/delete_workspace_invite_response_model.py @@ -0,0 +1,22 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing + + +class DeleteWorkspaceInviteResponseModel(UncheckedBaseModel): + status: str = pydantic.Field() + """ + The status of the workspace invite deletion request. If the request was successful, the status will be 'ok'. Otherwise an error message with status 500 will be returned. 
+ """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/dependent_available_agent_tool_identifier.py b/src/elevenlabs/types/dependent_available_agent_tool_identifier.py new file mode 100644 index 00000000..4c662681 --- /dev/null +++ b/src/elevenlabs/types/dependent_available_agent_tool_identifier.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +import typing +from .dependent_available_agent_tool_identifier_access_level import DependentAvailableAgentToolIdentifierAccessLevel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class DependentAvailableAgentToolIdentifier(UncheckedBaseModel): + agent_id: str + agent_name: str + used_by: typing.List[str] + created_at_unix_secs: int + access_level: DependentAvailableAgentToolIdentifierAccessLevel + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/dependent_available_agent_tool_identifier_access_level.py b/src/elevenlabs/types/dependent_available_agent_tool_identifier_access_level.py new file mode 100644 index 00000000..d2035273 --- /dev/null +++ b/src/elevenlabs/types/dependent_available_agent_tool_identifier_access_level.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +DependentAvailableAgentToolIdentifierAccessLevel = typing.Union[typing.Literal["admin", "editor", "viewer"], typing.Any] diff --git a/src/elevenlabs/types/dependent_phone_number_identifier.py b/src/elevenlabs/types/dependent_phone_number_identifier.py new file mode 100644 index 00000000..e9572fcb --- /dev/null +++ b/src/elevenlabs/types/dependent_phone_number_identifier.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +from .telephony_provider import TelephonyProvider +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class DependentPhoneNumberIdentifier(UncheckedBaseModel): + phone_number_id: str + phone_number: str + label: str + provider: TelephonyProvider = "twilio" + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/dependent_unknown_agent_tool_identifier.py b/src/elevenlabs/types/dependent_unknown_agent_tool_identifier.py new file mode 100644 index 00000000..b321835d --- /dev/null +++ b/src/elevenlabs/types/dependent_unknown_agent_tool_identifier.py @@ -0,0 +1,22 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class DependentUnknownAgentToolIdentifier(UncheckedBaseModel): + """ + A model that represents an tool dependent on a knowledge base/tools + to which the user has no direct access. 
+ """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/do_dubbing_response.py b/src/elevenlabs/types/do_dubbing_response.py index 3cc8e523..29f033d5 100644 --- a/src/elevenlabs/types/do_dubbing_response.py +++ b/src/elevenlabs/types/do_dubbing_response.py @@ -1,14 +1,21 @@ # This file was auto-generated by Fern from our API Definition. from ..core.unchecked_base_model import UncheckedBaseModel +import pydantic from ..core.pydantic_utilities import IS_PYDANTIC_V2 import typing -import pydantic class DoDubbingResponse(UncheckedBaseModel): - dubbing_id: str - expected_duration_sec: float + dubbing_id: str = pydantic.Field() + """ + The ID of the dubbing project. + """ + + expected_duration_sec: float = pydantic.Field() + """ + The expected duration of the dubbing project in seconds. + """ if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/elevenlabs/types/document_usage_mode_enum.py b/src/elevenlabs/types/document_usage_mode_enum.py new file mode 100644 index 00000000..bf5bc1c8 --- /dev/null +++ b/src/elevenlabs/types/document_usage_mode_enum.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +DocumentUsageModeEnum = typing.Union[typing.Literal["prompt", "auto"], typing.Any] diff --git a/src/elevenlabs/types/dubbed_segment.py b/src/elevenlabs/types/dubbed_segment.py new file mode 100644 index 00000000..6ca820e0 --- /dev/null +++ b/src/elevenlabs/types/dubbed_segment.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +from .dubbing_media_reference import DubbingMediaReference +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class DubbedSegment(UncheckedBaseModel): + start_time: float + end_time: float + text: str + media_ref: DubbingMediaReference + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/dubbing_media_metadata.py b/src/elevenlabs/types/dubbing_media_metadata.py index 7334f6a2..f5da4a96 100644 --- a/src/elevenlabs/types/dubbing_media_metadata.py +++ b/src/elevenlabs/types/dubbing_media_metadata.py @@ -1,14 +1,21 @@ # This file was auto-generated by Fern from our API Definition. from ..core.unchecked_base_model import UncheckedBaseModel +import pydantic from ..core.pydantic_utilities import IS_PYDANTIC_V2 import typing -import pydantic class DubbingMediaMetadata(UncheckedBaseModel): - content_type: str - duration: float + content_type: str = pydantic.Field() + """ + The content type of the media. + """ + + duration: float = pydantic.Field() + """ + The duration of the media in seconds. 
+ """ if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/elevenlabs/types/dubbing_media_reference.py b/src/elevenlabs/types/dubbing_media_reference.py new file mode 100644 index 00000000..32741dfe --- /dev/null +++ b/src/elevenlabs/types/dubbing_media_reference.py @@ -0,0 +1,25 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class DubbingMediaReference(UncheckedBaseModel): + src: str + content_type: str + bucket_name: str + random_path_slug: str + duration_secs: float + is_audio: bool + url: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/dubbing_metadata_response.py b/src/elevenlabs/types/dubbing_metadata_response.py index 5c3ae585..7d2d48d2 100644 --- a/src/elevenlabs/types/dubbing_metadata_response.py +++ b/src/elevenlabs/types/dubbing_metadata_response.py @@ -1,19 +1,42 @@ # This file was auto-generated by Fern from our API Definition. from ..core.unchecked_base_model import UncheckedBaseModel +import pydantic import typing from .dubbing_media_metadata import DubbingMediaMetadata from ..core.pydantic_utilities import IS_PYDANTIC_V2 -import pydantic class DubbingMetadataResponse(UncheckedBaseModel): - dubbing_id: str - name: str - status: str - target_languages: typing.List[str] - media_metadata: typing.Optional[DubbingMediaMetadata] = None - error: typing.Optional[str] = None + dubbing_id: str = pydantic.Field() + """ + The ID of the dubbing project. + """ + + name: str = pydantic.Field() + """ + The name of the dubbing project. + """ + + status: str = pydantic.Field() + """ + The status of the dubbing project. Either 'dubbed', 'dubbing' or 'failed'. + """ + + target_languages: typing.List[str] = pydantic.Field() + """ + The target languages of the dubbing project. + """ + + media_metadata: typing.Optional[DubbingMediaMetadata] = pydantic.Field(default=None) + """ + The media metadata of the dubbing project. + """ + + error: typing.Optional[str] = pydantic.Field(default=None) + """ + Optional error message if the dubbing project failed. + """ if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/elevenlabs/types/review_task_instance_response_model.py b/src/elevenlabs/types/dubbing_resource.py similarity index 53% rename from src/elevenlabs/types/review_task_instance_response_model.py rename to src/elevenlabs/types/dubbing_resource.py index 0bea3547..e3f168f7 100644 --- a/src/elevenlabs/types/review_task_instance_response_model.py +++ b/src/elevenlabs/types/dubbing_resource.py @@ -1,19 +1,23 @@ # This file was auto-generated by Fern from our API Definition. 
from ..core.unchecked_base_model import UncheckedBaseModel -from .review_state import ReviewState import typing -from .task_instance_event_response_model import TaskInstanceEventResponseModel +from .dubbing_media_reference import DubbingMediaReference +from .speaker_track import SpeakerTrack +from .speaker_segment import SpeakerSegment from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic -class ReviewTaskInstanceResponseModel(UncheckedBaseModel): +class DubbingResource(UncheckedBaseModel): id: str - owner_id: str - description_id: str - state: ReviewState - events: typing.List[TaskInstanceEventResponseModel] + version: int + source_language: str + target_languages: typing.List[str] + background: DubbingMediaReference + foreground: DubbingMediaReference + speaker_tracks: typing.Dict[str, SpeakerTrack] + speaker_segments: typing.Dict[str, SpeakerSegment] if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/elevenlabs/types/edit_voice_response_model.py b/src/elevenlabs/types/edit_voice_response_model.py new file mode 100644 index 00000000..d9b3c8d7 --- /dev/null +++ b/src/elevenlabs/types/edit_voice_response_model.py @@ -0,0 +1,22 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing + + +class EditVoiceResponseModel(UncheckedBaseModel): + status: str = pydantic.Field() + """ + The status of the voice edit request. If the request was successful, the status will be 'ok'. Otherwise an error message with status 500 will be returned. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/edit_voice_settings_response_model.py b/src/elevenlabs/types/edit_voice_settings_response_model.py new file mode 100644 index 00000000..8fc3de99 --- /dev/null +++ b/src/elevenlabs/types/edit_voice_settings_response_model.py @@ -0,0 +1,22 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing + + +class EditVoiceSettingsResponseModel(UncheckedBaseModel): + status: str = pydantic.Field() + """ + The status of the voice settings edit request. If the request was successful, the status will be 'ok'. Otherwise an error message with status 500 will be returned. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/embedding_model_enum.py b/src/elevenlabs/types/embedding_model_enum.py new file mode 100644 index 00000000..75baa0ae --- /dev/null +++ b/src/elevenlabs/types/embedding_model_enum.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +EmbeddingModelEnum = typing.Union[typing.Literal["e5_mistral_7b_instruct", "gte_Qwen2_15B_instruct"], typing.Any] diff --git a/src/elevenlabs/types/fine_tuning_response.py b/src/elevenlabs/types/fine_tuning_response.py index 398fb73c..35bfe315 100644 --- a/src/elevenlabs/types/fine_tuning_response.py +++ b/src/elevenlabs/types/fine_tuning_response.py @@ -2,28 +2,84 @@ from ..core.unchecked_base_model import UncheckedBaseModel import typing +import pydantic from .fine_tuning_response_model_state_value import FineTuningResponseModelStateValue from .verification_attempt_response import VerificationAttemptResponse from .manual_verification_response import ManualVerificationResponse from ..core.pydantic_utilities import IS_PYDANTIC_V2 -import pydantic class FineTuningResponse(UncheckedBaseModel): - is_allowed_to_fine_tune: typing.Optional[bool] = None - state: typing.Optional[typing.Dict[str, FineTuningResponseModelStateValue]] = None - verification_failures: typing.Optional[typing.List[str]] = None - verification_attempts_count: typing.Optional[int] = None - manual_verification_requested: typing.Optional[bool] = None - language: typing.Optional[str] = None - progress: typing.Optional[typing.Dict[str, float]] = None - message: typing.Optional[typing.Dict[str, str]] = None - dataset_duration_seconds: typing.Optional[float] = None - verification_attempts: typing.Optional[typing.List[VerificationAttemptResponse]] = None - slice_ids: typing.Optional[typing.List[str]] = None - manual_verification: typing.Optional[ManualVerificationResponse] = None - max_verification_attempts: typing.Optional[int] = None - next_max_verification_attempts_reset_unix_ms: typing.Optional[int] = None + is_allowed_to_fine_tune: typing.Optional[bool] = pydantic.Field(default=None) + """ + Whether the user is allowed to fine-tune the voice. + """ + + state: typing.Optional[typing.Dict[str, FineTuningResponseModelStateValue]] = pydantic.Field(default=None) + """ + The state of the fine-tuning process for each model. + """ + + verification_failures: typing.Optional[typing.List[str]] = pydantic.Field(default=None) + """ + List of verification failures in the fine-tuning process. + """ + + verification_attempts_count: typing.Optional[int] = pydantic.Field(default=None) + """ + The number of verification attempts in the fine-tuning process. + """ + + manual_verification_requested: typing.Optional[bool] = pydantic.Field(default=None) + """ + Whether a manual verification was requested for the fine-tuning process. + """ + + language: typing.Optional[str] = pydantic.Field(default=None) + """ + The language of the fine-tuning process. + """ + + progress: typing.Optional[typing.Dict[str, float]] = pydantic.Field(default=None) + """ + The progress of the fine-tuning process. + """ + + message: typing.Optional[typing.Dict[str, str]] = pydantic.Field(default=None) + """ + The message of the fine-tuning process. + """ + + dataset_duration_seconds: typing.Optional[float] = pydantic.Field(default=None) + """ + The duration of the dataset in seconds. + """ + + verification_attempts: typing.Optional[typing.List[VerificationAttemptResponse]] = pydantic.Field(default=None) + """ + The list of verification attempts. + """ + + slice_ids: typing.Optional[typing.List[str]] = pydantic.Field(default=None) + """ + List of slice IDs. + """ + + manual_verification: typing.Optional[ManualVerificationResponse] = pydantic.Field(default=None) + """ + The manual verification of the fine-tuning process. 
+ """ + + max_verification_attempts: typing.Optional[int] = pydantic.Field(default=None) + """ + The maximum number of verification attempts. + """ + + next_max_verification_attempts_reset_unix_ms: typing.Optional[int] = pydantic.Field(default=None) + """ + The next maximum verification attempts reset time in Unix milliseconds. + """ + finetuning_state: typing.Optional[typing.Optional[typing.Any]] = None if IS_PYDANTIC_V2: diff --git a/src/elevenlabs/types/get_agent_response_model.py b/src/elevenlabs/types/get_agent_response_model.py index 7c1ab6a9..89119255 100644 --- a/src/elevenlabs/types/get_agent_response_model.py +++ b/src/elevenlabs/types/get_agent_response_model.py @@ -4,11 +4,10 @@ from ..core.unchecked_base_model import UncheckedBaseModel from .array_json_schema_property import ArrayJsonSchemaProperty from .object_json_schema_property import ObjectJsonSchemaProperty -from .conversational_config import ConversationalConfig +from .conversational_config_api_model import ConversationalConfigApiModel from .agent_metadata_response_model import AgentMetadataResponseModel import typing from .agent_platform_settings_response_model import AgentPlatformSettingsResponseModel -from .conv_ai_stored_secret_config import ConvAiStoredSecretConfig from .get_phone_number_response_model import GetPhoneNumberResponseModel from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic @@ -18,10 +17,9 @@ class GetAgentResponseModel(UncheckedBaseModel): agent_id: str name: str - conversation_config: ConversationalConfig + conversation_config: ConversationalConfigApiModel metadata: AgentMetadataResponseModel platform_settings: typing.Optional[AgentPlatformSettingsResponseModel] = None - secrets: typing.List[ConvAiStoredSecretConfig] phone_numbers: typing.Optional[typing.List[GetPhoneNumberResponseModel]] = None if IS_PYDANTIC_V2: diff --git a/src/elevenlabs/types/get_audio_native_project_settings_response_model.py b/src/elevenlabs/types/get_audio_native_project_settings_response_model.py index 57be4dbd..e89afb01 100644 --- a/src/elevenlabs/types/get_audio_native_project_settings_response_model.py +++ b/src/elevenlabs/types/get_audio_native_project_settings_response_model.py @@ -1,16 +1,27 @@ # This file was auto-generated by Fern from our API Definition. from ..core.unchecked_base_model import UncheckedBaseModel +import pydantic +import typing from .audio_native_project_settings_response_model import AudioNativeProjectSettingsResponseModel from ..core.pydantic_utilities import IS_PYDANTIC_V2 -import typing -import pydantic class GetAudioNativeProjectSettingsResponseModel(UncheckedBaseModel): - enabled: bool - snapshot_id: str - settings: AudioNativeProjectSettingsResponseModel + enabled: bool = pydantic.Field() + """ + Whether the project is enabled. + """ + + snapshot_id: typing.Optional[str] = pydantic.Field(default=None) + """ + The ID of the latest snapshot of the project. + """ + + settings: typing.Optional[AudioNativeProjectSettingsResponseModel] = pydantic.Field(default=None) + """ + The settings of the project. 
+ """ if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/elevenlabs/types/get_convai_settings_response_model.py b/src/elevenlabs/types/get_conv_ai_settings_response_model.py similarity index 79% rename from src/elevenlabs/types/get_convai_settings_response_model.py rename to src/elevenlabs/types/get_conv_ai_settings_response_model.py index 8810c5b2..33e35d26 100644 --- a/src/elevenlabs/types/get_convai_settings_response_model.py +++ b/src/elevenlabs/types/get_conv_ai_settings_response_model.py @@ -4,15 +4,13 @@ import typing from .conversation_initiation_client_data_webhook import ConversationInitiationClientDataWebhook from .conv_ai_webhooks import ConvAiWebhooks -from .conv_ai_workspace_stored_secret_config import ConvAiWorkspaceStoredSecretConfig from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic -class GetConvaiSettingsResponseModel(UncheckedBaseModel): +class GetConvAiSettingsResponseModel(UncheckedBaseModel): conversation_initiation_client_data_webhook: typing.Optional[ConversationInitiationClientDataWebhook] = None webhooks: typing.Optional[ConvAiWebhooks] = None - secrets: typing.List[ConvAiWorkspaceStoredSecretConfig] if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/elevenlabs/types/get_knowledge_base_response_model.py b/src/elevenlabs/types/get_knowledge_base_response_model.py index 3909eb51..02175daf 100644 --- a/src/elevenlabs/types/get_knowledge_base_response_model.py +++ b/src/elevenlabs/types/get_knowledge_base_response_model.py @@ -2,6 +2,7 @@ from ..core.unchecked_base_model import UncheckedBaseModel from .get_knowledge_base_response_model_type import GetKnowledgeBaseResponseModelType +from .knowledge_base_document_metadata_response_model import KnowledgeBaseDocumentMetadataResponseModel from .get_knowledge_base_response_model_access_level import GetKnowledgeBaseResponseModelAccessLevel from ..core.pydantic_utilities import IS_PYDANTIC_V2 import typing @@ -12,7 +13,9 @@ class GetKnowledgeBaseResponseModel(UncheckedBaseModel): id: str name: str type: GetKnowledgeBaseResponseModelType + metadata: KnowledgeBaseDocumentMetadataResponseModel extracted_inner_html: str + prompt_injectable: bool access_level: GetKnowledgeBaseResponseModelAccessLevel if IS_PYDANTIC_V2: diff --git a/src/elevenlabs/types/get_knowledge_base_summary_response_model.py b/src/elevenlabs/types/get_knowledge_base_summary_response_model.py index 057ffed3..402ea3ce 100644 --- a/src/elevenlabs/types/get_knowledge_base_summary_response_model.py +++ b/src/elevenlabs/types/get_knowledge_base_summary_response_model.py @@ -2,6 +2,7 @@ from ..core.unchecked_base_model import UncheckedBaseModel from .get_knowledge_base_summary_response_model_type import GetKnowledgeBaseSummaryResponseModelType +from .knowledge_base_document_metadata_response_model import KnowledgeBaseDocumentMetadataResponseModel import typing from .get_knowledge_base_summary_response_model_dependent_agents_item import ( GetKnowledgeBaseSummaryResponseModelDependentAgentsItem, @@ -15,6 +16,8 @@ class GetKnowledgeBaseSummaryResponseModel(UncheckedBaseModel): id: str name: str type: GetKnowledgeBaseSummaryResponseModelType + metadata: KnowledgeBaseDocumentMetadataResponseModel + prompt_injectable: bool dependent_agents: typing.List[GetKnowledgeBaseSummaryResponseModelDependentAgentsItem] access_level: 
GetKnowledgeBaseSummaryResponseModelAccessLevel diff --git a/src/elevenlabs/types/get_library_voices_response.py b/src/elevenlabs/types/get_library_voices_response.py index 7d2da37c..24762451 100644 --- a/src/elevenlabs/types/get_library_voices_response.py +++ b/src/elevenlabs/types/get_library_voices_response.py @@ -3,13 +3,21 @@ from ..core.unchecked_base_model import UncheckedBaseModel import typing from .library_voice_response import LibraryVoiceResponse -from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 class GetLibraryVoicesResponse(UncheckedBaseModel): - voices: typing.List[LibraryVoiceResponse] - has_more: bool + voices: typing.List[LibraryVoiceResponse] = pydantic.Field() + """ + The list of shared voices + """ + + has_more: bool = pydantic.Field() + """ + Whether there are more shared voices in subsequent pages. + """ + last_sort_id: typing.Optional[str] = None if IS_PYDANTIC_V2: diff --git a/src/elevenlabs/types/get_projects_response.py b/src/elevenlabs/types/get_projects_response.py index d57558aa..d9be69f2 100644 --- a/src/elevenlabs/types/get_projects_response.py +++ b/src/elevenlabs/types/get_projects_response.py @@ -3,12 +3,15 @@ from ..core.unchecked_base_model import UncheckedBaseModel import typing from .project_response import ProjectResponse -from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 class GetProjectsResponse(UncheckedBaseModel): - projects: typing.List[ProjectResponse] + projects: typing.List[ProjectResponse] = pydantic.Field() + """ + A list of projects with their metadata. + """ if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/elevenlabs/types/get_pronunciation_dictionaries_metadata_response_model.py b/src/elevenlabs/types/get_pronunciation_dictionaries_metadata_response_model.py index 2ed54ef9..47ff51b8 100644 --- a/src/elevenlabs/types/get_pronunciation_dictionaries_metadata_response_model.py +++ b/src/elevenlabs/types/get_pronunciation_dictionaries_metadata_response_model.py @@ -3,14 +3,25 @@ from ..core.unchecked_base_model import UncheckedBaseModel import typing from .get_pronunciation_dictionary_metadata_response import GetPronunciationDictionaryMetadataResponse -from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 class GetPronunciationDictionariesMetadataResponseModel(UncheckedBaseModel): - pronunciation_dictionaries: typing.List[GetPronunciationDictionaryMetadataResponse] - next_cursor: str - has_more: bool + pronunciation_dictionaries: typing.List[GetPronunciationDictionaryMetadataResponse] = pydantic.Field() + """ + A list of pronunciation dictionaries and their metadata. + """ + + next_cursor: typing.Optional[str] = pydantic.Field(default=None) + """ + The next cursor to use for pagination. + """ + + has_more: bool = pydantic.Field() + """ + Whether there are more pronunciation dictionaries to fetch. 
+ """ if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/elevenlabs/types/get_pronunciation_dictionary_metadata_response.py b/src/elevenlabs/types/get_pronunciation_dictionary_metadata_response.py index c6fd50fc..f65de3a0 100644 --- a/src/elevenlabs/types/get_pronunciation_dictionary_metadata_response.py +++ b/src/elevenlabs/types/get_pronunciation_dictionary_metadata_response.py @@ -1,18 +1,41 @@ # This file was auto-generated by Fern from our API Definition. from ..core.unchecked_base_model import UncheckedBaseModel +import pydantic import typing from ..core.pydantic_utilities import IS_PYDANTIC_V2 -import pydantic class GetPronunciationDictionaryMetadataResponse(UncheckedBaseModel): - id: str - latest_version_id: str - name: str - created_by: str - creation_time_unix: int - description: typing.Optional[str] = None + id: str = pydantic.Field() + """ + The ID of the pronunciation dictionary. + """ + + latest_version_id: str = pydantic.Field() + """ + The ID of the latest version of the pronunciation dictionary. + """ + + name: str = pydantic.Field() + """ + The name of the pronunciation dictionary. + """ + + created_by: str = pydantic.Field() + """ + The user ID of the creator of the pronunciation dictionary. + """ + + creation_time_unix: int = pydantic.Field() + """ + The creation time of the pronunciation dictionary in Unix timestamp. + """ + + description: typing.Optional[str] = pydantic.Field(default=None) + """ + The description of the pronunciation dictionary. + """ if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/elevenlabs/types/get_voices_response.py b/src/elevenlabs/types/get_voices_response.py index 8a636c32..2bbc47c6 100644 --- a/src/elevenlabs/types/get_voices_response.py +++ b/src/elevenlabs/types/get_voices_response.py @@ -3,12 +3,15 @@ from ..core.unchecked_base_model import UncheckedBaseModel import typing from .voice import Voice -from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 class GetVoicesResponse(UncheckedBaseModel): - voices: typing.List[Voice] + voices: typing.List[Voice] = pydantic.Field() + """ + A list of available voices. + """ if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/elevenlabs/types/knowledge_base_document_metadata_response_model.py b/src/elevenlabs/types/knowledge_base_document_metadata_response_model.py new file mode 100644 index 00000000..702060e7 --- /dev/null +++ b/src/elevenlabs/types/knowledge_base_document_metadata_response_model.py @@ -0,0 +1,21 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.unchecked_base_model import UncheckedBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class KnowledgeBaseDocumentMetadataResponseModel(UncheckedBaseModel): + created_at_unix_secs: int + last_updated_at_unix_secs: int + size_bytes: int + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/knowledge_base_locator.py b/src/elevenlabs/types/knowledge_base_locator.py index 95aa389c..2fe0b196 100644 --- a/src/elevenlabs/types/knowledge_base_locator.py +++ b/src/elevenlabs/types/knowledge_base_locator.py @@ -2,8 +2,9 @@ from ..core.unchecked_base_model import UncheckedBaseModel from .knowledge_base_locator_type import KnowledgeBaseLocatorType -from ..core.pydantic_utilities import IS_PYDANTIC_V2 import typing +from .document_usage_mode_enum import DocumentUsageModeEnum +from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic @@ -11,6 +12,7 @@ class KnowledgeBaseLocator(UncheckedBaseModel): type: KnowledgeBaseLocatorType name: str id: str + usage_mode: typing.Optional[DocumentUsageModeEnum] = None if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/elevenlabs/types/conv_ai_new_secret_config.py b/src/elevenlabs/types/language_added_response.py similarity index 86% rename from src/elevenlabs/types/conv_ai_new_secret_config.py rename to src/elevenlabs/types/language_added_response.py index 4276a25e..7611ef95 100644 --- a/src/elevenlabs/types/conv_ai_new_secret_config.py +++ b/src/elevenlabs/types/language_added_response.py @@ -6,9 +6,8 @@ import pydantic -class ConvAiNewSecretConfig(UncheckedBaseModel): - name: str - value: str +class LanguageAddedResponse(UncheckedBaseModel): + version: int if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/elevenlabs/types/library_voice_response.py b/src/elevenlabs/types/library_voice_response.py index 1cbfed94..81c09f94 100644 --- a/src/elevenlabs/types/library_voice_response.py +++ b/src/elevenlabs/types/library_voice_response.py @@ -1,47 +1,166 @@ # This file was auto-generated by Fern from our API Definition. from ..core.unchecked_base_model import UncheckedBaseModel +import pydantic from .library_voice_response_model_category import LibraryVoiceResponseModelCategory +import typing import typing_extensions from ..core.serialization import FieldMetadata -import typing from .verified_voice_language_response_model import VerifiedVoiceLanguageResponseModel from ..core.pydantic_utilities import IS_PYDANTIC_V2 -import pydantic class LibraryVoiceResponse(UncheckedBaseModel): - public_owner_id: str - voice_id: str - date_unix: int - name: str - accent: str - gender: str - age: str - descriptive: str - use_case: str - category: LibraryVoiceResponseModelCategory - language: str - description: str - preview_url: str - usage_character_count_1_y: typing_extensions.Annotated[int, FieldMetadata(alias="usage_character_count_1y")] - usage_character_count_7_d: typing_extensions.Annotated[int, FieldMetadata(alias="usage_character_count_7d")] + public_owner_id: str = pydantic.Field() + """ + The public owner id of the voice. 
+ """ + + voice_id: str = pydantic.Field() + """ + The id of the voice. + """ + + date_unix: int = pydantic.Field() + """ + The date the voice was added to the library in Unix time. + """ + + name: str = pydantic.Field() + """ + The name of the voice. + """ + + accent: str = pydantic.Field() + """ + The accent of the voice. + """ + + gender: str = pydantic.Field() + """ + The gender of the voice. + """ + + age: str = pydantic.Field() + """ + The age of the voice. + """ + + descriptive: str = pydantic.Field() + """ + The descriptive of the voice. + """ + + use_case: str = pydantic.Field() + """ + The use case of the voice. + """ + + category: LibraryVoiceResponseModelCategory = pydantic.Field() + """ + The category of the voice. + """ + + language: typing.Optional[str] = pydantic.Field(default=None) + """ + The language of the voice. + """ + + description: typing.Optional[str] = pydantic.Field(default=None) + """ + The description of the voice. + """ + + preview_url: typing.Optional[str] = pydantic.Field(default=None) + """ + The preview URL of the voice. + """ + + usage_character_count_1_y: typing_extensions.Annotated[int, FieldMetadata(alias="usage_character_count_1y")] = ( + pydantic.Field() + ) + """ + The usage character count of the voice in the last year. + """ + + usage_character_count_7_d: typing_extensions.Annotated[int, FieldMetadata(alias="usage_character_count_7d")] = ( + pydantic.Field() + ) + """ + The usage character count of the voice in the last 7 days. + """ + play_api_usage_character_count_1_y: typing_extensions.Annotated[ int, FieldMetadata(alias="play_api_usage_character_count_1y") - ] - cloned_by_count: int - rate: float - free_users_allowed: bool - live_moderation_enabled: bool - featured: bool - verified_languages: typing.Optional[typing.List[VerifiedVoiceLanguageResponseModel]] = None - notice_period: typing.Optional[int] = None - instagram_username: typing.Optional[str] = None - twitter_username: typing.Optional[str] = None - youtube_username: typing.Optional[str] = None - tiktok_username: typing.Optional[str] = None - image_url: typing.Optional[str] = None - is_added_by_user: typing.Optional[bool] = None + ] = pydantic.Field() + """ + The play API usage character count of the voice in the last year. + """ + + cloned_by_count: int = pydantic.Field() + """ + The number of times the voice has been cloned. + """ + + rate: typing.Optional[float] = pydantic.Field(default=None) + """ + The rate of the voice. + """ + + free_users_allowed: bool = pydantic.Field() + """ + Whether free users are allowed to use the voice. + """ + + live_moderation_enabled: bool = pydantic.Field() + """ + Whether live moderation is enabled for the voice. + """ + + featured: bool = pydantic.Field() + """ + Whether the voice is featured. + """ + + verified_languages: typing.Optional[typing.List[VerifiedVoiceLanguageResponseModel]] = pydantic.Field(default=None) + """ + The verified languages of the voice. + """ + + notice_period: typing.Optional[int] = pydantic.Field(default=None) + """ + The notice period of the voice. + """ + + instagram_username: typing.Optional[str] = pydantic.Field(default=None) + """ + The Instagram username of the voice. + """ + + twitter_username: typing.Optional[str] = pydantic.Field(default=None) + """ + The Twitter username of the voice. + """ + + youtube_username: typing.Optional[str] = pydantic.Field(default=None) + """ + The YouTube username of the voice. 
+ """ + + tiktok_username: typing.Optional[str] = pydantic.Field(default=None) + """ + The TikTok username of the voice. + """ + + image_url: typing.Optional[str] = pydantic.Field(default=None) + """ + The image URL of the voice. + """ + + is_added_by_user: typing.Optional[bool] = pydantic.Field(default=None) + """ + Whether the voice was added by the user. + """ if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/elevenlabs/types/llm.py b/src/elevenlabs/types/llm.py index 69d678f6..df8b0f03 100644 --- a/src/elevenlabs/types/llm.py +++ b/src/elevenlabs/types/llm.py @@ -12,8 +12,11 @@ "gemini-1.5-pro", "gemini-1.5-flash", "gemini-2.0-flash-001", + "gemini-2.0-flash-lite", "gemini-1.0-pro", + "claude-3-7-sonnet", "claude-3-5-sonnet", + "claude-3-5-sonnet-v1", "claude-3-haiku", "grok-beta", "custom-llm", diff --git a/src/elevenlabs/types/manual_verification_file_response.py b/src/elevenlabs/types/manual_verification_file_response.py index dcf08552..6fee3ecd 100644 --- a/src/elevenlabs/types/manual_verification_file_response.py +++ b/src/elevenlabs/types/manual_verification_file_response.py @@ -1,17 +1,36 @@ # This file was auto-generated by Fern from our API Definition. from ..core.unchecked_base_model import UncheckedBaseModel +import pydantic from ..core.pydantic_utilities import IS_PYDANTIC_V2 import typing -import pydantic class ManualVerificationFileResponse(UncheckedBaseModel): - file_id: str - file_name: str - mime_type: str - size_bytes: int - upload_date_unix: int + file_id: str = pydantic.Field() + """ + The ID of the file. + """ + + file_name: str = pydantic.Field() + """ + The name of the file. + """ + + mime_type: str = pydantic.Field() + """ + The MIME type of the file. + """ + + size_bytes: int = pydantic.Field() + """ + The size of the file in bytes. + """ + + upload_date_unix: int = pydantic.Field() + """ + The date of the file in Unix time. + """ if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/elevenlabs/types/manual_verification_response.py b/src/elevenlabs/types/manual_verification_response.py index 1f92302c..a85e4d92 100644 --- a/src/elevenlabs/types/manual_verification_response.py +++ b/src/elevenlabs/types/manual_verification_response.py @@ -1,16 +1,27 @@ # This file was auto-generated by Fern from our API Definition. from ..core.unchecked_base_model import UncheckedBaseModel +import pydantic import typing from .manual_verification_file_response import ManualVerificationFileResponse from ..core.pydantic_utilities import IS_PYDANTIC_V2 -import pydantic class ManualVerificationResponse(UncheckedBaseModel): - extra_text: str - request_time_unix: int - files: typing.List[ManualVerificationFileResponse] + extra_text: str = pydantic.Field() + """ + The extra text of the manual verification. + """ + + request_time_unix: int = pydantic.Field() + """ + The date of the manual verification in Unix time. + """ + + files: typing.List[ManualVerificationFileResponse] = pydantic.Field() + """ + The files of the manual verification. 
+ """ if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/elevenlabs/types/podcast_bulletin_mode.py b/src/elevenlabs/types/podcast_bulletin_mode.py index 956aefb1..270f8490 100644 --- a/src/elevenlabs/types/podcast_bulletin_mode.py +++ b/src/elevenlabs/types/podcast_bulletin_mode.py @@ -2,13 +2,16 @@ from ..core.unchecked_base_model import UncheckedBaseModel from .podcast_bulletin_mode_data import PodcastBulletinModeData +import pydantic from ..core.pydantic_utilities import IS_PYDANTIC_V2 import typing -import pydantic class PodcastBulletinMode(UncheckedBaseModel): - bulletin: PodcastBulletinModeData + bulletin: PodcastBulletinModeData = pydantic.Field() + """ + The voice settings for the bulletin. + """ if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/elevenlabs/types/podcast_bulletin_mode_data.py b/src/elevenlabs/types/podcast_bulletin_mode_data.py index 5d5391e3..2bd0766c 100644 --- a/src/elevenlabs/types/podcast_bulletin_mode_data.py +++ b/src/elevenlabs/types/podcast_bulletin_mode_data.py @@ -1,13 +1,16 @@ # This file was auto-generated by Fern from our API Definition. from ..core.unchecked_base_model import UncheckedBaseModel +import pydantic from ..core.pydantic_utilities import IS_PYDANTIC_V2 import typing -import pydantic class PodcastBulletinModeData(UncheckedBaseModel): - host_voice_id: str + host_voice_id: str = pydantic.Field() + """ + The ID of the host voice. + """ if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/elevenlabs/types/podcast_conversation_mode.py b/src/elevenlabs/types/podcast_conversation_mode.py index 0beb1f3f..ed013a27 100644 --- a/src/elevenlabs/types/podcast_conversation_mode.py +++ b/src/elevenlabs/types/podcast_conversation_mode.py @@ -2,13 +2,16 @@ from ..core.unchecked_base_model import UncheckedBaseModel from .podcast_conversation_mode_data import PodcastConversationModeData +import pydantic from ..core.pydantic_utilities import IS_PYDANTIC_V2 import typing -import pydantic class PodcastConversationMode(UncheckedBaseModel): - conversation: PodcastConversationModeData + conversation: PodcastConversationModeData = pydantic.Field() + """ + The voice settings for the conversation. + """ if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/elevenlabs/types/podcast_conversation_mode_data.py b/src/elevenlabs/types/podcast_conversation_mode_data.py index 188fa229..2273f5f9 100644 --- a/src/elevenlabs/types/podcast_conversation_mode_data.py +++ b/src/elevenlabs/types/podcast_conversation_mode_data.py @@ -1,14 +1,21 @@ # This file was auto-generated by Fern from our API Definition. from ..core.unchecked_base_model import UncheckedBaseModel +import pydantic from ..core.pydantic_utilities import IS_PYDANTIC_V2 import typing -import pydantic class PodcastConversationModeData(UncheckedBaseModel): - host_voice_id: str - guest_voice_id: str + host_voice_id: str = pydantic.Field() + """ + The ID of the host voice. + """ + + guest_voice_id: str = pydantic.Field() + """ + The ID of the guest voice. 
+ """ if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/elevenlabs/types/podcast_project_response_model.py b/src/elevenlabs/types/podcast_project_response_model.py index 88030016..53f028a7 100644 --- a/src/elevenlabs/types/podcast_project_response_model.py +++ b/src/elevenlabs/types/podcast_project_response_model.py @@ -2,13 +2,16 @@ from ..core.unchecked_base_model import UncheckedBaseModel from .project_response import ProjectResponse +import pydantic from ..core.pydantic_utilities import IS_PYDANTIC_V2 import typing -import pydantic class PodcastProjectResponseModel(UncheckedBaseModel): - project: ProjectResponse + project: ProjectResponse = pydantic.Field() + """ + The project associated with the created podcast. + """ if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/elevenlabs/types/podcast_text_source.py b/src/elevenlabs/types/podcast_text_source.py index ff23cab1..b6f0d077 100644 --- a/src/elevenlabs/types/podcast_text_source.py +++ b/src/elevenlabs/types/podcast_text_source.py @@ -1,13 +1,16 @@ # This file was auto-generated by Fern from our API Definition. from ..core.unchecked_base_model import UncheckedBaseModel +import pydantic from ..core.pydantic_utilities import IS_PYDANTIC_V2 import typing -import pydantic class PodcastTextSource(UncheckedBaseModel): - text: str + text: str = pydantic.Field() + """ + The text to create the podcast from. + """ if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/elevenlabs/types/podcast_url_source.py b/src/elevenlabs/types/podcast_url_source.py index 70255e3a..c49abedd 100644 --- a/src/elevenlabs/types/podcast_url_source.py +++ b/src/elevenlabs/types/podcast_url_source.py @@ -1,13 +1,16 @@ # This file was auto-generated by Fern from our API Definition. from ..core.unchecked_base_model import UncheckedBaseModel +import pydantic from ..core.pydantic_utilities import IS_PYDANTIC_V2 import typing -import pydantic class PodcastUrlSource(UncheckedBaseModel): - url: str + url: str = pydantic.Field() + """ + The URL to create the podcast from. + """ if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/elevenlabs/types/project_creation_meta_response_model.py b/src/elevenlabs/types/project_creation_meta_response_model.py index fb16cd92..36764b63 100644 --- a/src/elevenlabs/types/project_creation_meta_response_model.py +++ b/src/elevenlabs/types/project_creation_meta_response_model.py @@ -1,17 +1,28 @@ # This file was auto-generated by Fern from our API Definition. from ..core.unchecked_base_model import UncheckedBaseModel +import pydantic from .project_creation_meta_response_model_status import ProjectCreationMetaResponseModelStatus from .project_creation_meta_response_model_type import ProjectCreationMetaResponseModelType from ..core.pydantic_utilities import IS_PYDANTIC_V2 import typing -import pydantic class ProjectCreationMetaResponseModel(UncheckedBaseModel): - creation_progress: float - status: ProjectCreationMetaResponseModelStatus - type: ProjectCreationMetaResponseModelType + creation_progress: float = pydantic.Field() + """ + The progress of the project creation. 
+ """ + + status: ProjectCreationMetaResponseModelStatus = pydantic.Field() + """ + The status of the project creation action. + """ + + type: ProjectCreationMetaResponseModelType = pydantic.Field() + """ + The type of the project creation action. + """ if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/elevenlabs/types/project_extended_response_model.py b/src/elevenlabs/types/project_extended_response_model.py index b40a4039..9582d2b8 100644 --- a/src/elevenlabs/types/project_extended_response_model.py +++ b/src/elevenlabs/types/project_extended_response_model.py @@ -1,52 +1,181 @@ # This file was auto-generated by Fern from our API Definition. from ..core.unchecked_base_model import UncheckedBaseModel +import pydantic import typing from .project_extended_response_model_target_audience import ProjectExtendedResponseModelTargetAudience from .project_state import ProjectState from .project_extended_response_model_access_level import ProjectExtendedResponseModelAccessLevel from .project_extended_response_model_fiction import ProjectExtendedResponseModelFiction from .project_creation_meta_response_model import ProjectCreationMetaResponseModel +from .project_extended_response_model_source_type import ProjectExtendedResponseModelSourceType from .project_extended_response_model_quality_preset import ProjectExtendedResponseModelQualityPreset from .chapter_response import ChapterResponse from .pronunciation_dictionary_version_response_model import PronunciationDictionaryVersionResponseModel from .project_extended_response_model_apply_text_normalization import ProjectExtendedResponseModelApplyTextNormalization from ..core.pydantic_utilities import IS_PYDANTIC_V2 -import pydantic class ProjectExtendedResponseModel(UncheckedBaseModel): - project_id: str - name: str - create_date_unix: int - default_title_voice_id: str - default_paragraph_voice_id: str - default_model_id: str - last_conversion_date_unix: typing.Optional[int] = None - can_be_downloaded: bool - title: typing.Optional[str] = None - author: typing.Optional[str] = None - description: typing.Optional[str] = None - genres: typing.Optional[typing.List[str]] = None - cover_image_url: typing.Optional[str] = None - target_audience: typing.Optional[ProjectExtendedResponseModelTargetAudience] = None - language: typing.Optional[str] = None - content_type: typing.Optional[str] = None - original_publication_date: typing.Optional[str] = None - mature_content: typing.Optional[bool] = None - isbn_number: typing.Optional[str] = None - volume_normalization: bool - state: ProjectState - access_level: ProjectExtendedResponseModelAccessLevel - fiction: typing.Optional[ProjectExtendedResponseModelFiction] = None - quality_check_on: bool - quality_check_on_when_bulk_convert: bool - creation_meta: typing.Optional[ProjectCreationMetaResponseModel] = None - quality_preset: ProjectExtendedResponseModelQualityPreset - chapters: typing.List[ChapterResponse] - pronunciation_dictionary_versions: typing.List[PronunciationDictionaryVersionResponseModel] - apply_text_normalization: ProjectExtendedResponseModelApplyTextNormalization - experimental: typing.Dict[str, typing.Optional[typing.Any]] + project_id: str = pydantic.Field() + """ + The ID of the project. + """ + + name: str = pydantic.Field() + """ + The name of the project. + """ + + create_date_unix: int = pydantic.Field() + """ + The creation date of the project. 
+ """ + + default_title_voice_id: str = pydantic.Field() + """ + The default title voice ID. + """ + + default_paragraph_voice_id: str = pydantic.Field() + """ + The default paragraph voice ID. + """ + + default_model_id: str = pydantic.Field() + """ + The default model ID. + """ + + last_conversion_date_unix: typing.Optional[int] = pydantic.Field(default=None) + """ + The last conversion date of the project. + """ + + can_be_downloaded: bool = pydantic.Field() + """ + Whether the project can be downloaded. + """ + + title: typing.Optional[str] = pydantic.Field(default=None) + """ + The title of the project. + """ + + author: typing.Optional[str] = pydantic.Field(default=None) + """ + The author of the project. + """ + + description: typing.Optional[str] = pydantic.Field(default=None) + """ + The description of the project. + """ + + genres: typing.Optional[typing.List[str]] = pydantic.Field(default=None) + """ + List of genres of the project. + """ + + cover_image_url: typing.Optional[str] = pydantic.Field(default=None) + """ + The cover image URL of the project. + """ + + target_audience: typing.Optional[ProjectExtendedResponseModelTargetAudience] = pydantic.Field(default=None) + """ + The target audience of the project. + """ + + language: typing.Optional[str] = pydantic.Field(default=None) + """ + Two-letter language code (ISO 639-1) of the language of the project. + """ + + content_type: typing.Optional[str] = pydantic.Field(default=None) + """ + The content type of the project, e.g. 'Novel' or 'Short Story' + """ + + original_publication_date: typing.Optional[str] = pydantic.Field(default=None) + """ + The original publication date of the project. + """ + + mature_content: typing.Optional[bool] = pydantic.Field(default=None) + """ + Whether the project contains mature content. + """ + + isbn_number: typing.Optional[str] = pydantic.Field(default=None) + """ + The ISBN number of the project. + """ + + volume_normalization: bool = pydantic.Field() + """ + Whether the project uses volume normalization. + """ + + state: ProjectState = pydantic.Field() + """ + The state of the project. + """ + + access_level: ProjectExtendedResponseModelAccessLevel = pydantic.Field() + """ + The access level of the project. + """ + + fiction: typing.Optional[ProjectExtendedResponseModelFiction] = pydantic.Field(default=None) + """ + Whether the project is fiction. + """ + + quality_check_on: bool = pydantic.Field() + """ + Whether quality check is enabled for this project. + """ + + quality_check_on_when_bulk_convert: bool = pydantic.Field() + """ + Whether quality check is enabled on the project when bulk converting. + """ + + creation_meta: typing.Optional[ProjectCreationMetaResponseModel] = pydantic.Field(default=None) + """ + The creation meta of the project. + """ + + source_type: typing.Optional[ProjectExtendedResponseModelSourceType] = pydantic.Field(default=None) + """ + The source type of the project. + """ + + quality_preset: ProjectExtendedResponseModelQualityPreset = pydantic.Field() + """ + The quality preset level of the project. + """ + + chapters: typing.List[ChapterResponse] = pydantic.Field() + """ + List of chapters of the project and their metadata. + """ + + pronunciation_dictionary_versions: typing.List[PronunciationDictionaryVersionResponseModel] = pydantic.Field() + """ + List of pronunciation dictionary versions of the project and their metadata. 
+ """ + + apply_text_normalization: ProjectExtendedResponseModelApplyTextNormalization = pydantic.Field() + """ + Whether text normalization is applied to the project. + """ + + experimental: typing.Dict[str, typing.Optional[typing.Any]] = pydantic.Field() + """ + Experimental features of the project. + """ if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/elevenlabs/types/project_extended_response_model_source_type.py b/src/elevenlabs/types/project_extended_response_model_source_type.py new file mode 100644 index 00000000..9abb8c76 --- /dev/null +++ b/src/elevenlabs/types/project_extended_response_model_source_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +ProjectExtendedResponseModelSourceType = typing.Union[typing.Literal["blank", "book", "article", "genfm"], typing.Any] diff --git a/src/elevenlabs/types/project_response.py b/src/elevenlabs/types/project_response.py index 23936047..ca74ec14 100644 --- a/src/elevenlabs/types/project_response.py +++ b/src/elevenlabs/types/project_response.py @@ -1,43 +1,152 @@ # This file was auto-generated by Fern from our API Definition. from ..core.unchecked_base_model import UncheckedBaseModel +import pydantic import typing from .project_response_model_target_audience import ProjectResponseModelTargetAudience from .project_state import ProjectState from .project_response_model_access_level import ProjectResponseModelAccessLevel from .project_response_model_fiction import ProjectResponseModelFiction from .project_creation_meta_response_model import ProjectCreationMetaResponseModel +from .project_response_model_source_type import ProjectResponseModelSourceType from ..core.pydantic_utilities import IS_PYDANTIC_V2 -import pydantic class ProjectResponse(UncheckedBaseModel): - project_id: str - name: str - create_date_unix: int - default_title_voice_id: str - default_paragraph_voice_id: str - default_model_id: str - last_conversion_date_unix: typing.Optional[int] = None - can_be_downloaded: bool - title: typing.Optional[str] = None - author: typing.Optional[str] = None - description: typing.Optional[str] = None - genres: typing.Optional[typing.List[str]] = None - cover_image_url: typing.Optional[str] = None - target_audience: typing.Optional[ProjectResponseModelTargetAudience] = None - language: typing.Optional[str] = None - content_type: typing.Optional[str] = None - original_publication_date: typing.Optional[str] = None - mature_content: typing.Optional[bool] = None - isbn_number: typing.Optional[str] = None - volume_normalization: bool - state: ProjectState - access_level: ProjectResponseModelAccessLevel - fiction: typing.Optional[ProjectResponseModelFiction] = None - quality_check_on: bool - quality_check_on_when_bulk_convert: bool - creation_meta: typing.Optional[ProjectCreationMetaResponseModel] = None + project_id: str = pydantic.Field() + """ + The ID of the project. + """ + + name: str = pydantic.Field() + """ + The name of the project. + """ + + create_date_unix: int = pydantic.Field() + """ + The creation date of the project. + """ + + default_title_voice_id: str = pydantic.Field() + """ + The default title voice ID. + """ + + default_paragraph_voice_id: str = pydantic.Field() + """ + The default paragraph voice ID. + """ + + default_model_id: str = pydantic.Field() + """ + The default model ID. 
+ """ + + last_conversion_date_unix: typing.Optional[int] = pydantic.Field(default=None) + """ + The last conversion date of the project. + """ + + can_be_downloaded: bool = pydantic.Field() + """ + Whether the project can be downloaded. + """ + + title: typing.Optional[str] = pydantic.Field(default=None) + """ + The title of the project. + """ + + author: typing.Optional[str] = pydantic.Field(default=None) + """ + The author of the project. + """ + + description: typing.Optional[str] = pydantic.Field(default=None) + """ + The description of the project. + """ + + genres: typing.Optional[typing.List[str]] = pydantic.Field(default=None) + """ + List of genres of the project. + """ + + cover_image_url: typing.Optional[str] = pydantic.Field(default=None) + """ + The cover image URL of the project. + """ + + target_audience: typing.Optional[ProjectResponseModelTargetAudience] = pydantic.Field(default=None) + """ + The target audience of the project. + """ + + language: typing.Optional[str] = pydantic.Field(default=None) + """ + Two-letter language code (ISO 639-1) of the language of the project. + """ + + content_type: typing.Optional[str] = pydantic.Field(default=None) + """ + The content type of the project, e.g. 'Novel' or 'Short Story' + """ + + original_publication_date: typing.Optional[str] = pydantic.Field(default=None) + """ + The original publication date of the project. + """ + + mature_content: typing.Optional[bool] = pydantic.Field(default=None) + """ + Whether the project contains mature content. + """ + + isbn_number: typing.Optional[str] = pydantic.Field(default=None) + """ + The ISBN number of the project. + """ + + volume_normalization: bool = pydantic.Field() + """ + Whether the project uses volume normalization. + """ + + state: ProjectState = pydantic.Field() + """ + The state of the project. + """ + + access_level: ProjectResponseModelAccessLevel = pydantic.Field() + """ + The access level of the project. + """ + + fiction: typing.Optional[ProjectResponseModelFiction] = pydantic.Field(default=None) + """ + Whether the project is fiction. + """ + + quality_check_on: bool = pydantic.Field() + """ + Whether quality check is enabled for this project. + """ + + quality_check_on_when_bulk_convert: bool = pydantic.Field() + """ + Whether quality check is enabled on the project when bulk converting. + """ + + creation_meta: typing.Optional[ProjectCreationMetaResponseModel] = pydantic.Field(default=None) + """ + The creation meta of the project. + """ + + source_type: typing.Optional[ProjectResponseModelSourceType] = pydantic.Field(default=None) + """ + The source type of the project. + """ if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/elevenlabs/types/project_response_model_source_type.py b/src/elevenlabs/types/project_response_model_source_type.py new file mode 100644 index 00000000..63c40537 --- /dev/null +++ b/src/elevenlabs/types/project_response_model_source_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +ProjectResponseModelSourceType = typing.Union[typing.Literal["blank", "book", "article", "genfm"], typing.Any] diff --git a/src/elevenlabs/types/project_snapshot_extended_response_model.py b/src/elevenlabs/types/project_snapshot_extended_response_model.py new file mode 100644 index 00000000..fe29d1e5 --- /dev/null +++ b/src/elevenlabs/types/project_snapshot_extended_response_model.py @@ -0,0 +1,51 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +import pydantic +import typing +from .project_snapshot_upload_response_model import ProjectSnapshotUploadResponseModel +from .character_alignment_model import CharacterAlignmentModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 + + +class ProjectSnapshotExtendedResponseModel(UncheckedBaseModel): + project_snapshot_id: str = pydantic.Field() + """ + The ID of the project snapshot. + """ + + project_id: str = pydantic.Field() + """ + The ID of the project. + """ + + created_at_unix: int = pydantic.Field() + """ + The creation date of the project snapshot. + """ + + name: str = pydantic.Field() + """ + The name of the project snapshot. + """ + + audio_upload: typing.Optional[ProjectSnapshotUploadResponseModel] = pydantic.Field(default=None) + """ + The audio upload of the project snapshot. + """ + + zip_upload: typing.Optional[ProjectSnapshotUploadResponseModel] = pydantic.Field(default=None) + """ + The zip upload of the project snapshot. + """ + + character_alignments: typing.List[CharacterAlignmentModel] + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/project_snapshot_response.py b/src/elevenlabs/types/project_snapshot_response.py index c11a98da..7a33b8f2 100644 --- a/src/elevenlabs/types/project_snapshot_response.py +++ b/src/elevenlabs/types/project_snapshot_response.py @@ -1,19 +1,42 @@ # This file was auto-generated by Fern from our API Definition. from ..core.unchecked_base_model import UncheckedBaseModel +import pydantic import typing from .project_snapshot_upload_response_model import ProjectSnapshotUploadResponseModel from ..core.pydantic_utilities import IS_PYDANTIC_V2 -import pydantic class ProjectSnapshotResponse(UncheckedBaseModel): - project_snapshot_id: str - project_id: str - created_at_unix: int - name: str - audio_upload: typing.Optional[ProjectSnapshotUploadResponseModel] = None - zip_upload: typing.Optional[ProjectSnapshotUploadResponseModel] = None + project_snapshot_id: str = pydantic.Field() + """ + The ID of the project snapshot. + """ + + project_id: str = pydantic.Field() + """ + The ID of the project. + """ + + created_at_unix: int = pydantic.Field() + """ + The creation date of the project snapshot. + """ + + name: str = pydantic.Field() + """ + The name of the project snapshot. + """ + + audio_upload: typing.Optional[ProjectSnapshotUploadResponseModel] = pydantic.Field(default=None) + """ + The audio upload of the project snapshot. + """ + + zip_upload: typing.Optional[ProjectSnapshotUploadResponseModel] = pydantic.Field(default=None) + """ + The zip upload of the project snapshot. 
+ """ if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/elevenlabs/types/project_snapshot_upload_response_model.py b/src/elevenlabs/types/project_snapshot_upload_response_model.py index c19ca85c..80f60c54 100644 --- a/src/elevenlabs/types/project_snapshot_upload_response_model.py +++ b/src/elevenlabs/types/project_snapshot_upload_response_model.py @@ -2,14 +2,21 @@ from ..core.unchecked_base_model import UncheckedBaseModel from .project_snapshot_upload_response_model_status import ProjectSnapshotUploadResponseModelStatus +import pydantic import typing from ..core.pydantic_utilities import IS_PYDANTIC_V2 -import pydantic class ProjectSnapshotUploadResponseModel(UncheckedBaseModel): - status: ProjectSnapshotUploadResponseModelStatus - acx_volume_normalization: typing.Optional[bool] = None + status: ProjectSnapshotUploadResponseModelStatus = pydantic.Field() + """ + The status of the snapshot upload. + """ + + acx_volume_normalization: typing.Optional[bool] = pydantic.Field(default=None) + """ + Whether volume normalization was applied to the snapshot. + """ if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/elevenlabs/types/project_snapshots_response.py b/src/elevenlabs/types/project_snapshots_response.py index 44dab723..c7d61724 100644 --- a/src/elevenlabs/types/project_snapshots_response.py +++ b/src/elevenlabs/types/project_snapshots_response.py @@ -3,12 +3,15 @@ from ..core.unchecked_base_model import UncheckedBaseModel import typing from .project_snapshot_response import ProjectSnapshotResponse -from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 class ProjectSnapshotsResponse(UncheckedBaseModel): - snapshots: typing.List[ProjectSnapshotResponse] + snapshots: typing.List[ProjectSnapshotResponse] = pydantic.Field() + """ + List of project snapshots. 
+ """ if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/elevenlabs/types/prompt_agent.py b/src/elevenlabs/types/prompt_agent.py index 352280c1..6a2f1ef9 100644 --- a/src/elevenlabs/types/prompt_agent.py +++ b/src/elevenlabs/types/prompt_agent.py @@ -9,6 +9,7 @@ from .prompt_agent_tools_item import PromptAgentToolsItem from .knowledge_base_locator import KnowledgeBaseLocator from .custom_llm import CustomLlm +from .rag_config import RagConfig from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic from ..core.pydantic_utilities import update_forward_refs @@ -22,8 +23,8 @@ class PromptAgent(UncheckedBaseModel): tools: typing.Optional[typing.List[PromptAgentToolsItem]] = None tool_ids: typing.Optional[typing.List[str]] = None knowledge_base: typing.Optional[typing.List[KnowledgeBaseLocator]] = None - knowledge_base_document_ids: typing.Optional[typing.List[str]] = None custom_llm: typing.Optional[CustomLlm] = None + rag: typing.Optional[RagConfig] = None if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/elevenlabs/types/prompt_agent_tools_item.py b/src/elevenlabs/types/prompt_agent_tools_item.py index 32fcdf81..1dbfeb4c 100644 --- a/src/elevenlabs/types/prompt_agent_tools_item.py +++ b/src/elevenlabs/types/prompt_agent_tools_item.py @@ -6,6 +6,7 @@ from .object_json_schema_property import ObjectJsonSchemaProperty import typing from .webhook_tool_api_schema_config import WebhookToolApiSchemaConfig +from .dynamic_variables_config import DynamicVariablesConfig from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic import typing_extensions @@ -18,6 +19,7 @@ class PromptAgentToolsItem_Webhook(UncheckedBaseModel): name: str description: str api_schema: WebhookToolApiSchemaConfig + dynamic_variables: typing.Optional[DynamicVariablesConfig] = None if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 @@ -36,6 +38,7 @@ class PromptAgentToolsItem_Client(UncheckedBaseModel): parameters: typing.Optional[ObjectJsonSchemaProperty] = None expects_response: typing.Optional[bool] = None response_timeout_secs: typing.Optional[int] = None + dynamic_variables: typing.Optional[DynamicVariablesConfig] = None if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/elevenlabs/types/pronunciation_dictionary_alias_rule_request_model.py b/src/elevenlabs/types/pronunciation_dictionary_alias_rule_request_model.py index f9e64249..1d324c3a 100644 --- a/src/elevenlabs/types/pronunciation_dictionary_alias_rule_request_model.py +++ b/src/elevenlabs/types/pronunciation_dictionary_alias_rule_request_model.py @@ -1,14 +1,21 @@ # This file was auto-generated by Fern from our API Definition. from ..core.unchecked_base_model import UncheckedBaseModel +import pydantic from ..core.pydantic_utilities import IS_PYDANTIC_V2 import typing -import pydantic class PronunciationDictionaryAliasRuleRequestModel(UncheckedBaseModel): - string_to_replace: str - alias: str + string_to_replace: str = pydantic.Field() + """ + The string to replace. Must be a non-empty string. + """ + + alias: str = pydantic.Field() + """ + The alias for the string to be replaced. 
+ """ if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/elevenlabs/types/pronunciation_dictionary_phoneme_rule_request_model.py b/src/elevenlabs/types/pronunciation_dictionary_phoneme_rule_request_model.py index ed2b7faf..273495ff 100644 --- a/src/elevenlabs/types/pronunciation_dictionary_phoneme_rule_request_model.py +++ b/src/elevenlabs/types/pronunciation_dictionary_phoneme_rule_request_model.py @@ -1,15 +1,26 @@ # This file was auto-generated by Fern from our API Definition. from ..core.unchecked_base_model import UncheckedBaseModel +import pydantic from ..core.pydantic_utilities import IS_PYDANTIC_V2 import typing -import pydantic class PronunciationDictionaryPhonemeRuleRequestModel(UncheckedBaseModel): - string_to_replace: str - phoneme: str - alphabet: str + string_to_replace: str = pydantic.Field() + """ + The string to replace. Must be a non-empty string. + """ + + phoneme: str = pydantic.Field() + """ + The phoneme rule. + """ + + alphabet: str = pydantic.Field() + """ + The alphabet to use with the phoneme rule. + """ if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/elevenlabs/types/quote_response_model.py b/src/elevenlabs/types/quote_response_model.py deleted file mode 100644 index c0f749b3..00000000 --- a/src/elevenlabs/types/quote_response_model.py +++ /dev/null @@ -1,24 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from ..core.unchecked_base_model import UncheckedBaseModel -import datetime as dt -from .quote_request_model import QuoteRequestModel -from ..core.pydantic_utilities import IS_PYDANTIC_V2 -import typing -import pydantic - - -class QuoteResponseModel(UncheckedBaseModel): - quote_usd: float - valid_until: dt.datetime - request: QuoteRequestModel - quote_token: str - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/rag_config.py b/src/elevenlabs/types/rag_config.py new file mode 100644 index 00000000..30716bdf --- /dev/null +++ b/src/elevenlabs/types/rag_config.py @@ -0,0 +1,30 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +import typing +from .embedding_model_enum import EmbeddingModelEnum +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 + + +class RagConfig(UncheckedBaseModel): + enabled: typing.Optional[bool] = None + embedding_model: typing.Optional[EmbeddingModelEnum] = None + max_vector_distance: typing.Optional[float] = pydantic.Field(default=None) + """ + Maximum vector distance of retrieved chunks. + """ + + max_documents_length: typing.Optional[int] = pydantic.Field(default=None) + """ + Maximum total length of document chunks retrieved from RAG. 
+ """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/rag_index_response_model.py b/src/elevenlabs/types/rag_index_response_model.py new file mode 100644 index 00000000..22416b17 --- /dev/null +++ b/src/elevenlabs/types/rag_index_response_model.py @@ -0,0 +1,21 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +from .rag_index_status import RagIndexStatus +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class RagIndexResponseModel(UncheckedBaseModel): + status: RagIndexStatus + progress_percentage: float + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/rag_index_status.py b/src/elevenlabs/types/rag_index_status.py new file mode 100644 index 00000000..14893bda --- /dev/null +++ b/src/elevenlabs/types/rag_index_status.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +RagIndexStatus = typing.Union[typing.Literal["created", "processing", "failed", "succeeded"], typing.Any] diff --git a/src/elevenlabs/types/reader_resource_response_model.py b/src/elevenlabs/types/reader_resource_response_model.py index e98b7096..8a5ad70a 100644 --- a/src/elevenlabs/types/reader_resource_response_model.py +++ b/src/elevenlabs/types/reader_resource_response_model.py @@ -2,14 +2,21 @@ from ..core.unchecked_base_model import UncheckedBaseModel from .reader_resource_response_model_resource_type import ReaderResourceResponseModelResourceType +import pydantic from ..core.pydantic_utilities import IS_PYDANTIC_V2 import typing -import pydantic class ReaderResourceResponseModel(UncheckedBaseModel): - resource_type: ReaderResourceResponseModelResourceType - resource_id: str + resource_type: ReaderResourceResponseModelResourceType = pydantic.Field() + """ + The type of resource. + """ + + resource_id: str = pydantic.Field() + """ + The ID of the resource. + """ if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/elevenlabs/types/recording_response.py b/src/elevenlabs/types/recording_response.py index 03034c0e..b53de91a 100644 --- a/src/elevenlabs/types/recording_response.py +++ b/src/elevenlabs/types/recording_response.py @@ -1,17 +1,36 @@ # This file was auto-generated by Fern from our API Definition. from ..core.unchecked_base_model import UncheckedBaseModel +import pydantic from ..core.pydantic_utilities import IS_PYDANTIC_V2 import typing -import pydantic class RecordingResponse(UncheckedBaseModel): - recording_id: str - mime_type: str - size_bytes: int - upload_date_unix: int - transcription: str + recording_id: str = pydantic.Field() + """ + The ID of the recording. + """ + + mime_type: str = pydantic.Field() + """ + The MIME type of the recording. + """ + + size_bytes: int = pydantic.Field() + """ + The size of the recording in bytes. + """ + + upload_date_unix: int = pydantic.Field() + """ + The date of the recording in Unix time. 
+ """ + + transcription: str = pydantic.Field() + """ + The transcription of the recording. + """ if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/elevenlabs/types/remove_pronunciation_dictionary_rules_response_model.py b/src/elevenlabs/types/remove_pronunciation_dictionary_rules_response_model.py index d2f4fe3d..d211d378 100644 --- a/src/elevenlabs/types/remove_pronunciation_dictionary_rules_response_model.py +++ b/src/elevenlabs/types/remove_pronunciation_dictionary_rules_response_model.py @@ -1,14 +1,21 @@ # This file was auto-generated by Fern from our API Definition. from ..core.unchecked_base_model import UncheckedBaseModel +import pydantic from ..core.pydantic_utilities import IS_PYDANTIC_V2 import typing -import pydantic class RemovePronunciationDictionaryRulesResponseModel(UncheckedBaseModel): - id: str - version_id: str + id: str = pydantic.Field() + """ + The ID of the pronunciation dictionary. + """ + + version_id: str = pydantic.Field() + """ + The version ID of the pronunciation dictionary. + """ if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/elevenlabs/types/review_state.py b/src/elevenlabs/types/review_state.py deleted file mode 100644 index 721cb591..00000000 --- a/src/elevenlabs/types/review_state.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -ReviewState = typing.Union[ - typing.Literal["unclaimed", "claimed", "submitted", "done", "rejected", "in_progress"], typing.Any -] diff --git a/src/elevenlabs/types/tag_model.py b/src/elevenlabs/types/segment_create_response.py similarity index 83% rename from src/elevenlabs/types/tag_model.py rename to src/elevenlabs/types/segment_create_response.py index 283fd3f4..13084f21 100644 --- a/src/elevenlabs/types/tag_model.py +++ b/src/elevenlabs/types/segment_create_response.py @@ -1,15 +1,14 @@ # This file was auto-generated by Fern from our API Definition. 
from ..core.unchecked_base_model import UncheckedBaseModel -from .tag_kind import TagKind from ..core.pydantic_utilities import IS_PYDANTIC_V2 import typing import pydantic -class TagModel(UncheckedBaseModel): - kind: TagKind - value: str +class SegmentCreateResponse(UncheckedBaseModel): + version: int + new_segment: str if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/elevenlabs/types/conv_ai_stored_secret_config.py b/src/elevenlabs/types/segment_delete_response.py similarity index 85% rename from src/elevenlabs/types/conv_ai_stored_secret_config.py rename to src/elevenlabs/types/segment_delete_response.py index 316c978d..4f3642ee 100644 --- a/src/elevenlabs/types/conv_ai_stored_secret_config.py +++ b/src/elevenlabs/types/segment_delete_response.py @@ -6,9 +6,8 @@ import pydantic -class ConvAiStoredSecretConfig(UncheckedBaseModel): - secret_id: str - name: str +class SegmentDeleteResponse(UncheckedBaseModel): + version: int if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/elevenlabs/types/quote_request_model.py b/src/elevenlabs/types/segment_dub_response.py similarity index 79% rename from src/elevenlabs/types/quote_request_model.py rename to src/elevenlabs/types/segment_dub_response.py index 43dff3b0..97c09f7e 100644 --- a/src/elevenlabs/types/quote_request_model.py +++ b/src/elevenlabs/types/segment_dub_response.py @@ -6,11 +6,8 @@ import pydantic -class QuoteRequestModel(UncheckedBaseModel): - content_hash: str - duration_s: float - speaker_count: int - language: str +class SegmentDubResponse(UncheckedBaseModel): + version: int if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/elevenlabs/types/segment_transcription_response.py b/src/elevenlabs/types/segment_transcription_response.py new file mode 100644 index 00000000..de02ca22 --- /dev/null +++ b/src/elevenlabs/types/segment_transcription_response.py @@ -0,0 +1,19 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class SegmentTranscriptionResponse(UncheckedBaseModel): + version: int + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/segment_translation_response.py b/src/elevenlabs/types/segment_translation_response.py new file mode 100644 index 00000000..c88724c9 --- /dev/null +++ b/src/elevenlabs/types/segment_translation_response.py @@ -0,0 +1,19 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.unchecked_base_model import UncheckedBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class SegmentTranslationResponse(UncheckedBaseModel): + version: int + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/segment_update_response.py b/src/elevenlabs/types/segment_update_response.py new file mode 100644 index 00000000..1fba5bd5 --- /dev/null +++ b/src/elevenlabs/types/segment_update_response.py @@ -0,0 +1,19 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class SegmentUpdateResponse(UncheckedBaseModel): + version: int + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/speaker_segment.py b/src/elevenlabs/types/speaker_segment.py new file mode 100644 index 00000000..9a147dfa --- /dev/null +++ b/src/elevenlabs/types/speaker_segment.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +import typing +from .dubbed_segment import DubbedSegment +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class SpeakerSegment(UncheckedBaseModel): + id: str + start_time: float + end_time: float + text: str + dubs: typing.Dict[str, DubbedSegment] + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/speaker_track.py b/src/elevenlabs/types/speaker_track.py new file mode 100644 index 00000000..57dbd5eb --- /dev/null +++ b/src/elevenlabs/types/speaker_track.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.unchecked_base_model import UncheckedBaseModel +from .dubbing_media_reference import DubbingMediaReference +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class SpeakerTrack(UncheckedBaseModel): + id: str + media_ref: DubbingMediaReference + speaker_name: str + segments: typing.List[str] + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/speech_to_text_character_response_model.py b/src/elevenlabs/types/speech_to_text_character_response_model.py index c3caec22..5fd02a66 100644 --- a/src/elevenlabs/types/speech_to_text_character_response_model.py +++ b/src/elevenlabs/types/speech_to_text_character_response_model.py @@ -2,8 +2,8 @@ from ..core.unchecked_base_model import UncheckedBaseModel import pydantic -from ..core.pydantic_utilities import IS_PYDANTIC_V2 import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 class SpeechToTextCharacterResponseModel(UncheckedBaseModel): @@ -12,12 +12,12 @@ class SpeechToTextCharacterResponseModel(UncheckedBaseModel): The character that was transcribed. """ - start: float = pydantic.Field() + start: typing.Optional[float] = pydantic.Field(default=None) """ The start time of the character in seconds. """ - end: float = pydantic.Field() + end: typing.Optional[float] = pydantic.Field(default=None) """ The end time of the character in seconds. """ diff --git a/src/elevenlabs/types/task_instance_event_kind.py b/src/elevenlabs/types/task_instance_event_kind.py deleted file mode 100644 index 17c4a8a6..00000000 --- a/src/elevenlabs/types/task_instance_event_kind.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -TaskInstanceEventKind = typing.Union[ - typing.Literal["claim", "assign", "reject", "submit", "approve", "done"], typing.Any -] diff --git a/src/elevenlabs/types/task_instance_event_response_model.py b/src/elevenlabs/types/task_instance_event_response_model.py deleted file mode 100644 index c000f54f..00000000 --- a/src/elevenlabs/types/task_instance_event_response_model.py +++ /dev/null @@ -1,23 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from ..core.unchecked_base_model import UncheckedBaseModel -import datetime as dt -from .task_instance_event_kind import TaskInstanceEventKind -import typing -from ..core.pydantic_utilities import IS_PYDANTIC_V2 -import pydantic - - -class TaskInstanceEventResponseModel(UncheckedBaseModel): - timestamp: dt.datetime - kind: TaskInstanceEventKind - meta: typing.Optional[typing.Dict[str, str]] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/tool_request_model.py b/src/elevenlabs/types/tool_request_model.py deleted file mode 100644 index 091e2fad..00000000 --- a/src/elevenlabs/types/tool_request_model.py +++ /dev/null @@ -1,28 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -from __future__ import annotations -from ..core.unchecked_base_model import UncheckedBaseModel -from .array_json_schema_property import ArrayJsonSchemaProperty -from .object_json_schema_property import ObjectJsonSchemaProperty -from .tool_request_model_tool_config import ToolRequestModelToolConfig -from ..core.pydantic_utilities import IS_PYDANTIC_V2 -import typing -import pydantic -from ..core.pydantic_utilities import update_forward_refs - - -class ToolRequestModel(UncheckedBaseModel): - tool_config: ToolRequestModelToolConfig - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow - - -update_forward_refs(ArrayJsonSchemaProperty, ToolRequestModel=ToolRequestModel) -update_forward_refs(ObjectJsonSchemaProperty, ToolRequestModel=ToolRequestModel) diff --git a/src/elevenlabs/types/tool_request_model_tool_config.py b/src/elevenlabs/types/tool_request_model_tool_config.py deleted file mode 100644 index 40c12f98..00000000 --- a/src/elevenlabs/types/tool_request_model_tool_config.py +++ /dev/null @@ -1,74 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from __future__ import annotations -from ..core.unchecked_base_model import UncheckedBaseModel -from .array_json_schema_property import ArrayJsonSchemaProperty -from .object_json_schema_property import ObjectJsonSchemaProperty -import typing -from .webhook_tool_api_schema_config import WebhookToolApiSchemaConfig -from ..core.pydantic_utilities import IS_PYDANTIC_V2 -import pydantic -import typing_extensions -from ..core.unchecked_base_model import UnionMetadata -from ..core.pydantic_utilities import update_forward_refs - - -class ToolRequestModelToolConfig_Webhook(UncheckedBaseModel): - type: typing.Literal["webhook"] = "webhook" - name: str - description: str - api_schema: WebhookToolApiSchemaConfig - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow - - -class ToolRequestModelToolConfig_Client(UncheckedBaseModel): - type: typing.Literal["client"] = "client" - name: str - description: str - parameters: typing.Optional[ObjectJsonSchemaProperty] = None - expects_response: typing.Optional[bool] = None - response_timeout_secs: typing.Optional[int] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow - - -class ToolRequestModelToolConfig_System(UncheckedBaseModel): - type: typing.Literal["system"] = "system" - name: str - description: str - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow - - -ToolRequestModelToolConfig = typing_extensions.Annotated[ - typing.Union[ - ToolRequestModelToolConfig_Webhook, ToolRequestModelToolConfig_Client, ToolRequestModelToolConfig_System - ], - UnionMetadata(discriminant="type"), -] -update_forward_refs(ArrayJsonSchemaProperty, ToolRequestModelToolConfig_Webhook=ToolRequestModelToolConfig_Webhook) 
-update_forward_refs(ObjectJsonSchemaProperty, ToolRequestModelToolConfig_Webhook=ToolRequestModelToolConfig_Webhook) -update_forward_refs(ArrayJsonSchemaProperty, ToolRequestModelToolConfig_Client=ToolRequestModelToolConfig_Client) -update_forward_refs(ObjectJsonSchemaProperty, ToolRequestModelToolConfig_Client=ToolRequestModelToolConfig_Client) diff --git a/src/elevenlabs/types/tool_response_model.py b/src/elevenlabs/types/tool_response_model.py deleted file mode 100644 index b5f9c5c8..00000000 --- a/src/elevenlabs/types/tool_response_model.py +++ /dev/null @@ -1,31 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from __future__ import annotations -from ..core.unchecked_base_model import UncheckedBaseModel -from .array_json_schema_property import ArrayJsonSchemaProperty -from .object_json_schema_property import ObjectJsonSchemaProperty -from .tool_response_model_tool_config import ToolResponseModelToolConfig -import typing -from .tool_response_model_dependent_agents_item import ToolResponseModelDependentAgentsItem -from ..core.pydantic_utilities import IS_PYDANTIC_V2 -import pydantic -from ..core.pydantic_utilities import update_forward_refs - - -class ToolResponseModel(UncheckedBaseModel): - id: str - tool_config: ToolResponseModelToolConfig - dependent_agents: typing.List[ToolResponseModelDependentAgentsItem] - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow - - -update_forward_refs(ArrayJsonSchemaProperty, ToolResponseModel=ToolResponseModel) -update_forward_refs(ObjectJsonSchemaProperty, ToolResponseModel=ToolResponseModel) diff --git a/src/elevenlabs/types/tool_response_model_dependent_agents_item.py b/src/elevenlabs/types/tool_response_model_dependent_agents_item.py deleted file mode 100644 index bbcf64a7..00000000 --- a/src/elevenlabs/types/tool_response_model_dependent_agents_item.py +++ /dev/null @@ -1,46 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -from __future__ import annotations -from ..core.unchecked_base_model import UncheckedBaseModel -import typing -from .dependent_available_agent_identifier_access_level import DependentAvailableAgentIdentifierAccessLevel -from ..core.pydantic_utilities import IS_PYDANTIC_V2 -import pydantic -import typing_extensions -from ..core.unchecked_base_model import UnionMetadata - - -class ToolResponseModelDependentAgentsItem_Available(UncheckedBaseModel): - type: typing.Literal["available"] = "available" - id: str - name: str - created_at_unix_secs: int - access_level: DependentAvailableAgentIdentifierAccessLevel - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow - - -class ToolResponseModelDependentAgentsItem_Unknown(UncheckedBaseModel): - type: typing.Literal["unknown"] = "unknown" - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow - - -ToolResponseModelDependentAgentsItem = typing_extensions.Annotated[ - typing.Union[ToolResponseModelDependentAgentsItem_Available, ToolResponseModelDependentAgentsItem_Unknown], - UnionMetadata(discriminant="type"), -] diff --git a/src/elevenlabs/types/tool_response_model_tool_config.py b/src/elevenlabs/types/tool_response_model_tool_config.py deleted file mode 100644 index b587b393..00000000 --- a/src/elevenlabs/types/tool_response_model_tool_config.py +++ /dev/null @@ -1,74 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -from __future__ import annotations -from ..core.unchecked_base_model import UncheckedBaseModel -from .array_json_schema_property import ArrayJsonSchemaProperty -from .object_json_schema_property import ObjectJsonSchemaProperty -import typing -from .webhook_tool_api_schema_config import WebhookToolApiSchemaConfig -from ..core.pydantic_utilities import IS_PYDANTIC_V2 -import pydantic -import typing_extensions -from ..core.unchecked_base_model import UnionMetadata -from ..core.pydantic_utilities import update_forward_refs - - -class ToolResponseModelToolConfig_Webhook(UncheckedBaseModel): - type: typing.Literal["webhook"] = "webhook" - name: str - description: str - api_schema: WebhookToolApiSchemaConfig - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow - - -class ToolResponseModelToolConfig_Client(UncheckedBaseModel): - type: typing.Literal["client"] = "client" - name: str - description: str - parameters: typing.Optional[ObjectJsonSchemaProperty] = None - expects_response: typing.Optional[bool] = None - response_timeout_secs: typing.Optional[int] = None - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow - - -class ToolResponseModelToolConfig_System(UncheckedBaseModel): - type: typing.Literal["system"] = "system" - name: str - description: str - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow - - -ToolResponseModelToolConfig = typing_extensions.Annotated[ - typing.Union[ - ToolResponseModelToolConfig_Webhook, ToolResponseModelToolConfig_Client, ToolResponseModelToolConfig_System - ], - UnionMetadata(discriminant="type"), -] -update_forward_refs(ArrayJsonSchemaProperty, ToolResponseModelToolConfig_Webhook=ToolResponseModelToolConfig_Webhook) -update_forward_refs(ObjectJsonSchemaProperty, ToolResponseModelToolConfig_Webhook=ToolResponseModelToolConfig_Webhook) -update_forward_refs(ArrayJsonSchemaProperty, ToolResponseModelToolConfig_Client=ToolResponseModelToolConfig_Client) -update_forward_refs(ObjectJsonSchemaProperty, ToolResponseModelToolConfig_Client=ToolResponseModelToolConfig_Client) diff --git a/src/elevenlabs/types/tools_response_model.py b/src/elevenlabs/types/tools_response_model.py deleted file mode 100644 index fa063557..00000000 --- a/src/elevenlabs/types/tools_response_model.py +++ /dev/null @@ -1,28 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -from __future__ import annotations -from ..core.unchecked_base_model import UncheckedBaseModel -from .array_json_schema_property import ArrayJsonSchemaProperty -from .object_json_schema_property import ObjectJsonSchemaProperty -import typing -from .tool_response_model import ToolResponseModel -from ..core.pydantic_utilities import IS_PYDANTIC_V2 -import pydantic -from ..core.pydantic_utilities import update_forward_refs - - -class ToolsResponseModel(UncheckedBaseModel): - tools: typing.List[ToolResponseModel] - - if IS_PYDANTIC_V2: - model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 - else: - - class Config: - frozen = True - smart_union = True - extra = pydantic.Extra.allow - - -update_forward_refs(ArrayJsonSchemaProperty, ToolsResponseModel=ToolsResponseModel) -update_forward_refs(ObjectJsonSchemaProperty, ToolsResponseModel=ToolsResponseModel) diff --git a/src/elevenlabs/types/tts_conversational_config.py b/src/elevenlabs/types/tts_conversational_config.py index 3c219fb3..e534ebab 100644 --- a/src/elevenlabs/types/tts_conversational_config.py +++ b/src/elevenlabs/types/tts_conversational_config.py @@ -16,6 +16,7 @@ class TtsConversationalConfig(UncheckedBaseModel): agent_output_audio_format: typing.Optional[TtsOutputFormat] = None optimize_streaming_latency: typing.Optional[TtsOptimizeStreamingLatency] = None stability: typing.Optional[float] = None + speed: typing.Optional[float] = None similarity_boost: typing.Optional[float] = None pronunciation_dictionary_locators: typing.Optional[typing.List[PydanticPronunciationDictionaryVersionLocator]] = ( None diff --git a/src/elevenlabs/types/update_workspace_member_response_model.py b/src/elevenlabs/types/update_workspace_member_response_model.py new file mode 100644 index 00000000..3041005f --- /dev/null +++ b/src/elevenlabs/types/update_workspace_member_response_model.py @@ -0,0 +1,22 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.unchecked_base_model import UncheckedBaseModel +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing + + +class UpdateWorkspaceMemberResponseModel(UncheckedBaseModel): + status: str = pydantic.Field() + """ + The status of the workspace member update request. If the request was successful, the status will be 'ok'. Otherwise an error message with status 500 will be returned. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/elevenlabs/types/verification_attempt_response.py b/src/elevenlabs/types/verification_attempt_response.py index 3d4e06fa..756916da 100644 --- a/src/elevenlabs/types/verification_attempt_response.py +++ b/src/elevenlabs/types/verification_attempt_response.py @@ -1,19 +1,42 @@ # This file was auto-generated by Fern from our API Definition. from ..core.unchecked_base_model import UncheckedBaseModel +import pydantic import typing from .recording_response import RecordingResponse from ..core.pydantic_utilities import IS_PYDANTIC_V2 -import pydantic class VerificationAttemptResponse(UncheckedBaseModel): - text: str - date_unix: int - accepted: bool - similarity: float - levenshtein_distance: float - recording: typing.Optional[RecordingResponse] = None + text: str = pydantic.Field() + """ + The text of the verification attempt. 
+ """ + + date_unix: int = pydantic.Field() + """ + The date of the verification attempt in Unix time. + """ + + accepted: bool = pydantic.Field() + """ + Whether the verification attempt was accepted. + """ + + similarity: float = pydantic.Field() + """ + The similarity of the verification attempt. + """ + + levenshtein_distance: float = pydantic.Field() + """ + The Levenshtein distance of the verification attempt. + """ + + recording: typing.Optional[RecordingResponse] = pydantic.Field(default=None) + """ + The recording of the verification attempt. + """ if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/elevenlabs/types/verified_voice_language_response_model.py b/src/elevenlabs/types/verified_voice_language_response_model.py index 97151b4f..0eba2fb9 100644 --- a/src/elevenlabs/types/verified_voice_language_response_model.py +++ b/src/elevenlabs/types/verified_voice_language_response_model.py @@ -1,15 +1,31 @@ # This file was auto-generated by Fern from our API Definition. from ..core.unchecked_base_model import UncheckedBaseModel +import pydantic import typing from ..core.pydantic_utilities import IS_PYDANTIC_V2 -import pydantic class VerifiedVoiceLanguageResponseModel(UncheckedBaseModel): - language: str - model_id: str - accent: typing.Optional[str] = None + language: str = pydantic.Field() + """ + The language of the voice. + """ + + model_id: str = pydantic.Field() + """ + The voice's model ID. + """ + + accent: typing.Optional[str] = pydantic.Field(default=None) + """ + The voice's accent, if applicable. + """ + + preview_url: typing.Optional[str] = pydantic.Field(default=None) + """ + The voice's preview URL, if applicable. + """ if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/elevenlabs/types/voice.py b/src/elevenlabs/types/voice.py index 2c068214..24d3fbf6 100644 --- a/src/elevenlabs/types/voice.py +++ b/src/elevenlabs/types/voice.py @@ -1,6 +1,7 @@ # This file was auto-generated by Fern from our API Definition. 
from ..core.unchecked_base_model import UncheckedBaseModel +import pydantic import typing from .voice_sample import VoiceSample from .voice_response_model_category import VoiceResponseModelCategory @@ -11,30 +12,108 @@ from .voice_response_model_safety_control import VoiceResponseModelSafetyControl from .voice_verification_response import VoiceVerificationResponse from ..core.pydantic_utilities import IS_PYDANTIC_V2 -import pydantic class Voice(UncheckedBaseModel): - voice_id: str - name: typing.Optional[str] = None - samples: typing.Optional[typing.List[VoiceSample]] = None - category: typing.Optional[VoiceResponseModelCategory] = None - fine_tuning: typing.Optional[FineTuningResponse] = None - labels: typing.Optional[typing.Dict[str, str]] = None - description: typing.Optional[str] = None - preview_url: typing.Optional[str] = None - available_for_tiers: typing.Optional[typing.List[str]] = None - settings: typing.Optional[VoiceSettings] = None - sharing: typing.Optional[VoiceSharingResponse] = None - high_quality_base_model_ids: typing.Optional[typing.List[str]] = None - verified_languages: typing.Optional[typing.List[VerifiedVoiceLanguageResponseModel]] = None - safety_control: typing.Optional[VoiceResponseModelSafetyControl] = None - voice_verification: typing.Optional[VoiceVerificationResponse] = None - permission_on_resource: typing.Optional[str] = None - is_owner: typing.Optional[bool] = None - is_legacy: typing.Optional[bool] = None - is_mixed: typing.Optional[bool] = None - created_at_unix: typing.Optional[int] = None + voice_id: str = pydantic.Field() + """ + The ID of the voice. + """ + + name: typing.Optional[str] = pydantic.Field(default=None) + """ + The name of the voice. + """ + + samples: typing.Optional[typing.List[VoiceSample]] = pydantic.Field(default=None) + """ + List of samples associated with the voice. + """ + + category: typing.Optional[VoiceResponseModelCategory] = pydantic.Field(default=None) + """ + The category of the voice. + """ + + fine_tuning: typing.Optional[FineTuningResponse] = pydantic.Field(default=None) + """ + Fine-tuning information for the voice. + """ + + labels: typing.Optional[typing.Dict[str, str]] = pydantic.Field(default=None) + """ + Labels associated with the voice. + """ + + description: typing.Optional[str] = pydantic.Field(default=None) + """ + The description of the voice. + """ + + preview_url: typing.Optional[str] = pydantic.Field(default=None) + """ + The preview URL of the voice. + """ + + available_for_tiers: typing.Optional[typing.List[str]] = pydantic.Field(default=None) + """ + The tiers the voice is available for. + """ + + settings: typing.Optional[VoiceSettings] = pydantic.Field(default=None) + """ + The settings of the voice. + """ + + sharing: typing.Optional[VoiceSharingResponse] = pydantic.Field(default=None) + """ + The sharing information of the voice. + """ + + high_quality_base_model_ids: typing.Optional[typing.List[str]] = pydantic.Field(default=None) + """ + The base model IDs for high-quality voices. + """ + + verified_languages: typing.Optional[typing.List[VerifiedVoiceLanguageResponseModel]] = pydantic.Field(default=None) + """ + The verified languages of the voice. + """ + + safety_control: typing.Optional[VoiceResponseModelSafetyControl] = pydantic.Field(default=None) + """ + The safety controls of the voice. + """ + + voice_verification: typing.Optional[VoiceVerificationResponse] = pydantic.Field(default=None) + """ + The voice verification of the voice. 
+ """ + + permission_on_resource: typing.Optional[str] = pydantic.Field(default=None) + """ + The permission on the resource of the voice. + """ + + is_owner: typing.Optional[bool] = pydantic.Field(default=None) + """ + Whether the voice is owned by the user. + """ + + is_legacy: typing.Optional[bool] = pydantic.Field(default=None) + """ + Whether the voice is legacy. + """ + + is_mixed: typing.Optional[bool] = pydantic.Field(default=None) + """ + Whether the voice is mixed. + """ + + created_at_unix: typing.Optional[int] = pydantic.Field(default=None) + """ + The creation time of the voice in Unix time. + """ if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/elevenlabs/types/voice_sample.py b/src/elevenlabs/types/voice_sample.py index 6a6b4431..05ded29b 100644 --- a/src/elevenlabs/types/voice_sample.py +++ b/src/elevenlabs/types/voice_sample.py @@ -2,16 +2,35 @@ from ..core.unchecked_base_model import UncheckedBaseModel import typing -from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 class VoiceSample(UncheckedBaseModel): - sample_id: typing.Optional[str] = None - file_name: typing.Optional[str] = None - mime_type: typing.Optional[str] = None - size_bytes: typing.Optional[int] = None - hash: typing.Optional[str] = None + sample_id: typing.Optional[str] = pydantic.Field(default=None) + """ + The ID of the sample. + """ + + file_name: typing.Optional[str] = pydantic.Field(default=None) + """ + The name of the sample file. + """ + + mime_type: typing.Optional[str] = pydantic.Field(default=None) + """ + The MIME type of the sample file. + """ + + size_bytes: typing.Optional[int] = pydantic.Field(default=None) + """ + The size of the sample file in bytes. + """ + + hash: typing.Optional[str] = pydantic.Field(default=None) + """ + The hash of the sample file. + """ if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/elevenlabs/types/voice_settings.py b/src/elevenlabs/types/voice_settings.py index d44fc78d..15e26cf9 100644 --- a/src/elevenlabs/types/voice_settings.py +++ b/src/elevenlabs/types/voice_settings.py @@ -2,16 +2,35 @@ from ..core.unchecked_base_model import UncheckedBaseModel import typing -from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 class VoiceSettings(UncheckedBaseModel): - stability: typing.Optional[float] = None - similarity_boost: typing.Optional[float] = None - style: typing.Optional[float] = None - use_speaker_boost: typing.Optional[bool] = None - speed: typing.Optional[float] = None + stability: typing.Optional[float] = pydantic.Field(default=None) + """ + Determines how stable the voice is and the randomness between each generation. Lower values introduce broader emotional range for the voice. Higher values can result in a monotonous voice with limited emotion. + """ + + similarity_boost: typing.Optional[float] = pydantic.Field(default=None) + """ + Determines how closely the AI should adhere to the original voice when attempting to replicate it. + """ + + style: typing.Optional[float] = pydantic.Field(default=None) + """ + Determines the style exaggeration of the voice. This setting attempts to amplify the style of the original speaker. 
It does consume additional computational resources and might increase latency if set to anything other than 0. + """ + + use_speaker_boost: typing.Optional[bool] = pydantic.Field(default=None) + """ + This setting boosts the similarity to the original speaker. Using this setting requires a slightly higher computational load, which in turn increases latency. + """ + + speed: typing.Optional[float] = pydantic.Field(default=None) + """ + Controls the speed of the generated speech. Values range from 0.7 to 1.2, with 1.0 being the default speed. Lower values create slower, more deliberate speech while higher values produce faster-paced speech. Extreme values can impact the quality of the generated speech. + """ if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/elevenlabs/types/voice_sharing_moderation_check_response_model.py b/src/elevenlabs/types/voice_sharing_moderation_check_response_model.py index cdd6875b..749bc768 100644 --- a/src/elevenlabs/types/voice_sharing_moderation_check_response_model.py +++ b/src/elevenlabs/types/voice_sharing_moderation_check_response_model.py @@ -2,20 +2,55 @@ from ..core.unchecked_base_model import UncheckedBaseModel import typing -from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 class VoiceSharingModerationCheckResponseModel(UncheckedBaseModel): - date_checked_unix: typing.Optional[int] = None - name_value: typing.Optional[str] = None - name_check: typing.Optional[bool] = None - description_value: typing.Optional[str] = None - description_check: typing.Optional[bool] = None - sample_ids: typing.Optional[typing.List[str]] = None - sample_checks: typing.Optional[typing.List[float]] = None - captcha_ids: typing.Optional[typing.List[str]] = None - captcha_checks: typing.Optional[typing.List[float]] = None + date_checked_unix: typing.Optional[int] = pydantic.Field(default=None) + """ + The date the moderation check was made in Unix time. + """ + + name_value: typing.Optional[str] = pydantic.Field(default=None) + """ + The name value of the voice. + """ + + name_check: typing.Optional[bool] = pydantic.Field(default=None) + """ + Whether the name check was successful. + """ + + description_value: typing.Optional[str] = pydantic.Field(default=None) + """ + The description value of the voice. + """ + + description_check: typing.Optional[bool] = pydantic.Field(default=None) + """ + Whether the description check was successful. + """ + + sample_ids: typing.Optional[typing.List[str]] = pydantic.Field(default=None) + """ + A list of sample IDs. + """ + + sample_checks: typing.Optional[typing.List[float]] = pydantic.Field(default=None) + """ + A list of sample checks. + """ + + captcha_ids: typing.Optional[typing.List[str]] = pydantic.Field(default=None) + """ + A list of captcha IDs. + """ + + captcha_checks: typing.Optional[typing.List[float]] = pydantic.Field(default=None) + """ + A list of CAPTCHA check values. 
+ """ if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/elevenlabs/types/voice_sharing_response.py b/src/elevenlabs/types/voice_sharing_response.py index 9fb5062d..16f40526 100644 --- a/src/elevenlabs/types/voice_sharing_response.py +++ b/src/elevenlabs/types/voice_sharing_response.py @@ -3,47 +3,174 @@ from ..core.unchecked_base_model import UncheckedBaseModel import typing from .voice_sharing_state import VoiceSharingState +import pydantic from .voice_sharing_response_model_category import VoiceSharingResponseModelCategory from .review_status import ReviewStatus from .voice_sharing_moderation_check_response_model import VoiceSharingModerationCheckResponseModel from .reader_resource_response_model import ReaderResourceResponseModel from ..core.pydantic_utilities import IS_PYDANTIC_V2 -import pydantic class VoiceSharingResponse(UncheckedBaseModel): - status: typing.Optional[VoiceSharingState] = None - history_item_sample_id: typing.Optional[str] = None - date_unix: typing.Optional[int] = None - whitelisted_emails: typing.Optional[typing.List[str]] = None - public_owner_id: typing.Optional[str] = None - original_voice_id: typing.Optional[str] = None - financial_rewards_enabled: typing.Optional[bool] = None - free_users_allowed: typing.Optional[bool] = None - live_moderation_enabled: typing.Optional[bool] = None - rate: typing.Optional[float] = None - notice_period: typing.Optional[int] = None - disable_at_unix: typing.Optional[int] = None - voice_mixing_allowed: typing.Optional[bool] = None - featured: typing.Optional[bool] = None - category: typing.Optional[VoiceSharingResponseModelCategory] = None - reader_app_enabled: typing.Optional[bool] = None - image_url: typing.Optional[str] = None - ban_reason: typing.Optional[str] = None - liked_by_count: typing.Optional[int] = None - cloned_by_count: typing.Optional[int] = None - name: typing.Optional[str] = None - description: typing.Optional[str] = None - labels: typing.Optional[typing.Dict[str, str]] = None - review_status: typing.Optional[ReviewStatus] = None - review_message: typing.Optional[str] = None - enabled_in_library: typing.Optional[bool] = None - instagram_username: typing.Optional[str] = None - twitter_username: typing.Optional[str] = None - youtube_username: typing.Optional[str] = None - tiktok_username: typing.Optional[str] = None - moderation_check: typing.Optional[VoiceSharingModerationCheckResponseModel] = None - reader_restricted_on: typing.Optional[typing.List[ReaderResourceResponseModel]] = None + status: typing.Optional[VoiceSharingState] = pydantic.Field(default=None) + """ + The status of the voice sharing. + """ + + history_item_sample_id: typing.Optional[str] = pydantic.Field(default=None) + """ + The sample ID of the history item. + """ + + date_unix: typing.Optional[int] = pydantic.Field(default=None) + """ + The date of the voice sharing in Unix time. + """ + + whitelisted_emails: typing.Optional[typing.List[str]] = pydantic.Field(default=None) + """ + A list of whitelisted emails. + """ + + public_owner_id: typing.Optional[str] = pydantic.Field(default=None) + """ + The ID of the public owner. + """ + + original_voice_id: typing.Optional[str] = pydantic.Field(default=None) + """ + The ID of the original voice. + """ + + financial_rewards_enabled: typing.Optional[bool] = pydantic.Field(default=None) + """ + Whether financial rewards are enabled. 
+ """ + + free_users_allowed: typing.Optional[bool] = pydantic.Field(default=None) + """ + Whether free users are allowed. + """ + + live_moderation_enabled: typing.Optional[bool] = pydantic.Field(default=None) + """ + Whether live moderation is enabled. + """ + + rate: typing.Optional[float] = pydantic.Field(default=None) + """ + The rate of the voice sharing. + """ + + notice_period: typing.Optional[int] = pydantic.Field(default=None) + """ + The notice period of the voice sharing. + """ + + disable_at_unix: typing.Optional[int] = pydantic.Field(default=None) + """ + The date of the voice sharing in Unix time. + """ + + voice_mixing_allowed: typing.Optional[bool] = pydantic.Field(default=None) + """ + Whether voice mixing is allowed. + """ + + featured: typing.Optional[bool] = pydantic.Field(default=None) + """ + Whether the voice is featured. + """ + + category: typing.Optional[VoiceSharingResponseModelCategory] = pydantic.Field(default=None) + """ + The category of the voice. + """ + + reader_app_enabled: typing.Optional[bool] = pydantic.Field(default=None) + """ + Whether the reader app is enabled. + """ + + image_url: typing.Optional[str] = pydantic.Field(default=None) + """ + The image URL of the voice. + """ + + ban_reason: typing.Optional[str] = pydantic.Field(default=None) + """ + The ban reason of the voice. + """ + + liked_by_count: typing.Optional[int] = pydantic.Field(default=None) + """ + The number of likes on the voice. + """ + + cloned_by_count: typing.Optional[int] = pydantic.Field(default=None) + """ + The number of clones on the voice. + """ + + name: typing.Optional[str] = pydantic.Field(default=None) + """ + The name of the voice. + """ + + description: typing.Optional[str] = pydantic.Field(default=None) + """ + The description of the voice. + """ + + labels: typing.Optional[typing.Dict[str, str]] = pydantic.Field(default=None) + """ + The labels of the voice. + """ + + review_status: typing.Optional[ReviewStatus] = pydantic.Field(default=None) + """ + The review status of the voice. + """ + + review_message: typing.Optional[str] = pydantic.Field(default=None) + """ + The review message of the voice. + """ + + enabled_in_library: typing.Optional[bool] = pydantic.Field(default=None) + """ + Whether the voice is enabled in the library. + """ + + instagram_username: typing.Optional[str] = pydantic.Field(default=None) + """ + The Instagram username of the voice. + """ + + twitter_username: typing.Optional[str] = pydantic.Field(default=None) + """ + The Twitter/X username of the voice. + """ + + youtube_username: typing.Optional[str] = pydantic.Field(default=None) + """ + The YouTube username of the voice. + """ + + tiktok_username: typing.Optional[str] = pydantic.Field(default=None) + """ + The TikTok username of the voice. + """ + + moderation_check: typing.Optional[VoiceSharingModerationCheckResponseModel] = pydantic.Field(default=None) + """ + The moderation check of the voice. + """ + + reader_restricted_on: typing.Optional[typing.List[ReaderResourceResponseModel]] = pydantic.Field(default=None) + """ + The reader restricted on of the voice. 
+ """ if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/elevenlabs/types/voice_sharing_response_model_category.py b/src/elevenlabs/types/voice_sharing_response_model_category.py index 8439ad79..e66678ef 100644 --- a/src/elevenlabs/types/voice_sharing_response_model_category.py +++ b/src/elevenlabs/types/voice_sharing_response_model_category.py @@ -3,5 +3,5 @@ import typing VoiceSharingResponseModelCategory = typing.Union[ - typing.Literal["generated", "professional", "high_quality", "famous"], typing.Any + typing.Literal["generated", "cloned", "premade", "professional", "famous", "high_quality"], typing.Any ] diff --git a/src/elevenlabs/types/voice_verification_response.py b/src/elevenlabs/types/voice_verification_response.py index 2a18aaa1..86c303b7 100644 --- a/src/elevenlabs/types/voice_verification_response.py +++ b/src/elevenlabs/types/voice_verification_response.py @@ -1,19 +1,42 @@ # This file was auto-generated by Fern from our API Definition. from ..core.unchecked_base_model import UncheckedBaseModel +import pydantic import typing from .verification_attempt_response import VerificationAttemptResponse from ..core.pydantic_utilities import IS_PYDANTIC_V2 -import pydantic class VoiceVerificationResponse(UncheckedBaseModel): - requires_verification: bool - is_verified: bool - verification_failures: typing.List[str] - verification_attempts_count: int - language: typing.Optional[str] = None - verification_attempts: typing.Optional[typing.List[VerificationAttemptResponse]] = None + requires_verification: bool = pydantic.Field() + """ + Whether the voice requires verification. + """ + + is_verified: bool = pydantic.Field() + """ + Whether the voice has been verified. + """ + + verification_failures: typing.List[str] = pydantic.Field() + """ + List of verification failures. + """ + + verification_attempts_count: int = pydantic.Field() + """ + The number of verification attempts. + """ + + language: typing.Optional[str] = pydantic.Field(default=None) + """ + The language of the voice. + """ + + verification_attempts: typing.Optional[typing.List[VerificationAttemptResponse]] = pydantic.Field(default=None) + """ + Number of times a verification was attempted. 
+ """ if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/elevenlabs/types/webhook_tool_config.py b/src/elevenlabs/types/webhook_tool_config.py index 9d76e0dd..dd3e6b63 100644 --- a/src/elevenlabs/types/webhook_tool_config.py +++ b/src/elevenlabs/types/webhook_tool_config.py @@ -5,8 +5,9 @@ from .array_json_schema_property import ArrayJsonSchemaProperty from .object_json_schema_property import ObjectJsonSchemaProperty from .webhook_tool_api_schema_config import WebhookToolApiSchemaConfig -from ..core.pydantic_utilities import IS_PYDANTIC_V2 import typing +from .dynamic_variables_config import DynamicVariablesConfig +from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic from ..core.pydantic_utilities import update_forward_refs @@ -19,6 +20,7 @@ class WebhookToolConfig(UncheckedBaseModel): name: str description: str api_schema: WebhookToolApiSchemaConfig + dynamic_variables: typing.Optional[DynamicVariablesConfig] = None if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/elevenlabs/types/widget_config.py b/src/elevenlabs/types/widget_config.py index 85a52738..4ae5248c 100644 --- a/src/elevenlabs/types/widget_config.py +++ b/src/elevenlabs/types/widget_config.py @@ -30,6 +30,7 @@ class WidgetConfig(UncheckedBaseModel): listening_text: typing.Optional[str] = None speaking_text: typing.Optional[str] = None shareable_page_text: typing.Optional[str] = None + shareable_page_show_terms: typing.Optional[bool] = None terms_text: typing.Optional[str] = None terms_html: typing.Optional[str] = None terms_key: typing.Optional[str] = None diff --git a/src/elevenlabs/types/widget_config_response_model.py b/src/elevenlabs/types/widget_config_response_model.py index a12838ff..6fde0302 100644 --- a/src/elevenlabs/types/widget_config_response_model.py +++ b/src/elevenlabs/types/widget_config_response_model.py @@ -30,6 +30,7 @@ class WidgetConfigResponseModel(UncheckedBaseModel): listening_text: typing.Optional[str] = None speaking_text: typing.Optional[str] = None shareable_page_text: typing.Optional[str] = None + shareable_page_show_terms: typing.Optional[bool] = None terms_text: typing.Optional[str] = None terms_html: typing.Optional[str] = None terms_key: typing.Optional[str] = None diff --git a/src/elevenlabs/types/workspace_group_by_name_response_model.py b/src/elevenlabs/types/workspace_group_by_name_response_model.py index 5a935769..0f83d6f4 100644 --- a/src/elevenlabs/types/workspace_group_by_name_response_model.py +++ b/src/elevenlabs/types/workspace_group_by_name_response_model.py @@ -1,15 +1,26 @@ # This file was auto-generated by Fern from our API Definition. from ..core.unchecked_base_model import UncheckedBaseModel +import pydantic import typing from ..core.pydantic_utilities import IS_PYDANTIC_V2 -import pydantic class WorkspaceGroupByNameResponseModel(UncheckedBaseModel): - name: str - id: str - members_emails: typing.List[str] + name: str = pydantic.Field() + """ + The name of the workspace group. + """ + + id: str = pydantic.Field() + """ + The ID of the workspace group. + """ + + members_emails: typing.List[str] = pydantic.Field() + """ + The emails of the members of the workspace group. 
+ """ if IS_PYDANTIC_V2: model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 diff --git a/src/elevenlabs/voice_generation/client.py b/src/elevenlabs/voice_generation/client.py index 05e5fb94..5075b6a4 100644 --- a/src/elevenlabs/voice_generation/client.py +++ b/src/elevenlabs/voice_generation/client.py @@ -179,7 +179,7 @@ def create_a_previously_generated_voice( Description to use for the created voice. generated_voice_id : str - The generated_voice_id to create, call POST /v1/voice-generation/generate-voice and fetch the generated_voice_id from the response header if don't have one yet. + The generated_voice_id to create, call POST /v1/text-to-voice/create-previews and fetch the generated_voice_id from the response header if don't have one yet. played_not_selected_voice_ids : typing.Optional[typing.Sequence[str]] List of voice ids that the user has played but not selected. Used for RLHF. @@ -426,7 +426,7 @@ async def create_a_previously_generated_voice( Description to use for the created voice. generated_voice_id : str - The generated_voice_id to create, call POST /v1/voice-generation/generate-voice and fetch the generated_voice_id from the response header if don't have one yet. + The generated_voice_id to create, call POST /v1/text-to-voice/create-previews and fetch the generated_voice_id from the response header if don't have one yet. played_not_selected_voice_ids : typing.Optional[typing.Sequence[str]] List of voice ids that the user has played but not selected. Used for RLHF. diff --git a/src/elevenlabs/voices/__init__.py b/src/elevenlabs/voices/__init__.py index f3ea2659..74b259cc 100644 --- a/src/elevenlabs/voices/__init__.py +++ b/src/elevenlabs/voices/__init__.py @@ -1,2 +1,5 @@ # This file was auto-generated by Fern from our API Definition. +from .types import VoicesGetSharedRequestCategory + +__all__ = ["VoicesGetSharedRequestCategory"] diff --git a/src/elevenlabs/voices/client.py b/src/elevenlabs/voices/client.py index 9467096f..5c7c8506 100644 --- a/src/elevenlabs/voices/client.py +++ b/src/elevenlabs/voices/client.py @@ -12,10 +12,14 @@ from ..types.voice_settings import VoiceSettings from ..core.jsonable_encoder import jsonable_encoder from ..types.voice import Voice +from ..types.delete_voice_response_model import DeleteVoiceResponseModel +from ..types.edit_voice_settings_response_model import EditVoiceSettingsResponseModel from ..core.serialization import convert_and_respect_annotation_metadata from .. import core from ..types.add_voice_ivc_response_model import AddVoiceIvcResponseModel +from ..types.edit_voice_response_model import EditVoiceResponseModel from ..types.add_voice_response_model import AddVoiceResponseModel +from .types.voices_get_shared_request_category import VoicesGetSharedRequestCategory from ..types.get_library_voices_response import GetLibraryVoicesResponse from ..types.profile_page_response_model import ProfilePageResponseModel from ..core.client_wrapper import AsyncClientWrapper @@ -32,7 +36,7 @@ def get_all( self, *, show_legacy: typing.Optional[bool] = None, request_options: typing.Optional[RequestOptions] = None ) -> GetVoicesResponse: """ - Gets a list of all available voices for a user. + Returns a list of all available voices for a user. 
Parameters ---------- @@ -258,7 +262,7 @@ def get( def delete( self, voice_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> typing.Optional[typing.Any]: + ) -> DeleteVoiceResponseModel: """ Deletes a voice by its ID. @@ -272,7 +276,7 @@ def delete( Returns ------- - typing.Optional[typing.Any] + DeleteVoiceResponseModel Successful Response Examples @@ -294,9 +298,9 @@ def delete( try: if 200 <= _response.status_code < 300: return typing.cast( - typing.Optional[typing.Any], + DeleteVoiceResponseModel, construct_type( - type_=typing.Optional[typing.Any], # type: ignore + type_=DeleteVoiceResponseModel, # type: ignore object_=_response.json(), ), ) @@ -317,9 +321,9 @@ def delete( def edit_settings( self, voice_id: str, *, request: VoiceSettings, request_options: typing.Optional[RequestOptions] = None - ) -> typing.Optional[typing.Any]: + ) -> EditVoiceSettingsResponseModel: """ - Edit your settings for a specific voice. "similarity_boost" corresponds to"Clarity + Similarity Enhancement" in the web app and "stability" corresponds to "Stability" slider in the web app. + Edit your settings for a specific voice. "similarity_boost" corresponds to "Clarity + Similarity Enhancement" in the web app and "stability" corresponds to "Stability" slider in the web app. Parameters ---------- @@ -333,7 +337,7 @@ def edit_settings( Returns ------- - typing.Optional[typing.Any] + EditVoiceSettingsResponseModel Successful Response Examples @@ -362,9 +366,9 @@ def edit_settings( try: if 200 <= _response.status_code < 300: return typing.cast( - typing.Optional[typing.Any], + EditVoiceSettingsResponseModel, construct_type( - type_=typing.Optional[typing.Any], # type: ignore + type_=EditVoiceSettingsResponseModel, # type: ignore object_=_response.json(), ), ) @@ -408,7 +412,7 @@ def add( If set will remove background noise for voice samples using our audio isolation model. If the samples do not include background noise, it can make the quality worse. description : typing.Optional[str] - How would you describe the voice? + A description of the voice. labels : typing.Optional[str] Serialized labels dictionary for the voice. @@ -481,7 +485,7 @@ def edit( description: typing.Optional[str] = OMIT, labels: typing.Optional[str] = OMIT, request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[typing.Any]: + ) -> EditVoiceResponseModel: """ Edit a voice created by you. @@ -500,7 +504,7 @@ def edit( If set will remove background noise for voice samples using our audio isolation model. If the samples do not include background noise, it can make the quality worse. description : typing.Optional[str] - How would you describe the voice? + A description of the voice. labels : typing.Optional[str] Serialized labels dictionary for the voice. @@ -510,7 +514,7 @@ def edit( Returns ------- - typing.Optional[typing.Any] + EditVoiceResponseModel Successful Response Examples @@ -543,9 +547,9 @@ def edit( try: if 200 <= _response.status_code < 300: return typing.cast( - typing.Optional[typing.Any], + EditVoiceResponseModel, construct_type( - type_=typing.Optional[typing.Any], # type: ignore + type_=EditVoiceResponseModel, # type: ignore object_=_response.json(), ), ) @@ -573,7 +577,7 @@ def add_sharing_voice( request_options: typing.Optional[RequestOptions] = None, ) -> AddVoiceResponseModel: """ - Add a sharing voice to your collection of voices in VoiceLab. + Add a shared voice to your collection of voices. 
Parameters ---------- @@ -647,7 +651,7 @@ def get_shared( self, *, page_size: typing.Optional[int] = None, - category: typing.Optional[str] = None, + category: typing.Optional[VoicesGetSharedRequestCategory] = None, gender: typing.Optional[str] = None, age: typing.Optional[str] = None, accent: typing.Optional[str] = None, @@ -664,36 +668,36 @@ def get_shared( request_options: typing.Optional[RequestOptions] = None, ) -> GetLibraryVoicesResponse: """ - Gets a list of shared voices. + Retrieves a list of shared voices. Parameters ---------- page_size : typing.Optional[int] How many shared voices to return at maximum. Can not exceed 100, defaults to 30. - category : typing.Optional[str] - voice category used for filtering + category : typing.Optional[VoicesGetSharedRequestCategory] + Voice category used for filtering gender : typing.Optional[str] - gender used for filtering + Gender used for filtering age : typing.Optional[str] - age used for filtering + Age used for filtering accent : typing.Optional[str] - accent used for filtering + Accent used for filtering language : typing.Optional[str] - language used for filtering + Language used for filtering search : typing.Optional[str] - search term used for filtering + Search term used for filtering use_cases : typing.Optional[typing.Union[str, typing.Sequence[str]]] - use-case used for filtering + Use-case used for filtering descriptives : typing.Optional[typing.Union[str, typing.Sequence[str]]] - search term used for filtering + Search term used for filtering featured : typing.Optional[bool] Filter featured voices @@ -708,7 +712,7 @@ def get_shared( Filter voices by public owner ID sort : typing.Optional[str] - sort criteria + Sort criteria page : typing.Optional[int] @@ -796,10 +800,10 @@ def get_similar_library_voices( See core.File for more documentation similarity_threshold : typing.Optional[float] - Threshold for voice similarity between provided sample and library voices. Must be in range <0, 2>. The smaller the value the more similar voices will be returned. + Threshold for voice similarity between provided sample and library voices. Values range from 0 to 2. The smaller the value the more similar voices will be returned. top_k : typing.Optional[int] - Number of most similar voices to return. If similarity_threshold is provided, less than this number of voices may be returned. Must be in range <1, 100>. + Number of most similar voices to return. If similarity_threshold is provided, less than this number of voices may be returned. Values range from 1 to 100. request_options : typing.Optional[RequestOptions] Request-specific configuration. @@ -923,7 +927,7 @@ async def get_all( self, *, show_legacy: typing.Optional[bool] = None, request_options: typing.Optional[RequestOptions] = None ) -> GetVoicesResponse: """ - Gets a list of all available voices for a user. + Returns a list of all available voices for a user. Parameters ---------- @@ -1183,7 +1187,7 @@ async def main() -> None: async def delete( self, voice_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> typing.Optional[typing.Any]: + ) -> DeleteVoiceResponseModel: """ Deletes a voice by its ID. 
@@ -1197,7 +1201,7 @@ async def delete( Returns ------- - typing.Optional[typing.Any] + DeleteVoiceResponseModel Successful Response Examples @@ -1227,9 +1231,9 @@ async def main() -> None: try: if 200 <= _response.status_code < 300: return typing.cast( - typing.Optional[typing.Any], + DeleteVoiceResponseModel, construct_type( - type_=typing.Optional[typing.Any], # type: ignore + type_=DeleteVoiceResponseModel, # type: ignore object_=_response.json(), ), ) @@ -1250,9 +1254,9 @@ async def main() -> None: async def edit_settings( self, voice_id: str, *, request: VoiceSettings, request_options: typing.Optional[RequestOptions] = None - ) -> typing.Optional[typing.Any]: + ) -> EditVoiceSettingsResponseModel: """ - Edit your settings for a specific voice. "similarity_boost" corresponds to"Clarity + Similarity Enhancement" in the web app and "stability" corresponds to "Stability" slider in the web app. + Edit your settings for a specific voice. "similarity_boost" corresponds to "Clarity + Similarity Enhancement" in the web app and "stability" corresponds to "Stability" slider in the web app. Parameters ---------- @@ -1266,7 +1270,7 @@ async def edit_settings( Returns ------- - typing.Optional[typing.Any] + EditVoiceSettingsResponseModel Successful Response Examples @@ -1303,9 +1307,9 @@ async def main() -> None: try: if 200 <= _response.status_code < 300: return typing.cast( - typing.Optional[typing.Any], + EditVoiceSettingsResponseModel, construct_type( - type_=typing.Optional[typing.Any], # type: ignore + type_=EditVoiceSettingsResponseModel, # type: ignore object_=_response.json(), ), ) @@ -1349,7 +1353,7 @@ async def add( If set will remove background noise for voice samples using our audio isolation model. If the samples do not include background noise, it can make the quality worse. description : typing.Optional[str] - How would you describe the voice? + A description of the voice. labels : typing.Optional[str] Serialized labels dictionary for the voice. @@ -1430,7 +1434,7 @@ async def edit( description: typing.Optional[str] = OMIT, labels: typing.Optional[str] = OMIT, request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[typing.Any]: + ) -> EditVoiceResponseModel: """ Edit a voice created by you. @@ -1449,7 +1453,7 @@ async def edit( If set will remove background noise for voice samples using our audio isolation model. If the samples do not include background noise, it can make the quality worse. description : typing.Optional[str] - How would you describe the voice? + A description of the voice. labels : typing.Optional[str] Serialized labels dictionary for the voice. @@ -1459,7 +1463,7 @@ async def edit( Returns ------- - typing.Optional[typing.Any] + EditVoiceResponseModel Successful Response Examples @@ -1500,9 +1504,9 @@ async def main() -> None: try: if 200 <= _response.status_code < 300: return typing.cast( - typing.Optional[typing.Any], + EditVoiceResponseModel, construct_type( - type_=typing.Optional[typing.Any], # type: ignore + type_=EditVoiceResponseModel, # type: ignore object_=_response.json(), ), ) @@ -1530,7 +1534,7 @@ async def add_sharing_voice( request_options: typing.Optional[RequestOptions] = None, ) -> AddVoiceResponseModel: """ - Add a sharing voice to your collection of voices in VoiceLab. + Add a shared voice to your collection of voices. 
Parameters ---------- @@ -1612,7 +1616,7 @@ async def get_shared( self, *, page_size: typing.Optional[int] = None, - category: typing.Optional[str] = None, + category: typing.Optional[VoicesGetSharedRequestCategory] = None, gender: typing.Optional[str] = None, age: typing.Optional[str] = None, accent: typing.Optional[str] = None, @@ -1629,36 +1633,36 @@ async def get_shared( request_options: typing.Optional[RequestOptions] = None, ) -> GetLibraryVoicesResponse: """ - Gets a list of shared voices. + Retrieves a list of shared voices. Parameters ---------- page_size : typing.Optional[int] How many shared voices to return at maximum. Can not exceed 100, defaults to 30. - category : typing.Optional[str] - voice category used for filtering + category : typing.Optional[VoicesGetSharedRequestCategory] + Voice category used for filtering gender : typing.Optional[str] - gender used for filtering + Gender used for filtering age : typing.Optional[str] - age used for filtering + Age used for filtering accent : typing.Optional[str] - accent used for filtering + Accent used for filtering language : typing.Optional[str] - language used for filtering + Language used for filtering search : typing.Optional[str] - search term used for filtering + Search term used for filtering use_cases : typing.Optional[typing.Union[str, typing.Sequence[str]]] - use-case used for filtering + Use-case used for filtering descriptives : typing.Optional[typing.Union[str, typing.Sequence[str]]] - search term used for filtering + Search term used for filtering featured : typing.Optional[bool] Filter featured voices @@ -1673,7 +1677,7 @@ async def get_shared( Filter voices by public owner ID sort : typing.Optional[str] - sort criteria + Sort criteria page : typing.Optional[int] @@ -1769,10 +1773,10 @@ async def get_similar_library_voices( See core.File for more documentation similarity_threshold : typing.Optional[float] - Threshold for voice similarity between provided sample and library voices. Must be in range <0, 2>. The smaller the value the more similar voices will be returned. + Threshold for voice similarity between provided sample and library voices. Values range from 0 to 2. The smaller the value the more similar voices will be returned. top_k : typing.Optional[int] - Number of most similar voices to return. If similarity_threshold is provided, less than this number of voices may be returned. Must be in range <1, 100>. + Number of most similar voices to return. If similarity_threshold is provided, less than this number of voices may be returned. Values range from 1 to 100. request_options : typing.Optional[RequestOptions] Request-specific configuration. diff --git a/src/elevenlabs/voices/types/__init__.py b/src/elevenlabs/voices/types/__init__.py new file mode 100644 index 00000000..d3930f61 --- /dev/null +++ b/src/elevenlabs/voices/types/__init__.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +from .voices_get_shared_request_category import VoicesGetSharedRequestCategory + +__all__ = ["VoicesGetSharedRequestCategory"] diff --git a/src/elevenlabs/voices/types/voices_get_shared_request_category.py b/src/elevenlabs/voices/types/voices_get_shared_request_category.py new file mode 100644 index 00000000..b4b501f3 --- /dev/null +++ b/src/elevenlabs/voices/types/voices_get_shared_request_category.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +VoicesGetSharedRequestCategory = typing.Union[typing.Literal["professional", "famous", "high_quality"], typing.Any] diff --git a/src/elevenlabs/workspace/client.py b/src/elevenlabs/workspace/client.py index a39bc845..ac1676f1 100644 --- a/src/elevenlabs/workspace/client.py +++ b/src/elevenlabs/workspace/client.py @@ -9,10 +9,15 @@ from ..types.http_validation_error import HttpValidationError from json.decoder import JSONDecodeError from ..core.api_error import ApiError +from ..types.delete_workspace_group_member_response_model import DeleteWorkspaceGroupMemberResponseModel from ..core.jsonable_encoder import jsonable_encoder +from ..types.add_workspace_group_member_response_model import AddWorkspaceGroupMemberResponseModel +from ..types.add_workspace_invite_response_model import AddWorkspaceInviteResponseModel +from ..types.delete_workspace_invite_response_model import DeleteWorkspaceInviteResponseModel from .types.body_update_member_v_1_workspace_members_post_workspace_role import ( BodyUpdateMemberV1WorkspaceMembersPostWorkspaceRole, ) +from ..types.update_workspace_member_response_model import UpdateWorkspaceMemberResponseModel from ..core.client_wrapper import AsyncClientWrapper # this is used as the default value for optional parameters @@ -87,7 +92,7 @@ def search_user_groups( def delete_member_from_user_group( self, group_id: str, *, email: str, request_options: typing.Optional[RequestOptions] = None - ) -> typing.Optional[typing.Any]: + ) -> DeleteWorkspaceGroupMemberResponseModel: """ Removes a member from the specified group. This endpoint may only be called by workspace administrators. @@ -104,7 +109,7 @@ def delete_member_from_user_group( Returns ------- - typing.Optional[typing.Any] + DeleteWorkspaceGroupMemberResponseModel Successful Response Examples @@ -134,9 +139,9 @@ def delete_member_from_user_group( try: if 200 <= _response.status_code < 300: return typing.cast( - typing.Optional[typing.Any], + DeleteWorkspaceGroupMemberResponseModel, construct_type( - type_=typing.Optional[typing.Any], # type: ignore + type_=DeleteWorkspaceGroupMemberResponseModel, # type: ignore object_=_response.json(), ), ) @@ -157,7 +162,7 @@ def delete_member_from_user_group( def add_member_to_user_group( self, group_id: str, *, email: str, request_options: typing.Optional[RequestOptions] = None - ) -> typing.Optional[typing.Any]: + ) -> AddWorkspaceGroupMemberResponseModel: """ Adds a member of your workspace to the specified group. This endpoint may only be called by workspace administrators. @@ -174,7 +179,7 @@ def add_member_to_user_group( Returns ------- - typing.Optional[typing.Any] + AddWorkspaceGroupMemberResponseModel Successful Response Examples @@ -204,9 +209,9 @@ def add_member_to_user_group( try: if 200 <= _response.status_code < 300: return typing.cast( - typing.Optional[typing.Any], + AddWorkspaceGroupMemberResponseModel, construct_type( - type_=typing.Optional[typing.Any], # type: ignore + type_=AddWorkspaceGroupMemberResponseModel, # type: ignore object_=_response.json(), ), ) @@ -231,7 +236,7 @@ def invite_user( email: str, group_ids: typing.Optional[typing.Sequence[str]] = OMIT, request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[typing.Any]: + ) -> AddWorkspaceInviteResponseModel: """ Sends an email invitation to join your workspace to the provided email. If the user doesn't have an account they will be prompted to create one. 
If the user accepts this invite they will be added as a user to your workspace and your subscription using one of your seats. This endpoint may only be called by workspace administrators. If the user is already in the workspace a 400 error will be returned. @@ -248,7 +253,7 @@ def invite_user( Returns ------- - typing.Optional[typing.Any] + AddWorkspaceInviteResponseModel Successful Response Examples @@ -278,9 +283,9 @@ def invite_user( try: if 200 <= _response.status_code < 300: return typing.cast( - typing.Optional[typing.Any], + AddWorkspaceInviteResponseModel, construct_type( - type_=typing.Optional[typing.Any], # type: ignore + type_=AddWorkspaceInviteResponseModel, # type: ignore object_=_response.json(), ), ) @@ -305,7 +310,7 @@ def invite_multiple_users( emails: typing.Sequence[str], group_ids: typing.Optional[typing.Sequence[str]] = OMIT, request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[typing.Any]: + ) -> AddWorkspaceInviteResponseModel: """ Sends email invitations to join your workspace to the provided emails. Requires all email addresses to be part of a verified domain. If the users don't have an account they will be prompted to create one. If the users accept these invites they will be added as users to your workspace and your subscription using one of your seats. This endpoint may only be called by workspace administrators. @@ -322,7 +327,7 @@ def invite_multiple_users( Returns ------- - typing.Optional[typing.Any] + AddWorkspaceInviteResponseModel Successful Response Examples @@ -352,9 +357,9 @@ def invite_multiple_users( try: if 200 <= _response.status_code < 300: return typing.cast( - typing.Optional[typing.Any], + AddWorkspaceInviteResponseModel, construct_type( - type_=typing.Optional[typing.Any], # type: ignore + type_=AddWorkspaceInviteResponseModel, # type: ignore object_=_response.json(), ), ) @@ -375,7 +380,7 @@ def invite_multiple_users( def delete_existing_invitation( self, *, email: str, request_options: typing.Optional[RequestOptions] = None - ) -> typing.Optional[typing.Any]: + ) -> DeleteWorkspaceInviteResponseModel: """ Invalidates an existing email invitation. The invitation will still show up in the inbox it has been delivered to, but activating it to join the workspace won't work. This endpoint may only be called by workspace administrators. @@ -389,7 +394,7 @@ def delete_existing_invitation( Returns ------- - typing.Optional[typing.Any] + DeleteWorkspaceInviteResponseModel Successful Response Examples @@ -418,9 +423,9 @@ def delete_existing_invitation( try: if 200 <= _response.status_code < 300: return typing.cast( - typing.Optional[typing.Any], + DeleteWorkspaceInviteResponseModel, construct_type( - type_=typing.Optional[typing.Any], # type: ignore + type_=DeleteWorkspaceInviteResponseModel, # type: ignore object_=_response.json(), ), ) @@ -446,7 +451,7 @@ def update_member( is_locked: typing.Optional[bool] = OMIT, workspace_role: typing.Optional[BodyUpdateMemberV1WorkspaceMembersPostWorkspaceRole] = OMIT, request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[typing.Any]: + ) -> UpdateWorkspaceMemberResponseModel: """ Updates attributes of a workspace member. Apart from the email identifier, all parameters will remain unchanged unless specified. This endpoint may only be called by workspace administrators. 
@@ -466,7 +471,7 @@ def update_member( Returns ------- - typing.Optional[typing.Any] + UpdateWorkspaceMemberResponseModel Successful Response Examples @@ -497,9 +502,9 @@ def update_member( try: if 200 <= _response.status_code < 300: return typing.cast( - typing.Optional[typing.Any], + UpdateWorkspaceMemberResponseModel, construct_type( - type_=typing.Optional[typing.Any], # type: ignore + type_=UpdateWorkspaceMemberResponseModel, # type: ignore object_=_response.json(), ), ) @@ -595,7 +600,7 @@ async def main() -> None: async def delete_member_from_user_group( self, group_id: str, *, email: str, request_options: typing.Optional[RequestOptions] = None - ) -> typing.Optional[typing.Any]: + ) -> DeleteWorkspaceGroupMemberResponseModel: """ Removes a member from the specified group. This endpoint may only be called by workspace administrators. @@ -612,7 +617,7 @@ async def delete_member_from_user_group( Returns ------- - typing.Optional[typing.Any] + DeleteWorkspaceGroupMemberResponseModel Successful Response Examples @@ -650,9 +655,9 @@ async def main() -> None: try: if 200 <= _response.status_code < 300: return typing.cast( - typing.Optional[typing.Any], + DeleteWorkspaceGroupMemberResponseModel, construct_type( - type_=typing.Optional[typing.Any], # type: ignore + type_=DeleteWorkspaceGroupMemberResponseModel, # type: ignore object_=_response.json(), ), ) @@ -673,7 +678,7 @@ async def main() -> None: async def add_member_to_user_group( self, group_id: str, *, email: str, request_options: typing.Optional[RequestOptions] = None - ) -> typing.Optional[typing.Any]: + ) -> AddWorkspaceGroupMemberResponseModel: """ Adds a member of your workspace to the specified group. This endpoint may only be called by workspace administrators. @@ -690,7 +695,7 @@ async def add_member_to_user_group( Returns ------- - typing.Optional[typing.Any] + AddWorkspaceGroupMemberResponseModel Successful Response Examples @@ -728,9 +733,9 @@ async def main() -> None: try: if 200 <= _response.status_code < 300: return typing.cast( - typing.Optional[typing.Any], + AddWorkspaceGroupMemberResponseModel, construct_type( - type_=typing.Optional[typing.Any], # type: ignore + type_=AddWorkspaceGroupMemberResponseModel, # type: ignore object_=_response.json(), ), ) @@ -755,7 +760,7 @@ async def invite_user( email: str, group_ids: typing.Optional[typing.Sequence[str]] = OMIT, request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[typing.Any]: + ) -> AddWorkspaceInviteResponseModel: """ Sends an email invitation to join your workspace to the provided email. If the user doesn't have an account they will be prompted to create one. If the user accepts this invite they will be added as a user to your workspace and your subscription using one of your seats. This endpoint may only be called by workspace administrators. If the user is already in the workspace a 400 error will be returned. 
@@ -772,7 +777,7 @@ async def invite_user( Returns ------- - typing.Optional[typing.Any] + AddWorkspaceInviteResponseModel Successful Response Examples @@ -810,9 +815,9 @@ async def main() -> None: try: if 200 <= _response.status_code < 300: return typing.cast( - typing.Optional[typing.Any], + AddWorkspaceInviteResponseModel, construct_type( - type_=typing.Optional[typing.Any], # type: ignore + type_=AddWorkspaceInviteResponseModel, # type: ignore object_=_response.json(), ), ) @@ -837,7 +842,7 @@ async def invite_multiple_users( emails: typing.Sequence[str], group_ids: typing.Optional[typing.Sequence[str]] = OMIT, request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[typing.Any]: + ) -> AddWorkspaceInviteResponseModel: """ Sends email invitations to join your workspace to the provided emails. Requires all email addresses to be part of a verified domain. If the users don't have an account they will be prompted to create one. If the users accept these invites they will be added as users to your workspace and your subscription using one of your seats. This endpoint may only be called by workspace administrators. @@ -854,7 +859,7 @@ async def invite_multiple_users( Returns ------- - typing.Optional[typing.Any] + AddWorkspaceInviteResponseModel Successful Response Examples @@ -892,9 +897,9 @@ async def main() -> None: try: if 200 <= _response.status_code < 300: return typing.cast( - typing.Optional[typing.Any], + AddWorkspaceInviteResponseModel, construct_type( - type_=typing.Optional[typing.Any], # type: ignore + type_=AddWorkspaceInviteResponseModel, # type: ignore object_=_response.json(), ), ) @@ -915,7 +920,7 @@ async def main() -> None: async def delete_existing_invitation( self, *, email: str, request_options: typing.Optional[RequestOptions] = None - ) -> typing.Optional[typing.Any]: + ) -> DeleteWorkspaceInviteResponseModel: """ Invalidates an existing email invitation. The invitation will still show up in the inbox it has been delivered to, but activating it to join the workspace won't work. This endpoint may only be called by workspace administrators. @@ -929,7 +934,7 @@ async def delete_existing_invitation( Returns ------- - typing.Optional[typing.Any] + DeleteWorkspaceInviteResponseModel Successful Response Examples @@ -966,9 +971,9 @@ async def main() -> None: try: if 200 <= _response.status_code < 300: return typing.cast( - typing.Optional[typing.Any], + DeleteWorkspaceInviteResponseModel, construct_type( - type_=typing.Optional[typing.Any], # type: ignore + type_=DeleteWorkspaceInviteResponseModel, # type: ignore object_=_response.json(), ), ) @@ -994,7 +999,7 @@ async def update_member( is_locked: typing.Optional[bool] = OMIT, workspace_role: typing.Optional[BodyUpdateMemberV1WorkspaceMembersPostWorkspaceRole] = OMIT, request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[typing.Any]: + ) -> UpdateWorkspaceMemberResponseModel: """ Updates attributes of a workspace member. Apart from the email identifier, all parameters will remain unchanged unless specified. This endpoint may only be called by workspace administrators. 
@@ -1014,7 +1019,7 @@ async def update_member( Returns ------- - typing.Optional[typing.Any] + UpdateWorkspaceMemberResponseModel Successful Response Examples @@ -1053,9 +1058,9 @@ async def main() -> None: try: if 200 <= _response.status_code < 300: return typing.cast( - typing.Optional[typing.Any], + UpdateWorkspaceMemberResponseModel, construct_type( - type_=typing.Optional[typing.Any], # type: ignore + type_=UpdateWorkspaceMemberResponseModel, # type: ignore object_=_response.json(), ), )
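A note on the voices client changes above: `delete`, `edit_settings`, and `edit` now return concrete response models (`DeleteVoiceResponseModel`, `EditVoiceSettingsResponseModel`, `EditVoiceResponseModel`) instead of `typing.Optional[typing.Any]`, so callers get a typed Pydantic model back rather than an untyped value. A minimal sketch of a call site, assuming a valid API key; `VOICE_ID` is a placeholder, and the `stability`/`similarity_boost` fields come from the SDK's existing `VoiceSettings` model rather than from this diff:

```python
from elevenlabs import ElevenLabs, VoiceSettings

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)

# edit_settings now returns an EditVoiceSettingsResponseModel instead of
# typing.Optional[typing.Any]. stability maps to the "Stability" slider and
# similarity_boost to "Clarity + Similarity Enhancement" in the web app.
settings_response = client.voices.edit_settings(
    voice_id="VOICE_ID",
    request=VoiceSettings(
        stability=0.5,
        similarity_boost=0.75,
    ),
)

# delete likewise returns a DeleteVoiceResponseModel.
delete_response = client.voices.delete(
    voice_id="VOICE_ID",
)
```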
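The `category` filter on `get_shared` is narrowed from a bare `str` to the new `VoicesGetSharedRequestCategory` alias, i.e. the literals `"professional"`, `"famous"`, and `"high_quality"` (with a `typing.Any` escape hatch, per the Fern convention visible above). A short sketch of the filtered call:

```python
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)

# category now accepts the literals "professional", "famous", and
# "high_quality"; other strings still pass at runtime via the typing.Any arm,
# but type checkers and editors can now suggest and verify the known values.
shared = client.voices.get_shared(
    page_size=10,
    category="professional",
)
```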
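The workspace client receives the same typing pass: group-membership, invite, and member-update endpoints now return dedicated response models rather than `typing.Optional[typing.Any]`. A minimal sketch, assuming the caller is a workspace administrator; the email address is a placeholder, and the `email` keyword on `update_member` is inferred from the docstring's "email identifier" wording rather than shown directly in this diff:

```python
from elevenlabs import ElevenLabs

client = ElevenLabs(
    api_key="YOUR_API_KEY",
)

# invite_user now returns an AddWorkspaceInviteResponseModel.
invite = client.workspace.invite_user(
    email="new.member@example.com",
)

# update_member now returns an UpdateWorkspaceMemberResponseModel. Attributes
# other than the email identifier stay unchanged unless explicitly passed.
updated = client.workspace.update_member(
    email="new.member@example.com",
    is_locked=False,
)
```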