TranscriptOptionalParams: {
    audio_end_at?: number;
    audio_start_from?: number;
    auto_chapters?: boolean;
    auto_highlights?: boolean;
    boost_param?: TranscriptBoostParam;
    content_safety?: boolean;
    content_safety_confidence?: number;
    custom_spelling?: TranscriptCustomSpelling[];
    custom_topics?: boolean;
    disfluencies?: boolean;
    dual_channel?: boolean;
    entity_detection?: boolean;
    filter_profanity?: boolean;
    format_text?: boolean;
    iab_categories?: boolean;
    language_code?: LiteralUnion<TranscriptLanguageCode, string> | null;
    language_detection?: boolean;
    punctuate?: boolean;
    redact_pii?: boolean;
    redact_pii_audio?: boolean;
    redact_pii_audio_quality?: RedactPiiAudioQuality;
    redact_pii_policies?: PiiPolicy[];
    redact_pii_sub?: SubstitutionPolicy | null;
    sentiment_analysis?: boolean;
    speaker_labels?: boolean;
    speakers_expected?: number | null;
    speech_model?: SpeechModel | null;
    speech_threshold?: number | null;
    summarization?: boolean;
    summary_model?: SummaryModel;
    summary_type?: SummaryType;
    topics?: string[];
    webhook_auth_header_name?: string | null;
    webhook_auth_header_value?: string | null;
    webhook_url?: string;
    word_boost?: string[];
}

The parameters for creating a transcript

Type declaration

  • Optional audio_end_at?: number

    The point in time, in milliseconds, to stop transcribing in your media file

  • Optional audio_start_from?: number

    The point in time, in milliseconds, to begin transcribing in your media file

  • Optional auto_chapters?: boolean

    Enable Auto Chapters, can be true or false

  • Optional auto_highlights?: boolean

    Enable Key Phrases, either true or false

  • Optional boost_param?: TranscriptBoostParam

    The word boost parameter value

  • Optional content_safety?: boolean

    Enable Content Moderation, can be true or false

  • Optional content_safety_confidence?: number

    The confidence threshold for the Content Moderation model. Values must be between 25 and 100.

  • Optional custom_spelling?: TranscriptCustomSpelling[]

    Customize how words are spelled and formatted using to and from values (see the sketch after this list)

  • Optional custom_topics?: boolean

    Enable custom topics, either true or false

  • Optional disfluencies?: boolean

    Transcribe Filler Words, like "umm", in your media file; can be true or false

  • Optional dual_channel?: boolean

    Enable Dual Channel transcription, can be true or false.

  • Optional entity_detection?: boolean

    Enable Entity Detection, can be true or false

  • Optional filter_profanity?: boolean

    Filter profanity from the transcribed text, can be true or false

  • Optional format_text?: boolean

    Enable Text Formatting, can be true or false

  • Optional iab_categories?: boolean

    Enable Topic Detection, can be true or false

  • Optional language_code?: LiteralUnion<TranscriptLanguageCode, string> | null

    The language of your audio file. Possible values are found in Supported Languages. The default value is 'en_us'.

  • Optional language_detection?: boolean

    Enable Automatic language detection, either true or false.

  • Optional punctuate?: boolean

    Enable Automatic Punctuation, can be true or false

  • Optional redact_pii?: boolean

    Redact PII from the transcribed text using the Redact PII model, can be true or false

  • Optional redact_pii_audio?: boolean

    Generate a copy of the original media file with spoken PII "beeped" out, can be true or false. See PII redaction for more details.

  • Optional redact_pii_audio_quality?: RedactPiiAudioQuality

    Controls the filetype of the audio created by redact_pii_audio. Currently supports mp3 (default) and wav. See PII redaction for more details.

    Default: "mp3"
    
  • Optional redact_pii_policies?: PiiPolicy[]

    The list of PII Redaction policies to enable. See PII redaction for more details.

  • Optional redact_pii_sub?: SubstitutionPolicy | null

    The replacement logic for detected PII, can be "entity_type" or "hash". See PII redaction for more details.

  • Optional sentiment_analysis?: boolean

    Enable Sentiment Analysis, can be true or false

  • Optional speaker_labels?: boolean

    Enable Speaker diarization, can be true or false

  • Optional speakers_expected?: number | null

    Tells the speaker label model how many speakers it should attempt to identify, up to 10. See Speaker diarization for more details.

    Default: null
    
  • Optional speech_model?: SpeechModel | null

    The speech model to use for the transcription. When null, the default model is used.

    Default: null
    
  • Optional speech_threshold?: number | null

    Reject audio files that contain less than this fraction of speech. Valid values are in the range [0, 1] inclusive.

    Default: null
    
  • Optional summarization?: boolean

    Enable Summarization, can be true or false

  • Optional summary_model?: SummaryModel

    The model to summarize the transcript

    Default: "informative"
    
  • Optional summary_type?: SummaryType

    The type of summary

    Default: "bullets"
    
  • Optional topics?: string[]

    The list of custom topics

  • Optional webhook_auth_header_name?: string | null

    The header name to be sent with the transcript completed or failed webhook requests

    Default: null
    
  • Optional webhook_auth_header_value?: string | null

    The header value to send back with the transcript completed or failed webhook requests for added security

    Default: null
    
  • Optional webhook_url?: string

    The URL to which we send webhook requests. We send two different types of webhook requests: one when a transcript is completed or failed, and one when the redacted audio is ready if redact_pii_audio is enabled.

  • Optional word_boost?: string[]

    The list of custom vocabulary to boost transcription probability for
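
Most of these fields are plain booleans, numbers, or strings, but a few take structured values. The sketch below shows how those structured fields might be filled in with TypeScript; it assumes TranscriptOptionalParams is exported from the assemblyai package root, and the specific spellings, policies, and boost words are illustrative only.

import type { TranscriptOptionalParams } from "assemblyai";

// Illustrative values only; adjust to your own vocabulary and redaction needs.
const params: TranscriptOptionalParams = {
    // Boost recognition of domain-specific vocabulary.
    word_boost: ["aws", "azure", "google cloud"],
    boost_param: "high",

    // Map misrecognized phrases ("from") to the desired spelling ("to").
    custom_spelling: [{ from: ["assembly ai"], to: "AssemblyAI" }],

    // Redact selected PII categories and produce a "beeped" copy of the audio.
    redact_pii: true,
    redact_pii_audio: true,
    redact_pii_policies: ["us_social_security_number", "credit_card_number"],
    redact_pii_sub: "hash",
};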

Example

{
    "speech_model": null,
    "language_code": "en_us",
    "punctuate": true,
    "format_text": true,
    "dual_channel": true,
    "webhook_url": "https://your-webhook-url.tld/path",
    "webhook_auth_header_name": "webhook-secret",
    "webhook_auth_header_value": "webhook-secret-value",
    "auto_highlights": true,
    "audio_start_from": 10,
    "audio_end_at": 280,
    "word_boost": ["aws", "azure", "google cloud"],
    "boost_param": "high",
    "filter_profanity": true,
    "redact_pii": true,
    "redact_pii_audio": true,
    "redact_pii_audio_quality": "mp3",
    "redact_pii_policies": ["us_social_security_number", "credit_card_number"],
    "redact_pii_sub": "hash",
    "speaker_labels": true,
    "speakers_expected": 2,
    "content_safety": true,
    "iab_categories": true,
    "language_detection": false,
    "custom_spelling": [],
    "disfluencies": false,
    "sentiment_analysis": true,
    "auto_chapters": true,
    "entity_detection": true,
    "speech_threshold": 0.5,
    "summarization": true,
    "summary_model": "informative",
    "summary_type": "bullets",
    "custom_topics": true,
    "topics": []
}
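
When using the JavaScript/TypeScript SDK, these options are passed together with an audio source when submitting a transcript. A minimal sketch, assuming the assemblyai npm package and its client.transcripts.transcribe method, which submits the audio and polls until the transcript completes; the API key, audio URL, and chosen options are placeholders:

import { AssemblyAI } from "assemblyai";

const client = new AssemblyAI({ apiKey: "<YOUR_API_KEY>" });

async function run() {
    // Submit the audio and wait for completion, enabling a few of the
    // optional parameters documented above.
    const transcript = await client.transcripts.transcribe({
        audio: "https://example.com/audio.mp3", // placeholder audio URL
        speaker_labels: true,
        speakers_expected: 2,
        auto_highlights: true,
        webhook_url: "https://your-webhook-url.tld/path",
    });

    console.log(transcript.status, transcript.text);
}

run().catch(console.error);

If you would rather not block while polling, the SDK also exposes a submit-style call on the same transcripts service that returns as soon as the job is queued; webhook_url can then be used to receive the completion notification.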