Get started with the VerbalisAI Python SDK in minutes
Get up and running with the VerbalisAI Python SDK in just a few minutes.
Install the SDK using pip:
pip install verbalisai-sdk
Get your API key from the VerbalisAI Dashboard and set it as an environment variable:
export VERBALISAI_API_KEY="your-api-key-here"
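If you're on Windows, set the same variable in PowerShell instead:

$env:VERBALISAI_API_KEY = "your-api-key-here"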
With your API key set, you're ready to create your first transcription:

from verbalisai import VerbalisAI
import asyncio

# Initialize the client (automatically uses the VERBALISAI_API_KEY env var)
client = VerbalisAI()

# Or pass the API key directly
client = VerbalisAI(api_key="your-api-key")

async def main():
    # Transcribe audio from a URL
    transcription = await client.transcriptions.create(
        audio_url="https://example.com/audio.mp3",
        model="mini",    # Options: nano, mini, pro
        language="auto"  # Auto-detect the language
    )

    print("Transcription:")
    print(transcription.text)
    print(f"Duration: {transcription.duration} seconds")

# Run the async function
asyncio.run(main())
The same create() call accepts options for speaker diarization, topic extraction, summarization, entity detection, and PII redaction:

async def advanced_transcription():
    transcription = await client.transcriptions.create(
        audio_url="https://example.com/meeting.mp3",
        model="pro",
        language="en",
        # Advanced features
        diarize=True,            # Identify different speakers
        topics=True,             # Extract topics
        summarization=True,      # Generate a summary
        summary_type="bullets",  # bullets, paragraphs, markdown
        # Entity detection
        entity_detection=True,
        entity_types=["person", "location", "organization"],
        # PII redaction
        redact_pii=True,
        redact_pii_policies=["person", "email", "phone_number"],
        redact_pii_sub="hash",   # hash, mask, remove
        # Timestamps
        timestamp_style="word"   # word, segment
    )

    print("Full transcription:", transcription.text)
    print("Topics:", transcription.topics)
    print("Summary:", transcription.summary.text)

    # Print segments with speaker information
    for segment in transcription.segments:
        speaker = f"Speaker {segment.speaker_id}" if segment.speaker_id is not None else "Unknown"
        print(f"{speaker} ({segment.start:.1f}s): {segment.text}")

asyncio.run(advanced_transcription())
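If you want to keep the diarized output, here's a minimal sketch that writes the speaker-labeled segments to a plain text file, using only the segment fields shown above (the output path is just an example):

async def save_speaker_transcript(path="meeting_transcript.txt"):
    transcription = await client.transcriptions.create(
        audio_url="https://example.com/meeting.mp3",
        model="pro",
        diarize=True
    )

    # One line per segment: "Speaker X (12.3s): ..."
    lines = []
    for segment in transcription.segments:
        speaker = f"Speaker {segment.speaker_id}" if segment.speaker_id is not None else "Unknown"
        lines.append(f"{speaker} ({segment.start:.1f}s): {segment.text}")

    with open(path, "w", encoding="utf-8") as f:
        f.write("\n".join(lines))

asyncio.run(save_speaker_transcript())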
To transcribe a local file, upload it to VerbalisAI storage first and then pass the returned URL to the transcription call:

async def upload_and_transcribe():
    # Upload the file to VerbalisAI storage
    with open("local_audio.mp3", "rb") as audio_file:
        file_info = await client.files.upload(
            file=audio_file,
            filename="local_audio.mp3"
        )

    print(f"File uploaded: {file_info.url}")

    # Transcribe the uploaded file
    transcription = await client.transcriptions.create(
        audio_url=file_info.url,
        model="mini"
    )
    print("Transcription:", transcription.text)

asyncio.run(upload_and_transcribe())
The files API also lets you check your storage usage and inspect uploaded files:

async def manage_files():
    # Get storage info
    storage_info = await client.files.get_storage_info()
    print(f"Storage used: {storage_info.used_bytes} bytes")
    print(f"Storage limit: {storage_info.limit_bytes} bytes")

    # Get info for a specific file
    file_info = await client.files.get_file_info("path/to/file.mp3")
    print(f"File size: {file_info.size_bytes} bytes")
    print(f"Upload date: {file_info.created_at}")

asyncio.run(manage_files())
You can retrieve a single transcription by ID or list your existing transcriptions:

async def manage_transcriptions():
    # Get a specific transcription
    transcription = await client.transcriptions.get("transcription-id")
    print(f"Status: {transcription.status}")

    # Get all of your transcriptions
    transcriptions = await client.transcriptions.list()
    print(f"Total transcriptions: {len(transcriptions)}")

    # Get the most recent transcriptions
    recent = await client.transcriptions.list_recent(limit=5)
    for t in recent:
        print(f"{t.id}: {t.status} - {t.created_at}")

asyncio.run(manage_transcriptions())
Delete transcriptions you no longer need:

async def cleanup():
    # Delete a transcription
    await client.transcriptions.delete("transcription-id")
    print("Transcription deleted")

asyncio.run(cleanup())
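You can also combine list() and delete() for bulk cleanup. Here's a small sketch that removes every transcription whose status is "failed" (the status value matches the one used in the polling example further down):

async def delete_failed_transcriptions():
    transcriptions = await client.transcriptions.list()

    for t in transcriptions:
        # Only remove transcriptions that ended in a failed state
        if t.status == "failed":
            await client.transcriptions.delete(t.id)
            print(f"Deleted failed transcription {t.id}")

asyncio.run(delete_failed_transcriptions())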
Wrap API calls in a try/except block and catch VerbalisAIError to handle failures gracefully:

from verbalisai import VerbalisAI, VerbalisAIError

async def handle_errors():
    client = VerbalisAI()

    try:
        transcription = await client.transcriptions.create(
            audio_url="https://invalid-url.com/audio.mp3"
        )
    except VerbalisAIError as e:
        print(f"API Error: {e.message}")
        print(f"Status Code: {e.status_code}")

        # Handle specific errors
        if e.status_code == 400:
            print("Bad request - check your parameters")
        elif e.status_code == 401:
            print("Authentication failed - check your API key")
        elif e.status_code == 429:
            print("Rate limit exceeded - please wait")
        elif e.status_code == 500:
            print("Server error - please try again")
    except Exception as e:
        print(f"Unexpected error: {e}")

asyncio.run(handle_errors())
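For transient failures such as rate limits, it often helps to retry with exponential backoff. Here's a minimal sketch built on the same VerbalisAIError fields used above (the retry count and which status codes to retry are up to you):

async def create_with_retry(audio_url, max_retries=3):
    client = VerbalisAI()

    for attempt in range(max_retries):
        try:
            return await client.transcriptions.create(audio_url=audio_url, model="mini")
        except VerbalisAIError as e:
            # Retry rate limits and server errors; re-raise everything else
            if e.status_code in (429, 500) and attempt < max_retries - 1:
                delay = 2 ** attempt  # 1s, 2s, 4s, ...
                print(f"Request failed ({e.status_code}), retrying in {delay}s...")
                await asyncio.sleep(delay)
            else:
                raise

asyncio.run(create_with_retry("https://example.com/audio.mp3"))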
Track your credit usage and pull detailed analytics for a date range:

async def track_usage():
    # Get a usage overview
    usage = await client.usage.get_overview()
    print(f"Credits used this month: {usage.credits_used}")
    print(f"Credits remaining: {usage.credits_remaining}")
    print(f"Total transcriptions: {usage.total_transcriptions}")

    # Get detailed analytics
    analytics = await client.usage.get_analytics(
        start_date="2024-01-01",
        end_date="2024-01-31"
    )
    for day in analytics.daily_usage:
        print(f"{day.date}: {day.credits_used} credits, {day.transcriptions} files")

asyncio.run(track_usage())
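If you need that data outside of Python, here's a small sketch that writes the same daily figures to a CSV file with the standard library (the filename is just an example):

import csv

async def export_usage_csv(path="usage_2024_01.csv"):
    analytics = await client.usage.get_analytics(
        start_date="2024-01-01",
        end_date="2024-01-31"
    )

    with open(path, "w", newline="") as f:
        writer = csv.writer(f)
        writer.writerow(["date", "credits_used", "transcriptions"])
        for day in analytics.daily_usage:
            writer.writerow([day.date, day.credits_used, day.transcriptions])

asyncio.run(export_usage_csv())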
For better resource management, use the async context manager:
async def context_manager_example():
    async with VerbalisAI() as client:
        transcription = await client.transcriptions.create(
            audio_url="https://example.com/audio.mp3"
        )
        print(transcription.text)
    # Client automatically cleaned up

asyncio.run(context_manager_example())
Set up webhooks for long-running transcriptions:
async def webhook_example():
    # Start a transcription with a webhook
    transcription = await client.transcriptions.create(
        audio_url="https://example.com/long-audio.mp3",
        model="pro",
        webhook_url="https://yoursite.com/webhook-endpoint",
        webhook_auth_header_name="Authorization",
        webhook_auth_header_value="Bearer your-secret",
        wait_until_complete=False  # Don't wait; use the webhook instead
    )

    print(f"Transcription started: {transcription.id}")
    print("You'll receive a webhook when processing completes")

asyncio.run(webhook_example())
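On the receiving side, verify the shared secret you configured above before trusting the request. Here's a minimal sketch using FastAPI (the payload shape is an assumption; check the webhook documentation for the exact fields):

from fastapi import FastAPI, Header, HTTPException, Request

app = FastAPI()

@app.post("/webhook-endpoint")
async def handle_webhook(request: Request, authorization: str = Header(None)):
    # Reject requests that don't carry the secret set in webhook_auth_header_value
    if authorization != "Bearer your-secret":
        raise HTTPException(status_code=401, detail="Invalid webhook credentials")

    payload = await request.json()
    # The exact payload fields are defined by VerbalisAI; typically you'd look up
    # the finished transcription with client.transcriptions.get(...) from here.
    print("Webhook received:", payload)
    return {"ok": True}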
Now that you have the basics, explore more advanced patterns. For example, you can transcribe several files concurrently with asyncio:

import asyncio
from verbalisai import VerbalisAI

async def process_multiple_files(audio_urls):
    client = VerbalisAI()

    async def transcribe_single(url):
        try:
            return await client.transcriptions.create(
                audio_url=url,
                model="mini"
            )
        except Exception as e:
            print(f"Error processing {url}: {e}")
            return None

    # Process files concurrently
    tasks = [transcribe_single(url) for url in audio_urls]
    results = await asyncio.gather(*tasks)

    # Keep only the successful results
    successful = [r for r in results if r is not None]
    print(f"Successfully processed {len(successful)} out of {len(audio_urls)} files")
    return successful

# Usage
urls = [
    "https://example.com/audio1.mp3",
    "https://example.com/audio2.mp3",
    "https://example.com/audio3.mp3"
]
asyncio.run(process_multiple_files(urls))
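If you have many files, you may want to cap how many requests run at once. Here's a variation of the example above using asyncio.Semaphore (the limit of 5 is arbitrary):

async def process_with_limit(audio_urls, max_concurrent=5):
    client = VerbalisAI()
    semaphore = asyncio.Semaphore(max_concurrent)

    async def transcribe_single(url):
        async with semaphore:  # At most max_concurrent requests in flight
            try:
                return await client.transcriptions.create(audio_url=url, model="mini")
            except Exception as e:
                print(f"Error processing {url}: {e}")
                return None

    results = await asyncio.gather(*(transcribe_single(url) for url in audio_urls))
    return [r for r in results if r is not None]

asyncio.run(process_with_limit(urls))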
If you start a transcription without waiting for it to finish, you can poll for completion yourself:

import asyncio
from verbalisai import VerbalisAI

async def wait_for_completion(transcription_id, max_wait=300):
    """Poll for transcription completion with a timeout."""
    client = VerbalisAI()
    start_time = asyncio.get_running_loop().time()

    while True:
        transcription = await client.transcriptions.get(transcription_id)

        if transcription.status == "completed":
            return transcription
        elif transcription.status == "failed":
            raise Exception(f"Transcription failed: {transcription.error}")

        # Check the timeout
        elapsed = asyncio.get_running_loop().time() - start_time
        if elapsed > max_wait:
            raise TimeoutError(f"Transcription didn't complete within {max_wait} seconds")

        # Wait before the next check
        await asyncio.sleep(5)

# Usage
async def example():
    client = VerbalisAI()

    # Start the transcription without waiting
    transcription = await client.transcriptions.create(
        audio_url="https://example.com/audio.mp3",
        wait_until_complete=False
    )
    print(f"Started transcription: {transcription.id}")

    # Wait for completion
    completed = await wait_for_completion(transcription.id)
    print("Transcription completed:", completed.text)

asyncio.run(example())
Ready to dive deeper? Check out the full transcription guide for comprehensive documentation on all available features and options.