Advanced configuration options for the VerbalisAI Python SDK
Configure timeouts, retries, logging, HTTP transport, environment variables, and per-environment profiles for the VerbalisAI Python SDK.
from verbalisai import VerbalisAI

# Minimal setup: an API key plus a few conservative request defaults.
basic_settings = {
    "api_key": "your-api-key",
    "base_url": "https://api.verbalisai.com/api",
    "timeout": 30.0,
    "max_retries": 3,
}
client = VerbalisAI(**basic_settings)
from verbalisai import VerbalisAI
import httpx

# Fine-grained client setup: shared HTTP transport, retry policy, logging,
# and per-call defaults.
pooled_http = httpx.AsyncClient(
    limits=httpx.Limits(
        max_keepalive_connections=20,
        max_connections=100
    ),
    headers={
        "User-Agent": "MyApp/1.0 VerbalisAI-Python"
    }
)

client = VerbalisAI(
    api_key="your-api-key",
    base_url="https://api.verbalisai.com/api",
    # How long to wait per request and how persistently to retry failures.
    timeout=60.0,
    max_retries=5,
    retry_delay=1.0,
    # Reuse the pooled httpx transport configured above.
    http_client=pooled_http,
    # SDK logging: record requests but never response bodies (privacy).
    enable_logging=True,
    log_level="INFO",
    log_requests=True,
    log_responses=False,
    # Fallback transcription options used when a call omits them.
    default_model="mini",
    default_language="auto",
    # Chunked uploads: 8 MB pieces with a 5-minute overall budget.
    upload_chunk_size=8 * 1024 * 1024,
    upload_timeout=300.0
)
import os
from verbalisai import VerbalisAI

# A bare constructor call works because the client reads its settings from
# the environment when no explicit arguments are given.
client = VerbalisAI()

# Recognized environment variables:
#   VERBALISAI_API_KEY     - Your API key
#   VERBALISAI_BASE_URL    - API base URL
#   VERBALISAI_TIMEOUT     - Request timeout in seconds
#   VERBALISAI_MAX_RETRIES - Maximum retry attempts
#   VERBALISAI_LOG_LEVEL   - Logging level (DEBUG, INFO, WARNING, ERROR)
# .env file
# Credentials and connection settings read by the VerbalisAI SDK on startup.
VERBALISAI_API_KEY=sk-your-api-key-here
VERBALISAI_BASE_URL=https://api.verbalisai.com/api
# Request behaviour: timeout in seconds and maximum retry attempts.
VERBALISAI_TIMEOUT=60
VERBALISAI_MAX_RETRIES=5
# SDK logging controls.
VERBALISAI_LOG_LEVEL=INFO
VERBALISAI_ENABLE_LOGGING=true
# Transcription and upload defaults (chunk size is in bytes).
VERBALISAI_DEFAULT_MODEL=pro
VERBALISAI_UPLOAD_CHUNK_SIZE=10485760 # 10MB
import os
from dotenv import load_dotenv
from verbalisai import VerbalisAI

# Pull settings from a local .env file into the process environment.
load_dotenv()

# The SDK now picks the values up on its own...
client = VerbalisAI()

# ...or read specific variables yourself and pass them through explicitly.
client = VerbalisAI(
    api_key=os.getenv("CUSTOM_API_KEY"),
    base_url=os.getenv("CUSTOM_BASE_URL", "https://api.verbalisai.com/api")
)
import json
from verbalisai import VerbalisAI
# Load configuration from JSON file
def load_config_from_json(config_path):
with open(config_path, 'r') as f:
config = json.load(f)
return VerbalisAI(
api_key=config['api_key'],
base_url=config.get('base_url', 'https://api.verbalisai.com/api'),
timeout=config.get('timeout', 30.0),
max_retries=config.get('max_retries', 3),
default_model=config.get('default_model', 'mini')
)
# config.json
# Example file consumed by load_config_from_json(); keys map directly to
# VerbalisAI constructor arguments.
"""
{
"api_key": "your-api-key",
"base_url": "https://api.verbalisai.com/api",
"timeout": 60.0,
"max_retries": 5,
"default_model": "pro",
"enable_logging": true,
"log_level": "INFO"
}
"""
client = load_config_from_json("config.json")
import yaml
from verbalisai import VerbalisAI
# Load configuration from YAML file
def load_config_from_yaml(config_path):
    """Build a VerbalisAI client from the 'verbalisai' section of a YAML file."""
    with open(config_path, 'r') as handle:
        loaded = yaml.safe_load(handle)
    return VerbalisAI(**loaded['verbalisai'])
# config.yaml
# Example file consumed by load_config_from_yaml(). Only the 'verbalisai'
# mapping is passed to the client constructor (see the loader above); the
# 'defaults' section is shown for reference and is not read by that loader.
"""
verbalisai:
api_key: "your-api-key"
base_url: "https://api.verbalisai.com/api"
timeout: 60.0
max_retries: 5
default_model: "pro"
enable_logging: true
log_level: "INFO"
# Default transcription settings
defaults:
model: "pro"
language: "auto"
diarize: false
topics: true
summarization: false
"""
client = load_config_from_yaml("config.yaml")
import logging
from verbalisai import VerbalisAI

# Route log records through the standard logging module.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)

# Turn on the SDK's own logging at the matching level.
client = VerbalisAI(
    api_key="your-api-key",
    enable_logging=True,
    log_level="INFO"
)
import logging
from verbalisai import VerbalisAI
from verbalisai.logging import VerbalisAILogger
# Custom logging configuration
class CustomLogger(VerbalisAILogger):
    """Request/response logger with privacy-conscious, level-aware output."""

    def log_request(self, method, url, headers, body):
        # Always record the request line; body size only at DEBUG.
        self.logger.info(f"Request: {method} {url}")
        if body:
            self.logger.debug(f"Request body size: {len(str(body))} chars")

    def log_response(self, status_code, headers, body):
        # Escalate 4xx/5xx responses to the error level.
        if status_code < 400:
            self.logger.info(f"Response: {status_code}")
        else:
            self.logger.error(f"Error response: {status_code}")
# Use custom logger
# Route the SDK's request/response logging through CustomLogger; "my-app"
# becomes the logger name.
client = VerbalisAI(
    api_key="your-api-key",
    logger=CustomLogger(name="my-app")
)
from verbalisai import VerbalisAI
from verbalisai.retry import RetryConfig
import asyncio
# Custom retry configuration
# Exponential backoff (1s, 2s, 4s, ... capped at 60s) with jitter so many
# workers don't retry in lockstep.
retry_config = RetryConfig(
    base_delay=1.0,
    max_delay=60.0,
    backoff_factor=2.0,
    jitter=True,
    max_retries=5,
    # Retry only on throttling and transient server/network failures.
    retry_on_status_codes=[429, 500, 502, 503, 504],
    retry_on_exceptions=[asyncio.TimeoutError, ConnectionError]
)

client = VerbalisAI(
    api_key="your-api-key",
    retry_config=retry_config
)
import asyncio
from verbalisai import VerbalisAI
async def custom_retry_logic():
client = VerbalisAI(api_key="your-api-key")
max_retries = 5
base_delay = 1.0
for attempt in range(max_retries):
try:
result = await client.transcriptions.create(
audio_url="https://example.com/audio.mp3"
)
return result
except Exception as e:
if attempt == max_retries - 1:
raise # Last attempt, re-raise the exception
# Exponential backoff with jitter
delay = base_delay * (2 ** attempt) + (0.1 * attempt)
print(f"Retry {attempt + 1} in {delay:.1f} seconds...")
await asyncio.sleep(delay)
# Usage: module-level code cannot use `await` (SyntaxError), so drive the
# coroutine with asyncio.run() — or `await custom_retry_logic()` from
# inside other async code.
result = asyncio.run(custom_retry_logic())
import httpx
from verbalisai import VerbalisAI
# Custom HTTP client with specific settings
http_client = httpx.AsyncClient(
    # Connection pooling
    limits=httpx.Limits(
        max_keepalive_connections=20,
        max_connections=100,
        keepalive_expiry=30.0
    ),
    # Per-phase timeouts (seconds)
    timeout=httpx.Timeout(
        connect=10.0,
        read=30.0,
        write=10.0,
        pool=5.0
    ),
    # Headers sent with every request
    headers={
        "User-Agent": "MyApplication/1.0",
        "Accept-Encoding": "gzip, deflate",
        "Connection": "keep-alive"
    },
    # SSL/TLS settings
    verify=True,  # Verify SSL certificates
    # Proxy settings: the old `proxies={...}` argument was deprecated in
    # httpx 0.26 and removed in 0.28; mount a proxied transport per scheme
    # instead.
    mounts={
        "http://": httpx.AsyncHTTPTransport(proxy="http://proxy.example.com:8080"),
        "https://": httpx.AsyncHTTPTransport(proxy="https://proxy.example.com:8080")
    }
)

client = VerbalisAI(
    api_key="your-api-key",
    http_client=http_client
)
import httpx
import ssl
from verbalisai import VerbalisAI
# Build a strict SSL context with mutual-TLS credentials.
ssl_context = ssl.create_default_context()
ssl_context.check_hostname = True
ssl_context.verify_mode = ssl.CERT_REQUIRED

# Client certificate/key pair, plus the CA bundle used to verify the server.
ssl_context.load_cert_chain("client.crt", "client.key")
ssl_context.load_verify_locations("ca.crt")

http_client = httpx.AsyncClient(verify=ssl_context)

client = VerbalisAI(
    api_key="your-api-key",
    http_client=http_client
)
from verbalisai import VerbalisAI
from verbalisai.types import TranscriptionDefaults
# Set default transcription options
defaults = TranscriptionDefaults(
    model="pro",
    language="en",
    diarize=True,
    topics=True,
    summarization=True,
    summary_type="bullets",
    timestamp_style="word",
    redact_pii=False
)

client = VerbalisAI(
    api_key="your-api-key",
    transcription_defaults=defaults
)


# These defaults are used if not overridden per call. Module-level code
# cannot `await` (SyntaxError), so the call runs inside an async function
# driven by asyncio.run().
async def _transcribe_with_defaults():
    # model="pro", diarize=True, etc. applied automatically
    return await client.transcriptions.create(
        audio_url="https://example.com/audio.mp3"
    )


transcription = asyncio.run(_transcribe_with_defaults())
from verbalisai.types import FileUploadDefaults
# Defaults applied to every file upload unless a call overrides them.
upload_defaults = FileUploadDefaults(
    chunk_size=10 * 1024 * 1024,  # 10MB chunks
    timeout=300.0,  # 5 minutes
    auto_delete_days=30,
    public=False,
    # Uploads land in a year/month folder hierarchy.
    folder_template="uploads/{year}/{month}",
    # Tags stamped onto every new upload.
    default_tags=["uploaded", "pending-processing"]
)

client = VerbalisAI(
    api_key="your-api-key",
    upload_defaults=upload_defaults
)
from verbalisai import VerbalisAI
# Base URLs for each regional API deployment.
regions = {
    "us-east": "https://us-east.api.verbalisai.com/api",
    "eu-west": "https://eu-west.api.verbalisai.com/api",
    "asia-pacific": "https://ap.api.verbalisai.com/api"
}

# One client per region — pick whichever is closest to your users.
client_us = VerbalisAI(api_key="your-api-key", base_url=regions["us-east"])
client_eu = VerbalisAI(api_key="your-api-key", base_url=regions["eu-west"])
from verbalisai import VerbalisAI
# Point the client at a self-hosted gateway and remap selected routes.
custom_routes = {
    "transcription": "/custom/transcribe",
    "files": "/custom/files",
    "usage": "/custom/usage"
}
client = VerbalisAI(
    api_key="your-api-key",
    base_url="https://custom.api.endpoint.com/v1",
    endpoints=custom_routes
)
import httpx
from verbalisai import VerbalisAI
# Tuned for high request volume: a large keep-alive pool, HTTP/2, and
# timeouts biased toward long reads.
http_client = httpx.AsyncClient(
    limits=httpx.Limits(
        max_keepalive_connections=50,  # Keep many connections alive
        max_connections=200,  # Allow many concurrent connections
        keepalive_expiry=60.0  # Keep connections alive longer
    ),
    # NOTE(review): http2=True requires the optional 'h2' dependency
    # (pip install httpx[http2]) — confirm it is installed.
    http2=True,
    timeout=httpx.Timeout(
        connect=5.0,  # Fail fast on unreachable hosts
        read=120.0,  # Large responses may stream slowly
        write=30.0,  # Reasonable write timeout
        pool=2.0  # Don't wait long for a pooled connection
    )
)

client = VerbalisAI(
    api_key="your-api-key",
    http_client=http_client
)
from verbalisai import VerbalisAI
import asyncio
# Memory-efficient configuration for large file processing
client = VerbalisAI(
    api_key="your-api-key",
    # Stream large responses instead of buffering them whole.
    stream_responses=True,
    # Smaller upload chunks keep peak memory low.
    upload_chunk_size=5 * 1024 * 1024,
    # Cap in-flight requests so buffers don't pile up.
    max_concurrent_requests=10,
    # Periodically release resources from finished requests.
    auto_cleanup=True,
    cleanup_interval=300  # Clean up every 5 minutes
)
# Use context manager for automatic resource cleanup
async def process_large_batch():
    """Transcribe 100 files in batches of 10 so memory stays bounded.

    Coroutines are created per batch rather than all up front: if one batch
    raises, no already-created, never-awaited coroutines are left behind
    (which would trigger "coroutine was never awaited" warnings).
    """
    async with client:
        urls = [f"https://example.com/audio{i}.mp3" for i in range(100)]
        batch_size = 10
        for start in range(0, len(urls), batch_size):
            chunk = urls[start:start + batch_size]
            results = await asyncio.gather(
                *(client.transcriptions.create(audio_url=url) for url in chunk)
            )
            # Process results immediately to free memory
            for result in results:
                await process_transcription(result)
async def process_transcription(transcription):
    """Hook for persisting one finished transcription; a no-op in this example."""
    # Process and save transcription immediately
    pass
# Entry point: run the whole batch job.
asyncio.run(process_large_batch())
from verbalisai import VerbalisAI
from verbalisai.config import validate_config
import asyncio
async def validate_client_config():
    """Validate a candidate configuration with the SDK before building a client."""
    config = {
        "api_key": "your-api-key",
        "base_url": "https://api.verbalisai.com/api",
        "timeout": 30.0,
        "max_retries": 3
    }
    # Validate configuration before creating client
    validation_result = await validate_config(config)
    if not validation_result.is_valid:
        print(f"Configuration errors: {validation_result.errors}")
    else:
        client = VerbalisAI(**config)
        print("Configuration is valid")

asyncio.run(validate_client_config())
async def health_check():
    """Ping the API and report whether the current configuration works."""
    client = VerbalisAI(api_key="your-api-key")
    try:
        # Test basic connectivity
        health = await client.health_check()
        print(f"API Status: {health.status}")
        print(f"Response Time: {health.response_time_ms}ms")
        print(f"Region: {health.region}")
        print(f"Version: {health.api_version}")
        if health.status != "healthy":
            print("❌ API health check failed")
        else:
            print("✅ Client configuration is working correctly")
    except Exception as e:
        print(f"❌ Health check failed: {e}")

asyncio.run(health_check())
import os
from verbalisai import VerbalisAI
class ConfigurationManager:
    """Holds per-environment client settings and builds clients from them."""

    def __init__(self):
        # API keys come from the environment; everything else is static.
        dev = {
            "api_key": os.getenv("DEV_API_KEY"),
            "base_url": "https://dev-api.verbalisai.com/api",
            "timeout": 10.0,
            "max_retries": 1,
            "enable_logging": True,
            "log_level": "DEBUG",
        }
        staging = {
            "api_key": os.getenv("STAGING_API_KEY"),
            "base_url": "https://staging-api.verbalisai.com/api",
            "timeout": 30.0,
            "max_retries": 3,
            "enable_logging": True,
            "log_level": "INFO",
        }
        production = {
            "api_key": os.getenv("PROD_API_KEY"),
            "base_url": "https://api.verbalisai.com/api",
            "timeout": 60.0,
            "max_retries": 5,
            "enable_logging": False,
            "log_level": "ERROR",
        }
        self.profiles = {
            "development": dev,
            "staging": staging,
            "production": production,
        }

    def get_client(self, profile="production"):
        """Create a VerbalisAI client for *profile*; raise ValueError if unknown."""
        settings = self.profiles.get(profile)
        if not settings:
            raise ValueError(f"Unknown profile: {profile}")
        return VerbalisAI(**settings)
# Usage
config_manager = ConfigurationManager()
# Different clients for different environments
# Each call builds a fresh client from the named profile's settings.
dev_client = config_manager.get_client("development")
prod_client = config_manager.get_client("production")
from verbalisai import VerbalisAI
from verbalisai.exceptions import VerbalisAIError
class CustomErrorHandler:
    """Counts errors by type and decides whether a failed request should retry."""

    def __init__(self):
        # Running tally of errors keyed by exception class name.
        self.error_counts = {}

    async def handle_error(self, error: VerbalisAIError, context: dict):
        """Tally the error, then dispatch on its HTTP status code."""
        name = type(error).__name__
        self.error_counts[name] = self.error_counts.get(name, 0) + 1

        status = error.status_code
        if status == 429:
            # Rate limit exceeded
            return await self.handle_rate_limit(error, context)
        if status >= 500:
            # Server errors
            return await self.handle_server_error(error, context)
        # Client errors
        return await self.handle_client_error(error, context)

    async def handle_rate_limit(self, error, context):
        # Wait exactly as long as the server asked before retrying.
        print(f"Rate limit exceeded. Retry after: {error.retry_after}")
        return {"retry": True, "delay": error.retry_after}

    async def handle_server_error(self, error, context):
        # Log server errors for monitoring
        print(f"Server error: {error.status_code} - {error.message}")
        return {"retry": True, "delay": 5.0}

    async def handle_client_error(self, error, context):
        # 4xx means the request itself is wrong; retrying won't help.
        print(f"Client error: {error.status_code} - {error.message}")
        return {"retry": False}
# Use custom error handler
# Presumably the SDK calls handle_error() for each failed request and obeys
# the returned {"retry": ..., "delay": ...} decision — verify against the
# SDK's error-handling documentation.
error_handler = CustomErrorHandler()
client = VerbalisAI(
    api_key="your-api-key",
    error_handler=error_handler
)
Ready to see real-world examples? Check out the Examples guide for practical implementations and use cases with the Python SDK.