JavaScript SDK Quickstart

Get up and running with the VerbalisAI JavaScript SDK in just a few minutes. Works with Node.js, React, Next.js, and other JavaScript environments.

Installation

Install the SDK using npm or yarn:

npm install @verbalisai/sdk
yarn add @verbalisai/sdk

Authentication

Get your API key from the VerbalisAI Dashboard and set it as an environment variable:

export VERBALISAI_API_KEY="your-api-key-here"

Basic Usage

Initialize the Client

import { VerbalisAI } from '@verbalisai/sdk';

// Initialize client (automatically uses VERBALISAI_API_KEY env var)
const client = new VerbalisAI();

// Or pass the API key directly. Use ONE of the two forms — `client`
// cannot be declared twice in the same scope:
// const client = new VerbalisAI({
//   apiKey: 'your-api-key'
// });

Create Your First Transcription

// Transcribe a remote audio file and print the text and duration.
async function createTranscription() {
  try {
    // Transcribe audio from a publicly reachable URL
    const result = await client.transcriptions.create({
      audioUrl: 'https://example.com/audio.mp3',
      model: 'mini',  // Options: nano, mini, pro
      language: 'auto'  // Auto-detect language
    });

    console.log('Transcription:', result.text);
    console.log(`Duration: ${result.duration} seconds`);
  } catch (error) {
    console.error('Error:', error.message);
  }
}

createTranscription();

Advanced Transcription Features

// Demonstrates diarization, topic extraction, summarization, entity
// detection, PII redaction, and word-level timestamps in one request.
async function advancedTranscription() {
  try {
    const result = await client.transcriptions.create({
      audioUrl: 'https://example.com/meeting.mp3',
      model: 'pro',
      language: 'en',

      // Advanced features
      diarize: true,  // Identify different speakers
      topics: true,   // Extract topics
      summarization: true,  // Generate summary
      summaryType: 'bullets',  // bullets, paragraphs, markdown

      // Entity detection
      entityDetection: true,
      entityTypes: ['person', 'location', 'organization'],

      // PII redaction
      redactPii: true,
      redactPiiPolicies: ['person', 'email', 'phoneNumber'],
      redactPiiSub: 'hash',  // hash, mask, remove

      // Timestamps
      timestampStyle: 'word'  // word, segment
    });

    console.log('Full transcription:', result.text);
    console.log('Topics:', result.topics);
    console.log('Summary:', result.summary.text);

    // Print each segment with its speaker label and start time
    for (const segment of result.segments) {
      const speaker = segment.speakerId ? `Speaker ${segment.speakerId}` : 'Unknown';
      console.log(`${speaker} (${segment.start.toFixed(1)}s): ${segment.text}`);
    }
  } catch (error) {
    console.error('Error:', error.message);
  }
}

advancedTranscription();

Working with Files

Upload Local Audio File (Node.js)

import fs from 'fs';

// Upload a local audio file to VerbalisAI storage, then transcribe it
// by its storage URL.
async function uploadAndTranscribe() {
  try {
    // Stream the local file up to VerbalisAI storage
    const upload = await client.files.upload({
      file: fs.createReadStream('local_audio.mp3'),
      filename: 'local_audio.mp3'
    });

    console.log(`File uploaded: ${upload.url}`);

    // Transcribe the freshly uploaded file
    const result = await client.transcriptions.create({
      audioUrl: upload.url,
      model: 'mini'
    });

    console.log('Transcription:', result.text);
  } catch (error) {
    console.error('Error:', error.message);
  }
}

uploadAndTranscribe();

Upload from Browser

// HTML: <input type="file" id="audioFile" accept="audio/*">

// Read the user-selected file from the file input, upload it to
// VerbalisAI storage, and transcribe it.
async function uploadFromBrowser() {
  const selected = document.getElementById('audioFile').files[0];

  // Bail out early when nothing was picked
  if (!selected) {
    console.log('No file selected');
    return;
  }

  try {
    // Browser File objects can be passed straight to the uploader
    const upload = await client.files.upload({
      file: selected,
      filename: selected.name
    });

    console.log(`File uploaded: ${upload.url}`);

    const result = await client.transcriptions.create({
      audioUrl: upload.url,
      model: 'mini'
    });

    console.log('Transcription:', result.text);
  } catch (error) {
    console.error('Error:', error.message);
  }
}

Get File Information

// Inspect account-level storage usage and the metadata of one stored file.
async function manageFiles() {
  try {
    // Account-wide storage quota and consumption
    const storage = await client.files.getStorageInfo();
    console.log(`Storage used: ${storage.usedBytes} bytes`);
    console.log(`Storage limit: ${storage.limitBytes} bytes`);

    // Metadata for a single stored file
    const info = await client.files.getFileInfo('path/to/file.mp3');
    console.log(`File size: ${info.sizeBytes} bytes`);
    console.log(`Upload date: ${info.createdAt}`);
  } catch (error) {
    console.error('Error:', error.message);
  }
}

manageFiles();

Managing Transcriptions

Retrieve Transcriptions

// Fetch one transcription by id, list all of them, and show the most recent.
async function manageTranscriptions() {
  try {
    // Look up a single transcription by its id
    const single = await client.transcriptions.get('transcription-id');
    console.log(`Status: ${single.status}`);

    // Full listing for the current user
    const all = await client.transcriptions.list();
    console.log(`Total transcriptions: ${all.length}`);

    // Most recent five, newest first
    const recent = await client.transcriptions.listRecent({ limit: 5 });
    for (const t of recent) {
      console.log(`${t.id}: ${t.status} - ${t.createdAt}`);
    }
  } catch (error) {
    console.error('Error:', error.message);
  }
}

manageTranscriptions();

Delete Transcription

// Permanently remove a transcription by its id.
async function cleanup() {
  try {
    await client.transcriptions.delete('transcription-id');
    console.log('Transcription deleted');
  } catch (error) {
    console.error('Error:', error.message);
  }
}

cleanup();

Error Handling

import { VerbalisAI, VerbalisAIError } from '@verbalisai/sdk';

// Shows how to tell SDK errors (which carry an HTTP status code) apart
// from unexpected runtime errors.
async function handleErrors() {
  const client = new VerbalisAI();

  try {
    await client.transcriptions.create({
      audioUrl: 'https://invalid-url.com/audio.mp3'
    });
  } catch (error) {
    if (error instanceof VerbalisAIError) {
      console.log(`API Error: ${error.message}`);
      console.log(`Status Code: ${error.statusCode}`);

      // Map well-known status codes to actionable hints
      const hints = {
        400: 'Bad request - check your parameters',
        401: 'Authentication failed - check your API key',
        429: 'Rate limit exceeded - please wait',
        500: 'Server error - please try again'
      };
      const hint = hints[error.statusCode];
      if (hint) {
        console.log(hint);
      }
    } else {
      console.log(`Unexpected error: ${error.message}`);
    }
  }
}

handleErrors();

Usage Analytics

// Print the month's credit usage plus a day-by-day breakdown for January.
async function trackUsage() {
  try {
    // High-level account totals
    const usage = await client.usage.getOverview();
    console.log(`Credits used this month: ${usage.creditsUsed}`);
    console.log(`Credits remaining: ${usage.creditsRemaining}`);
    console.log(`Total transcriptions: ${usage.totalTranscriptions}`);

    // Per-day breakdown for the requested date range
    const analytics = await client.usage.getAnalytics({
      startDate: '2024-01-01',
      endDate: '2024-01-31'
    });

    for (const day of analytics.dailyUsage) {
      console.log(`${day.date}: ${day.creditsUsed} credits, ${day.transcriptions} files`);
    }
  } catch (error) {
    console.error('Error:', error.message);
  }
}

trackUsage();

Framework Integration

React Hook

// hooks/useVerbalisAI.js
import { VerbalisAI } from '@verbalisai/sdk';
import { useState, useEffect } from 'react';

// React hook that constructs a VerbalisAI client on mount.
// Returns { client, loading }; client is null until the effect has run.
export function useVerbalisAI() {
  const [client, setClient] = useState(null);
  const [loading, setLoading] = useState(true);

  useEffect(() => {
    setClient(new VerbalisAI({
      apiKey: process.env.REACT_APP_VERBALISAI_API_KEY
    }));
    setLoading(false);
  }, []);

  return { client, loading };
}

// Component usage
import { useVerbalisAI } from './hooks/useVerbalisAI';

function TranscriptionComponent() {
  const { client, loading } = useVerbalisAI();
  const [transcription, setTranscription] = useState(null);

  const handleTranscribe = async (audioUrl) => {
    if (!client) return;
    
    try {
      const result = await client.transcriptions.create({
        audioUrl,
        model: 'mini'
      });
      setTranscription(result);
    } catch (error) {
      console.error('Transcription error:', error);
    }
  };

  if (loading) return <div>Loading...</div>;

  return (
    <div>
      <button onClick={() => handleTranscribe('https://example.com/audio.mp3')}>
        Transcribe Audio
      </button>
      {transcription && (
        <div>
          <h3>Transcription Result:</h3>
          <p>{transcription.text}</p>
        </div>
      )}
    </div>
  );
}

Next.js API Route

// pages/api/transcribe.js
import { VerbalisAI } from '@verbalisai/sdk';

const client = new VerbalisAI({
  apiKey: process.env.VERBALISAI_API_KEY
});

export default async function handler(req, res) {
  if (req.method !== 'POST') {
    return res.status(405).json({ error: 'Method not allowed' });
  }

  try {
    const { audioUrl, model = 'mini' } = req.body;
    
    const transcription = await client.transcriptions.create({
      audioUrl,
      model
    });
    
    res.status(200).json({ transcription });
  } catch (error) {
    console.error('Transcription error:', error);
    res.status(500).json({ error: error.message });
  }
}

Express.js Server

import express from 'express';
import { VerbalisAI } from '@verbalisai/sdk';

const app = express();

// One shared client for the whole process
const client = new VerbalisAI({
  apiKey: process.env.VERBALISAI_API_KEY
});

app.use(express.json());

// POST /transcribe { audioUrl, model? } -> { transcription }
app.post('/transcribe', async (req, res) => {
  const { audioUrl, model = 'mini' } = req.body;

  try {
    const transcription = await client.transcriptions.create({ audioUrl, model });
    res.json({ transcription });
  } catch (error) {
    console.error('Error:', error);
    res.status(500).json({ error: error.message });
  }
});

app.listen(3000, () => {
  console.log('Server running on port 3000');
});

Webhooks

Set up webhooks for long-running transcriptions:

// Start a long transcription without blocking; VerbalisAI will call the
// webhook URL (with the given auth header) when processing finishes.
async function webhookExample() {
  try {
    const job = await client.transcriptions.create({
      audioUrl: 'https://example.com/long-audio.mp3',
      model: 'pro',
      webhookUrl: 'https://yoursite.com/webhook-endpoint',
      webhookAuthHeaderName: 'Authorization',
      webhookAuthHeaderValue: 'Bearer your-secret',
      waitUntilComplete: false  // Don't wait, use webhook
    });

    console.log(`Transcription started: ${job.id}`);
    console.log('You\'ll receive a webhook when processing completes');
  } catch (error) {
    console.error('Error:', error.message);
  }
}

webhookExample();

Common Patterns

Batch Processing

// Transcribe a batch of audio URLs concurrently, returning only the
// transcriptions that succeeded. Failed URLs are logged and skipped.
async function processMultipleFiles(audioUrls) {
  // Kick off all requests at once; a failed request resolves to null
  // so one bad URL cannot reject the whole batch.
  const results = await Promise.all(
    audioUrls.map(async (url) => {
      try {
        return await client.transcriptions.create({
          audioUrl: url,
          model: 'mini'
        });
      } catch (error) {
        console.error(`Error processing ${url}:`, error.message);
        return null;
      }
    })
  );

  // Filter successful results
  const successful = results.filter(r => r !== null);
  console.log(`Successfully processed ${successful.length} out of ${audioUrls.length} files`);

  return successful;
}

// Usage
const urls = [
  'https://example.com/audio1.mp3',
  'https://example.com/audio2.mp3',
  'https://example.com/audio3.mp3'
];

processMultipleFiles(urls);

Polling for Completion

// Poll a transcription every 5 seconds until it completes, fails, or the
// maxWait budget (milliseconds, default 5 minutes) is exhausted.
// Resolves with the completed transcription; throws on failure or timeout.
async function waitForCompletion(transcriptionId, maxWait = 300000) {
  const startTime = Date.now();

  while (true) {
    const transcription = await client.transcriptions.get(transcriptionId);

    if (transcription.status === 'completed') {
      return transcription;
    } else if (transcription.status === 'failed') {
      throw new Error(`Transcription failed: ${transcription.error}`);
    }

    // Give up once the time budget is exhausted
    if (Date.now() - startTime > maxWait) {
      throw new Error(`Transcription didn't complete within ${maxWait / 1000} seconds`);
    }

    // Wait 5 seconds before the next check
    await new Promise(resolve => setTimeout(resolve, 5000));
  }
}

// Usage
// Start a transcription without waiting, then poll until it finishes.
async function example() {
  try {
    // Kick off the job; the call returns immediately
    const job = await client.transcriptions.create({
      audioUrl: 'https://example.com/audio.mp3',
      waitUntilComplete: false
    });

    console.log(`Started transcription: ${job.id}`);

    // Block here until the poller reports completion
    const completed = await waitForCompletion(job.id);
    console.log('Transcription completed:', completed.text);
  } catch (error) {
    console.error('Error:', error.message);
  }
}

example();

TypeScript Support

The SDK includes full TypeScript definitions:

import { VerbalisAI, Transcription, TranscriptionOptions } from '@verbalisai/sdk';

// The non-null assertion (!) tells the compiler the env var is set;
// ensure VERBALISAI_API_KEY exists before running.
const client = new VerbalisAI({
  apiKey: process.env.VERBALISAI_API_KEY!
});

// Request options are fully typed — invalid keys or values are caught
// at compile time.
const options: TranscriptionOptions = {
  audioUrl: 'https://example.com/audio.mp3',
  model: 'mini',
  language: 'en',
  diarize: true,
  topics: true
};

// Top-level await requires an ES module context (ES2022+).
const transcription: Transcription = await client.transcriptions.create(options);

Next Steps

Now that you have the basics, you can explore the more advanced features of the SDK.

Ready to dive deeper? Check out the full transcription guide for comprehensive documentation on all available features and options.