AI-Powered Video Processing
Complete examples for transcription, AI analysis, content moderation, and building searchable video libraries
Learn how to leverage AI capabilities for automatic video analysis, transcription, content tagging, and building intelligent video management systems. This guide provides production-ready examples for real-world applications.
Overview
AI-powered video processing enables:
- Vision Analysis - Automatic tagging, summaries, location detection
- Transcription - Speech-to-text conversion
- Content Moderation - Automatic screening and flagging
- Searchable Libraries - Build intelligent video search systems
- Combined Processing - Use AI alongside video processing
Basic AI Analysis
Start with simple vision analysis to tag and describe videos.
/**
 * Submit a video for AI vision analysis and block until results are ready.
 *
 * @param {string} videoUrl - URL of the source video to analyze.
 * @returns {Promise<{videoId: string, tags: string[], summary: string, location: string}>}
 * @throws {Error} on a non-2xx submission response, a failed analysis, or a poll timeout.
 */
async function analyzeVideo(videoUrl) {
  try {
    // Submit video with AI analysis enabled
    const response = await fetch('https://api.videocascade.com/v1/videos', {
      method: 'POST',
      headers: {
        'Authorization': 'Bearer vca_your_api_key',
        'Content-Type': 'application/json',
      },
      body: JSON.stringify({
        fileUrl: videoUrl,
        enableAiAnalysis: true,
      }),
    });
    if (!response.ok) {
      throw new Error(`HTTP error! status: ${response.status}`);
    }
    const data = await response.json();
    console.log('Analysis started:', data.videoId);
    // Wait for analysis to complete (polls the status endpoint)
    const result = await waitForAnalysisCompletion(data.videoId);
    // Flatten the nested analysis payload for callers
    return {
      videoId: data.videoId,
      tags: result.analysis.data.tags,
      summary: result.analysis.data.summary,
      location: result.analysis.data.estimatedLocation
    };
  } catch (error) {
    // Log and rethrow so callers can apply their own handling
    console.error('Error analyzing video:', error);
    throw error;
  }
}
/**
 * Poll the video resource until AI analysis completes (or fails / times out).
 *
 * @param {string} videoId - Id returned by the video submission call.
 * @param {number} [maxAttempts=60] - Poll attempts before giving up (5s apart, ~5 min total).
 * @returns {Promise<object>} the full video resource once analysis is 'completed'.
 * @throws {Error} on an HTTP error, a 'failed' analysis, or timeout.
 */
async function waitForAnalysisCompletion(videoId, maxAttempts = 60) {
  const pollInterval = 5000; // 5 seconds between polls
  for (let attempt = 0; attempt < maxAttempts; attempt++) {
    const response = await fetch(
      `https://api.videocascade.com/v1/videos/${videoId}`,
      {
        headers: {
          'Authorization': 'Bearer vca_your_api_key',
        },
      }
    );
    // FIX: surface HTTP failures instead of parsing an error body as JSON —
    // keeps parity with the TypeScript variant of this helper.
    if (!response.ok) {
      throw new Error(`HTTP error! status: ${response.status}`);
    }
    const video = await response.json();
    console.log(`Analysis status: ${video.analysis?.status || 'not_started'}`);
    if (video.analysis?.status === 'completed') {
      return video;
    }
    if (video.analysis?.status === 'failed') {
      throw new Error(video.analysis.error || 'Analysis failed');
    }
    await new Promise(resolve => setTimeout(resolve, pollInterval));
  }
  throw new Error('Analysis timeout');
}
// Usage
const analysis = await analyzeVideo('https://example.com/video.mp4');
console.log('Tags:', analysis.tags);
console.log('Summary:', analysis.summary);
console.log('Location:', analysis.location);

import time
import requests
def analyze_video(video_url):
    """Submit a video for AI analysis and wait for the results.

    Args:
        video_url: URL of the source video to analyze.

    Returns:
        dict with 'videoId', 'tags', 'summary', and 'location'.

    Raises:
        requests.HTTPError: if either API request fails.
        TimeoutError: if analysis does not finish within the poll budget.
    """
    try:
        # Submit video with AI analysis enabled
        response = requests.post(
            'https://api.videocascade.com/v1/videos',
            headers={
                'Authorization': 'Bearer vca_your_api_key',
                'Content-Type': 'application/json',
            },
            json={
                'fileUrl': video_url,
                'enableAiAnalysis': True,
            }
        )
        response.raise_for_status()
        data = response.json()
        print(f"Analysis started: {data['videoId']}")
        # Wait for analysis to complete (polls the status endpoint)
        result = wait_for_analysis_completion(data['videoId'])
        # Flatten the nested analysis payload for callers
        return {
            'videoId': data['videoId'],
            'tags': result['analysis']['data']['tags'],
            'summary': result['analysis']['data']['summary'],
            'location': result['analysis']['data']['estimatedLocation']
        }
    except Exception as error:
        # Log and re-raise so callers can apply their own handling
        print(f"Error analyzing video: {error}")
        raise
def wait_for_analysis_completion(video_id, max_attempts=60):
    """Poll the video resource until analysis completes.

    Args:
        video_id: Id returned by the submission call.
        max_attempts: Poll attempts before giving up (5s apart, ~5 min total).

    Returns:
        The full video resource once analysis status is 'completed'.

    Raises:
        Exception: if the analysis status becomes 'failed'.
        TimeoutError: if max_attempts polls pass without completion.
    """
    poll_interval = 5  # seconds between polls
    for attempt in range(max_attempts):
        response = requests.get(
            f'https://api.videocascade.com/v1/videos/{video_id}',
            headers={'Authorization': 'Bearer vca_your_api_key'}
        )
        response.raise_for_status()
        video = response.json()
        # 'analysis' may be absent until the job is scheduled
        status = video.get('analysis', {}).get('status', 'not_started')
        print(f"Analysis status: {status}")
        if status == 'completed':
            return video
        if status == 'failed':
            error = video.get('analysis', {}).get('error', 'Analysis failed')
            raise Exception(error)
        time.sleep(poll_interval)
    raise TimeoutError('Analysis timeout')
# Usage
analysis = analyze_video('https://example.com/video.mp4')
print(f"Tags: {analysis['tags']}")
print(f"Summary: {analysis['summary']}")
print(f"Location: {analysis['location']}")

interface AnalysisData {
tags: string[];
summary: string;
estimatedLocation: string;
}
/** AI analysis job attached to a video: lifecycle status plus results. */
interface VideoAnalysis {
  status: 'not_requested' | 'pending' | 'processing' | 'completed' | 'failed';
  progress?: number; // present while processing — range not shown here; confirm with API docs
  error?: string; // populated when status === 'failed'
  data?: AnalysisData; // populated when status === 'completed'
}
/** Minimal view of the video resource used by the polling helper. */
interface VideoWithAnalysis {
  videoId: string;
  status: string;
  analysis?: VideoAnalysis;
}
/** Flattened result returned to callers of analyzeVideo(). */
interface AnalysisResult {
  videoId: string;
  tags: string[];
  summary: string;
  location: string;
}
/**
 * Submit a video for AI vision analysis and block until results are ready.
 *
 * @param videoUrl URL of the source video to analyze.
 * @returns Flattened analysis result (tags, summary, estimated location).
 * @throws Error on a non-2xx response, missing analysis data, failure, or timeout.
 */
async function analyzeVideo(videoUrl: string): Promise<AnalysisResult> {
  try {
    // Submit video with AI analysis enabled
    const response = await fetch('https://api.videocascade.com/v1/videos', {
      method: 'POST',
      headers: {
        'Authorization': 'Bearer vca_your_api_key',
        'Content-Type': 'application/json',
      },
      body: JSON.stringify({
        fileUrl: videoUrl,
        enableAiAnalysis: true,
      }),
    });
    if (!response.ok) {
      throw new Error(`HTTP error! status: ${response.status}`);
    }
    const data = await response.json();
    console.log('Analysis started:', data.videoId);
    // Wait for analysis to complete (polls the status endpoint)
    const result = await waitForAnalysisCompletion(data.videoId);
    // Guard: 'completed' status should imply data, but verify before dereferencing
    if (!result.analysis?.data) {
      throw new Error('Analysis data not available');
    }
    return {
      videoId: data.videoId,
      tags: result.analysis.data.tags,
      summary: result.analysis.data.summary,
      location: result.analysis.data.estimatedLocation
    };
  } catch (error) {
    // Log and rethrow so callers can apply their own handling
    console.error('Error analyzing video:', error);
    throw error;
  }
}
/**
 * Poll the video resource until AI analysis completes (or fails / times out).
 *
 * @param videoId Id returned by the video submission call.
 * @param maxAttempts Poll attempts before giving up (5s apart, ~5 min total).
 * @returns The full video resource once analysis status is 'completed'.
 * @throws Error on an HTTP error, a 'failed' analysis, or timeout.
 */
async function waitForAnalysisCompletion(
  videoId: string,
  maxAttempts: number = 60
): Promise<VideoWithAnalysis> {
  const pollInterval = 5000; // 5 seconds between polls
  for (let attempt = 0; attempt < maxAttempts; attempt++) {
    const response = await fetch(
      `https://api.videocascade.com/v1/videos/${videoId}`,
      {
        headers: {
          'Authorization': 'Bearer vca_your_api_key',
        },
      }
    );
    if (!response.ok) {
      throw new Error(`HTTP error! status: ${response.status}`);
    }
    const video: VideoWithAnalysis = await response.json();
    console.log(`Analysis status: ${video.analysis?.status || 'not_started'}`);
    if (video.analysis?.status === 'completed') {
      return video;
    }
    if (video.analysis?.status === 'failed') {
      throw new Error(video.analysis.error || 'Analysis failed');
    }
    await new Promise(resolve => setTimeout(resolve, pollInterval));
  }
  throw new Error('Analysis timeout');
}
// Usage
const analysis = await analyzeVideo('https://example.com/video.mp4');
console.log('Tags:', analysis.tags);
console.log('Summary:', analysis.summary);
console.log('Location:', analysis.location);

Transcription + AI Analysis Together
Combine transcription and visual analysis for comprehensive video understanding.
/**
 * Run visual analysis AND transcription on a video, then merge the results.
 *
 * @param {string} videoUrl - URL of the source video.
 * @returns {Promise<object>} videoId, visual fields (tags/summary/location),
 *   transcription fields (transcript/language), and derived insights.
 * @throws {Error} on a non-2xx submission response or processing failure.
 */
async function comprehensiveAnalysis(videoUrl) {
  // Submit with both AI features enabled
  const response = await fetch('https://api.videocascade.com/v1/videos', {
    method: 'POST',
    headers: {
      'Authorization': 'Bearer vca_your_api_key',
      'Content-Type': 'application/json',
    },
    body: JSON.stringify({
      fileUrl: videoUrl,
      enableAiAnalysis: true, // Visual analysis
      enableTranscription: true, // Speech-to-text
    }),
  });
  // FIX: fail fast on HTTP errors instead of parsing an error body —
  // keeps parity with the TypeScript variant of this function.
  if (!response.ok) {
    throw new Error(`HTTP error! status: ${response.status}`);
  }
  const data = await response.json();
  console.log('Processing started:', data.videoId);
  // Wait for all processing to complete
  // (waitForCompletion: shared polling helper, defined elsewhere in the guide)
  const result = await waitForCompletion(data.videoId);
  return {
    videoId: data.videoId,
    // Visual analysis
    tags: result.analysis.data.tags,
    summary: result.analysis.data.summary,
    location: result.analysis.data.estimatedLocation,
    // Transcription
    transcript: result.transcription.data.text,
    language: result.transcription.data.language,
    // Combined insights
    insights: generateInsights(result)
  };
}
/**
 * Derive cross-modal insights from a processed video's transcript and
 * visual analysis. Insights are appended in a fixed order: spoken locations
 * (if any), visual location (if known), transcript topics, then the union
 * of transcript topics and visual tags.
 *
 * @param {object} video - Processed video resource with `analysis` and `transcription`.
 * @returns {Array<object>} list of insight records.
 */
function generateInsights(video) {
  const transcript = video.transcription.data.text;
  const visual = video.analysis.data;
  const insights = [];

  // Locations spoken about in the audio track
  const spokenLocations = extractLocations(transcript);
  if (spokenLocations.length > 0) {
    insights.push({
      type: 'location_mentioned',
      data: spokenLocations,
      confidence: 'high'
    });
  }

  // Location inferred from the video frames, when the model produced one
  if (visual.estimatedLocation !== 'unknown') {
    insights.push({
      type: 'visual_location',
      data: visual.estimatedLocation,
      confidence: 'medium'
    });
  }

  // Topics from speech alone…
  const spokenTopics = extractTopics(transcript);
  insights.push({
    type: 'topics',
    data: spokenTopics,
    source: 'transcript'
  });

  // …then merged (deduplicated) with the visual tags
  const mergedTopics = [...new Set([...spokenTopics, ...visual.tags])];
  insights.push({
    type: 'combined_topics',
    data: mergedTopics,
    sources: ['visual', 'audio']
  });

  return insights;
}
/**
 * Pull capitalized place-name candidates that follow "in", "at", or "from".
 * Heuristic only — swap in a proper NLP/NER library for production use.
 *
 * @param {string} text - Text to scan (e.g. a transcript).
 * @returns {string[]} matched capitalized word runs, in order of appearance.
 */
function extractLocations(text) {
  const pattern = /\b(in|at|from)\s+([A-Z][a-z]+(?:\s+[A-Z][a-z]+)*)\b/g;
  const found = [];
  let hit;
  while ((hit = pattern.exec(text)) !== null) {
    found.push(hit[2]);
  }
  return found;
}
/**
 * Naive keyword extraction: the ten most frequent words longer than four
 * characters, minus a small stop-word list. Use NLP tooling in production.
 *
 * @param {string} text - Text to scan.
 * @returns {string[]} up to ten keywords, most frequent first.
 */
function extractTopics(text) {
  const stopWords = new Set(['the', 'a', 'an', 'and', 'or', 'but', 'in', 'on', 'at']);
  const counts = new Map();
  for (const token of text.toLowerCase().split(/\W+/)) {
    if (token.length > 4 && !stopWords.has(token)) {
      counts.set(token, (counts.get(token) || 0) + 1);
    }
  }
  return [...counts.entries()]
    .sort((left, right) => right[1] - left[1])
    .slice(0, 10)
    .map(entry => entry[0]);
}
// Usage
const analysis = await comprehensiveAnalysis('https://example.com/video.mp4');
console.log('Visual tags:', analysis.tags);
console.log('Transcript:', analysis.transcript.substring(0, 200) + '...');
console.log('Insights:', analysis.insights);

import re
from collections import Counter
def comprehensive_analysis(video_url):
    """Run visual analysis AND transcription on a video, then merge the results.

    Args:
        video_url: URL of the source video.

    Returns:
        dict with videoId, visual fields (tags/summary/location),
        transcription fields (transcript/language), and derived insights.
    """
    # Submit with both AI features enabled
    response = requests.post(
        'https://api.videocascade.com/v1/videos',
        headers={
            'Authorization': 'Bearer vca_your_api_key',
            'Content-Type': 'application/json',
        },
        json={
            'fileUrl': video_url,
            'enableAiAnalysis': True,  # Visual analysis
            'enableTranscription': True,  # Speech-to-text
        }
    )
    response.raise_for_status()
    data = response.json()
    print(f"Processing started: {data['videoId']}")
    # Wait for all processing to complete
    # (wait_for_completion: shared polling helper, defined elsewhere in the guide)
    result = wait_for_completion(data['videoId'])
    return {
        'videoId': data['videoId'],
        # Visual analysis
        'tags': result['analysis']['data']['tags'],
        'summary': result['analysis']['data']['summary'],
        'location': result['analysis']['data']['estimatedLocation'],
        # Transcription
        'transcript': result['transcription']['data']['text'],
        'language': result['transcription']['data']['language'],
        # Combined insights
        'insights': generate_insights(result)
    }
def generate_insights(video):
    """Derive cross-modal insights from transcript + visual analysis.

    Insights are appended in a fixed order: spoken locations (if any),
    visual location (if known), transcript topics, then the union of
    transcript topics with the visual tags.

    Args:
        video: Processed video resource with 'analysis' and 'transcription'.

    Returns:
        list of insight dicts.
    """
    transcript = video['transcription']['data']['text']
    visual = video['analysis']['data']
    insights = []

    # Locations spoken about in the audio track
    spoken_locations = extract_locations(transcript)
    if spoken_locations:
        insights.append({
            'type': 'location_mentioned',
            'data': spoken_locations,
            'confidence': 'high',
        })

    # Location inferred from the video frames, when the model produced one
    if visual['estimatedLocation'] != 'unknown':
        insights.append({
            'type': 'visual_location',
            'data': visual['estimatedLocation'],
            'confidence': 'medium',
        })

    # Topics from speech alone…
    spoken_topics = extract_topics(transcript)
    insights.append({
        'type': 'topics',
        'data': spoken_topics,
        'source': 'transcript',
    })

    # …then merged (deduplicated) with the visual tags
    merged_topics = list(set(spoken_topics + visual['tags']))
    insights.append({
        'type': 'combined_topics',
        'data': merged_topics,
        'sources': ['visual', 'audio'],
    })

    return insights
def extract_locations(text):
    """Extract capitalized place-name candidates from text.

    Matches runs of capitalized words that follow "in", "at", or "from".
    Heuristic only — use a proper NLP/NER library in production.

    Args:
        text: Text to scan (e.g. a transcript).

    Returns:
        list of matched capitalized word runs, in order of appearance.
    """
    # FIX: the quantifier was escaped ('\*'), which matches a literal '*'
    # and breaks multi-word names like "New York City". It must be a bare
    # '*' so the group repeats, as in the JavaScript/TypeScript versions.
    pattern = r'\b(in|at|from)\s+([A-Z][a-z]+(?:\s+[A-Z][a-z]+)*)\b'
    matches = re.findall(pattern, text)
    return [match[1] for match in matches]
def extract_topics(text):
    """Return up to ten of the most frequent non-trivial words in text.

    Naive keyword extraction: lowercases, keeps words longer than four
    characters that are not stop words, and ranks by frequency (ties keep
    first-seen order). Use NLP tooling in production.
    """
    stop_words = {'the', 'a', 'an', 'and', 'or', 'but', 'in', 'on', 'at'}
    freq = {}
    for token in re.findall(r'\w+', text.lower()):
        if len(token) > 4 and token not in stop_words:
            freq[token] = freq.get(token, 0) + 1
    # Stable sort on descending count preserves first-seen order for ties,
    # matching Counter.most_common()
    ranked = sorted(freq.items(), key=lambda item: item[1], reverse=True)
    return [token for token, _ in ranked[:10]]
# Usage
analysis = comprehensive_analysis('https://example.com/video.mp4')
print(f"Visual tags: {analysis['tags']}")
print(f"Transcript: {analysis['transcript'][:200]}...")
print(f"Insights: {analysis['insights']}")

interface Insight {
type: string;
data: any;
confidence?: string;
source?: string;
sources?: string[];
}
/** Combined output of visual analysis + transcription for one video. */
interface ComprehensiveAnalysisResult {
  videoId: string;
  tags: string[]; // visual tags
  summary: string; // visual summary
  location: string; // estimated location ('unknown' when not detected)
  transcript: string;
  language: string;
  insights: Insight[]; // derived by generateInsights()
}
/**
 * Run visual analysis AND transcription on a video, then merge the results.
 *
 * @param videoUrl URL of the source video.
 * @returns Combined visual, transcription, and derived-insight fields.
 * @throws Error on a non-2xx submission response or processing failure.
 */
async function comprehensiveAnalysis(
  videoUrl: string
): Promise<ComprehensiveAnalysisResult> {
  // Submit with both AI features enabled
  const response = await fetch('https://api.videocascade.com/v1/videos', {
    method: 'POST',
    headers: {
      'Authorization': 'Bearer vca_your_api_key',
      'Content-Type': 'application/json',
    },
    body: JSON.stringify({
      fileUrl: videoUrl,
      enableAiAnalysis: true,
      enableTranscription: true,
    }),
  });
  if (!response.ok) {
    throw new Error(`HTTP error! status: ${response.status}`);
  }
  const data = await response.json();
  console.log('Processing started:', data.videoId);
  // Wait for all processing to complete
  // (waitForCompletion: shared polling helper, defined elsewhere in the guide)
  const result = await waitForCompletion(data.videoId);
  return {
    videoId: data.videoId,
    tags: result.analysis.data.tags,
    summary: result.analysis.data.summary,
    location: result.analysis.data.estimatedLocation,
    transcript: result.transcription.data.text,
    language: result.transcription.data.language,
    insights: generateInsights(result)
  };
}
/**
 * Derive cross-modal insights from a processed video's transcript and
 * visual analysis. Insights are appended in a fixed order: spoken locations
 * (if any), visual location (if known), transcript topics, then the union
 * of transcript topics and visual tags.
 *
 * @param video Processed video resource with `analysis` and `transcription`
 *   (typed `any` here for brevity; see VideoWithAnalysis for the shape).
 */
function generateInsights(video: any): Insight[] {
  const insights: Insight[] = [];
  // Check if transcript mentions locations
  const locationMentions = extractLocations(video.transcription.data.text);
  if (locationMentions.length > 0) {
    insights.push({
      type: 'location_mentioned',
      data: locationMentions,
      confidence: 'high'
    });
  }
  // Check if visual location matches transcript
  if (video.analysis.data.estimatedLocation !== 'unknown') {
    insights.push({
      type: 'visual_location',
      data: video.analysis.data.estimatedLocation,
      confidence: 'medium'
    });
  }
  // Extract topics from transcript
  const topics = extractTopics(video.transcription.data.text);
  insights.push({
    type: 'topics',
    data: topics,
    source: 'transcript'
  });
  // Combine with visual tags (Set deduplicates)
  const allTopics = [...new Set([...topics, ...video.analysis.data.tags])];
  insights.push({
    type: 'combined_topics',
    data: allTopics,
    sources: ['visual', 'audio']
  });
  return insights;
}
/**
 * Pull capitalized place-name candidates that follow "in", "at", or "from".
 * Heuristic only — swap in a proper NLP/NER library for production use.
 */
function extractLocations(text: string): string[] {
  const pattern = /\b(in|at|from)\s+([A-Z][a-z]+(?:\s+[A-Z][a-z]+)*)\b/g;
  const found: string[] = [];
  let hit: RegExpExecArray | null;
  while ((hit = pattern.exec(text)) !== null) {
    found.push(hit[2]);
  }
  return found;
}
/**
 * Naive keyword extraction: the ten most frequent words longer than four
 * characters, minus a small stop-word list. Use NLP tooling in production.
 */
function extractTopics(text: string): string[] {
  const stopWords = new Set(['the', 'a', 'an', 'and', 'or', 'but', 'in', 'on', 'at']);
  const counts = new Map<string, number>();
  for (const token of text.toLowerCase().split(/\W+/)) {
    if (token.length > 4 && !stopWords.has(token)) {
      counts.set(token, (counts.get(token) ?? 0) + 1);
    }
  }
  return [...counts.entries()]
    .sort((left, right) => right[1] - left[1])
    .slice(0, 10)
    .map(entry => entry[0]);
}
// Usage
const analysis = await comprehensiveAnalysis('https://example.com/video.mp4');
console.log('Visual tags:', analysis.tags);
console.log('Transcript:', analysis.transcript.substring(0, 200) + '...');
console.log('Insights:', analysis.insights);

Content Moderation Use Case
Automatically screen videos for inappropriate content before publishing.
/**
 * Screens uploaded videos against configurable moderation rules using the
 * VideoCascade AI analysis + transcription output.
 */
class VideoModerator {
  /**
   * @param {string} apiKey - VideoCascade API key.
   * @param {object} [moderationRules] - Optional overrides for flaggedTags,
   *   flaggedKeywords, and requireManualReview (defaults below).
   */
  constructor(apiKey, moderationRules = {}) {
    this.apiKey = apiKey;
    this.rules = {
      flaggedTags: moderationRules.flaggedTags || [
        'violence', 'explicit', 'nsfw', 'inappropriate',
        'weapon', 'drug', 'alcohol', 'tobacco'
      ],
      flaggedKeywords: moderationRules.flaggedKeywords || [
        'explicit', 'violence', 'hate', 'abuse', 'illegal'
      ],
      requireManualReview: moderationRules.requireManualReview || false
    };
  }
  /**
   * Analyze a video, score it against the rules, and persist the verdict.
   *
   * @param {string} videoUrl - Source video URL.
   * @param {string} userId - Uploader id (embedded in the webhook path).
   * @returns {Promise<object>} moderation result ({ action, riskScore, flags, ... }).
   */
  async moderateVideo(videoUrl, userId) {
    try {
      // Analyze video
      const response = await fetch('https://api.videocascade.com/v1/videos', {
        method: 'POST',
        headers: {
          'Authorization': `Bearer ${this.apiKey}`,
          'Content-Type': 'application/json',
        },
        body: JSON.stringify({
          fileUrl: videoUrl,
          enableAiAnalysis: true,
          enableTranscription: true,
          webhookUrl: `https://yourapp.com/webhooks/moderation/${userId}`
        }),
      });
      const data = await response.json();
      // Wait for analysis
      // (waitForCompletion: shared polling helper, defined elsewhere in the guide)
      const video = await waitForCompletion(data.videoId);
      // Perform moderation checks
      const moderationResult = this.checkContent(video);
      // Save to database
      await this.saveModerationResult(data.videoId, userId, moderationResult);
      return moderationResult;
    } catch (error) {
      console.error('Moderation error:', error);
      throw error;
    }
  }
  /**
   * Score video content against the configured rules.
   *
   * Scoring: each flagged visual tag adds 30 (severity "high"); each flagged
   * transcript keyword adds 20 (severity "medium"). Action: score > 80 or any
   * high-severity flag => 'rejected'; score > 40 or requireManualReview =>
   * 'review'; otherwise 'approved'.
   */
  checkContent(video) {
    const flags = [];
    let riskScore = 0;
    // Check visual tags (substring match, case-insensitive)
    const visualFlags = video.analysis.data.tags.filter(tag =>
      this.rules.flaggedTags.some(flagged =>
        tag.toLowerCase().includes(flagged.toLowerCase())
      )
    );
    if (visualFlags.length > 0) {
      flags.push({
        type: 'visual',
        severity: 'high',
        tags: visualFlags,
        message: `Flagged visual content: ${visualFlags.join(', ')}`
      });
      riskScore += visualFlags.length * 30;
    }
    // Check transcript for keywords (skipped when transcription is absent)
    if (video.transcription?.data?.text) {
      const transcript = video.transcription.data.text.toLowerCase();
      const keywordFlags = this.rules.flaggedKeywords.filter(keyword =>
        transcript.includes(keyword.toLowerCase())
      );
      if (keywordFlags.length > 0) {
        flags.push({
          type: 'audio',
          severity: 'medium',
          keywords: keywordFlags,
          message: `Flagged keywords in transcript: ${keywordFlags.join(', ')}`
        });
        riskScore += keywordFlags.length * 20;
      }
    }
    // Determine action
    let action = 'approved';
    if (riskScore > 80 || flags.some(f => f.severity === 'high')) {
      action = 'rejected';
    } else if (riskScore > 40 || this.rules.requireManualReview) {
      action = 'review';
    }
    return {
      videoId: video.videoId,
      action,
      riskScore,
      flags,
      summary: video.analysis.data.summary,
      tags: video.analysis.data.tags,
      timestamp: new Date().toISOString()
    };
  }
  /**
   * Persist the verdict and enqueue flagged videos for human review.
   * NOTE(review): `db` is assumed to be a module-level PostgreSQL client —
   * not shown in this guide; confirm in your application wiring.
   */
  async saveModerationResult(videoId, userId, result) {
    // Save to database (example with PostgreSQL)
    await db.query(
      `INSERT INTO moderation_results
      (video_id, user_id, action, risk_score, flags, created_at)
      VALUES ($1, $2, $3, $4, $5, NOW())`,
      [videoId, userId, result.action, result.riskScore, JSON.stringify(result.flags)]
    );
    // Log for review if flagged
    if (result.action !== 'approved') {
      await db.query(
        `INSERT INTO review_queue (video_id, priority, created_at)
        VALUES ($1, $2, NOW())`,
        [videoId, result.riskScore > 80 ? 'high' : 'normal']
      );
    }
  }
}
// Usage
const moderator = new VideoModerator('vca_your_api_key', {
flaggedTags: ['violence', 'explicit', 'weapon'],
flaggedKeywords: ['inappropriate', 'offensive'],
requireManualReview: true
});
const result = await moderator.moderateVideo(
'https://example.com/user-upload.mp4',
'user_12345'
);
console.log('Moderation result:', result.action);
console.log('Risk score:', result.riskScore);
console.log('Flags:', result.flags);
// Handle result
if (result.action === 'approved') {
await publishVideo(result.videoId);
} else if (result.action === 'review') {
await notifyModerators(result);
} else {
await notifyUser(result.videoId, 'rejected');
}

from datetime import datetime
import json
class VideoModerator:
    """Screens uploaded videos against configurable moderation rules using
    the VideoCascade AI analysis + transcription output."""

    def __init__(self, api_key, moderation_rules=None):
        """
        Args:
            api_key: VideoCascade API key.
            moderation_rules: Optional dict overriding 'flaggedTags',
                'flaggedKeywords', and 'requireManualReview'.
        """
        self.api_key = api_key
        self.rules = moderation_rules or {}
        self.rules.setdefault('flaggedTags', [
            'violence', 'explicit', 'nsfw', 'inappropriate',
            'weapon', 'drug', 'alcohol', 'tobacco'
        ])
        self.rules.setdefault('flaggedKeywords', [
            'explicit', 'violence', 'hate', 'abuse', 'illegal'
        ])
        self.rules.setdefault('requireManualReview', False)

    def moderate_video(self, video_url, user_id):
        """Analyze a video, score it against the rules, and persist the verdict.

        Args:
            video_url: Source video URL.
            user_id: Uploader id (embedded in the webhook path).

        Returns:
            Moderation result dict ('action', 'riskScore', 'flags', ...).
        """
        try:
            # Analyze video
            response = requests.post(
                'https://api.videocascade.com/v1/videos',
                headers={
                    'Authorization': f'Bearer {self.api_key}',
                    'Content-Type': 'application/json',
                },
                json={
                    'fileUrl': video_url,
                    'enableAiAnalysis': True,
                    'enableTranscription': True,
                    'webhookUrl': f"https://yourapp.com/webhooks/moderation/{user_id}"
                }
            )
            response.raise_for_status()
            data = response.json()
            # Wait for analysis
            # (wait_for_completion: shared polling helper, defined elsewhere in the guide)
            video = wait_for_completion(data['videoId'])
            # Perform moderation checks
            moderation_result = self.check_content(video)
            # Save to database
            self.save_moderation_result(data['videoId'], user_id, moderation_result)
            return moderation_result
        except Exception as error:
            print(f"Moderation error: {error}")
            raise

    def check_content(self, video):
        """Score video content against the configured rules.

        Scoring: each flagged visual tag adds 30 (severity 'high'); each
        flagged transcript keyword adds 20 (severity 'medium'). Action:
        score > 80 or any high-severity flag => 'rejected'; score > 40 or
        requireManualReview => 'review'; otherwise 'approved'.
        """
        flags = []
        risk_score = 0
        # Check visual tags (substring match, case-insensitive)
        visual_flags = [
            tag for tag in video['analysis']['data']['tags']
            if any(flagged.lower() in tag.lower()
                   for flagged in self.rules['flaggedTags'])
        ]
        if visual_flags:
            flags.append({
                'type': 'visual',
                'severity': 'high',
                'tags': visual_flags,
                'message': f"Flagged visual content: {', '.join(visual_flags)}"
            })
            risk_score += len(visual_flags) * 30
        # Check transcript for keywords (skipped when transcription is absent)
        transcript_data = video.get('transcription', {}).get('data')
        if transcript_data and transcript_data.get('text'):
            transcript = transcript_data['text'].lower()
            keyword_flags = [
                keyword for keyword in self.rules['flaggedKeywords']
                if keyword.lower() in transcript
            ]
            if keyword_flags:
                flags.append({
                    'type': 'audio',
                    'severity': 'medium',
                    'keywords': keyword_flags,
                    'message': f"Flagged keywords: {', '.join(keyword_flags)}"
                })
                risk_score += len(keyword_flags) * 20
        # Determine action
        action = 'approved'
        if risk_score > 80 or any(f['severity'] == 'high' for f in flags):
            action = 'rejected'
        elif risk_score > 40 or self.rules['requireManualReview']:
            action = 'review'
        return {
            'videoId': video['videoId'],
            'action': action,
            'riskScore': risk_score,
            'flags': flags,
            'summary': video['analysis']['data']['summary'],
            'tags': video['analysis']['data']['tags'],
            # NOTE(review): datetime.utcnow() is deprecated since Python 3.12 —
            # consider datetime.now(timezone.utc) in new code
            'timestamp': datetime.utcnow().isoformat()
        }

    def save_moderation_result(self, video_id, user_id, result):
        """Persist the verdict and enqueue flagged videos for human review.

        NOTE(review): `db` is assumed to be a module-level PostgreSQL client —
        not shown in this guide; confirm in your application wiring.
        """
        # Save to database (example with PostgreSQL)
        db.execute(
            """INSERT INTO moderation_results
            (video_id, user_id, action, risk_score, flags, created_at)
            VALUES (%s, %s, %s, %s, %s, NOW())""",
            (video_id, user_id, result['action'], result['riskScore'],
             json.dumps(result['flags']))
        )
        # Log for review if flagged
        if result['action'] != 'approved':
            priority = 'high' if result['riskScore'] > 80 else 'normal'
            db.execute(
                """INSERT INTO review_queue (video_id, priority, created_at)
                VALUES (%s, %s, NOW())""",
                (video_id, priority)
            )
# Usage
moderator = VideoModerator('vca_your_api_key', {
'flaggedTags': ['violence', 'explicit', 'weapon'],
'flaggedKeywords': ['inappropriate', 'offensive'],
'requireManualReview': True
})
result = moderator.moderate_video(
'https://example.com/user-upload.mp4',
'user_12345'
)
print(f"Moderation result: {result['action']}")
print(f"Risk score: {result['riskScore']}")
print(f"Flags: {result['flags']}")
# Handle result
if result['action'] == 'approved':
publish_video(result['videoId'])
elif result['action'] == 'review':
notify_moderators(result)
else:
notify_user(result['videoId'], 'rejected')

Building a Searchable Video Library
Create an intelligent video CMS with full-text search capabilities.
// Elasticsearch/PostgreSQL Full-Text Search Example
/**
 * Builds a searchable video library: processes videos through the
 * VideoCascade API, then indexes the AI output for full-text search.
 */
class VideoLibraryBuilder {
  /**
   * @param {string} apiKey - VideoCascade API key.
   * @param {object} db - PostgreSQL client (must expose `query`).
   * @param {object|null} [elasticsearchClient] - Optional Elasticsearch client.
   *   FIX: the original referenced `this.elasticsearchClient` in indexVideo()
   *   without ever assigning it; it is now an optional dependency.
   */
  constructor(apiKey, db, elasticsearchClient = null) {
    this.apiKey = apiKey;
    this.db = db; // Database/Search engine client
    this.elasticsearchClient = elasticsearchClient;
  }
  /**
   * Process a video (AI analysis + transcription + thumbnail), then index it.
   * @returns {Promise<object>} the indexed search document.
   */
  async processAndIndexVideo(videoUrl, metadata) {
    // Process video with AI analysis and transcription
    const response = await fetch('https://api.videocascade.com/v1/videos', {
      method: 'POST',
      headers: {
        Authorization: `Bearer ${this.apiKey}`,
        'Content-Type': 'application/json',
      },
      body: JSON.stringify({
        fileUrl: videoUrl,
        enableAiAnalysis: true,
        enableTranscription: true,
        enableThumbnail: true,
        webhookUrl: 'https://yourapp.com/webhooks/video-indexed',
      }),
    });
    // FIX: fail fast on HTTP errors instead of parsing an error body
    if (!response.ok) {
      throw new Error(`HTTP error! status: ${response.status}`);
    }
    const data = await response.json();
    // Wait for completion
    // (waitForCompletion: shared polling helper, defined elsewhere in the guide)
    const video = await waitForCompletion(data.videoId);
    // Build search document
    const searchDocument = this.buildSearchDocument(video, metadata);
    // Index in database
    await this.indexVideo(searchDocument);
    return searchDocument;
  }
  /**
   * Combine user metadata with AI output into one flat, indexable document.
   */
  buildSearchDocument(video, metadata) {
    const analysis = video.analysis.data;
    const transcription = video.transcription.data;
    return {
      videoId: video.videoId,
      title: metadata.title,
      description: metadata.description || analysis.summary,
      // AI-generated fields
      tags: analysis.tags,
      aiSummary: analysis.summary,
      location: analysis.estimatedLocation,
      // Transcription
      transcript: transcription.text,
      language: transcription.language,
      // Searchable text (combines everything)
      searchableText: [
        metadata.title,
        metadata.description || '',
        analysis.summary,
        ...analysis.tags,
        analysis.estimatedLocation !== 'unknown'
          ? analysis.estimatedLocation
          : '',
        transcription.text,
      ].join(' '),
      // Metadata
      videoUrl: video.finalVideoUrl,
      thumbnailUrl: video.thumbnailUrl,
      duration: video.videoMetadata.duration,
      uploadedBy: metadata.userId,
      uploadedAt: new Date(),
      // Search vectors (for advanced search)
      tagVector: analysis.tags.join(','),
      topicVector: this.extractTopics(transcription.text),
    };
  }
  /**
   * Write the document to the PostgreSQL full-text index and, when a client
   * is configured, mirror it into Elasticsearch.
   */
  async indexVideo(document) {
    // PostgreSQL Full-Text Search
    await this.db.query(
      `INSERT INTO videos_search
      (video_id, title, description, tags, transcript, searchable_text,
      location, uploaded_at, search_vector)
      VALUES ($1, $2, $3, $4, $5, $6, $7, $8,
      to_tsvector('english', $6))`,
      [
        document.videoId,
        document.title,
        document.description,
        document.tags,
        document.transcript,
        document.searchableText,
        document.location,
        document.uploadedAt,
      ]
    );
    // FIX: guarded — the Elasticsearch client is optional (see constructor)
    if (this.elasticsearchClient) {
      await this.elasticsearchClient.index({
        index: 'videos',
        id: document.videoId,
        body: document,
      });
    }
  }
  /**
   * Full-text search with optional location/tags filters.
   *
   * FIX: the original always bound three parameters while `$2`/`$3` only
   * appeared in the SQL when the matching filter was set — PostgreSQL
   * rejects that mismatch. The clause list and parameter list are now
   * built together so placeholders always line up.
   */
  async searchVideos(query, filters = {}) {
    const params = [query];
    const extraConditions = [];
    if (filters.location) {
      params.push(filters.location);
      extraConditions.push(`AND location ILIKE $${params.length}`);
    }
    if (filters.tags) {
      params.push(filters.tags);
      extraConditions.push(`AND tags && $${params.length}`);
    }
    const results = await this.db.query(
      `SELECT video_id, title, description, tags, location,
      ts_rank(search_vector, plainto_tsquery('english', $1)) as rank
      FROM videos_search
      WHERE search_vector @@ plainto_tsquery('english', $1)
      ${extraConditions.join('\n      ')}
      ORDER BY rank DESC
      LIMIT 50`,
      params
    );
    return results.rows;
  }
  /**
   * Extract main topics for semantic search: top-20 frequent words longer
   * than four characters, comma-joined. (Simplified — use an NLP library
   * for production.)
   */
  extractTopics(text) {
    const words = text.toLowerCase().split(/\W+/);
    const stopWords = new Set(['the', 'a', 'an', 'and', 'or', 'but']);
    const wordFreq = {};
    words.forEach(word => {
      if (word.length > 4 && !stopWords.has(word)) {
        wordFreq[word] = (wordFreq[word] || 0) + 1;
      }
    });
    return Object.entries(wordFreq)
      .sort((a, b) => b[1] - a[1])
      .slice(0, 20)
      .map(([word]) => word)
      .join(',');
  }
}
// Usage Example
const library = new VideoLibraryBuilder('vca_your_api_key', db);
// Process and index video
const document = await library.processAndIndexVideo(
'https://example.com/lecture.mp4',
{
title: 'Introduction to Machine Learning',
description: 'Comprehensive overview of ML fundamentals',
userId: 'prof_12345',
}
);
// Search videos
const results = await library.searchVideos('machine learning algorithms', {
tags: ['education', 'technology'],
location: 'university',
});
console.log(`Found ${results.length} matching videos`);
results.forEach(video => {
console.log(`- ${video.title} (relevance: ${video.rank})`);
});

Building a Video CMS with AI
Complete example of an AI-powered video content management system.
// Complete Video CMS with AI Features
/** Wiring for the AI video CMS: API key, DB, storage, and webhook base URL. */
interface VideoCMSConfig {
  apiKey: string;
  dbConnection: any; // e.g. a Prisma client — typed loosely in this example
  storageConfig: any; // e.g. an S3 client wrapper with an upload() method
  webhookUrl: string; // base URL; '/video-processed' is appended on upload
}
/**
 * AI-powered video CMS: upload + processing, webhook ingestion, search
 * indexing, recommendations, and user notification.
 *
 * NOTE(review): the usage snippet below instantiates `AIVideoCMS` while this
 * class is declared `AIVideoStandardCMS` — the two names should be reconciled.
 */
class AIVideoStandardCMS {
  private apiKey: string;
  private db: any; // Prisma-style client (videos, videoAnalysis, recommendations, execute)
  private storage: any; // object storage client with upload()
  private webhookUrl: string; // base URL for processing callbacks
  constructor(config: VideoCMSConfig) {
    this.apiKey = config.apiKey;
    this.db = config.dbConnection;
    this.storage = config.storageConfig;
    this.webhookUrl = config.webhookUrl;
  }
  /**
   * Upload a file, submit it for processing, and record it as 'processing'.
   * @returns the new video id.
   */
  async uploadVideo(file: File, metadata: any, userId: string) {
    // 1. Upload to temporary storage
    const tempUrl = await this.storage.upload(file);
    // 2. Submit for processing
    const response = await fetch('https://api.videocascade.com/v1/videos', {
      method: 'POST',
      headers: {
        Authorization: `Bearer ${this.apiKey}`,
        'Content-Type': 'application/json',
      },
      body: JSON.stringify({
        fileUrl: tempUrl,
        enableAiAnalysis: true,
        enableTranscription: true,
        enableThumbnail: true,
        normalizeAudio: true,
        aspectRatio: '16:9',
        compressionQuality: 95,
        webhookUrl: `${this.webhookUrl}/video-processed`,
      }),
    });
    const data = await response.json();
    // 3. Save to database
    await this.db.videos.create({
      videoId: data.videoId,
      userId,
      title: metadata.title,
      status: 'processing',
      uploadedAt: new Date(),
      metadata: JSON.stringify(metadata),
    });
    return data.videoId;
  }
  /**
   * Webhook handler: persist results, build the search index, generate
   * recommendations, and notify the uploader.
   */
  async handleVideoProcessed(payload: any) {
    const { videoId, finalVideoUrl, analysis, transcription } = payload;
    // Update video record
    await this.db.videos.update({
      where: { videoId },
      data: {
        status: 'completed',
        videoUrl: finalVideoUrl,
        thumbnailUrl: payload.thumbnailUrl,
        // NOTE(review): maps payload.durationMinutes into 'duration' —
        // confirm the unit against the webhook payload schema
        duration: payload.durationMinutes,
        processedAt: new Date(),
      },
    });
    // Save AI analysis
    await this.db.videoAnalysis.create({
      videoId,
      tags: analysis.data.tags,
      summary: analysis.data.summary,
      location: analysis.data.estimatedLocation,
      transcript: transcription.data.text,
      language: transcription.data.language,
    });
    // Build search index
    await this.buildSearchIndex(videoId);
    // Generate recommendations
    await this.generateRecommendations(videoId);
    // Notify user
    await this.notifyUser(videoId, 'completed');
  }
  /** Concatenate title, description, and AI output into the tsvector index. */
  async buildSearchIndex(videoId: string) {
    const video = await this.db.videos.findUnique({
      where: { videoId },
      include: { analysis: true },
    });
    const searchText = [
      video.title,
      video.description || '',
      video.analysis.summary,
      ...video.analysis.tags,
      video.analysis.transcript,
    ].join(' ');
    await this.db.execute(
      `UPDATE videos_search
      SET search_vector = to_tsvector('english', $1)
      WHERE video_id = $2`,
      [searchText, videoId]
    );
  }
  /** Store up to ten tag-overlap recommendations, scored 1.0 down to 0.1. */
  async generateRecommendations(videoId: string) {
    const video = await this.db.videos.findUnique({
      where: { videoId },
      include: { analysis: true },
    });
    // Find similar videos by tags
    const similarVideos = await this.db.videos.findMany({
      where: {
        analysis: {
          tags: {
            hasSome: video.analysis.tags,
          },
        },
        videoId: { not: videoId },
        status: 'completed',
      },
      take: 10,
    });
    // Save recommendations (score decays 0.1 per rank position)
    await this.db.recommendations.createMany({
      data: similarVideos.map((similar, index) => ({
        videoId,
        recommendedVideoId: similar.videoId,
        score: 1 - index * 0.1,
        reason: 'similar_tags',
      })),
    });
  }
  /** Case-insensitive search across title, summary, tags, and transcript. */
  async searchVideos(query: string, options: any = {}) {
    return await this.db.videos.findMany({
      where: {
        OR: [
          { title: { contains: query, mode: 'insensitive' } },
          { analysis: { summary: { contains: query, mode: 'insensitive' } } },
          { analysis: { tags: { hasSome: query.split(' ') } } },
          {
            analysis: { transcript: { contains: query, mode: 'insensitive' } },
          },
        ],
        status: 'completed',
      },
      include: {
        analysis: true,
        user: { select: { name: true, avatar: true } },
      },
      orderBy: options.orderBy || { uploadedAt: 'desc' },
      take: options.limit || 20,
      skip: options.offset || 0,
    });
  }
  /** Top-5 stored recommendations for a video, highest score first. */
  async getVideoRecommendations(videoId: string) {
    return await this.db.recommendations.findMany({
      where: { videoId },
      include: {
        recommendedVideo: {
          include: {
            analysis: true,
            user: { select: { name: true } },
          },
        },
      },
      orderBy: { score: 'desc' },
      take: 5,
    });
  }
  /**
   * Notify the uploader about a status change.
   * NOTE(review): `this.sendNotification` is not defined on this class —
   * presumably provided elsewhere in the real application; confirm.
   */
  async notifyUser(videoId: string, status: string) {
    const video = await this.db.videos.findUnique({
      where: { videoId },
      include: { user: true },
    });
    // Send email/push notification
    await this.sendNotification({
      userId: video.userId,
      type: 'video_status',
      title: `Video ${status}`,
      message: `Your video "${video.title}" is ${status}`,
      link: `/videos/${videoId}`,
    });
  }
}
// Usage
const cms = new AIVideoCMS({
apiKey: 'vca_your_api_key',
dbConnection: prisma,
storageConfig: s3Client,
webhookUrl: 'https://yourapp.com/webhooks',
});
// Upload video
const videoId = await cms.uploadVideo(
file,
{
title: 'My Video',
description: 'Description here',
category: 'education',
},
userId
);
// Search videos
const results = await cms.searchVideos('machine learning');
// Get recommendations
const recommendations = await cms.getVideoRecommendations(videoId);

Best Practices
1. Use Webhooks for AI Processing
AI analysis can take time - use webhooks:
{
fileUrl: videoUrl,
enableAiAnalysis: true,
enableTranscription: true,
webhookUrl: 'https://yourapp.com/webhooks/ai-complete'
}

2. Cache Analysis Results
Store AI data to avoid re-processing:
await redis.set(
`analysis:${videoId}`,
JSON.stringify(analysis),
'EX',
86400 * 30 // 30 days
);

3. Implement Fallbacks
Handle analysis failures gracefully:
if (analysis.status === 'failed') {
// Use filename/metadata for tags
const fallbackTags = extractTagsFromFilename(video.title);
await saveVideo({ tags: fallbackTags });
}

4. Build Comprehensive Search
Combine all AI data for search:
const searchText = [
video.title,
video.description,
...analysis.tags,
analysis.summary,
transcription.text,
analysis.location,
].join(' ');