TypeScript SDK
Learn how to instrument your TypeScript/JavaScript AI agents for deep debugging and decision visibility.
Installation
npm install opswald
# or
yarn add opswald
# or
pnpm add opswald

Quick Start
import { init, trace, span, shutdown } from 'opswald';
// Initialize with your API key
init({ apiKey: 'your-key', baseUrl: 'https://api.opswald.com' });

// Create a trace around your agent run
await trace('my-agent-run', {}, async (t) => {
  await span('reasoning', { kind: 'llm_call', provider: 'openai', model: 'gpt-4o' }, async (s) => {
    s.setInput({ prompt: 'Analyze this data...' });

    // Your AI logic here
    const result = await openai.chat.completions.create({...});

    s.setOutput({ analysis: result.choices[0].message.content });
    s.setTokens(120, 340);
  });
});

// Graceful shutdown
await shutdown();

Auto-Instrumentation
The fastest way to get started is auto-instrumentation:
import { init, instrument } from 'opswald';
import OpenAI from 'openai';
import Anthropic from '@anthropic-ai/sdk';

// Initialize
init({ apiKey: 'your-key' });

// Create client instances and instrument them
const openai = new OpenAI();
const anthropic = new Anthropic();

instrument(openai);
instrument(anthropic);

// Now all calls on these clients are automatically traced
const response = await openai.chat.completions.create({
  model: "gpt-4o",
  messages: [{ role: "user", content: "Hello!" }]
});
// ^ This call is automatically captured as a span

Manual Traces and Spans
For full control over what gets captured:
Traces
A trace represents a complete agent run or workflow:
// Async function wrapper (recommended)
await trace('user-query-pipeline', {}, async (t) => {
  // Your agent logic here
  return result;
});

// Function decorator
import { traceFunction } from 'opswald';

const processUserQuery = traceFunction(async (query: string) => {
  // This entire function becomes a trace
  return analysis;
});

Spans
Spans represent individual steps within a trace:
await trace('data-analysis', {}, async (t) => {
  // LLM reasoning step
  await span('extract-insights', { kind: 'llm_call' }, async (s) => {
    s.setInput({ rawData: data });
    const insights = await analyzeData(data);
    s.setOutput({ insights });
  });

  // Tool usage step
  await span('search-database', { kind: 'tool_call' }, async (s) => {
    s.setInput({ query: insights.searchTerms });
    const results = await database.search(insights.searchTerms);
    s.setOutput({ results });
  });

  // Error handling
  try {
    await riskyOperation();
  } catch (error) {
    await span('error-recovery', { kind: 'error' }, async (s) => {
      s.setError({ error: error.message });
      const fallback = await handleError(error);
      s.setOutput({ fallback });
    });
  }
});

Span Types
Use the appropriate kind for different operations:
llm_call — LLM API calls (OpenAI, Anthropic, etc.)
tool_call — Function calls, API calls, database queries
error — Error handling and recovery
custom — Everything else
Metadata and Context
Add rich context to your spans:
await span('user-intent-analysis', {
  kind: 'llm_call',
  provider: 'openai',
  model: 'gpt-4o',
  temperature: 0.1
}, async (s) => {
  // Input/output
  s.setInput({ userMessage: message, chatHistory: history });
  s.setOutput({ intent, confidence: 0.92 });

  // Token usage
  s.setTokens(150, 45);

  // Custom metadata
  s.setMetadata({
    userId: user.id,
    sessionId: session.id,
    modelVersion: 'v2.1'
  });
});

Real-World Example
Here’s how to trace a complete RAG (Retrieval-Augmented Generation) pipeline:
import { init, trace, span } from 'opswald';
import OpenAI from 'openai';
import { VectorDB } from './vector-db';

init({ apiKey: 'your-key' });
const db = new VectorDB();
const client = new OpenAI();
async function ragPipeline(userQuestion: string): Promise<string> {
  return await trace('rag-query', {}, async (t) => {
    // 1. Generate embeddings for user question
    const embedding = await span('embed-question', {
      kind: 'llm_call',
      model: 'text-embedding-3-small'
    }, async (s) => {
      s.setInput({ question: userQuestion });

      const response = await client.embeddings.create({
        model: "text-embedding-3-small",
        input: userQuestion
      });

      const embedding = response.data[0].embedding;
      s.setOutput({ embeddingLength: embedding.length });

      return embedding;
    });

    // 2. Search vector database
    const docs = await span('vector-search', { kind: 'tool_call' }, async (s) => {
      s.setInput({ embedding: embedding.slice(0, 5) }); // First 5 dims for brevity

      const docs = await db.similaritySearch(embedding, 5);
      s.setOutput({ numDocs: docs.length, relevanceScores: docs.map(d => d.score) });

      return docs;
    });

    // 3. Generate final answer
    const answer = await span('generate-answer', { kind: 'llm_call', model: 'gpt-4o' }, async (s) => {
      const context = docs.map(doc => doc.text).join('\n');
      const prompt = `Context:\n${context}\n\nQuestion: ${userQuestion}\n\nAnswer:`;

      s.setInput({ contextLength: context.length, question: userQuestion });

      const response = await client.chat.completions.create({
        model: "gpt-4o",
        messages: [{ role: "user", content: prompt }]
      });

      const answer = response.choices[0].message.content;
      s.setOutput({ answer });
      s.setTokens(
        response.usage?.prompt_tokens || 0,
        response.usage?.completion_tokens || 0
      );

      return answer;
    });

    return answer;
  });
}

// Usage
const answer = await ragPipeline("What are the benefits of vector databases?");

Streaming Support
The TypeScript SDK supports streaming API calls:
import { span } from 'opswald';
import OpenAI from 'openai';

const client = new OpenAI();

await span('streaming-chat', { kind: 'llm_call', model: 'gpt-4o' }, async (s) => {
  s.setInput({ prompt: "Tell me a story" });

  const stream = await client.chat.completions.create({
    model: "gpt-4o",
    messages: [{ role: "user", content: "Tell me a story" }],
    stream: true
  });

  let fullResponse = '';
  let tokenCount = 0;

  for await (const chunk of stream) {
    const content = chunk.choices[0]?.delta?.content || '';
    fullResponse += content;
    tokenCount++;
  }

  s.setOutput({ response: fullResponse });
  s.setTokens(0, tokenCount); // Approximate output tokens
});

Best Practices
1. Meaningful Span Names
Use descriptive names that explain what the step does:
// ❌ Generic
await span('llm', { kind: 'llm_call' }, async (s) => {
  // ...
});

// ✅ Descriptive
await span('extract-key-entities', { kind: 'llm_call' }, async (s) => {
  // ...
});

2. Capture Decisions
Use metadata to capture the reasoning behind decisions:
await span('route-user-query', { kind: 'custom' }, async (s) => {
  const confidence = await classifyIntent(query);
  const route = confidence > 0.8 ? 'expert' : 'general';

  s.setMetadata({
    classificationConfidence: confidence,
    routingDecision: route,
    reasoning: `Confidence ${confidence.toFixed(2)} ${confidence > 0.8 ? '>' : '<='} 0.8 threshold`
  });
});

3. Error Recovery Patterns
Always trace error handling so you can debug failures:
try {
  const result = await riskyAiOperation();
} catch (error) {
  await span('error-recovery', { kind: 'error' }, async (s) => {
    s.setError({ error: error.message, errorType: error.constructor.name });

    // Capture recovery logic
    const fallback = await getFallbackResponse();
    s.setOutput({ fallbackUsed: true, fallback });

    return fallback;
  });
}

4. Nested Workflows
Use nested spans to capture sub-workflows:
await trace('content-generation', {}, async (t) => {
  await span('research-phase', { kind: 'custom' }, async (research) => {
    await span('web-search', { kind: 'tool_call' }, async (s) => {
      // Search implementation
    });

    await span('summarize-results', { kind: 'llm_call' }, async (s) => {
      // Summarization implementation
    });
  });

  await span('writing-phase', { kind: 'custom' }, async (writing) => {
    await span('generate-outline', { kind: 'llm_call' }, async (s) => {
      // Outline generation
    });

    await span('write-content', { kind: 'llm_call' }, async (s) => {
      // Content writing
    });
  });
});

Configuration
Customize the SDK behavior:
init({
  apiKey: 'your-key',
  baseUrl: 'https://api.opswald.com', // Custom endpoint
  batchSize: 50,                      // Spans per batch
  flushIntervalMs: 5000,              // Auto-flush interval
  maxRetries: 3,                      // Retry failed requests
  debug: true                         // Enable debug logging
});

Decorators (Experimental)
Use decorators for automatic tracing:
import { spanFunction, traceFunction } from 'opswald';
class AIAgent {
  @traceFunction()
  async processRequest(input: string) {
    const analysis = await this.analyzeInput(input);
    const response = await this.generateResponse(analysis);
    return response;
  }

  @spanFunction({ kind: 'llm_call', model: 'gpt-4o' })
  private async analyzeInput(input: string) {
    // This method is automatically wrapped in a span
    return await llm.analyze(input);
  }

  @spanFunction({ kind: 'llm_call', model: 'gpt-4o' })
  private async generateResponse(analysis: any) {
    // This method is automatically wrapped in a span
    return await llm.generate(analysis);
  }
}

Graceful Shutdown
Always flush pending spans before your application exits:
import { shutdown } from 'opswald';
// Handle graceful shutdown
process.on('SIGINT', async () => {
  console.log('Shutting down...');
  await shutdown(5000); // 5 second timeout
  process.exit(0);
});

// Or in application cleanup
try {
  // Your application code
  await runAgent();
} finally {
  await shutdown(); // Ensures all spans are sent
}

Next.js Integration
For Next.js applications, initialize in a layout or middleware:
import { init } from 'opswald';
// Initialize once at app startup
init({ apiKey: process.env.OPSWALD_API_KEY! });

export default function RootLayout({
  children,
}: {
  children: React.ReactNode;
}) {
  return (
    <html lang="en">
      <body>{children}</body>
    </html>
  );
}

// app/api/chat/route.ts
import { trace, span } from 'opswald';
export async function POST(request: Request) {
  return await trace('chat-api', {}, async (t) => {
    const { message } = await request.json();

    const response = await span('generate-response', { kind: 'llm_call', model: 'gpt-4o' }, async (s) => {
      s.setInput({ message });

      const result = await openai.chat.completions.create({
        model: "gpt-4o",
        messages: [{ role: "user", content: message }]
      });

      const response = result.choices[0].message.content;
      s.setOutput({ response });

      return response;
    });

    return Response.json({ response });
  });
}