Skip to content

# SDK Examples

Real-world examples showing how to debug different types of AI agents and workflows.

## Chatbot Agent

A conversational agent with memory and context switching:

import opswald
from openai import OpenAI
opswald.init(api_key='your-key')
client = OpenAI()
class ChatBot:
    """Conversational agent with bounded memory, instrumented with Opswald.

    Every call to ``chat`` opens one trace containing a span per pipeline
    stage: intent analysis (LLM), context retrieval, response generation
    (LLM), and history bookkeeping.
    """

    def __init__(self):
        # Alternating {"role", "content"} message dicts, trimmed to the
        # 20 newest entries by _update_history.
        self.conversation_history = []

    def chat(self, user_message: str) -> str:
        """Run one conversation turn and return the assistant's reply."""
        with opswald.trace('chat-conversation') as t:
            # 1. Analyze user intent
            intent = self._analyze_intent(user_message)
            # 2. Retrieve relevant context
            context = self._get_context(intent, user_message)
            # 3. Generate response
            response = self._generate_response(user_message, context)
            # 4. Update conversation history
            self._update_history(user_message, response)
            return response

    def _analyze_intent(self, message: str) -> dict:
        """Classify ``message`` via gpt-4o.

        Returns the parsed classification dict:
        ``{"intent": ..., "confidence": ..., "entities": [...]}``.
        """
        import json  # stdlib; local import keeps the snippet copy-pasteable

        with opswald.span('analyze-intent', kind='llm_call', model='gpt-4o') as s:
            s.set_input({'user_message': message})
            response = client.chat.completions.create(
                model="gpt-4o",
                messages=[{
                    "role": "system",
                    "content": "Classify the user's intent. Return JSON: {\"intent\": \"question|request|casual\", \"confidence\": 0.95, \"entities\": []}"
                }, {
                    "role": "user",
                    "content": message
                }],
                temperature=0.1  # near-deterministic classification
            )
            # FIX: parse with json.loads, never eval(). eval() executes
            # arbitrary code from the model and cannot parse the JSON
            # literals true/false/null anyway.
            intent = json.loads(response.choices[0].message.content)
            s.set_output(intent)
            s.set_tokens(
                input_tokens=response.usage.prompt_tokens,
                output_tokens=response.usage.completion_tokens
            )
            # Capture decision reasoning for later debugging.
            s.set_metadata({
                'classification_confidence': intent['confidence'],
                'routing_strategy': 'high_confidence' if intent['confidence'] > 0.8 else 'fallback'
            })
            return intent

    def _get_context(self, intent: dict, message: str) -> str:
        """Pick a context source based on the classified intent."""
        with opswald.span('retrieve-context', kind='tool_call') as s:
            s.set_input({'intent': intent['intent'], 'entities': intent['entities']})
            if intent['intent'] == 'question':
                # Search knowledge base (stubbed in this example).
                context = f"Retrieved {len(intent['entities'])} relevant documents"
            else:
                # FIX: history entries are dicts, so joining them directly
                # raised TypeError. Format each message as "role: content".
                context = "\n".join(
                    f"{msg['role']}: {msg['content']}"
                    for msg in self.conversation_history[-3:]  # last 3 messages
                )
            s.set_output({'context_type': intent['intent'], 'context_length': len(context)})
            return context

    def _generate_response(self, message: str, context: str) -> str:
        """Produce the assistant reply using context plus recent history."""
        with opswald.span('generate-response', kind='llm_call', model='gpt-4o') as s:
            system_prompt = f"You are a helpful assistant. Context: {context}"
            s.set_input({
                'user_message': message,
                'context_length': len(context),
                'history_turns': len(self.conversation_history)
            })
            response = client.chat.completions.create(
                model="gpt-4o",
                messages=[
                    {"role": "system", "content": system_prompt},
                    *[{"role": msg["role"], "content": msg["content"]}
                      for msg in self.conversation_history[-3:]],  # recent history
                    {"role": "user", "content": message}
                ],
                temperature=0.7
            )
            bot_response = response.choices[0].message.content
            s.set_output({'response': bot_response, 'response_length': len(bot_response)})
            s.set_tokens(
                input_tokens=response.usage.prompt_tokens,
                output_tokens=response.usage.completion_tokens
            )
            return bot_response

    def _update_history(self, user_msg: str, bot_msg: str):
        """Append the exchange, then trim history to the 20 newest messages."""
        with opswald.span('update-history', kind='custom') as s:
            s.set_input({'history_size_before': len(self.conversation_history)})
            self.conversation_history.extend([
                {"role": "user", "content": user_msg},
                {"role": "assistant", "content": bot_msg}
            ])
            # Keep only recent history to bound memory and prompt size.
            if len(self.conversation_history) > 20:
                self.conversation_history = self.conversation_history[-20:]
            s.set_output({'history_size_after': len(self.conversation_history)})
# Usage: one traced conversation turn; inspect the resulting trace in the dashboard.
bot = ChatBot()
response = bot.chat("What's the weather like today?")

## Multi-Agent Research Pipeline

A research agent that coordinates multiple specialized sub-agents:

import opswald
import asyncio
from typing import List, Dict
from openai import OpenAI
opswald.init(api_key='your-key')
client = OpenAI()
class ResearchPipeline:
    """Coordinates question generation, per-question research sub-agents,
    synthesis, and report writing under a single Opswald trace.
    """

    def research_topic(self, topic: str) -> Dict:
        """Research ``topic`` end to end and return the final report dict."""
        with opswald.trace('research-pipeline') as t:
            # 1. Generate research questions
            questions = self._generate_questions(topic)
            # 2. Research each question (one sub-agent span per question)
            findings = self._research_questions(questions)
            # 3. Synthesize findings
            synthesis = self._synthesize_findings(findings)
            # 4. Generate final report
            report = self._generate_report(topic, synthesis)
            return report

    def _generate_questions(self, topic: str) -> List[str]:
        """Ask gpt-4o for research questions; return the parsed list."""
        import json  # stdlib; local import keeps the snippet copy-pasteable

        with opswald.span('generate-research-questions', kind='llm_call', model='gpt-4o') as s:
            s.set_input({'topic': topic})
            response = client.chat.completions.create(
                model="gpt-4o",
                messages=[{
                    "role": "system",
                    "content": "Generate 5 specific research questions about the topic. Return as JSON array."
                }, {
                    "role": "user",
                    "content": f"Topic: {topic}"
                }],
                temperature=0.3
            )
            # FIX: parse with json.loads, never eval() — eval() executes
            # arbitrary model output.
            questions = json.loads(response.choices[0].message.content)
            s.set_output({'num_questions': len(questions), 'questions': questions})
            s.set_tokens(
                input_tokens=response.usage.prompt_tokens,
                output_tokens=response.usage.completion_tokens
            )
            return questions

    def _research_questions(self, questions: List[str]) -> List[Dict]:
        """Research every question, one agent span each.

        NOTE(review): the span is named 'parallel-research' but the loop
        below runs sequentially — parallelize (e.g. asyncio/threads) if
        true concurrency is required.
        """
        with opswald.span('parallel-research', kind='custom') as s:
            s.set_input({'questions': questions, 'parallel_agents': len(questions)})
            findings = [
                self._research_single_question(question, agent_id=i)
                for i, question in enumerate(questions)
            ]
            s.set_output({'findings_collected': len(findings)})
            return findings

    def _research_single_question(self, question: str, agent_id: int) -> Dict:
        """Run one sub-agent: search the web, analyze sources, summarize."""
        with opswald.span(f'agent-{agent_id}-research', kind='custom') as s:
            s.set_input({'question': question, 'agent_id': agent_id})
            # Research steps (search + analysis are stubbed below).
            search_results = self._web_search(question)
            analysis = self._analyze_sources(search_results)
            finding = {
                'question': question,
                'sources': len(search_results),
                'key_insights': analysis['insights'],
                'confidence': analysis['confidence']
            }
            s.set_output(finding)
            s.set_metadata({
                'research_quality': 'high' if analysis['confidence'] > 0.8 else 'medium',
                'source_diversity': len(set(r['domain'] for r in search_results))
            })
            return finding

    def _web_search(self, query: str) -> List[Dict]:
        """Simulated web search; returns stub result dicts."""
        with opswald.span('web-search', kind='tool_call') as s:
            s.set_input({'query': query})
            results = [
                {'title': f'Result {i}', 'url': f'https://example{i}.com', 'domain': f'example{i}.com'}
                for i in range(3)
            ]
            s.set_output({'num_results': len(results)})
            return results

    def _analyze_sources(self, sources: List[Dict]) -> Dict:
        """Simulated source analysis; returns a fixed analysis dict."""
        with opswald.span('analyze-sources', kind='llm_call', model='gpt-4o') as s:
            s.set_input({'num_sources': len(sources)})
            analysis = {
                'insights': ['Insight 1', 'Insight 2'],
                'confidence': 0.85,
                'bias_detected': False
            }
            s.set_output(analysis)
            return analysis

    def _synthesize_findings(self, findings: List[Dict]) -> Dict:
        """Combine per-question findings into themes and conclusions."""
        with opswald.span('synthesize-findings', kind='llm_call', model='gpt-4o') as s:
            # FIX: guard against an empty findings list (division by zero).
            avg_confidence = (
                sum(f['confidence'] for f in findings) / len(findings)
                if findings else 0.0
            )
            s.set_input({
                'num_findings': len(findings),
                'avg_confidence': avg_confidence
            })
            response = client.chat.completions.create(
                model="gpt-4o",
                messages=[{
                    "role": "system",
                    "content": "Synthesize research findings into key themes and conclusions."
                }, {
                    "role": "user",
                    "content": str(findings)  # In practice, format this better
                }],
                temperature=0.4
            )
            synthesis = {
                'themes': ['Theme 1', 'Theme 2'],
                'conclusions': response.choices[0].message.content,
                'evidence_strength': 'strong'
            }
            s.set_output(synthesis)
            s.set_tokens(
                input_tokens=response.usage.prompt_tokens,
                output_tokens=response.usage.completion_tokens
            )
            return synthesis

    def _generate_report(self, topic: str, synthesis: Dict) -> Dict:
        """Write the final report from the synthesis; returns the report dict."""
        with opswald.span('generate-final-report', kind='llm_call', model='gpt-4o') as s:
            s.set_input({'topic': topic, 'synthesis': synthesis})
            response = client.chat.completions.create(
                model="gpt-4o",
                messages=[{
                    "role": "system",
                    "content": "Generate a comprehensive research report based on the synthesis."
                }, {
                    "role": "user",
                    "content": f"Topic: {topic}\nSynthesis: {synthesis}"
                }],
                temperature=0.2
            )
            full_text = response.choices[0].message.content
            report = {
                'topic': topic,
                'executive_summary': full_text[:200],  # first 200 chars as summary
                'full_report': full_text,
                'research_quality': 'high',
                'word_count': len(full_text.split())
            }
            s.set_output(report)
            s.set_tokens(
                input_tokens=response.usage.prompt_tokens,
                output_tokens=response.usage.completion_tokens
            )
            return report
# Usage: runs the whole research pipeline under a single trace.
pipeline = ResearchPipeline()
report = pipeline.research_topic("Impact of AI on education")

## Error Recovery and Fallback Patterns

Handle failures gracefully with comprehensive error tracking:

import opswald
from openai import OpenAI
from anthropic import Anthropic
opswald.init(api_key='your-key')
openai_client = OpenAI()
anthropic_client = Anthropic()
class RobustAgent:
    """Agent that degrades gracefully through an ordered provider chain.

    Attempt order: OpenAI gpt-4o -> Anthropic Claude -> keyword-based
    fallback -> static degradation message. Every failure is recorded in
    its own error span so the full recovery path is visible in one trace.
    """

    def process_with_fallback(self, prompt: str) -> str:
        """Return the response from the first handler in the chain that succeeds."""
        # (handler, error-span name, provider label) in priority order.
        # Replaces the original three-deep try/except pyramid with an
        # equivalent flat loop: same spans, same error payloads.
        fallback_chain = [
            (self._try_openai, 'primary-failure', 'openai'),
            (self._try_anthropic, 'fallback1-failure', 'anthropic'),
            (self._simple_fallback, 'final-fallback-failure', 'local'),
        ]
        with opswald.trace('robust-processing') as t:
            for handler, failure_span, provider in fallback_chain:
                try:
                    return handler(prompt)
                except Exception as exc:
                    # Record the failure, then fall through to the next handler.
                    with opswald.span(failure_span, kind='error') as s:
                        s.set_error({'provider': provider, 'error': str(exc)})
            # Ultimate fallback: graceful degradation
            return self._graceful_degradation()

    def _try_openai(self, prompt: str) -> str:
        """Primary attempt: OpenAI gpt-4o with an aggressive timeout."""
        with opswald.span('openai-attempt', kind='llm_call', provider='openai', model='gpt-4o') as s:
            s.set_input({'prompt': prompt, 'strategy': 'primary'})
            response = openai_client.chat.completions.create(
                model="gpt-4o",
                messages=[{"role": "user", "content": prompt}],
                timeout=10  # fail fast so the fallback chain can proceed
            )
            result = response.choices[0].message.content
            s.set_output({'response': result, 'success': True})
            s.set_tokens(
                input_tokens=response.usage.prompt_tokens,
                output_tokens=response.usage.completion_tokens
            )
            return result

    def _try_anthropic(self, prompt: str) -> str:
        """First fallback: Anthropic Claude with a looser timeout."""
        with opswald.span('anthropic-attempt', kind='llm_call', provider='anthropic', model='claude-3-sonnet') as s:
            s.set_input({'prompt': prompt, 'strategy': 'fallback1'})
            response = anthropic_client.messages.create(
                model="claude-3-sonnet-20240229",
                max_tokens=1000,
                messages=[{"role": "user", "content": prompt}],
                timeout=15
            )
            result = response.content[0].text
            s.set_output({'response': result, 'success': True})
            s.set_metadata({'fallback_reason': 'openai_failure'})
            return result

    def _simple_fallback(self, prompt: str) -> str:
        """Second fallback: keyword matching when every LLM provider failed."""
        with opswald.span('simple-processing', kind='custom') as s:
            s.set_input({'prompt': prompt, 'strategy': 'fallback2'})
            # Simple keyword-based response — no external dependencies.
            if 'weather' in prompt.lower():
                result = "I can't access weather data right now, but you can check weather.com"
            elif 'time' in prompt.lower():
                import datetime
                result = f"The current time is {datetime.datetime.now().strftime('%H:%M')}"
            else:
                result = "I'm experiencing some technical difficulties. Please try rephrasing your request."
            s.set_output({'response': result, 'method': 'keyword_matching'})
            s.set_metadata({'fallback_reason': 'all_llm_providers_failed'})
            return result

    def _graceful_degradation(self) -> str:
        """Last resort: static apology, flagged critical for alerting."""
        with opswald.span('graceful-degradation', kind='error') as s:
            s.set_input({'strategy': 'ultimate_fallback'})
            result = "I'm currently unable to process your request. Please try again later or contact support."
            s.set_output({'response': result, 'degraded': True})
            s.set_metadata({
                'severity': 'critical',
                'user_impact': 'service_unavailable',
                'alert_required': True
            })
            return result
# Usage with comprehensive error tracking: if OpenAI fails, the trace shows
# each recovery attempt in order.
agent = RobustAgent()
response = agent.process_with_fallback("What's the weather like?")

## Decision Tree Agent

Track complex branching logic and decision points:

import opswald
from typing import Dict, Any
from openai import OpenAI
opswald.init(api_key='your-key')
client = OpenAI()
class DecisionTreeAgent:
    """Customer-service router that traces every branch of its decision tree:
    urgency classification, business-rule routing, and the chosen handler.
    """

    def process_customer_request(self, request: str, customer_tier: str) -> Dict[str, Any]:
        """Classify, route, and process a customer request; return all stages."""
        with opswald.trace('customer-service-decision-tree') as t:
            # 1. Classify request urgency
            urgency = self._classify_urgency(request)
            # 2. Route based on customer tier and urgency
            routing_decision = self._route_request(customer_tier, urgency, request)
            # 3. Process according to routing decision
            result = self._process_routed_request(request, routing_decision)
            return {
                'urgency': urgency,
                'routing': routing_decision,
                'result': result
            }

    def _classify_urgency(self, request: str) -> Dict[str, Any]:
        """Classify urgency via gpt-4o; returns the parsed classification dict."""
        import json  # stdlib; local import keeps the snippet copy-pasteable

        with opswald.span('classify-urgency', kind='llm_call', model='gpt-4o') as s:
            s.set_input({'request': request})
            response = client.chat.completions.create(
                model="gpt-4o",
                messages=[{
                    "role": "system",
                    "content": """Classify customer request urgency. Return JSON:
{"urgency": "low|medium|high|critical", "confidence": 0.95, "keywords": ["refund", "urgent"], "reasoning": "explanation"}"""
                }, {
                    "role": "user",
                    "content": request
                }],
                temperature=0.1  # near-deterministic classification
            )
            # FIX: parse with json.loads, never eval() — eval() executes
            # arbitrary model output and cannot parse JSON true/false/null.
            urgency_data = json.loads(response.choices[0].message.content)
            s.set_output(urgency_data)
            s.set_tokens(
                input_tokens=response.usage.prompt_tokens,
                output_tokens=response.usage.completion_tokens
            )
            # Log the decision reasoning for later inspection.
            s.set_metadata({
                'classification_confidence': urgency_data['confidence'],
                'decision_factors': urgency_data['keywords'],
                'threshold_met': urgency_data['confidence'] > 0.8
            })
            return urgency_data

    def _route_request(self, tier: str, urgency: Dict, request: str) -> Dict[str, Any]:
        """Apply the business-rule decision tree and record the chosen path."""
        with opswald.span('route-request', kind='custom') as s:
            s.set_input({
                'customer_tier': tier,
                'urgency_level': urgency['urgency'],
                'confidence': urgency['confidence']
            })
            # Decision tree logic lives in its own span below.
            routing_decision = self._evaluate_routing_rules(tier, urgency, request)
            s.set_output(routing_decision)
            s.set_metadata({
                'decision_path': routing_decision['path'],
                'business_rule': routing_decision['rule_applied'],
                'escalation_required': routing_decision.get('escalate', False)
            })
            return routing_decision

    def _evaluate_routing_rules(self, tier: str, urgency: Dict, request: str) -> Dict[str, Any]:
        """Evaluate routing branches in priority order; returns the decision dict."""
        with opswald.span('evaluate-business-rules', kind='custom') as s:
            urgency_level = urgency['urgency']
            confidence = urgency['confidence']
            # Decision tree branches, most urgent first.
            if urgency_level == 'critical' or (urgency_level == 'high' and confidence > 0.9):
                path = 'immediate-escalation'
                handler = 'human-agent'
                rule = 'CRITICAL_URGENT_RULE'
                sla_hours = 1
            elif tier == 'enterprise' and urgency_level in ['high', 'medium']:
                path = 'enterprise-priority'
                handler = 'senior-agent'
                rule = 'ENTERPRISE_PRIORITY_RULE'
                sla_hours = 4
            elif tier == 'premium' and urgency_level == 'high':
                path = 'premium-fast-track'
                handler = 'dedicated-agent'
                rule = 'PREMIUM_FAST_TRACK_RULE'
                sla_hours = 8
            elif urgency_level == 'low' and confidence > 0.85:
                # NOTE(review): this branch fires on HIGH classifier
                # confidence, yet the rule is named LOW_CONFIDENCE_AUTO_RULE
                # — confirm whether the name or the condition is intended.
                path = 'automated-resolution'
                handler = 'chatbot'
                rule = 'LOW_CONFIDENCE_AUTO_RULE'
                sla_hours = 24
            else:
                path = 'standard-queue'
                handler = 'general-agent'
                rule = 'STANDARD_ROUTING_RULE'
                sla_hours = 24
            decision = {
                'path': path,
                'handler': handler,
                'rule_applied': rule,
                'sla_hours': sla_hours,
                'escalate': path == 'immediate-escalation',
                'automated': handler == 'chatbot'
            }
            s.set_input({
                'tier': tier,
                'urgency': urgency_level,
                'confidence': confidence
            })
            s.set_output(decision)
            # Capture the exact decision-tree path taken.
            s.set_metadata({
                'decision_branch': f"{tier}_{urgency_level}_{confidence:.2f}",
                'rule_priority': 1 if path == 'immediate-escalation' else 2 if 'premium' in path else 3,
                'business_impact': 'high' if sla_hours <= 4 else 'medium' if sla_hours <= 8 else 'low'
            })
            return decision

    def _process_routed_request(self, request: str, routing: Dict[str, Any]) -> Dict[str, Any]:
        """Dispatch to the handler selected by the routing decision."""
        handler_type = routing['handler']
        if handler_type == 'chatbot':
            return self._automated_response(request, routing)
        elif handler_type in ['human-agent', 'senior-agent', 'dedicated-agent']:
            return self._human_agent_response(request, routing)
        else:
            return self._fallback_response(request, routing)

    def _automated_response(self, request: str, routing: Dict) -> Dict[str, Any]:
        """Answer automatically with a faster, cheaper model."""
        with opswald.span('automated-resolution', kind='llm_call', model='gpt-3.5-turbo') as s:
            s.set_input({'request': request, 'routing_path': routing['path']})
            response = client.chat.completions.create(
                model="gpt-3.5-turbo",  # faster model for automated responses
                messages=[{
                    "role": "system",
                    "content": "Provide a helpful automated response to this customer request. Be concise and actionable."
                }, {
                    "role": "user",
                    "content": request
                }],
                temperature=0.3
            )
            automated_response = response.choices[0].message.content
            result = {
                'response': automated_response,
                'method': 'automated',
                'escalation_path': 'Available if needed',
                'resolution_time_minutes': 2
            }
            s.set_output(result)
            s.set_tokens(
                input_tokens=response.usage.prompt_tokens,
                output_tokens=response.usage.completion_tokens
            )
            return result

    def _human_agent_response(self, request: str, routing: Dict) -> Dict[str, Any]:
        """Simulate assignment to a human agent and return the ticket info."""
        with opswald.span('human-agent-assignment', kind='custom') as s:
            s.set_input({'request': request, 'agent_type': routing['handler']})
            result = {
                'status': 'assigned_to_human',
                'agent_type': routing['handler'],
                # Deterministic-looking ticket id derived from the request text.
                'ticket_id': f"TKT-{hash(request) % 10000:04d}",
                'sla_hours': routing['sla_hours'],
                'escalation_level': routing['path']
            }
            s.set_output(result)
            s.set_metadata({
                'queue_priority': 'high' if routing['escalate'] else 'normal',
                'estimated_resolution_hours': routing['sla_hours'],
                'resource_cost': 'high' if routing['handler'] == 'senior-agent' else 'medium'
            })
            return result

    def _fallback_response(self, request: str, routing: Dict) -> Dict[str, Any]:
        """Record a routing failure and park the request for manual review."""
        with opswald.span('fallback-processing', kind='error') as s:
            s.set_error({'routing_failure': 'Unknown handler type', 'handler': routing.get('handler', 'unknown')})
            result = {
                'status': 'routing_error',
                'fallback': 'Assigned to general queue',
                'manual_review_required': True
            }
            s.set_output(result)
            return result
# Usage: an urgent enterprise request exercises the escalation branches.
agent = DecisionTreeAgent()
result = agent.process_customer_request(
    "URGENT: My payment failed and I need to access my account immediately for a business meeting!",
    customer_tier="enterprise",
)
# View the complete decision path in the dashboard.
print(f"Request routed to: {result['routing']['handler']}")
print(f"Decision path: {result['routing']['path']}")

These examples show how to use Opswald SDKs to debug complex AI workflows by capturing every decision point, error recovery path, and data transformation. The traces give you complete visibility into why your agents behaved the way they did.