Skip to main content

Model API Guide

This guide provides detailed documentation for the Hokusai Model API, including endpoints, authentication, and usage examples.

Overview

The Hokusai Model API provides programmatic access to models in the Hokusai ecosystem. For a high-level overview of using models, see Using Models.

Authentication

All API requests require authentication using an API key. See the Authentication documentation for the full guide, including key creation, rotation, and security best practices.

Quick Setup

from hokusai import HokusaiClient

client = HokusaiClient(api_key='your_api_key')

For detailed instructions on obtaining and managing API keys, see the Authentication Quickstart.

Wallet Connection

For token-based access, connect your wallet:

# Connect wallet
client.connect_wallet('your_wallet_address')

# Check connection status
status = client.get_wallet_status()
print(f"Connected: {status.connected}")
print(f"Balance: {status.balance}")

API Endpoints

Models

List Models

# Get all models
models = client.list_models()

# Filter models
filtered_models = client.list_models(
    category='Medical',
    min_reward=10000
)

Get Model Details

# Get model information
model = client.get_model('model_id')

# Available properties
print(f"Name: {model.name}")
print(f"Category: {model.category}")
print(f"Type: {model.type}")
print(f"Current Benchmark: {model.benchmark}")
print(f"DeltaOne Reward: {model.reward}")

Predictions

Single Prediction

# Basic prediction
try:
    result = client.predict(
        model_id='model_id',
        input_data={
            'text': 'Sample input text',
            'parameters': {
                'temperature': 0.7,
                'max_tokens': 100
            }
        }
    )

    # Available properties
    print(f"Prediction: {result.prediction}")
    print(f"Confidence: {result.confidence}")
    print(f"Latency: {result.latency}ms")
    print(f"Fee Charged: {result.fee_charged} USDC")
except HokusaiError as e:
    print(f"Error: {e.message}")

Batch Prediction

# Batch prediction
results = client.batch_predict(
    model_id='model_id',
    input_data=[
        {'text': 'Input 1'},
        {'text': 'Input 2'}
    ],
    batch_size=10
)

# Process results
for result in results:
    print(f"Input: {result.input}")
    print(f"Prediction: {result.prediction}")
    print(f"Confidence: {result.confidence}")

Usage and Analytics

Usage Statistics

# Get usage stats
stats = client.get_usage_stats(
    model_id='model_id',
    timeframe='last_30_days'
)

# Available metrics
print(f"Total Requests: {stats.total_requests}")
print(f"Successful Requests: {stats.successful_requests}")
print(f"Failed Requests: {stats.failed_requests}")
print(f"Average Latency: {stats.avg_latency}ms")
print(f"Total Fees Paid: {stats.fees_paid} USDC")

Cost Estimation

# Estimate costs
estimate = client.estimate_cost(
    model_id='model_id',
    request_count=1000,
    input_size='medium'
)

# Cost details
print(f"Estimated Tokens: {estimate.tokens}")
print(f"Estimated Cost: {estimate.cost}")
print(f"Cost per Request: {estimate.cost_per_request}")

Model-Specific Endpoints

Medical Models

# Medical imaging model
result = client.predict(
    model_id='chest-xray-diagnostic-v2',
    input_data={
        'image': image_data,
        'parameters': {
            'confidence_threshold': 0.95
        }
    }
)

# Medical text model
result = client.predict(
    model_id='sepsis-prediction-lstm',
    input_data={
        'patient_data': patient_data,
        'parameters': {
            'time_window': '24h'
        }
    }
)

Legal Models

# Contract analysis
result = client.predict(
    model_id='contract-clause-extractor',
    input_data={
        'contract_text': contract_text,
        'parameters': {
            'clause_types': ['liability', 'termination']
        }
    }
)

# Case summarization
result = client.predict(
    model_id='case-law-summarizer',
    input_data={
        'case_text': case_text,
        'parameters': {
            'summary_length': 'medium'
        }
    }
)

Financial Models

# Market prediction
result = client.predict(
    model_id='high-frequency-market-predictor',
    input_data={
        'market_data': market_data,
        'parameters': {
            'timeframe': '1h',
            'confidence_threshold': 0.8
        }
    }
)

# Credit analysis
result = client.predict(
    model_id='credit-default-classifier',
    input_data={
        'applicant_data': applicant_data,
        'parameters': {
            'risk_threshold': 0.7
        }
    }
)

Error Handling

Common Errors

try:
    result = client.predict(
        model_id='model_id',
        input_data={'text': 'Sample input'}
    )
except HokusaiError as e:
    if e.code == 'INSUFFICIENT_TOKENS':
        print("Insufficient tokens for prediction")
    elif e.code == 'INVALID_INPUT':
        print("Invalid input format")
    elif e.code == 'MODEL_UNAVAILABLE':
        print("Model is currently unavailable")
    else:
        print(f"Unexpected error: {e.message}")

Rate Limiting

# Handle rate limiting
import time

try:
    result = client.predict(...)
except RateLimitError as e:
    print(f"Rate limit exceeded. Retry after {e.retry_after} seconds")
    time.sleep(e.retry_after)
    result = client.predict(...)

Best Practices

  1. Authentication

    • Store API keys securely
    • Rotate keys regularly
    • Use environment variables
  2. Performance

    • Use batch processing
    • Implement caching
    • Monitor latency
  3. Error Handling

    • Implement retry logic
    • Log errors properly
    • Handle rate limits
  4. Cost Management

    • Monitor token usage
    • Estimate costs
    • Optimize requests

Next Steps

For additional support, contact our Support Team or join our Community Forum.