# 1. Clone repository
git clone https://github.com/siddhant-k-code/contextlab.git
cd contextlab
# 2. Run setup script
./scripts/dev_env_setup.sh
# 3. Activate environment
source venv/bin/activate
# 4. Add API keys to .env
echo "OPENAI_API_KEY=your-key-here" >> .env

# Create virtual environment
python3 -m venv venv
source venv/bin/activate
# Install dependencies
pip install -e ".[dev,docs,optimization]"
# Install pre-commit hooks
pre-commit install
# Run tests
make test

import asyncio
from contextlab import analyze

async def main():
    report = await analyze(
        text="Your text here",
        model="gpt-4o-mini"
    )
    print(f"Analyzed {len(report.chunks)} chunks")

asyncio.run(main())

# Analyze documents
contextlab analyze docs/*.md --model gpt-4o-mini
# Compress context
contextlab compress <run_id> --strategy hybrid --limit 8000
# Visualize results
contextlab viz <run_id>

# Start API
make api
# Or directly
uvicorn api.main:app --reload
# Test
curl http://localhost:8000/health

# Install dependencies
cd web
npm install
# Start dev server
npm run dev
# Visit http://localhost:5173

# API
docker build -f docker/api.Dockerfile -t contextlab-api .
# Web
docker build -f docker/web.Dockerfile -t contextlab-web .

# API
docker run -p 8000:8000 --env-file .env contextlab-api
# Web
docker run -p 3000:3000 contextlab-web

# docker-compose.yml
version: '3.8'
services:
  api:
    build:
      context: .
      dockerfile: docker/api.Dockerfile
    ports:
      - "8000:8000"
    env_file:
      - .env
    volumes:
      - ./data:/app/.contextlab
  web:
    build:
      context: .
      dockerfile: docker/web.Dockerfile
    ports:
      - "3000:3000"
    depends_on:
      - api
    environment:
      - API_URL=http://api:8000

docker-compose up

Required:
- OPENAI_API_KEY: OpenAI API key for embeddings/summarization
- CONTEXTLAB_STORAGE_PATH: Path for storing analysis runs

Optional:
- CONTEXTLAB_EMBEDDING_MODEL: Embedding model (default: text-embedding-3-small)
- API_HOST: API host (default: 0.0.0.0)
- API_PORT: API port (default: 8000)
- API Keys: Never commit API keys to git
- CORS: Configure CORS for production in api/main.py
- Authentication: Enable token-based auth if exposing publicly
- Rate Limiting: Add rate limiting middleware
- HTTPS: Use HTTPS in production
# Install dependencies
sudo apt update
sudo apt install python3.11 python3-pip nodejs npm
# Clone and setup
git clone https://github.com/siddhant-k-code/contextlab.git
cd contextlab
./scripts/dev_env_setup.sh
# Run with systemd
sudo cp deployment/contextlab-api.service /etc/systemd/system/
sudo systemctl enable contextlab-api
sudo systemctl start contextlab-api

# k8s/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: contextlab-api
spec:
  replicas: 3
  selector:
    matchLabels:
      app: contextlab-api
  template:
    metadata:
      labels:
        app: contextlab-api
    spec:
      containers:
        - name: api
          image: ghcr.io/siddhant-k-code/contextlab/api:latest
          ports:
            - containerPort: 8000
          env:
            - name: OPENAI_API_KEY
              valueFrom:
                secretKeyRef:
                  name: contextlab-secrets
                  key: openai-api-key

See cloud-specific deployment guides in docs/deployment/.
# API health
curl http://localhost:8000/health
# Check metrics
curl http://localhost:8000/metrics

Configure logging level:

import logging
logging.basicConfig(level=logging.INFO)

Key metrics to monitor:
- Request latency (p50, p95, p99)
- Error rate
- Token usage
- Storage size
- Embedding API calls
# Backup analysis runs
tar -czf contextlab-backup-$(date +%Y%m%d).tar.gz .contextlab/
# Backup to S3
aws s3 cp contextlab-backup-*.tar.gz s3://your-bucket/backups/

# Restore from backup
tar -xzf contextlab-backup-20250115.tar.gz

Issue: Import errors
# Solution: Reinstall package
pip install -e .

Issue: API key not found
# Solution: Check .env file
cat .env | grep OPENAI_API_KEY

Issue: Port already in use
# Solution: Use different port
uvicorn api.main:app --port 8001

Issue: Database locked
# Solution: Close all connections and restart
rm .contextlab/contextlab.db-shm .contextlab/contextlab.db-wal

# Enable debug logging
export CONTEXTLAB_DEV_MODE=true
# Run with verbose output
contextlab analyze docs/*.md --verbose

- Deploy multiple API instances behind load balancer
- Use shared storage (S3/NFS) for analysis runs
- Use Redis for session/cache management
- PostgreSQL instead of SQLite for production
- Read replicas for visualization queries
- Regular vacuum and optimization
- Cache embeddings for frequently analyzed documents
- Cache compression results
- Use CDN for web UI assets
# Pull latest
git pull origin main
# Update dependencies
pip install -e ".[dev,docs,optimization]"
# Run migrations (if any)
python scripts/migrate.py
# Restart services
sudo systemctl restart contextlab-api

# Clean up old runs
contextlab cleanup --days 30
# Optimize database
sqlite3 .contextlab/contextlab.db "VACUUM;"