forked from DreamLab-AI/origin-logseq-AR
-
Notifications
You must be signed in to change notification settings - Fork 19
Expand file tree
/
Copy pathstart.sh
More file actions
executable file
·111 lines (100 loc) · 3.89 KB
/
start.sh
File metadata and controls
executable file
·111 lines (100 loc) · 3.89 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
#!/bin/bash
set -euo pipefail
# Log a single timestamped line to stdout.
# Arguments: message text — all arguments are joined with spaces ($*), so
# callers may pass either one quoted string (as every call site here does)
# or several words; previously only $1 was printed and extra words were
# silently dropped. printf is used instead of echo so messages beginning
# with '-' or containing backslashes are emitted verbatim.
log() {
  printf '[%s] %s\n' "$(date "+%Y-%m-%d %H:%M:%S")" "$*"
}
# Check for GPU environment variables.
# Resolves NVIDIA_GPU_UUID (auto-detecting via nvidia-smi when unset) and
# exports NVIDIA_VISIBLE_DEVICES / CUDA_VISIBLE_DEVICES accordingly.
# Fixes two defects in the original:
#   1. The error text below advertises ENABLE_GPU_PHYSICS=false as a way to
#      run without a GPU, but the script never honored it — it is checked
#      first now and skips detection entirely.
#   2. The NVIDIA_VISIBLE_DEVICES / CUDA_VISIBLE_DEVICES exports were nested
#      inside the auto-detect branch only, so a user-provided
#      NVIDIA_GPU_UUID never reached Docker/CUDA; they now apply to both the
#      auto-detected and the user-provided case, as the comments intended.
log "Checking GPU environment variables..."
if [ "${ENABLE_GPU_PHYSICS:-true}" = "false" ]; then
    log "ENABLE_GPU_PHYSICS=false - skipping GPU detection"
else
    if [ -z "${NVIDIA_GPU_UUID:-}" ]; then
        # Try to auto-detect the first available GPU
        if command -v nvidia-smi >/dev/null 2>&1; then
            DETECTED_UUID=$(nvidia-smi --query-gpu=uuid --format=csv,noheader | head -n1)
            if [ -n "$DETECTED_UUID" ]; then
                NVIDIA_GPU_UUID="$DETECTED_UUID"
                log "Auto-detected GPU UUID: $NVIDIA_GPU_UUID"
                export NVIDIA_GPU_UUID
            else
                log "ERROR: No GPU detected by nvidia-smi"
                log "Please set NVIDIA_GPU_UUID environment variable or ensure GPU is available"
                exit 1
            fi
        else
            log "ERROR: nvidia-smi not available and NVIDIA_GPU_UUID not set"
            log "Please either:"
            log " 1. Set NVIDIA_GPU_UUID environment variable"
            log " 2. Ensure NVIDIA drivers are installed"
            log " 3. Run without GPU by setting ENABLE_GPU_PHYSICS=false"
            exit 1
        fi
    else
        log "Using provided GPU UUID: $NVIDIA_GPU_UUID"
    fi
    # Also set NVIDIA_VISIBLE_DEVICES to ensure Docker uses this GPU
    # (applies to both auto-detected and user-provided UUIDs).
    if [ -z "${NVIDIA_VISIBLE_DEVICES:-}" ]; then
        export NVIDIA_VISIBLE_DEVICES="$NVIDIA_GPU_UUID"
        log "Setting NVIDIA_VISIBLE_DEVICES to: $NVIDIA_VISIBLE_DEVICES"
    fi
    # For older CUDA versions, also set CUDA_VISIBLE_DEVICES
    if [ -z "${CUDA_VISIBLE_DEVICES:-}" ]; then
        # Use device index 0 since NVIDIA_VISIBLE_DEVICES will map to this
        export CUDA_VISIBLE_DEVICES="0"
        log "Setting CUDA_VISIBLE_DEVICES to: $CUDA_VISIBLE_DEVICES"
    fi
fi
# Parse command line arguments: a leading "--no-webxr" disables launching
# the webxr binary (debug mode); anything else leaves it enabled.
START_WEBXR=true
case "${1:-}" in
    --no-webxr) START_WEBXR=false ;;
esac
# Verify settings file permissions and ensure accessibility.
log "Verifying settings.yaml permissions..."
# Guard clause: abort early when the settings file is missing.
if [ ! -f "/app/settings.yaml" ]; then
    log "Error: settings.yaml not found at /app/settings.yaml"
    exit 1
fi
# NOTE(review): 666 makes the file world-writable; presumably intentional so
# a non-root container user can modify settings — confirm.
chmod 666 /app/settings.yaml
log "settings.yaml permissions set to 666"
log "settings.yaml permissions verified"
# Set up runtime environment
# Start nginx: validate the configuration first; under the script's
# `set -e`, a failed config test aborts before nginx is launched.
log "Starting nginx..."
nginx -t
nginx
log "nginx started successfully"
# Execute the webxr binary only if not in debug mode.
# Guard clause: in debug mode (--no-webxr) just park the container.
if [ "$START_WEBXR" != true ]; then
    log "Skipping webxr execution (debug mode)"
    # Keep the container running
    tail -f /dev/null
    exit
fi

log "Preparing to execute webxr with extended GPU diagnostics..."
log "GPU information:"
if command -v nvidia-smi >/dev/null 2>&1; then
    nvidia-smi
    # Get device uuid to verify it matches our expected value
    UUID=$(nvidia-smi --query-gpu=uuid --format=csv,noheader)
    log "GPU UUID detected by nvidia-smi: $UUID"
else
    log "WARNING: nvidia-smi not available - this may indicate NVIDIA driver issues"
fi

# Verify that PTX file exists and is readable (unified kernel).
PTX_PATH="/app/src/utils/ptx/visionflow_unified.ptx"
if [ -f "$PTX_PATH" ]; then
    # NOTE: `stat -c%s` is GNU coreutils syntax (fine in this Linux container).
    PTX_SIZE=$(stat -c%s "$PTX_PATH")
    log "✅ PTX file exists and is readable (size: $PTX_SIZE bytes)"
else
    log "⚠️ PTX file NOT found at /app/src/utils/ptx/visionflow_unified.ptx"
    log "This should have been generated by build.rs during compilation"
fi

# Check CUDA visibility
if [ -n "${CUDA_VISIBLE_DEVICES:-}" ]; then
    log "CUDA_VISIBLE_DEVICES=$CUDA_VISIBLE_DEVICES"
else
    # If not set, explicitly set it to ensure CUDA can see device
    export CUDA_VISIBLE_DEVICES=0
    log "Explicitly setting CUDA_VISIBLE_DEVICES=0"
fi

# Always enable GPU debugging to ensure physics simulation runs.
# exec replaces this shell so webxr becomes PID 1 and receives signals.
log "Starting webxr with GPU compute enabled"
exec /app/webxr --gpu-debug