#!/bin/bash
#SBATCH --job-name=pp_phase2
#SBATCH --partition=rome
#SBATCH --nodes=1
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=32
#SBATCH --time=04:00:00
#SBATCH --mem=16G
#SBATCH --output=pp_phase2_%j.out
#SBATCH --error=pp_phase2_%j.err

# =============================================================================
# PP Hydra Effect - Phase 2: Self-Organization (SOC Test)
# =============================================================================
#
# PHASE 2: Test if prey_death evolves toward critical point
# - 6 initial prey_death values x 30 reps = 180 simulations
# - Longer runs (5000 steps) for evolution to equilibrate
# - Tracks evolved_prey_death_timeseries
#
# SUBMIT:  sbatch run_phase2.sh
# MONITOR: squeue -u $USER
# CANCEL:  scancel <job_id>
#
# =============================================================================

# Abort on any command failure, on use of unset variables, and on a failure
# anywhere in a pipeline — a half-finished phase should not look "green".
set -euo pipefail

# These are exported by sbatch; fail fast with a clear message if the script
# is run directly instead of being submitted to Slurm.
: "${SLURM_JOB_ID:?this script must be submitted via sbatch}"
: "${SLURM_CPUS_PER_TASK:?this script must be submitted via sbatch}"

echo "========================================"
echo "PP Hydra Effect - Phase 2"
echo "========================================"
echo "Job ID: $SLURM_JOB_ID"
echo "Node: $(hostname)"
echo "CPUs: $SLURM_CPUS_PER_TASK"
echo "Start: $(date)"
echo "Working dir: $(pwd)"
echo "========================================"

# -----------------------------------------------------------------------------
# Environment Setup
# -----------------------------------------------------------------------------

# Activate the project virtualenv (set -e aborts the job if this fails).
source "$HOME/snellius_venv/bin/activate"

# Prevent numpy/scipy from spawning extra threads (joblib handles parallelism)
export OMP_NUM_THREADS=1
export OPENBLAS_NUM_THREADS=1
export MKL_NUM_THREADS=1
export NUMEXPR_NUM_THREADS=1

# -----------------------------------------------------------------------------
# Run Phase 2
# -----------------------------------------------------------------------------

# One results directory per Slurm job so reruns never clobber each other.
OUTPUT_DIR="results/phase2_${SLURM_JOB_ID}"
mkdir -p "$OUTPUT_DIR"

echo ""
echo "Output directory: $OUTPUT_DIR"
echo ""

# Dry run first to verify setup before committing hours of compute.
echo "Dry run check:"
python3 -u scripts/experiments.py \
    --phase 2 \
    --output "$OUTPUT_DIR" \
    --cores "$SLURM_CPUS_PER_TASK" \
    --dry-run

echo ""
echo "Starting Phase 2..."
echo ""

# Run phase 2 (the real, long-running sweep).
python3 -u scripts/experiments.py \
    --phase 2 \
    --output "$OUTPUT_DIR" \
    --cores "$SLURM_CPUS_PER_TASK"

# -----------------------------------------------------------------------------
# Completion
# -----------------------------------------------------------------------------

echo ""
echo "========================================"
echo "Phase 2 Complete"
echo "========================================"
echo "End time: $(date)"
echo "Results in: $OUTPUT_DIR/"
echo ""
echo "Output files:"
ls -lh "$OUTPUT_DIR"/
echo ""
echo "Next steps:"
echo "  1. Download phase2_results.jsonl"
echo "  2. Plot evolved_prey_death_final vs initial prey_death"
echo "  3. Check if all runs converge to ~0.095-0.105 (critical point)"
echo "  4. If SOC confirmed, proceed to Phase 3 (finite-size scaling)"
echo "========================================"