-
Notifications
You must be signed in to change notification settings - Fork 23
Expand file tree
/
Copy pathrun-minikube-test.sh
More file actions
executable file
·38 lines (28 loc) · 1.12 KB
/
run-minikube-test.sh
File metadata and controls
executable file
·38 lines (28 loc) · 1.12 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
#!/bin/bash
# Run the minikube integration test: clean up leftovers from earlier runs,
# submit the test-runner Job, stream its logs, and exit 0 on Job success or
# 1 on Job failure.
set -euo pipefail

readonly NAMESPACE=sparkop
readonly JOB_NAME=test-runner

# Delete all sparkapplication resources that may be left over from the
# previous test runs. Best-effort: the CRD/resources may not exist.
kubectl delete sparkapplication --all -n "${NAMESPACE}" || true

# Job spec lives next to this script; quote so paths with spaces work.
JOB_SPEC="$(dirname -- "$0")/test_job.yaml"

# Delete previous instance of the job if it exists (ignore "not found").
kubectl delete -n "${NAMESPACE}" "job/${JOB_NAME}" 2>/dev/null || true

# Create the job
kubectl apply -n "${NAMESPACE}" -f "$JOB_SPEC"

# Wait for the job to have a pod. The get may fail while no pod exists yet
# (jsonpath index on an empty list), so tolerate failures inside the loop —
# otherwise set -e would abort on the first poll instead of retrying.
POD=
for _ in {1..10}; do
  POD=$(kubectl get pods -n "${NAMESPACE}" \
          --selector="job-name=${JOB_NAME}" \
          --output='jsonpath={.items[0].metadata.name}' 2>/dev/null || true)
  if [[ -n "$POD" ]]; then
    break
  fi
  sleep 1
done

echo "Waiting for pod to be ready:"
# Best-effort: logs below still work (and the final waits decide the
# exit status) even if the pod never reports ContainersReady in time.
kubectl wait -n "${NAMESPACE}" --for=condition=ContainersReady "pod/$POD" --timeout=60s || true

echo "Job output:"
kubectl logs -n "${NAMESPACE}" -f "job/${JOB_NAME}"

# Can't wait for both conditions at once, so wait for complete first, then
# for failure. NB: the Job condition type is "Failed" (not "failure") — the
# old spelling could never match, so failed jobs timed out instead of
# exiting cleanly with status 1.
kubectl wait -n "${NAMESPACE}" --for=condition=complete "job/${JOB_NAME}" --timeout=60s && exit 0
kubectl wait -n "${NAMESPACE}" --for=condition=failed "job/${JOB_NAME}" --timeout=60s && exit 1