Skip to content

Commit cb84e9d

Browse files
Stepan Rasputny and gcf-owl-bot[bot] authored
feat: create job with persistent disk sample (GoogleCloudPlatform#11898)
* feat: create job with persistent disk sample * Fix lint issues * fix: added existing disk attachment * fix: added existing disk as a volume * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * fix: added test assert for second disk --------- Co-authored-by: Owl Bot <gcf-owl-bot[bot]@users.noreply.github.com>
1 parent 31918eb commit cb84e9d

File tree

2 files changed

+158
-1
lines changed

2 files changed

+158
-1
lines changed
Lines changed: 132 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,132 @@
1+
# Copyright 2024 Google LLC
2+
#
3+
# Licensed under the Apache License, Version 2.0 (the "License");
4+
# you may not use this file except in compliance with the License.
5+
# You may obtain a copy of the License at
6+
#
7+
# http://www.apache.org/licenses/LICENSE-2.0
8+
#
9+
# Unless required by applicable law or agreed to in writing, software
10+
# distributed under the License is distributed on an "AS IS" BASIS,
11+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
# See the License for the specific language governing permissions and
13+
# limitations under the License.
14+
15+
# [START batch_create_persistent_disk_job]
16+
from google.cloud import batch_v1
17+
18+
19+
def create_with_pd_job(
    project_id: str,
    region: str,
    job_name: str,
    disk_name: str,
    zone: str,
    existing_disk_name=None,
) -> batch_v1.Job:
    """
    This method shows how to create a sample Batch Job that will run
    a simple command on Cloud Compute instances with mounted persistent disk.

    Args:
        project_id: project ID or project number of the Cloud project you want to use.
        region: name of the region you want to use to run the job. Regions that are
            available for Batch are listed on: https://cloud.google.com/batch/docs/get-started#locations
        job_name: the name of the job that will be created.
            It needs to be unique for each project and region pair.
        disk_name: name of the disk to be mounted for your Job.
        zone: name of the zone where the job's VMs (and the new persistent disk)
            will be placed, e.g. "europe-west4-c". Also used to build the resource
            path of the existing disk, so that disk must live in this zone.
        existing_disk_name(optional): existing disk name, which you want to attach to a job

    Returns:
        A job object representing the job created.
    """
    client = batch_v1.BatchServiceClient()

    # Define what will be done as part of the job.
    task = batch_v1.TaskSpec()
    runnable = batch_v1.Runnable()
    runnable.script = batch_v1.Runnable.Script()
    # The script writes one output file per task under the new disk's mount path.
    runnable.script.text = (
        "echo Hello world from task ${BATCH_TASK_INDEX}. >> /mnt/disks/"
        + disk_name
        + "/output_task_${BATCH_TASK_INDEX}.txt"
    )
    task.runnables = [runnable]
    task.max_retry_count = 2
    task.max_run_duration = "3600s"

    # Mount the newly created disk into each task's filesystem.
    volume = batch_v1.Volume()
    volume.device_name = disk_name
    volume.mount_path = f"/mnt/disks/{disk_name}"
    task.volumes = [volume]

    # Optionally also mount the pre-existing disk as a second volume.
    if existing_disk_name:
        volume2 = batch_v1.Volume()
        volume2.device_name = existing_disk_name
        volume2.mount_path = f"/mnt/disks/{existing_disk_name}"
        task.volumes.append(volume2)

    # Tasks are grouped inside a job using TaskGroups.
    # Currently, it's possible to have only one task group.
    group = batch_v1.TaskGroup()
    group.task_count = 4
    group.task_spec = task

    disk = batch_v1.AllocationPolicy.Disk()
    # The disk type of the new persistent disk, either pd-standard,
    # pd-balanced, pd-ssd, or pd-extreme. For Batch jobs, the default is pd-balanced
    disk.type_ = "pd-balanced"
    disk.size_gb = 10

    # Policies are used to define on what kind of virtual machines the tasks will run on.
    # Read more about local disks here: https://cloud.google.com/compute/docs/disks/persistent-disks
    policy = batch_v1.AllocationPolicy.InstancePolicy()
    policy.machine_type = "n1-standard-1"

    # Attach the new disk (created alongside the VMs) to the instance policy.
    attached_disk = batch_v1.AllocationPolicy.AttachedDisk()
    attached_disk.new_disk = disk
    attached_disk.device_name = disk_name
    policy.disks = [attached_disk]

    # Attach the pre-existing disk by its full zonal resource path.
    if existing_disk_name:
        attached_disk2 = batch_v1.AllocationPolicy.AttachedDisk()
        attached_disk2.existing_disk = (
            f"projects/{project_id}/zones/{zone}/disks/{existing_disk_name}"
        )
        attached_disk2.device_name = existing_disk_name
        policy.disks.append(attached_disk2)

    instances = batch_v1.AllocationPolicy.InstancePolicyOrTemplate()
    instances.policy = policy

    allocation_policy = batch_v1.AllocationPolicy()
    allocation_policy.instances = [instances]

    # Restrict the job to the zone that holds the disks.
    location = batch_v1.AllocationPolicy.LocationPolicy()
    location.allowed_locations = [f"zones/{zone}"]
    allocation_policy.location = location

    job = batch_v1.Job()
    job.task_groups = [group]
    job.allocation_policy = allocation_policy
    job.labels = {"env": "testing", "type": "script"}

    create_request = batch_v1.CreateJobRequest()
    create_request.job = job
    create_request.job_id = job_name
    # The job's parent is the region in which the job will run
    create_request.parent = f"projects/{project_id}/locations/{region}"

    return client.create_job(create_request)
121+
122+
123+
# [END batch_create_persistent_disk_job]
124+
125+
if __name__ == "__main__":
    import google.auth

    # Application Default Credentials supply the active project ID.
    _, project_id = google.auth.default()
    created_job = create_with_pd_job(
        project_id, "europe-west4", "pd-job-batch", "pd-1", "europe-west4-c"
    )
    print(created_job)

batch/tests/test_basics.py

Lines changed: 26 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,7 @@
1515

1616
from collections.abc import Callable
1717
import time
18+
from typing import Tuple
1819
import uuid
1920

2021
from flaky import flaky
@@ -26,6 +27,7 @@
2627

2728
from ..create.create_with_container_no_mounting import create_container_job
2829
from ..create.create_with_gpu_no_mounting import create_gpu_job
30+
from ..create.create_with_persistent_disk import create_with_pd_job
2931
from ..create.create_with_script_no_mounting import create_script_job
3032
from ..create.create_with_service_account import create_with_custom_service_account_job
3133
from ..create.create_with_ssd import create_local_ssd_job
@@ -40,6 +42,7 @@
4042
PROJECT = google.auth.default()[1]
4143
REGION = "europe-central2"
4244
ZONE = "europe-central2-b"
45+
4346
TIMEOUT = 600 # 10 minutes
4447

4548
WAIT_STATES = {
@@ -68,7 +71,7 @@ def service_account() -> str:
6871

6972
@pytest.fixture
def disk_name():
    """Return a unique per-test disk name."""
    unique_suffix = uuid.uuid4().hex[:10]
    return f"test-disk-{unique_suffix}"
7275

7376

7477
def _test_body(test_job: batch_v1.Job, additional_test: Callable = None, region=REGION):
@@ -108,6 +111,12 @@ def _check_tasks(job_name):
108111
print("Tasks tested")
109112

110113

114+
def _check_policy(job: batch_v1.Job, job_name: str, disk_names: Tuple[str]):
115+
assert job_name in job.name
116+
assert job.allocation_policy.instances[0].policy.disks[0].device_name in disk_names
117+
assert job.allocation_policy.instances[0].policy.disks[1].device_name in disk_names
118+
119+
111120
def _check_logs(job, capsys):
112121
print_job_logs(PROJECT, job)
113122
output = [
@@ -155,3 +164,19 @@ def test_service_account_job(job_name, service_account):
155164
def test_ssd_job(job_name: str, disk_name: str, capsys: "pytest.CaptureFixture[str]"):
156165
job = create_local_ssd_job(PROJECT, REGION, job_name, disk_name)
157166
_test_body(job, additional_test=lambda: _check_logs(job, capsys))
167+
168+
169+
@flaky(max_runs=3, min_passes=1)
def test_pd_job(job_name, disk_name):
    """Create a Batch job with a new and a pre-existing persistent disk and verify it."""
    # Persistent-disk test resources live in a dedicated region/zone.
    test_region = "europe-north1"
    test_zone = "europe-north1-c"
    permanent_disk = "permanent-batch-testing"

    job = create_with_pd_job(
        PROJECT, test_region, job_name, disk_name, test_zone, permanent_disk
    )

    expected_disks = (disk_name, permanent_disk)
    _test_body(
        job,
        additional_test=lambda: _check_policy(job, job_name, expected_disks),
        region=test_region,
    )

0 commit comments

Comments
 (0)