apiVersion: v1
kind: Pod
metadata:
  # Name of the pod. This is the name to use in any interaction with kubectl.
  name: singlerandom
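  # For example, "kubectl describe pod singlerandom" refers to this pod by that name.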
spec:
  securityContext:
    # You need to specify your user ID here. You can get this by running "id -u".
    runAsUser: 1167
  volumes:
  # The following entries are the NFS mounts available to you. You do not need to
  # modify them. They are for your home folder and the shared tools, datasets, and
  # scratch folders.
  - name: home
    persistentVolumeClaim:
      claimName: home
  - name: tools
    persistentVolumeClaim:
      claimName: tools
  # - name: data1
  #   persistentVolumeClaim:
  #     claimName: data1
  # - name: data2
  #   persistentVolumeClaim:
  #     claimName: data2
  # - name: data3
  #   persistentVolumeClaim:
  #     claimName: data3
  - name: scratch1
    persistentVolumeClaim:
      claimName: scratch1
  # - name: scratch2
  #   persistentVolumeClaim:
  #     claimName: scratch2
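  # If you are unsure which claims exist for you, "kubectl get pvc" should list the
  # persistent volume claims visible in your namespace.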
  # The following entries make the Nvidia drivers and the system libraries
  # available inside the container. You do not need to modify them.
  - hostPath:
      path: /usr/lib/nvidia-driver/bin
    name: nvbin
  - hostPath:
      path: /usr/lib/nvidia-driver
    name: nvlib
  - hostPath:
      path: /usr/lib/
    name: usrlib
  - hostPath:
      path: /usr/bin
    name: bin
  - hostPath:
      path: /lib
    name: lib
  # You can specify a label indicating a specific model of GPU here when you need it.
  # Leave it commented out while learning how to use the cluster.
  # nodeSelector:
  #   gputype: k40
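  # To see which GPU labels the nodes carry (a quick check; the exact label names
  # may differ on your cluster), "kubectl get nodes --show-labels" or
  # "kubectl describe node <node-name>" will print each node's labels.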
  # This part specifies the Docker container in which your code will run.
  containers:
  - name: singlerandom # The container also has a name, but it usually doesn't matter
    # This is the Docker image on which your container is based. Leave it unchanged.
    image: ubuntu:16.04
    # This is the command which is run once the container is created. Point it to
    # your code. It is best to encapsulate your commands in a shell script and
    # call that script, as shown below.
    #
    # Note that all paths have to be given according to the filesystem inside the
    # container.
    command: ["/bin/bash", "/storage/home/aravindvenu/single_random/run.sh"]
    # This section specifies the resources you ask for.
    resources:
      # For the purposes of using the cluster, the "requests" section should be the
      # same as the "limits" section, except for the GPU line.
      limits:
        alpha.kubernetes.io/nvidia-gpu: 1 # Number of GPUs
        # Memory can be specified in MiB (1024 x 1024 bytes, e.g. "500Mi")
        # or in GiB (e.g. "4Gi").
        memory: "8Gi"
        cpu: "2" # Number of CPU cores
      requests:
        memory: "8Gi"
        cpu: "2"
    # Here, you can specify where you want the above directories to be mounted
    # within the container. It is recommended to use this configuration as
    # it mirrors what is present on the master machine.
    volumeMounts:
    # Entry for your home folder. Make sure the username in the path below is
    # your own.
    - mountPath: /storage/home/aravindvenu
      name: home
    # Entries for the other folders mentioned above.
    - mountPath: /tools
      name: tools
    # - mountPath: /datasets/data1
    #   name: data1
    # - mountPath: /datasets/data2
    #   name: data2
    # - mountPath: /datasets/data3
    #   name: data3
    - mountPath: /scratch/scratch1
      name: scratch1
    # - mountPath: /scratch/scratch2
    #   name: scratch2
    - mountPath: /usr/local/nvidia/bin
      name: nvbin
    - mountPath: /usr/local/nvidia/lib
      name: nvlib
    - mountPath: /usr/lib/
      name: usrlib
    - mountPath: /usr/bin/
      name: bin
    - mountPath: /lib
      name: lib
  restartPolicy: Never
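
# A typical workflow with this file (a sketch using standard kubectl commands; your
# cluster's own submission instructions take precedence):
#   kubectl create -f job_single_random.yaml   # submit the pod
#   kubectl get pods                           # check whether it is running
#   kubectl logs -f singlerandom               # follow the output of run.sh
#   kubectl delete pod singlerandom            # clean up once it has finished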