-- ============================================================================
-- SPANK plugin to allow users to choose the compute mode on the GPUs allocated
-- to their job. Requires `nvidia-smi` on the compute node, and the Slurm SPANK
-- Lua plugin.
--
-- Adds a --gpu_cmode=MODE option to srun/sbatch/salloc, where MODE is one of
-- "shared", "exclusive" or "prohibited". These correspond to the NVIDIA
-- compute modes:
--   0: shared
--   1: exclusive (exclusive_thread: deprecated, use 3)
--   2: prohibited
--   3: exclusive (exclusive_process)
--
-- Reference:
-- http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#compute-modes
--
-- # Author : Kilian Cavalotti <[email protected]>
-- # Created : 2018/01/22
-- # License : GPL 2.0
-- ============================================================================
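--
-- Example usage (a sketch, not site-specific documentation: the plugstack.conf
-- line assumes the SPANK Lua plugin is installed as lua.so, and the paths,
-- GRES name and output shown below are illustrative):
--
--   # /etc/slurm/plugstack.conf
--   required lua.so /etc/slurm/spank/gpu_cmode.lua
--
--   $ srun --gres=gpu:1 --gpu_cmode=shared \
--        nvidia-smi --query-gpu=compute_mode --format=csv,noheader
--   Default
--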
--
-- constants ------------------------------------------------------------------
--
-- plugin name (for logging)
--
myname = "SPANK:gpu_cmode"
-- GPU compute modes definitions
--
valid_cmodes = {
    [0] = "shared",
    [1] = "exclusive",
    [2] = "prohibited"
}
-- reverse index
--
cmodes_index = {}
for k,v in pairs(valid_cmodes) do cmodes_index[v]=k end
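-- resulting reverse mapping: cmodes_index = { shared=0, exclusive=1, prohibited=2 }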
-- default mode
-- GPUs will be reset to that mode at the end of the job
--
default_cmode = "exclusive"
-- define new --gpu_cmode option for srun/salloc/sbatch
--
spank_options = {
    {
        name    = "gpu_cmode",
        usage   = "Set the GPU compute mode on the allocated GPUs to " ..
                  "shared, exclusive or prohibited. Default is " ..
                  default_cmode,
        arginfo = "<shared|exclusive|prohibited>",
        has_arg = 1,
        cb      = "opt_handler"
    },
}
--
-- functions ------------------------------------------------------------------
--
-- execute command and return output
--
function exec(cmd)
    local handle = io.popen(cmd)
    local result = handle:read("*a") or ""
    handle:close()
    result = string.gsub(result, "\n$", "")
    return result
end
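-- e.g. exec("hostname") might return "node001" (hypothetical output), with the
-- trailing newline stripped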
-- validate compute mode
--
function validate_cmode(cmode)
    for _, value in pairs(valid_cmodes) do
        if value == cmode then
            return true
        end
    end
    return false
end
-- option callback: validate the requested compute mode and store it
--
function opt_handler(val, optarg, isremote)
    cmode = optarg
    if isremote or validate_cmode(optarg) then
        return SPANK.SUCCESS
    end
    return SPANK.FAILURE
end
-- Return the last value of a multi-value expression.
-- Used to support os.execute() on Lua 5.1 through 5.3 (and later, assuming the
-- command's exit status remains the last value returned).
-- select('#', ...) returns the number of values, so using that count as the
-- index selects the last one.
function get_last_value(...)
return select(select('#', ...), ...)
end
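-- Illustrative values (not produced by this plugin):
--   Lua 5.1: get_last_value(os.execute("true"))  --> 0
--   Lua 5.3: get_last_value(os.execute("true"))  --> 0   (from true, "exit", 0)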
--
-- SPANK functions ------------------------------------------------------------
-- cf. https://slurm.schedmd.com/spank.html
--
-- SPANK function, called after privileges are temporarily dropped.
-- needs to run as root, but in the job cgroup context, if any.
--
function slurm_spank_user_init(spank)
    -- if context is not "remote" or compute mode is not defined, do nothing
    if spank.context ~= "remote" or cmode == nil then
        return SPANK.SUCCESS
    end

    -- get GPU ids from CUDA_VISIBLE_DEVICES
    device_ids = spank:getenv("CUDA_VISIBLE_DEVICES")
    if device_ids == nil or device_ids == "" then
        SPANK.log_error(myname .. ": CUDA_VISIBLE_DEVICES not set.")
        return SPANK.FAILURE
    end

    -- check for nvidia-smi
    nvs_path = exec("which nvidia-smi")
    if nvs_path:match("nvidia%-smi$") == nil then
        SPANK.log_error(myname .. ": can't find nvidia-smi in PATH.")
        return SPANK.FAILURE
    end

    -- set compute mode on GPUs
    SPANK.log_info(myname .. ": changing compute mode to '%s' on GPU(s): %s\n",
                   cmode, device_ids)
    local cmd = nvs_path .. " -c " .. cmodes_index[cmode] ..
                " -i " .. device_ids
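    -- e.g. with cmode="shared" and CUDA_VISIBLE_DEVICES="0,1" (illustrative
    -- values), cmd would be: nvidia-smi -c 0 -i 0,1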
    local ret = tonumber(get_last_value(os.execute(cmd)))
    SPANK.log_debug(myname .. ": DEBUG: cmd = %s\n", cmd)
    SPANK.log_debug(myname .. ": DEBUG: ret = %s\n", ret)

    -- check return code
    if ret ~= 0 then
        SPANK.log_error(myname .. ": error setting compute mode to '%s'" ..
                        " on GPU(s): %s\n", cmode, device_ids)
        return SPANK.FAILURE
    end

    return SPANK.SUCCESS
end
-- SPANK function called for each task as its exit status is collected by Slurm
-- needs to run as root, in the job cgroup context, if any.
--
function slurm_spank_task_exit(spank)
    -- if context is not "remote" or compute mode is not defined, do nothing
    if spank.context ~= "remote" or cmode == nil then
        return SPANK.SUCCESS
    end

    -- reset compute mode on GPUs to the default
    SPANK.log_info(myname .. ": resetting compute mode to default '%s'" ..
                   " on GPU(s): %s\n", default_cmode, device_ids)
    local cmd = nvs_path .. " -c " .. cmodes_index[default_cmode] ..
                " -i " .. device_ids
    local ret = tonumber(get_last_value(os.execute(cmd)))
    SPANK.log_debug(myname .. ": DEBUG: cmd = %s\n", cmd)
    SPANK.log_debug(myname .. ": DEBUG: ret = %s\n", ret)

    -- check return code
    if ret ~= 0 then
        SPANK.log_error(myname .. ": error resetting compute mode to default" ..
                        " '%s' on GPU(s): %s\n", default_cmode, device_ids)
        return SPANK.FAILURE
    end

    return SPANK.SUCCESS
end