#!/bin/bash
#############################################################################
# Copyright (C) 2013-2015 Lawrence Livermore National Security, LLC.
# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
# Written by Albert Chu <[email protected]>
# LLNL-CODE-644248
#
# This file is part of Magpie, scripts for running Hadoop on
# traditional HPC systems. For details, see https://github.com/llnl/magpie.
#
# Magpie is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Magpie is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Magpie. If not, see <http://www.gnu.org/licenses/>.
#############################################################################
# This script is the core processing script for setting up daemons and
# running jobs. For the most part, it shouldn't be edited. See
# job submission files for configuration details.
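#
# This script is not normally invoked by hand. As a rough sketch (the exact
# launcher and variable set depend on the scheduler and the generated
# submission file), the submission file exports MAGPIE_SCRIPTS_HOME and the
# MAGPIE_* job settings and then launches this script within the allocation,
# e.g.:
#
#   export MAGPIE_SCRIPTS_HOME=/path/to/magpie
#   srun ${MAGPIE_SCRIPTS_HOME}/magpie-run
#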
source ${MAGPIE_SCRIPTS_HOME}/magpie/exports/magpie-exports-submission-type
source ${MAGPIE_SCRIPTS_HOME}/magpie/exports/magpie-exports-dirs
source ${MAGPIE_SCRIPTS_HOME}/magpie/exports/magpie-exports-user
source ${MAGPIE_SCRIPTS_HOME}/magpie/lib/magpie-lib-node-identification
source ${MAGPIE_SCRIPTS_HOME}/magpie/lib/magpie-lib-run
source ${MAGPIE_SCRIPTS_HOME}/magpie/run/magpie-run-project-hadoop
source ${MAGPIE_SCRIPTS_HOME}/magpie/run/magpie-run-project-hbase
source ${MAGPIE_SCRIPTS_HOME}/magpie/run/magpie-run-project-kafka
source ${MAGPIE_SCRIPTS_HOME}/magpie/run/magpie-run-project-mahout
source ${MAGPIE_SCRIPTS_HOME}/magpie/run/magpie-run-project-pig
source ${MAGPIE_SCRIPTS_HOME}/magpie/run/magpie-run-project-phoenix
source ${MAGPIE_SCRIPTS_HOME}/magpie/run/magpie-run-project-spark
source ${MAGPIE_SCRIPTS_HOME}/magpie/run/magpie-run-project-storm
source ${MAGPIE_SCRIPTS_HOME}/magpie/run/magpie-run-project-tachyon
source ${MAGPIE_SCRIPTS_HOME}/magpie/run/magpie-run-project-zeppelin
source ${MAGPIE_SCRIPTS_HOME}/magpie/run/magpie-run-project-zookeeper
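
# Only the node identified as the Magpie master performs the setup, job
# execution, and teardown below; all other nodes exit immediately.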
if ! Magpie_am_I_master
then
    exit 0
fi
# Initially make all local directory variables node-specific
Magpie_make_all_local_dirs_node_specific
# Output some general info
echo "*******************************************************"
echo "* Magpie General Job Info"
echo "*"
echo "* Job Nodelist: ${MAGPIE_NODELIST}"
echo "* Job Nodecount: ${MAGPIE_NODE_COUNT}"
echo "* Job Timelimit in Minutes: ${MAGPIE_TIMELIMIT_MINUTES}"
echo "* Job Name: ${MAGPIE_JOB_NAME}"
echo "* Job ID: ${MAGPIE_JOB_ID}"
echo "*"
echo "*******************************************************"
if [ "${MAGPIE_ENVIRONMENT_VARIABLE_SCRIPT_SHELL}X" != "X" ]
then
MAGPIE_SHELL="${MAGPIE_ENVIRONMENT_VARIABLE_SCRIPT_SHELL}"
else
MAGPIE_SHELL="${SHELL}"
fi
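
# If requested, (re)create the environment variable script, which users can
# source later to pick up this job's settings (JAVA_HOME here; the project
# start functions below are expected to append their own variables).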
if [ "${MAGPIE_ENVIRONMENT_VARIABLE_SCRIPT}X" != "X" ]
then
if [ -f "${MAGPIE_ENVIRONMENT_VARIABLE_SCRIPT}" ]
then
rm -f ${MAGPIE_ENVIRONMENT_VARIABLE_SCRIPT}
fi
touch ${MAGPIE_ENVIRONMENT_VARIABLE_SCRIPT}
chmod 700 ${MAGPIE_ENVIRONMENT_VARIABLE_SCRIPT}
echo "#!${MAGPIE_SHELL}" >> ${MAGPIE_ENVIRONMENT_VARIABLE_SCRIPT}
echo "" >> ${MAGPIE_ENVIRONMENT_VARIABLE_SCRIPT}
echo "# Common environment variables for Job = ${MAGPIE_JOB_NAME}, Job ID = ${MAGPIE_JOB_ID}" >> ${MAGPIE_ENVIRONMENT_VARIABLE_SCRIPT}
echo "" >> ${MAGPIE_ENVIRONMENT_VARIABLE_SCRIPT}
if [ "${JAVA_HOME}X" != "X" ]
then
if echo $MAGPIE_SHELL | grep -q csh
then
echo "setenv JAVA_HOME \"${JAVA_HOME}\"" >> ${MAGPIE_ENVIRONMENT_VARIABLE_SCRIPT}
else
echo "export JAVA_HOME=\"${JAVA_HOME}\"" >> ${MAGPIE_ENVIRONMENT_VARIABLE_SCRIPT}
fi
echo "" >> ${MAGPIE_ENVIRONMENT_VARIABLE_SCRIPT}
fi
fi
# Global flag for setup check, will be set to false if any startup fails
magpie_run_prior_startup_successful=true
# Global, will be set/adjusted by various start functions
magpie_run_total_sleep_wait=0
# Zookeeper setup must come first, as other things like Hbase & Storm require it
# Will set magpie_run_zookeeper_should_be_torndown & magpie_run_zookeeper_setup_successful appropriately
Magpie_run_start_zookeeper
# Will set magpie_run_hadoop_should_be_torndown & magpie_run_hadoop_setup_successful appropriately
Magpie_run_start_hadoop
# After Hadoop setup, requires Hadoop
Magpie_run_start_pig
# After Hadoop setup, requires Hadoop
Magpie_run_start_mahout
# After Zookeeper setup, requires Zookeeper
# Will set magpie_run_hbase_should_be_torndown & magpie_run_hbase_setup_successful appropriately
Magpie_run_start_hbase
# After Hbase setup, requires Hbase
# Will set magpie_run_phoenix_should_be_torndown & magpie_run_phoenix_setup_successful appropriately
Magpie_run_start_phoenix
# Will set magpie_run_spark_should_be_torndown & magpie_run_spark_setup_successful appropriately
Magpie_run_start_spark
# Will set magpie_run_kafka_should_be_torndown & magpie_run_kafka_setup_successful appropriately
Magpie_run_start_kafka
# After Spark setup, requires Spark
# Will set magpie_run_zeppelin_should_be_torndown & magpie_run_zeppelin_setup_successful appropriately
Magpie_run_start_zeppelin
# After Zookeeper setup, requires Zookeeper
# Will set magpie_run_storm_should_be_torndown & magpie_run_storm_setup_successful appropriately
Magpie_run_start_storm
# After Hadoop setup, requires HDFS for formatting
# Will set magpie_run_tachyon_should_be_torndown & magpie_run_tachyon_setup_successful appropriately
Magpie_run_start_tachyon
# Make sure all setup passed
if [ "${magpie_run_zookeeper_setup_successful}" == "1" ] \
&& [ "${magpie_run_hadoop_setup_successful}" == "1" ] \
&& [ "${magpie_run_hbase_setup_successful}" == "1" ] \
&& [ "${magpie_run_phoenix_setup_successful}" == "1" ] \
&& [ "${magpie_run_spark_setup_successful}" == "1" ] \
&& [ "${magpie_run_kafka_setup_successful}" == "1" ] \
&& [ "${magpie_run_zeppelin_setup_successful}" == "1" ] \
&& [ "${magpie_run_storm_setup_successful}" == "1" ] \
&& [ "${magpie_run_tachyon_setup_successful}" == "1" ]
then
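    # All configured projects started successfully, so dispatch on the job
    # type requested in the submission file.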
if [ "${MAGPIE_JOB_TYPE}" == "script" ]
then
echo "*******************************************************"
echo "* Executing script $MAGPIE_SCRIPT_PATH $MAGPIE_SCRIPT_ARGS"
echo "*******************************************************"
        ${MAGPIE_SCRIPTS_HOME}/magpie/run/magpie-run-execute script ${MAGPIE_SCRIPT_PATH} ${MAGPIE_SCRIPT_ARGS} &
        scriptpid=$!
        Magpie_wait_script_sigusr2_on_job_timeout ${scriptpid}
    elif [ "${MAGPIE_JOB_TYPE}" == "interactive" ]
    then
        echo "*******************************************************"
        echo "* Entering Magpie ${MAGPIE_JOB_TYPE} mode"
        echo "*******************************************************"
        ${MAGPIE_SCRIPTS_HOME}/magpie/run/magpie-run-execute interactive &
        scriptpid=$!
        wait $scriptpid
    elif [ "${MAGPIE_JOB_TYPE}" == "pig" ]
    then
        Magpie_run_pig
    elif [ "${MAGPIE_JOB_TYPE}" == "mahout" ]
    then
        Magpie_run_mahout
    elif [ "${MAGPIE_JOB_TYPE}" == "hadoop" ]
    then
        Magpie_run_hadoop
    elif [ "${MAGPIE_JOB_TYPE}" == "hbase" ]
    then
        Magpie_run_hbase
    elif [ "${MAGPIE_JOB_TYPE}" == "phoenix" ]
    then
        Magpie_run_phoenix
    elif [ "${MAGPIE_JOB_TYPE}" == "spark" ]
    then
        Magpie_run_spark
    elif [ "${MAGPIE_JOB_TYPE}" == "kafka" ]
    then
        Magpie_run_kafka
    elif [ "${MAGPIE_JOB_TYPE}" == "zeppelin" ]
    then
        Magpie_run_zeppelin
    elif [ "${MAGPIE_JOB_TYPE}" == "storm" ]
    then
        Magpie_run_storm
    elif [ "${MAGPIE_JOB_TYPE}" == "tachyon" ]
    then
        Magpie_run_tachyon
    elif [ "${MAGPIE_JOB_TYPE}" == "zookeeper" ]
    then
        Magpie_run_zookeeper
    elif [ "${MAGPIE_JOB_TYPE}" == "testall" ]
    then
        echo "*******************************************************"
        echo "* Running Magpie TestAll"
        echo "*******************************************************"
        ${MAGPIE_SCRIPTS_HOME}/magpie/run/magpie-run-execute script ${MAGPIE_SCRIPTS_HOME}/magpie/job/magpie-job-magpie-testall &
        scriptpid=$!
        Magpie_wait_script_sigusr2_on_job_timeout ${scriptpid}
    fi
fi
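
# Teardown is attempted regardless of how setup or the job itself went; each
# stop function only tears down a project that was actually started, per the
# *_should_be_torndown flags set above.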
# Tachyon before Hadoop shutdown, may need to flush
# Sets magpie_run_tachyon_teardown_complete if teardown done
Magpie_run_stop_tachyon
# Sets magpie_run_storm_teardown_complete if teardown done
Magpie_run_stop_storm
# Before Spark, depends on Spark
# Sets magpie_run_zeppelin_teardown_complete if teardown done
Magpie_run_stop_zeppelin
# Sets magpie_run_kafka_teardown_complete if teardown done
Magpie_run_stop_kafka
# Sets magpie_run_spark_teardown_complete if teardown done
Magpie_run_stop_spark
# Before Hbase, depends on Hbase
# Sets magpie_run_phoenix_teardown_complete if teardown done
Magpie_run_stop_phoenix
# Sets magpie_run_hbase_teardown_complete if teardown done
Magpie_run_stop_hbase
# Sets magpie_run_hadoop_teardown_complete if teardown done
Magpie_run_stop_hadoop
# Zookeeper teardown comes last, as other things like Hbase & Storm require it
# Sets magpie_run_zookeeper_teardown_complete if teardown done
Magpie_run_stop_zookeeper
exit 0