From a5876785aa8153206044d72712a65959a681552e Mon Sep 17 00:00:00 2001
From: Sukrit Kalra
Date: Wed, 24 Jan 2024 17:45:21 -0800
Subject: [PATCH] Remove unused and redundant scripts.

---
 ...un_alibaba_experiments_osdi_11_30_night.sh | 134 -------------
 ...baba_experiments_osdi_11_30_night_jayne.sh | 132 -------------
 ...05_max_dead_var_200_400_800_cv2_1_2_4_6.sh | 133 -------------
 ...n_alibaba_experiments_osdi_12_1_evening.sh | 135 --------------
 ...alibaba_experiments_osdi_12_1_evening_2.sh | 137 --------------
 ...a_experiments_osdi_12_1_evening_2_jayne.sh | 136 --------------
 ...alibaba_experiments_osdi_12_1_evening_3.sh | 137 --------------
 ...a_experiments_osdi_12_1_evening_3_jayne.sh | 136 --------------
 ...aba_experiments_osdi_12_1_evening_jayne.sh | 136 --------------
 .../run_alibaba_experiments_osdi_12_1_noon.sh | 134 -------------
 ...libaba_experiments_osdi_12_1_noon_jayne.sh | 134 -------------
 ...alibaba_experiments_osdi_12_2_afternoon.sh | 136 --------------
 ...a_experiments_osdi_12_2_afternoon_jayne.sh | 136 --------------
 ...n_alibaba_experiments_osdi_12_2_morning.sh | 136 --------------
 ...aba_experiments_osdi_12_2_morning_jayne.sh | 136 --------------
 ...run_alibaba_experiments_osdi_12_2_night.sh | 136 --------------
 ...alibaba_experiments_osdi_12_2_night_2.5.sh | 141 --------------
 ...ents_osdi_12_2_night_2.5_heter_simontam.sh | 149 ---------------
 ...n_alibaba_experiments_osdi_12_2_night_2.sh | 136 --------------
 ...iments_osdi_12_2_night_2_heter_simontam.sh | 144 --------------
 ...eriments_osdi_12_2_night_heter_simontam.sh | 144 --------------
 ...ibaba_experiments_osdi_12_2_night_jayne.sh | 141 --------------
 ...libaba_experiments_osdi_12_2_noon_jayne.sh | 137 --------------
 ...iments_osdi_12_3_morning_heter_simontam.sh | 149 ---------------
 ...ibaba_experiments_osdi_12_3_night_jayne.sh | 142 --------------
 ...ba_experiments_osdi_12_3_night_rivertam.sh | 142 --------------
 ...ba_experiments_osdi_12_3_night_simontam.sh | 142 --------------
 ...ibaba_experiments_osdi_12_3_night_sysml.sh | 142 --------------
 ..._evening_sysml_test_optimization_passes.sh | 148 ---------------
 ...vening_sysml_test_optimization_passes_2.sh | 149 ---------------
 ...t_optimization_passes_2_no_optimization.sh | 148 ---------------
 ...night_alind_homo_dynamic_discretization.sh | 168 -----------------
 ...xperiments_osdi_12_4_night_jayne_hetero.sh | 154 ---------------
 ...riments_osdi_12_4_night_rivertam_hetero.sh | 154 ---------------
 ...eriments_osdi_12_4_night_sheperd_hetero.sh | 154 ---------------
 ...periments_osdi_12_4_night_simontam_homo.sh | 153 ---------------
 ..._experiments_osdi_12_4_night_sysml_homo.sh | 153 ---------------
 ...iments_osdi_12_5_afternoon_jayne_hetero.sh | 154 ---------------
 ...eriments_osdi_12_5_afternoon_sysml_homo.sh | 153 ---------------
 ...eriments_osdi_12_5_evening_jayne_hetero.sh | 154 ---------------
 ...sdi_12_5_evening_sysml_hetero_fix_gamma.sh | 159 ----------------
 ...vening_sysml_hetero_fix_gamma_w_dynamic.sh | 165 ----------------
 ..._night_jayne_hetero_fix_gamma_w_dynamic.sh | 171 -----------------
 ...ght_rivertam_hetero_fix_gamma_w_dynamic.sh | 171 -----------------
 ..._night_sheperd_homo_fix_gamma_w_dynamic.sh | 171 -----------------
 ...night_simontam_homo_fix_gamma_w_dynamic.sh | 171 -----------------
 ...night_sysml_02_homo_fix_gamma_w_dynamic.sh | 171 -----------------
 ...ght_sysml_05_hetero_fix_gamma_w_dynamic.sh | 171 -----------------
 ...vertam_hetero_fix_gamma_w_dynamic_max10.sh | 172 -----------------
 ...day_sheperd_homo_fix_gamma_w_dynamic_10.sh | 172 -----------------
 ..._sysml_02_homo_fix_gamma_w_dynamic_test.sh | 171 -----------------
 ..._night_jayne_hetero_fix_gamma_w_dynamic.sh | 173 -----------------
 ...ght_rivertam_hetero_fix_gamma_w_dynamic.sh | 173 -----------------
 ..._night_sheperd_homo_fix_gamma_w_dynamic.sh | 173 -----------------
 ...night_simontam_homo_fix_gamma_w_dynamic.sh | 173 -----------------
 ...night_sysml_02_homo_fix_gamma_w_dynamic.sh | 176 ------------------
 ...ght_sysml_05_hetero_fix_gamma_w_dynamic.sh | 176 ------------------
 57 files changed, 8624 deletions(-)
 delete mode 100755 scripts/run_alibaba_experiments_osdi_11_30_night.sh
 delete mode 100755 scripts/run_alibaba_experiments_osdi_11_30_night_jayne.sh
 delete mode 100755 scripts/run_alibaba_experiments_osdi_11_30_noon_arrival_rate_0.001_0.005_max_dead_var_200_400_800_cv2_1_2_4_6.sh
 delete mode 100755 scripts/run_alibaba_experiments_osdi_12_1_evening.sh
 delete mode 100755 scripts/run_alibaba_experiments_osdi_12_1_evening_2.sh
 delete mode 100755 scripts/run_alibaba_experiments_osdi_12_1_evening_2_jayne.sh
 delete mode 100755 scripts/run_alibaba_experiments_osdi_12_1_evening_3.sh
 delete mode 100755 scripts/run_alibaba_experiments_osdi_12_1_evening_3_jayne.sh
 delete mode 100755 scripts/run_alibaba_experiments_osdi_12_1_evening_jayne.sh
 delete mode 100755 scripts/run_alibaba_experiments_osdi_12_1_noon.sh
 delete mode 100755 scripts/run_alibaba_experiments_osdi_12_1_noon_jayne.sh
 delete mode 100755 scripts/run_alibaba_experiments_osdi_12_2_afternoon.sh
 delete mode 100755 scripts/run_alibaba_experiments_osdi_12_2_afternoon_jayne.sh
 delete mode 100755 scripts/run_alibaba_experiments_osdi_12_2_morning.sh
 delete mode 100755 scripts/run_alibaba_experiments_osdi_12_2_morning_jayne.sh
 delete mode 100755 scripts/run_alibaba_experiments_osdi_12_2_night.sh
 delete mode 100755 scripts/run_alibaba_experiments_osdi_12_2_night_2.5.sh
 delete mode 100755 scripts/run_alibaba_experiments_osdi_12_2_night_2.5_heter_simontam.sh
 delete mode 100755 scripts/run_alibaba_experiments_osdi_12_2_night_2.sh
 delete mode 100755 scripts/run_alibaba_experiments_osdi_12_2_night_2_heter_simontam.sh
 delete mode 100755 scripts/run_alibaba_experiments_osdi_12_2_night_heter_simontam.sh
 delete mode 100755 scripts/run_alibaba_experiments_osdi_12_2_night_jayne.sh
 delete mode 100755 scripts/run_alibaba_experiments_osdi_12_2_noon_jayne.sh
 delete mode 100755 scripts/run_alibaba_experiments_osdi_12_3_morning_heter_simontam.sh
 delete mode 100755 scripts/run_alibaba_experiments_osdi_12_3_night_jayne.sh
 delete mode 100755 scripts/run_alibaba_experiments_osdi_12_3_night_rivertam.sh
 delete mode 100755 scripts/run_alibaba_experiments_osdi_12_3_night_simontam.sh
 delete mode 100755 scripts/run_alibaba_experiments_osdi_12_3_night_sysml.sh
 delete mode 100755 scripts/run_alibaba_experiments_osdi_12_4_evening_sysml_test_optimization_passes.sh
 delete mode 100755 scripts/run_alibaba_experiments_osdi_12_4_evening_sysml_test_optimization_passes_2.sh
 delete mode 100755 scripts/run_alibaba_experiments_osdi_12_4_evening_sysml_test_optimization_passes_2_no_optimization.sh
 delete mode 100755 scripts/run_alibaba_experiments_osdi_12_4_night_alind_homo_dynamic_discretization.sh
 delete mode 100755 scripts/run_alibaba_experiments_osdi_12_4_night_jayne_hetero.sh
 delete mode 100755 scripts/run_alibaba_experiments_osdi_12_4_night_rivertam_hetero.sh
 delete mode 100755 scripts/run_alibaba_experiments_osdi_12_4_night_sheperd_hetero.sh
 delete mode 100755 scripts/run_alibaba_experiments_osdi_12_4_night_simontam_homo.sh
 delete mode 100755
scripts/run_alibaba_experiments_osdi_12_4_night_sysml_homo.sh delete mode 100755 scripts/run_alibaba_experiments_osdi_12_5_afternoon_jayne_hetero.sh delete mode 100755 scripts/run_alibaba_experiments_osdi_12_5_afternoon_sysml_homo.sh delete mode 100755 scripts/run_alibaba_experiments_osdi_12_5_evening_jayne_hetero.sh delete mode 100755 scripts/run_alibaba_experiments_osdi_12_5_evening_sysml_hetero_fix_gamma.sh delete mode 100755 scripts/run_alibaba_experiments_osdi_12_5_evening_sysml_hetero_fix_gamma_w_dynamic.sh delete mode 100755 scripts/run_alibaba_experiments_osdi_12_5_night_jayne_hetero_fix_gamma_w_dynamic.sh delete mode 100755 scripts/run_alibaba_experiments_osdi_12_5_night_rivertam_hetero_fix_gamma_w_dynamic.sh delete mode 100755 scripts/run_alibaba_experiments_osdi_12_5_night_sheperd_homo_fix_gamma_w_dynamic.sh delete mode 100755 scripts/run_alibaba_experiments_osdi_12_5_night_simontam_homo_fix_gamma_w_dynamic.sh delete mode 100755 scripts/run_alibaba_experiments_osdi_12_5_night_sysml_02_homo_fix_gamma_w_dynamic.sh delete mode 100755 scripts/run_alibaba_experiments_osdi_12_5_night_sysml_05_hetero_fix_gamma_w_dynamic.sh delete mode 100755 scripts/run_alibaba_experiments_osdi_12_6_day_rivertam_hetero_fix_gamma_w_dynamic_max10.sh delete mode 100755 scripts/run_alibaba_experiments_osdi_12_6_day_sheperd_homo_fix_gamma_w_dynamic_10.sh delete mode 100755 scripts/run_alibaba_experiments_osdi_12_6_morning_sysml_02_homo_fix_gamma_w_dynamic_test.sh delete mode 100755 scripts/run_alibaba_experiments_osdi_12_7_night_jayne_hetero_fix_gamma_w_dynamic.sh delete mode 100755 scripts/run_alibaba_experiments_osdi_12_7_night_rivertam_hetero_fix_gamma_w_dynamic.sh delete mode 100755 scripts/run_alibaba_experiments_osdi_12_7_night_sheperd_homo_fix_gamma_w_dynamic.sh delete mode 100755 scripts/run_alibaba_experiments_osdi_12_7_night_simontam_homo_fix_gamma_w_dynamic.sh delete mode 100755 scripts/run_alibaba_experiments_osdi_12_7_night_sysml_02_homo_fix_gamma_w_dynamic.sh delete mode 100755 scripts/run_alibaba_experiments_osdi_12_7_night_sysml_05_hetero_fix_gamma_w_dynamic.sh diff --git a/scripts/run_alibaba_experiments_osdi_11_30_night.sh b/scripts/run_alibaba_experiments_osdi_11_30_night.sh deleted file mode 100755 index 9b6c1f6b..00000000 --- a/scripts/run_alibaba_experiments_osdi_11_30_night.sh +++ /dev/null @@ -1,134 +0,0 @@ -#!/bin/bash -# $1 directory where to save the logs. - -# Scheduler runtimes in us.TetriSched -SCHEDULERS=(EDF TetriSched) -# MAX_DEADLINE_VARIANCES=(15 25 50 100 200) -# MAX_DEADLINE_VARIANCES=(200 400 800) -MAX_DEADLINE_VARIANCES=(50 200 400) -SCHEDULER_TIME_DISCRETIZATIONS=(1 10 20) -# RELEASE_POLICIES=(fixed poisson gamma) -RELEASE_POLICIES=(gamma) -# POISSON_ARRIVAL_RATES=(0.2 0.5 1 2) -POISSON_ARRIVAL_RATES=(0.05 0.1) -GAMMA_COEFFICIENTS=(1 2 4) #cv2 -DAG_AWARENESS=(0 1) # False True - -ERDOS_SIMULATOR_DIR="." # Change this to the directory where the simulator is located. -MIN_DEADLINE_VARIANCE=10 -NUM_INVOCATIONS=200 -SCHEDULER_LOG_TIMES=10 -SCHEDULER_RUNTIME=0 -LOG_LEVEL=info -REPLAY_TRACE=alibaba -WORKLOAD_PROFILE_PATH=./traces/alibaba-cluster-trace-v2018/alibaba_set_0_6600_dags.pkl -EXECUTION_MODE=replay -WORKER_CONFIG=alibaba_cluster - -PARALLEL_FACTOR=2 -# Move to the simulator directory. -if [[ -z ${ERDOS_SIMULATOR_DIR} ]]; then - echo "[x] ERRROR: ERDOS_SIMULATOR_DIR is not set" - exit 1 -fi -cd ${ERDOS_SIMULATOR_DIR} - -LOG_DIR=$1 -if [[ -z ${LOG_DIR} ]]; then - echo "[x] ERROR: Please provide a directory to output results to as the first argument." 
- exit 2 -fi - -execute_experiment () { - LOG_DIR=$1 - LOG_BASE=$2 - echo "[x] Initiating the execution of ${LOG_BASE}" - if [ ! -f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then -# --log_dir=${LOG_DIR}/${LOG_BASE} -# --scheduler_log_to_file - MYCONF="\ ---log_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.log ---csv_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv ---log_level=${LOG_LEVEL} ---execution_mode=${EXECUTION_MODE} ---replay_trace=${REPLAY_TRACE} ---max_deadline_variance=${MAX_DEADLINE_VARIANCE} ---min_deadline_variance=${MIN_DEADLINE_VARIANCE} ---workload_profile_path=${WORKLOAD_PROFILE_PATH} ---override_num_invocations=${NUM_INVOCATIONS} ---randomize_start_time_max=100 ---worker_profile_path=profiles/workers/${WORKER_CONFIG}.yaml ---scheduler_runtime=${SCHEDULER_RUNTIME} ---override_release_policy=${RELEASE_POLICY} ---scheduler=${SCHEDULER} -" - if [[ ${RELEASE_POLICY} == fixed ]]; then - MYCONF+="--override_arrival_period=10 -" - else - MYCONF+="--override_poisson_arrival_rate=${POISSON_ARRIVAL_RATE} -" - if [[ ${RELEASE_POLICY} == gamma ]]; then - MYCONF+="--override_gamma_coefficient=${GAMMA_COEFFICIENT} -" - fi - fi - - if [[ ${DAG_AWARE} == 1 ]]; then - MYCONF+="--release_taskgraphs -" - fi - - if [[ ${SCHEDULER} != EDF ]]; then - MYCONF+=" ---enforce_deadlines ---retract_schedules ---drop_skipped_tasks ---scheduler_log_times=${SCHEDULER_LOG_TIMES} ---scheduler_time_discretization=${SCHEDULER_TIME_DISCRETIZATION} - " - fi - echo "${MYCONF}" > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf - if ! python3 main.py --flagfile=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.output; then - echo "[x] Failed in the execution of ${LOG_BASE}. Exiting." - exit 3 - fi - else - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - fi - echo "[x] Finished execution of ${LOG_BASE}." -} - -for MAX_DEADLINE_VARIANCE in ${MAX_DEADLINE_VARIANCES[@]}; do - for SCHEDULER in ${SCHEDULERS[@]}; do - for RELEASE_POLICY in ${RELEASE_POLICIES[@]}; do - for POISSON_ARRIVAL_RATE in ${POISSON_ARRIVAL_RATES[@]}; do - for GAMMA_COEFFICIENT in ${GAMMA_COEFFICIENTS[@]}; do - for SCHEDULER_TIME_DISCRETIZATION in ${SCHEDULER_TIME_DISCRETIZATIONS[@]}; do - for DAG_AWARE in ${DAG_AWARENESS[@]}; do - if [[ ${SCHEDULER} == EDF && ( "${SCHEDULER_TIME_DISCRETIZATION}" -ne "${SCHEDULER_TIME_DISCRETIZATIONS[0]}" || "${DAG_AWARE}" -ne "${DAG_AWARENESS[0]}" ) ]]; then - continue - fi - - LOG_BASE=${REPLAY_TRACE}_scheduler_${SCHEDULER}_release_policy_${RELEASE_POLICY}_max_deadline_var_${MAX_DEADLINE_VARIANCE}_dag_aware_${DAG_AWARE}_poisson_arrival_rate_${POISSON_ARRIVAL_RATE}_gamma_coefficient_${GAMMA_COEFFICIENT} - - if [[ ${SCHEDULER} != EDF ]]; then - LOG_BASE+="_scheduler_discretization_${SCHEDULER_TIME_DISCRETIZATION}" - fi - - mkdir -p ${LOG_DIR}/${LOG_BASE} - execute_experiment ${LOG_DIR} ${LOG_BASE} & - sleep 1 - if [[ $(jobs -r -p | wc -l) -ge $PARALLEL_FACTOR ]]; then - echo "[x] Waiting for a job to terminate because $PARALLEL_FACTOR jobs are running." - wait -n - fi - done - done - done - done - done - done -done -wait -echo "[x] Finished executing all experiments." \ No newline at end of file diff --git a/scripts/run_alibaba_experiments_osdi_11_30_night_jayne.sh b/scripts/run_alibaba_experiments_osdi_11_30_night_jayne.sh deleted file mode 100755 index 9720f05a..00000000 --- a/scripts/run_alibaba_experiments_osdi_11_30_night_jayne.sh +++ /dev/null @@ -1,132 +0,0 @@ -#!/bin/bash -# $1 directory where to save the logs. 
- -# Scheduler runtimes in us.TetriSched -SCHEDULERS=(EDF TetriSched) -# MAX_DEADLINE_VARIANCES=(15 25 50 100 200) -# MAX_DEADLINE_VARIANCES=(200 400 800) -MAX_DEADLINE_VARIANCES=(50 200 400) -SCHEDULER_TIME_DISCRETIZATIONS=(1 10 20) -# RELEASE_POLICIES=(fixed poisson gamma) -RELEASE_POLICIES=(gamma) -POISSON_ARRIVAL_RATES=(0.2 0.5 1 2) -GAMMA_COEFFICIENTS=(1 2 4) #cv2 -DAG_AWARENESS=(0 1) # False True - -ERDOS_SIMULATOR_DIR="." # Change this to the directory where the simulator is located. -MIN_DEADLINE_VARIANCE=10 -NUM_INVOCATIONS=200 -SCHEDULER_LOG_TIMES=10 -SCHEDULER_RUNTIME=0 -LOG_LEVEL=info -REPLAY_TRACE=alibaba -WORKLOAD_PROFILE_PATH=./traces/alibaba-cluster-trace-v2018/alibaba_set_0_6600_dags.pkl -EXECUTION_MODE=replay -WORKER_CONFIG=alibaba_cluster - -PARALLEL_FACTOR=2 -# Move to the simulator directory. -if [[ -z ${ERDOS_SIMULATOR_DIR} ]]; then - echo "[x] ERRROR: ERDOS_SIMULATOR_DIR is not set" - exit 1 -fi -cd ${ERDOS_SIMULATOR_DIR} - -LOG_DIR=$1 -if [[ -z ${LOG_DIR} ]]; then - echo "[x] ERROR: Please provide a directory to output results to as the first argument." - exit 2 -fi - -execute_experiment () { - LOG_DIR=$1 - LOG_BASE=$2 - echo "[x] Initiating the execution of ${LOG_BASE}" - if [ ! -f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then -# --log_dir=${LOG_DIR}/${LOG_BASE} -# --scheduler_log_to_file - MYCONF="\ ---log_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.log ---csv_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv ---log_level=${LOG_LEVEL} ---execution_mode=${EXECUTION_MODE} ---replay_trace=${REPLAY_TRACE} ---max_deadline_variance=${MAX_DEADLINE_VARIANCE} ---min_deadline_variance=${MIN_DEADLINE_VARIANCE} ---workload_profile_path=${WORKLOAD_PROFILE_PATH} ---override_num_invocations=${NUM_INVOCATIONS} ---randomize_start_time_max=100 ---worker_profile_path=profiles/workers/${WORKER_CONFIG}.yaml ---scheduler_runtime=${SCHEDULER_RUNTIME} ---override_release_policy=${RELEASE_POLICY} ---scheduler=${SCHEDULER} -" - if [[ ${RELEASE_POLICY} == fixed ]]; then - MYCONF+="--override_arrival_period=10 -" - else - MYCONF+="--override_poisson_arrival_rate=${POISSON_ARRIVAL_RATE} -" - if [[ ${RELEASE_POLICY} == gamma ]]; then - MYCONF+="--override_gamma_coefficient=${GAMMA_COEFFICIENT} -" - fi - fi - - if [[ ${DAG_AWARE} == 1 ]]; then - MYCONF+="--release_taskgraphs -" - fi - - if [[ ${SCHEDULER} != EDF ]]; then - MYCONF+=" ---enforce_deadlines ---retract_schedules ---drop_skipped_tasks ---scheduler_log_times=${SCHEDULER_LOG_TIMES} ---scheduler_time_discretization=${SCHEDULER_TIME_DISCRETIZATION} - " - fi - echo "${MYCONF}" > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf - if ! python3 main.py --flagfile=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.output; then - echo "[x] Failed in the execution of ${LOG_BASE}. Exiting." - exit 3 - fi - else - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - fi - echo "[x] Finished execution of ${LOG_BASE}." 
-} - -for MAX_DEADLINE_VARIANCE in ${MAX_DEADLINE_VARIANCES[@]}; do - for SCHEDULER in ${SCHEDULERS[@]}; do - for RELEASE_POLICY in ${RELEASE_POLICIES[@]}; do - for POISSON_ARRIVAL_RATE in ${POISSON_ARRIVAL_RATES[@]}; do - for GAMMA_COEFFICIENT in ${GAMMA_COEFFICIENTS[@]}; do - for SCHEDULER_TIME_DISCRETIZATION in ${SCHEDULER_TIME_DISCRETIZATIONS[@]}; do - for DAG_AWARE in ${DAG_AWARENESS[@]}; do - if [[ ${SCHEDULER} == EDF && ( "${SCHEDULER_TIME_DISCRETIZATION}" -ne "${SCHEDULER_TIME_DISCRETIZATIONS[0]}" || "${DAG_AWARE}" -ne "${DAG_AWARENESS[0]}" ) ]]; then - continue - fi - - LOG_BASE=${REPLAY_TRACE}_scheduler_${SCHEDULER}_release_policy_${RELEASE_POLICY}_max_deadline_var_${MAX_DEADLINE_VARIANCE}_dag_aware_${DAG_AWARE}_poisson_arrival_rate_${POISSON_ARRIVAL_RATE}_gamma_coefficient_${GAMMA_COEFFICIENT} - - if [[ ${SCHEDULER} != EDF ]]; then - LOG_BASE+="_scheduler_discretization_${SCHEDULER_TIME_DISCRETIZATION}" - fi - - mkdir -p ${LOG_DIR}/${LOG_BASE} - execute_experiment ${LOG_DIR} ${LOG_BASE} & - if [[ $(jobs -r -p | wc -l) -ge $PARALLEL_FACTOR ]]; then - echo "[x] Waiting for a job to terminate because $PARALLEL_FACTOR jobs are running." - wait -n - fi - done - done - done - done - done - done -done -wait -echo "[x] Finished executing all experiments." \ No newline at end of file diff --git a/scripts/run_alibaba_experiments_osdi_11_30_noon_arrival_rate_0.001_0.005_max_dead_var_200_400_800_cv2_1_2_4_6.sh b/scripts/run_alibaba_experiments_osdi_11_30_noon_arrival_rate_0.001_0.005_max_dead_var_200_400_800_cv2_1_2_4_6.sh deleted file mode 100755 index edfc885a..00000000 --- a/scripts/run_alibaba_experiments_osdi_11_30_noon_arrival_rate_0.001_0.005_max_dead_var_200_400_800_cv2_1_2_4_6.sh +++ /dev/null @@ -1,133 +0,0 @@ -#!/bin/bash -# $1 directory where to save the logs. - -# Scheduler runtimes in us.TetriSched -SCHEDULERS=(EDF TetriSched) -# MAX_DEADLINE_VARIANCES=(15 25 50 100 200) -MAX_DEADLINE_VARIANCES=(200 400 800) -SCHEDULER_TIME_DISCRETIZATIONS=(1 10 20) -# RELEASE_POLICIES=(fixed poisson gamma) -RELEASE_POLICIES=(gamma) -# POISSON_ARRIVAL_RATES=(0.2 0.5 1 2) -# POISSON_ARRIVAL_RATES=(0.01 0.05 0.1) -POISSON_ARRIVAL_RATES=(0.001 0.005) -GAMMA_COEFFICIENTS=(1 2 4 6) #cv2 -DAG_AWARENESS=(0 1) # False True - -ERDOS_SIMULATOR_DIR="." # Change this to the directory where the simulator is located. -MIN_DEADLINE_VARIANCE=10 -NUM_INVOCATIONS=100 -SCHEDULER_LOG_TIMES=10 -SCHEDULER_RUNTIME=0 -LOG_LEVEL=info -REPLAY_TRACE=alibaba -WORKLOAD_PROFILE_PATH=./traces/alibaba-cluster-trace-v2018/alibaba_set_0_6600_dags.pkl -EXECUTION_MODE=replay -WORKER_CONFIG=alibaba_cluster - -PARALLEL_FACTOR=2 -# Move to the simulator directory. -if [[ -z ${ERDOS_SIMULATOR_DIR} ]]; then - echo "[x] ERRROR: ERDOS_SIMULATOR_DIR is not set" - exit 1 -fi -cd ${ERDOS_SIMULATOR_DIR} - -LOG_DIR=$1 -if [[ -z ${LOG_DIR} ]]; then - echo "[x] ERROR: Please provide a directory to output results to as the first argument." - exit 2 -fi - -execute_experiment () { - LOG_DIR=$1 - LOG_BASE=$2 - echo "[x] Initiating the execution of ${LOG_BASE}" - if [ ! 
-f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then -# --log_dir=${LOG_DIR}/${LOG_BASE} -# --scheduler_log_to_file - MYCONF="\ ---log_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.log ---csv_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv ---log_level=${LOG_LEVEL} ---execution_mode=${EXECUTION_MODE} ---replay_trace=${REPLAY_TRACE} ---max_deadline_variance=${MAX_DEADLINE_VARIANCE} ---min_deadline_variance=${MIN_DEADLINE_VARIANCE} ---workload_profile_path=${WORKLOAD_PROFILE_PATH} ---override_num_invocations=${NUM_INVOCATIONS} ---randomize_start_time_max=100 ---worker_profile_path=profiles/workers/${WORKER_CONFIG}.yaml ---scheduler_runtime=${SCHEDULER_RUNTIME} ---override_release_policy=${RELEASE_POLICY} ---scheduler=${SCHEDULER} -" - if [[ ${RELEASE_POLICY} == fixed ]]; then - MYCONF+="--override_arrival_period=10 -" - else - MYCONF+="--override_poisson_arrival_rate=${POISSON_ARRIVAL_RATE} -" - if [[ ${RELEASE_POLICY} == gamma ]]; then - MYCONF+="--override_gamma_coefficient=${GAMMA_COEFFICIENT} -" - fi - fi - - if [[ ${DAG_AWARE} == 1 ]]; then - MYCONF+="--release_taskgraphs -" - fi - - if [[ ${SCHEDULER} != EDF ]]; then - MYCONF+=" ---enforce_deadlines ---retract_schedules ---drop_skipped_tasks ---scheduler_log_times=${SCHEDULER_LOG_TIMES} ---scheduler_time_discretization=${SCHEDULER_TIME_DISCRETIZATION} - " - fi - echo "${MYCONF}" > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf - if ! python3 main.py --flagfile=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.output; then - echo "[x] Failed in the execution of ${LOG_BASE}. Exiting." - exit 3 - fi - else - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - fi - echo "[x] Finished execution of ${LOG_BASE}." -} - -for MAX_DEADLINE_VARIANCE in ${MAX_DEADLINE_VARIANCES[@]}; do - for SCHEDULER in ${SCHEDULERS[@]}; do - for RELEASE_POLICY in ${RELEASE_POLICIES[@]}; do - for POISSON_ARRIVAL_RATE in ${POISSON_ARRIVAL_RATES[@]}; do - for GAMMA_COEFFICIENT in ${GAMMA_COEFFICIENTS[@]}; do - for SCHEDULER_TIME_DISCRETIZATION in ${SCHEDULER_TIME_DISCRETIZATIONS[@]}; do - for DAG_AWARE in ${DAG_AWARENESS[@]}; do - if [[ ${SCHEDULER} == EDF && ( "${SCHEDULER_TIME_DISCRETIZATION}" -ne "${SCHEDULER_TIME_DISCRETIZATIONS[0]}" || "${DAG_AWARE}" -ne "${DAG_AWARENESS[0]}" ) ]]; then - continue - fi - - LOG_BASE=${REPLAY_TRACE}_scheduler_${SCHEDULER}_release_policy_${RELEASE_POLICY}_max_deadline_var_${MAX_DEADLINE_VARIANCE}_dag_aware_${DAG_AWARE}_poisson_arrival_rate_${POISSON_ARRIVAL_RATE}_gamma_coefficient_${GAMMA_COEFFICIENT} - - if [[ ${SCHEDULER} != EDF ]]; then - LOG_BASE+="_scheduler_discretization_${SCHEDULER_TIME_DISCRETIZATION}" - fi - - mkdir -p ${LOG_DIR}/${LOG_BASE} - execute_experiment ${LOG_DIR} ${LOG_BASE} & - if [[ $(jobs -r -p | wc -l) -ge $PARALLEL_FACTOR ]]; then - echo "[x] Waiting for a job to terminate because $PARALLEL_FACTOR jobs are running." - wait -n - fi - done - done - done - done - done - done -done -wait -echo "[x] Finished executing all experiments." \ No newline at end of file diff --git a/scripts/run_alibaba_experiments_osdi_12_1_evening.sh b/scripts/run_alibaba_experiments_osdi_12_1_evening.sh deleted file mode 100755 index 8debefdc..00000000 --- a/scripts/run_alibaba_experiments_osdi_12_1_evening.sh +++ /dev/null @@ -1,135 +0,0 @@ -#!/bin/bash -# $1 directory where to save the logs. 
- -# Scheduler runtimes in us.TetriSched -SCHEDULERS=(EDF TetriSched) -# MAX_DEADLINE_VARIANCES=(15 25 50 100 200) -# MAX_DEADLINE_VARIANCES=(200 400 800) -MAX_DEADLINE_VARIANCES=(200 250 300 400) -# SCHEDULER_TIME_DISCRETIZATIONS=(1 10 20) -SCHEDULER_TIME_DISCRETIZATIONS=(1) -# RELEASE_POLICIES=(fixed poisson gamma) -RELEASE_POLICIES=(gamma) -# POISSON_ARRIVAL_RATES=(0.2 0.5 1 2) -POISSON_ARRIVAL_RATES=(0.01) -GAMMA_COEFFICIENTS=(9 10 12 14) #cv2 -DAG_AWARENESS=(1) # False True - -ERDOS_SIMULATOR_DIR="." # Change this to the directory where the simulator is located. -MIN_DEADLINE_VARIANCE=10 -NUM_INVOCATIONS=200 -SCHEDULER_LOG_TIMES=10 -SCHEDULER_RUNTIME=0 -LOG_LEVEL=info -REPLAY_TRACE=alibaba -WORKLOAD_PROFILE_PATH=./traces/alibaba-cluster-trace-v2018/alibaba_set_0_6600_dags.pkl -EXECUTION_MODE=replay -WORKER_CONFIG=alibaba_cluster - -PARALLEL_FACTOR=2 -# Move to the simulator directory. -if [[ -z ${ERDOS_SIMULATOR_DIR} ]]; then - echo "[x] ERRROR: ERDOS_SIMULATOR_DIR is not set" - exit 1 -fi -cd ${ERDOS_SIMULATOR_DIR} - -LOG_DIR=$1 -if [[ -z ${LOG_DIR} ]]; then - echo "[x] ERROR: Please provide a directory to output results to as the first argument." - exit 2 -fi - -execute_experiment () { - LOG_DIR=$1 - LOG_BASE=$2 - echo "[x] Initiating the execution of ${LOG_BASE}" - if [ ! -f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then -# --log_dir=${LOG_DIR}/${LOG_BASE} -# --scheduler_log_to_file - MYCONF="\ ---log_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.log ---csv_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv ---log_level=${LOG_LEVEL} ---execution_mode=${EXECUTION_MODE} ---replay_trace=${REPLAY_TRACE} ---max_deadline_variance=${MAX_DEADLINE_VARIANCE} ---min_deadline_variance=${MIN_DEADLINE_VARIANCE} ---workload_profile_path=${WORKLOAD_PROFILE_PATH} ---override_num_invocations=${NUM_INVOCATIONS} ---randomize_start_time_max=100 ---worker_profile_path=profiles/workers/${WORKER_CONFIG}.yaml ---scheduler_runtime=${SCHEDULER_RUNTIME} ---override_release_policy=${RELEASE_POLICY} ---scheduler=${SCHEDULER} -" - if [[ ${RELEASE_POLICY} == fixed ]]; then - MYCONF+="--override_arrival_period=10 -" - else - MYCONF+="--override_poisson_arrival_rate=${POISSON_ARRIVAL_RATE} -" - if [[ ${RELEASE_POLICY} == gamma ]]; then - MYCONF+="--override_gamma_coefficient=${GAMMA_COEFFICIENT} -" - fi - fi - - if [[ ${DAG_AWARE} == 1 ]]; then - MYCONF+="--release_taskgraphs -" - fi - - if [[ ${SCHEDULER} != EDF ]]; then - MYCONF+=" ---enforce_deadlines ---retract_schedules ---drop_skipped_tasks ---scheduler_log_times=${SCHEDULER_LOG_TIMES} ---scheduler_time_discretization=${SCHEDULER_TIME_DISCRETIZATION} - " - fi - echo "${MYCONF}" > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf - if ! time python3 main.py --flagfile=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.output; then - echo "[x] Failed in the execution of ${LOG_BASE}. Exiting." - exit 3 - fi - else - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - fi - echo "[x] Finished execution of ${LOG_BASE}." 
-} - -for MAX_DEADLINE_VARIANCE in ${MAX_DEADLINE_VARIANCES[@]}; do - for SCHEDULER in ${SCHEDULERS[@]}; do - for RELEASE_POLICY in ${RELEASE_POLICIES[@]}; do - for POISSON_ARRIVAL_RATE in ${POISSON_ARRIVAL_RATES[@]}; do - for GAMMA_COEFFICIENT in ${GAMMA_COEFFICIENTS[@]}; do - for SCHEDULER_TIME_DISCRETIZATION in ${SCHEDULER_TIME_DISCRETIZATIONS[@]}; do - for DAG_AWARE in ${DAG_AWARENESS[@]}; do - if [[ ${SCHEDULER} == EDF && ( "${SCHEDULER_TIME_DISCRETIZATION}" -ne "${SCHEDULER_TIME_DISCRETIZATIONS[0]}" || "${DAG_AWARE}" -ne "${DAG_AWARENESS[0]}" ) ]]; then - continue - fi - - LOG_BASE=${REPLAY_TRACE}_scheduler_${SCHEDULER}_release_policy_${RELEASE_POLICY}_max_deadline_var_${MAX_DEADLINE_VARIANCE}_dag_aware_${DAG_AWARE}_poisson_arrival_rate_${POISSON_ARRIVAL_RATE}_gamma_coefficient_${GAMMA_COEFFICIENT} - - if [[ ${SCHEDULER} != EDF ]]; then - LOG_BASE+="_scheduler_discretization_${SCHEDULER_TIME_DISCRETIZATION}" - fi - - mkdir -p ${LOG_DIR}/${LOG_BASE} - execute_experiment ${LOG_DIR} ${LOG_BASE} & - sleep 1 - if [[ $(jobs -r -p | wc -l) -ge $PARALLEL_FACTOR ]]; then - echo "[x] Waiting for a job to terminate because $PARALLEL_FACTOR jobs are running." - wait -n - fi - done - done - done - done - done - done -done -wait -echo "[x] Finished executing all experiments." \ No newline at end of file diff --git a/scripts/run_alibaba_experiments_osdi_12_1_evening_2.sh b/scripts/run_alibaba_experiments_osdi_12_1_evening_2.sh deleted file mode 100755 index f74eef12..00000000 --- a/scripts/run_alibaba_experiments_osdi_12_1_evening_2.sh +++ /dev/null @@ -1,137 +0,0 @@ -#!/bin/bash -# $1 directory where to save the logs. - -# Scheduler runtimes in us.TetriSched -SCHEDULERS=(EDF TetriSched) -# MAX_DEADLINE_VARIANCES=(15 25 50 100 200) -# MAX_DEADLINE_VARIANCES=(200 400 800) -MAX_DEADLINE_VARIANCES=(200 250 300 400) -# SCHEDULER_TIME_DISCRETIZATIONS=(1 10 20) -SCHEDULER_TIME_DISCRETIZATIONS=(1) -# RELEASE_POLICIES=(fixed poisson gamma) -RELEASE_POLICIES=(gamma) -# POISSON_ARRIVAL_RATES=(0.2 0.5 1 2) -POISSON_ARRIVAL_RATES=(0.005 0.0075) -GAMMA_COEFFICIENTS=(8 10 12) #cv2 -DAG_AWARENESS=(1) # False True -TASK_CPU_DIVISOR=25 - -ERDOS_SIMULATOR_DIR="." # Change this to the directory where the simulator is located. -MIN_DEADLINE_VARIANCE=10 -NUM_INVOCATIONS=100 -SCHEDULER_LOG_TIMES=10 -SCHEDULER_RUNTIME=0 -LOG_LEVEL=info -REPLAY_TRACE=alibaba -WORKLOAD_PROFILE_PATH=./traces/alibaba-cluster-trace-v2018/alibaba_set_0_6600_dags.pkl -EXECUTION_MODE=replay -WORKER_CONFIG=alibaba_cluster - -PARALLEL_FACTOR=2 -# Move to the simulator directory. -if [[ -z ${ERDOS_SIMULATOR_DIR} ]]; then - echo "[x] ERRROR: ERDOS_SIMULATOR_DIR is not set" - exit 1 -fi -cd ${ERDOS_SIMULATOR_DIR} - -LOG_DIR=$1 -if [[ -z ${LOG_DIR} ]]; then - echo "[x] ERROR: Please provide a directory to output results to as the first argument." - exit 2 -fi - -execute_experiment () { - LOG_DIR=$1 - LOG_BASE=$2 - echo "[x] Initiating the execution of ${LOG_BASE}" - if [ ! 
-f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then -# --log_dir=${LOG_DIR}/${LOG_BASE} -# --scheduler_log_to_file - MYCONF="\ ---log_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.log ---csv_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv ---log_level=${LOG_LEVEL} ---execution_mode=${EXECUTION_MODE} ---replay_trace=${REPLAY_TRACE} ---max_deadline_variance=${MAX_DEADLINE_VARIANCE} ---min_deadline_variance=${MIN_DEADLINE_VARIANCE} ---workload_profile_path=${WORKLOAD_PROFILE_PATH} ---override_num_invocations=${NUM_INVOCATIONS} ---randomize_start_time_max=100 ---worker_profile_path=profiles/workers/${WORKER_CONFIG}.yaml ---scheduler_runtime=${SCHEDULER_RUNTIME} ---override_release_policy=${RELEASE_POLICY} ---scheduler=${SCHEDULER} ---alibaba_loader_task_cpu_divisor=${TASK_CPU_DIVISOR} -" - if [[ ${RELEASE_POLICY} == fixed ]]; then - MYCONF+="--override_arrival_period=10 -" - else - MYCONF+="--override_poisson_arrival_rate=${POISSON_ARRIVAL_RATE} -" - if [[ ${RELEASE_POLICY} == gamma ]]; then - MYCONF+="--override_gamma_coefficient=${GAMMA_COEFFICIENT} -" - fi - fi - - if [[ ${DAG_AWARE} == 1 ]]; then - MYCONF+="--release_taskgraphs -" - fi - - if [[ ${SCHEDULER} != EDF ]]; then - MYCONF+=" ---enforce_deadlines ---retract_schedules ---drop_skipped_tasks ---scheduler_log_times=${SCHEDULER_LOG_TIMES} ---scheduler_time_discretization=${SCHEDULER_TIME_DISCRETIZATION} - " - fi - echo "${MYCONF}" > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf - if ! time python3 main.py --flagfile=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.output; then - echo "[x] Failed in the execution of ${LOG_BASE}. Exiting." - exit 3 - fi - else - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - fi - echo "[x] Finished execution of ${LOG_BASE}." -} - -for MAX_DEADLINE_VARIANCE in ${MAX_DEADLINE_VARIANCES[@]}; do - for SCHEDULER in ${SCHEDULERS[@]}; do - for RELEASE_POLICY in ${RELEASE_POLICIES[@]}; do - for POISSON_ARRIVAL_RATE in ${POISSON_ARRIVAL_RATES[@]}; do - for GAMMA_COEFFICIENT in ${GAMMA_COEFFICIENTS[@]}; do - for SCHEDULER_TIME_DISCRETIZATION in ${SCHEDULER_TIME_DISCRETIZATIONS[@]}; do - for DAG_AWARE in ${DAG_AWARENESS[@]}; do - if [[ ${SCHEDULER} == EDF && ( "${SCHEDULER_TIME_DISCRETIZATION}" -ne "${SCHEDULER_TIME_DISCRETIZATIONS[0]}" || "${DAG_AWARE}" -ne "${DAG_AWARENESS[0]}" ) ]]; then - continue - fi - - LOG_BASE=${REPLAY_TRACE}_scheduler_${SCHEDULER}_release_policy_${RELEASE_POLICY}_max_deadline_var_${MAX_DEADLINE_VARIANCE}_dag_aware_${DAG_AWARE}_poisson_arrival_rate_${POISSON_ARRIVAL_RATE}_gamma_coefficient_${GAMMA_COEFFICIENT} - - if [[ ${SCHEDULER} != EDF ]]; then - LOG_BASE+="_scheduler_discretization_${SCHEDULER_TIME_DISCRETIZATION}" - fi - - mkdir -p ${LOG_DIR}/${LOG_BASE} - execute_experiment ${LOG_DIR} ${LOG_BASE} & - sleep 1 - if [[ $(jobs -r -p | wc -l) -ge $PARALLEL_FACTOR ]]; then - echo "[x] Waiting for a job to terminate because $PARALLEL_FACTOR jobs are running." - wait -n - fi - done - done - done - done - done - done -done -wait -echo "[x] Finished executing all experiments." \ No newline at end of file diff --git a/scripts/run_alibaba_experiments_osdi_12_1_evening_2_jayne.sh b/scripts/run_alibaba_experiments_osdi_12_1_evening_2_jayne.sh deleted file mode 100755 index 15ca9e14..00000000 --- a/scripts/run_alibaba_experiments_osdi_12_1_evening_2_jayne.sh +++ /dev/null @@ -1,136 +0,0 @@ -#!/bin/bash -# $1 directory where to save the logs. 
- -# Scheduler runtimes in us.TetriSched -SCHEDULERS=(EDF TetriSched) -# MAX_DEADLINE_VARIANCES=(15 25 50 100 200) -# MAX_DEADLINE_VARIANCES=(200 400 800) -MAX_DEADLINE_VARIANCES=(200 300 400) -SCHEDULER_TIME_DISCRETIZATIONS=(1) -# RELEASE_POLICIES=(fixed poisson gamma) -RELEASE_POLICIES=(gamma) -# POISSON_ARRIVAL_RATES=(0.2 0.5 1 2) -POISSON_ARRIVAL_RATES=(0.01 0.015 0.02) -GAMMA_COEFFICIENTS=(1 2 4) #cv2 -DAG_AWARENESS=(1) # False True -TASK_CPU_DIVISOR=25 - -ERDOS_SIMULATOR_DIR="." # Change this to the directory where the simulator is located. -MIN_DEADLINE_VARIANCE=10 -NUM_INVOCATIONS=50 -SCHEDULER_LOG_TIMES=10 -SCHEDULER_RUNTIME=0 -LOG_LEVEL=info -REPLAY_TRACE=alibaba -WORKLOAD_PROFILE_PATH=./traces/alibaba-cluster-trace-v2018/alibaba_set_0_6600_dags.pkl -EXECUTION_MODE=replay -WORKER_CONFIG=alibaba_cluster - -PARALLEL_FACTOR=2 -# Move to the simulator directory. -if [[ -z ${ERDOS_SIMULATOR_DIR} ]]; then - echo "[x] ERRROR: ERDOS_SIMULATOR_DIR is not set" - exit 1 -fi -cd ${ERDOS_SIMULATOR_DIR} - -LOG_DIR=$1 -if [[ -z ${LOG_DIR} ]]; then - echo "[x] ERROR: Please provide a directory to output results to as the first argument." - exit 2 -fi - -execute_experiment () { - LOG_DIR=$1 - LOG_BASE=$2 - echo "[x] Initiating the execution of ${LOG_BASE}" - if [ ! -f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then -# --log_dir=${LOG_DIR}/${LOG_BASE} -# --scheduler_log_to_file - MYCONF="\ ---log_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.log ---csv_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv ---log_level=${LOG_LEVEL} ---execution_mode=${EXECUTION_MODE} ---replay_trace=${REPLAY_TRACE} ---max_deadline_variance=${MAX_DEADLINE_VARIANCE} ---min_deadline_variance=${MIN_DEADLINE_VARIANCE} ---workload_profile_path=${WORKLOAD_PROFILE_PATH} ---override_num_invocations=${NUM_INVOCATIONS} ---randomize_start_time_max=100 ---worker_profile_path=profiles/workers/${WORKER_CONFIG}.yaml ---scheduler_runtime=${SCHEDULER_RUNTIME} ---override_release_policy=${RELEASE_POLICY} ---scheduler=${SCHEDULER} ---alibaba_loader_task_cpu_divisor=${TASK_CPU_DIVISOR} -" - if [[ ${RELEASE_POLICY} == fixed ]]; then - MYCONF+="--override_arrival_period=10 -" - else - MYCONF+="--override_poisson_arrival_rate=${POISSON_ARRIVAL_RATE} -" - if [[ ${RELEASE_POLICY} == gamma ]]; then - MYCONF+="--override_gamma_coefficient=${GAMMA_COEFFICIENT} -" - fi - fi - - if [[ ${DAG_AWARE} == 1 ]]; then - MYCONF+="--release_taskgraphs -" - fi - - if [[ ${SCHEDULER} != EDF ]]; then - MYCONF+=" ---enforce_deadlines ---retract_schedules ---drop_skipped_tasks ---scheduler_log_times=${SCHEDULER_LOG_TIMES} ---scheduler_time_discretization=${SCHEDULER_TIME_DISCRETIZATION} - " - fi - echo "${MYCONF}" > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf - if ! time python3 main.py --flagfile=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.output; then - echo "[x] Failed in the execution of ${LOG_BASE}. Exiting." - exit 3 - fi - else - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - fi - echo "[x] Finished execution of ${LOG_BASE}." 
-} - -for MAX_DEADLINE_VARIANCE in ${MAX_DEADLINE_VARIANCES[@]}; do - for SCHEDULER in ${SCHEDULERS[@]}; do - for RELEASE_POLICY in ${RELEASE_POLICIES[@]}; do - for POISSON_ARRIVAL_RATE in ${POISSON_ARRIVAL_RATES[@]}; do - for GAMMA_COEFFICIENT in ${GAMMA_COEFFICIENTS[@]}; do - for SCHEDULER_TIME_DISCRETIZATION in ${SCHEDULER_TIME_DISCRETIZATIONS[@]}; do - for DAG_AWARE in ${DAG_AWARENESS[@]}; do - if [[ ${SCHEDULER} == EDF && ( "${SCHEDULER_TIME_DISCRETIZATION}" -ne "${SCHEDULER_TIME_DISCRETIZATIONS[0]}" || "${DAG_AWARE}" -ne "${DAG_AWARENESS[0]}" ) ]]; then - continue - fi - - LOG_BASE=${REPLAY_TRACE}_scheduler_${SCHEDULER}_release_policy_${RELEASE_POLICY}_max_deadline_var_${MAX_DEADLINE_VARIANCE}_dag_aware_${DAG_AWARE}_poisson_arrival_rate_${POISSON_ARRIVAL_RATE}_gamma_coefficient_${GAMMA_COEFFICIENT} - - if [[ ${SCHEDULER} != EDF ]]; then - LOG_BASE+="_scheduler_discretization_${SCHEDULER_TIME_DISCRETIZATION}" - fi - - mkdir -p ${LOG_DIR}/${LOG_BASE} - execute_experiment ${LOG_DIR} ${LOG_BASE} & - # sleep 0.5 - if [[ $(jobs -r -p | wc -l) -ge $PARALLEL_FACTOR ]]; then - echo "[x] Waiting for a job to terminate because $PARALLEL_FACTOR jobs are running." - wait -n - fi - done - done - done - done - done - done -done -wait -echo "[x] Finished executing all experiments." \ No newline at end of file diff --git a/scripts/run_alibaba_experiments_osdi_12_1_evening_3.sh b/scripts/run_alibaba_experiments_osdi_12_1_evening_3.sh deleted file mode 100755 index a61af3bb..00000000 --- a/scripts/run_alibaba_experiments_osdi_12_1_evening_3.sh +++ /dev/null @@ -1,137 +0,0 @@ -#!/bin/bash -# $1 directory where to save the logs. - -# Scheduler runtimes in us.TetriSched -SCHEDULERS=(EDF TetriSched) -# MAX_DEADLINE_VARIANCES=(15 25 50 100 200) -# MAX_DEADLINE_VARIANCES=(200 400 800) -MAX_DEADLINE_VARIANCES=(250 300 350 400) -SCHEDULER_TIME_DISCRETIZATIONS=(1 10 20) -# SCHEDULER_TIME_DISCRETIZATIONS=(1) -# RELEASE_POLICIES=(fixed poisson gamma) -RELEASE_POLICIES=(gamma) -# POISSON_ARRIVAL_RATES=(0.2 0.5 1 2) -POISSON_ARRIVAL_RATES=(0.002 0.004 0.005) -GAMMA_COEFFICIENTS=(10 11 12) #cv2 -DAG_AWARENESS=(0 1) # False True -TASK_CPU_DIVISOR=25 - -ERDOS_SIMULATOR_DIR="." # Change this to the directory where the simulator is located. -MIN_DEADLINE_VARIANCE=10 -NUM_INVOCATIONS=300 -SCHEDULER_LOG_TIMES=10 -SCHEDULER_RUNTIME=0 -LOG_LEVEL=info -REPLAY_TRACE=alibaba -WORKLOAD_PROFILE_PATH=./traces/alibaba-cluster-trace-v2018/alibaba_set_0_6600_dags.pkl -EXECUTION_MODE=replay -WORKER_CONFIG=alibaba_cluster - -PARALLEL_FACTOR=2 -# Move to the simulator directory. -if [[ -z ${ERDOS_SIMULATOR_DIR} ]]; then - echo "[x] ERRROR: ERDOS_SIMULATOR_DIR is not set" - exit 1 -fi -cd ${ERDOS_SIMULATOR_DIR} - -LOG_DIR=$1 -if [[ -z ${LOG_DIR} ]]; then - echo "[x] ERROR: Please provide a directory to output results to as the first argument." - exit 2 -fi - -execute_experiment () { - LOG_DIR=$1 - LOG_BASE=$2 - echo "[x] Initiating the execution of ${LOG_BASE}" - if [ ! 
-f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then -# --log_dir=${LOG_DIR}/${LOG_BASE} -# --scheduler_log_to_file - MYCONF="\ ---log_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.log ---csv_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv ---log_level=${LOG_LEVEL} ---execution_mode=${EXECUTION_MODE} ---replay_trace=${REPLAY_TRACE} ---max_deadline_variance=${MAX_DEADLINE_VARIANCE} ---min_deadline_variance=${MIN_DEADLINE_VARIANCE} ---workload_profile_path=${WORKLOAD_PROFILE_PATH} ---override_num_invocations=${NUM_INVOCATIONS} ---randomize_start_time_max=100 ---worker_profile_path=profiles/workers/${WORKER_CONFIG}.yaml ---scheduler_runtime=${SCHEDULER_RUNTIME} ---override_release_policy=${RELEASE_POLICY} ---scheduler=${SCHEDULER} ---alibaba_loader_task_cpu_divisor=${TASK_CPU_DIVISOR} -" - if [[ ${RELEASE_POLICY} == fixed ]]; then - MYCONF+="--override_arrival_period=10 -" - else - MYCONF+="--override_poisson_arrival_rate=${POISSON_ARRIVAL_RATE} -" - if [[ ${RELEASE_POLICY} == gamma ]]; then - MYCONF+="--override_gamma_coefficient=${GAMMA_COEFFICIENT} -" - fi - fi - - if [[ ${DAG_AWARE} == 1 ]]; then - MYCONF+="--release_taskgraphs -" - fi - - if [[ ${SCHEDULER} != EDF ]]; then - MYCONF+=" ---enforce_deadlines ---retract_schedules ---drop_skipped_tasks ---scheduler_log_times=${SCHEDULER_LOG_TIMES} ---scheduler_time_discretization=${SCHEDULER_TIME_DISCRETIZATION} - " - fi - echo "${MYCONF}" > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf - if ! time python3 main.py --flagfile=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.output; then - echo "[x] Failed in the execution of ${LOG_BASE}. Exiting." - exit 3 - fi - else - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - fi - echo "[x] Finished execution of ${LOG_BASE}." -} - -for MAX_DEADLINE_VARIANCE in ${MAX_DEADLINE_VARIANCES[@]}; do - for SCHEDULER in ${SCHEDULERS[@]}; do - for RELEASE_POLICY in ${RELEASE_POLICIES[@]}; do - for POISSON_ARRIVAL_RATE in ${POISSON_ARRIVAL_RATES[@]}; do - for GAMMA_COEFFICIENT in ${GAMMA_COEFFICIENTS[@]}; do - for SCHEDULER_TIME_DISCRETIZATION in ${SCHEDULER_TIME_DISCRETIZATIONS[@]}; do - for DAG_AWARE in ${DAG_AWARENESS[@]}; do - if [[ ${SCHEDULER} == EDF && ( "${SCHEDULER_TIME_DISCRETIZATION}" -ne "${SCHEDULER_TIME_DISCRETIZATIONS[0]}" || "${DAG_AWARE}" -ne "${DAG_AWARENESS[0]}" ) ]]; then - continue - fi - - LOG_BASE=${REPLAY_TRACE}_scheduler_${SCHEDULER}_release_policy_${RELEASE_POLICY}_max_deadline_var_${MAX_DEADLINE_VARIANCE}_dag_aware_${DAG_AWARE}_poisson_arrival_rate_${POISSON_ARRIVAL_RATE}_gamma_coefficient_${GAMMA_COEFFICIENT} - - if [[ ${SCHEDULER} != EDF ]]; then - LOG_BASE+="_scheduler_discretization_${SCHEDULER_TIME_DISCRETIZATION}" - fi - - mkdir -p ${LOG_DIR}/${LOG_BASE} - execute_experiment ${LOG_DIR} ${LOG_BASE} & - sleep 1 - if [[ $(jobs -r -p | wc -l) -ge $PARALLEL_FACTOR ]]; then - echo "[x] Waiting for a job to terminate because $PARALLEL_FACTOR jobs are running." - wait -n - fi - done - done - done - done - done - done -done -wait -echo "[x] Finished executing all experiments." \ No newline at end of file diff --git a/scripts/run_alibaba_experiments_osdi_12_1_evening_3_jayne.sh b/scripts/run_alibaba_experiments_osdi_12_1_evening_3_jayne.sh deleted file mode 100755 index 7a503887..00000000 --- a/scripts/run_alibaba_experiments_osdi_12_1_evening_3_jayne.sh +++ /dev/null @@ -1,136 +0,0 @@ -#!/bin/bash -# $1 directory where to save the logs. 
- -# Scheduler runtimes in us.TetriSched -SCHEDULERS=(EDF TetriSched) -# MAX_DEADLINE_VARIANCES=(15 25 50 100 200) -# MAX_DEADLINE_VARIANCES=(200 400 800) -MAX_DEADLINE_VARIANCES=(300 400 500) -SCHEDULER_TIME_DISCRETIZATIONS=(1 10 20) -# RELEASE_POLICIES=(fixed poisson gamma) -RELEASE_POLICIES=(gamma) -# POISSON_ARRIVAL_RATES=(0.2 0.5 1 2) -POISSON_ARRIVAL_RATES=(0.015 0.02) -GAMMA_COEFFICIENTS=(1 2 4) #cv2 -DAG_AWARENESS=(0 1) # False True -TASK_CPU_DIVISOR=25 - -ERDOS_SIMULATOR_DIR="." # Change this to the directory where the simulator is located. -MIN_DEADLINE_VARIANCE=10 -NUM_INVOCATIONS=300 -SCHEDULER_LOG_TIMES=10 -SCHEDULER_RUNTIME=0 -LOG_LEVEL=info -REPLAY_TRACE=alibaba -WORKLOAD_PROFILE_PATH=./traces/alibaba-cluster-trace-v2018/alibaba_set_0_6600_dags.pkl -EXECUTION_MODE=replay -WORKER_CONFIG=alibaba_cluster - -PARALLEL_FACTOR=2 -# Move to the simulator directory. -if [[ -z ${ERDOS_SIMULATOR_DIR} ]]; then - echo "[x] ERRROR: ERDOS_SIMULATOR_DIR is not set" - exit 1 -fi -cd ${ERDOS_SIMULATOR_DIR} - -LOG_DIR=$1 -if [[ -z ${LOG_DIR} ]]; then - echo "[x] ERROR: Please provide a directory to output results to as the first argument." - exit 2 -fi - -execute_experiment () { - LOG_DIR=$1 - LOG_BASE=$2 - echo "[x] Initiating the execution of ${LOG_BASE}" - if [ ! -f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then -# --log_dir=${LOG_DIR}/${LOG_BASE} -# --scheduler_log_to_file - MYCONF="\ ---log_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.log ---csv_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv ---log_level=${LOG_LEVEL} ---execution_mode=${EXECUTION_MODE} ---replay_trace=${REPLAY_TRACE} ---max_deadline_variance=${MAX_DEADLINE_VARIANCE} ---min_deadline_variance=${MIN_DEADLINE_VARIANCE} ---workload_profile_path=${WORKLOAD_PROFILE_PATH} ---override_num_invocations=${NUM_INVOCATIONS} ---randomize_start_time_max=100 ---worker_profile_path=profiles/workers/${WORKER_CONFIG}.yaml ---scheduler_runtime=${SCHEDULER_RUNTIME} ---override_release_policy=${RELEASE_POLICY} ---scheduler=${SCHEDULER} ---alibaba_loader_task_cpu_divisor=${TASK_CPU_DIVISOR} -" - if [[ ${RELEASE_POLICY} == fixed ]]; then - MYCONF+="--override_arrival_period=10 -" - else - MYCONF+="--override_poisson_arrival_rate=${POISSON_ARRIVAL_RATE} -" - if [[ ${RELEASE_POLICY} == gamma ]]; then - MYCONF+="--override_gamma_coefficient=${GAMMA_COEFFICIENT} -" - fi - fi - - if [[ ${DAG_AWARE} == 1 ]]; then - MYCONF+="--release_taskgraphs -" - fi - - if [[ ${SCHEDULER} != EDF ]]; then - MYCONF+=" ---enforce_deadlines ---retract_schedules ---drop_skipped_tasks ---scheduler_log_times=${SCHEDULER_LOG_TIMES} ---scheduler_time_discretization=${SCHEDULER_TIME_DISCRETIZATION} - " - fi - echo "${MYCONF}" > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf - if ! time python3 main.py --flagfile=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.output; then - echo "[x] Failed in the execution of ${LOG_BASE}. Exiting." - exit 3 - fi - else - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - fi - echo "[x] Finished execution of ${LOG_BASE}." 
-} - -for MAX_DEADLINE_VARIANCE in ${MAX_DEADLINE_VARIANCES[@]}; do - for SCHEDULER in ${SCHEDULERS[@]}; do - for RELEASE_POLICY in ${RELEASE_POLICIES[@]}; do - for POISSON_ARRIVAL_RATE in ${POISSON_ARRIVAL_RATES[@]}; do - for GAMMA_COEFFICIENT in ${GAMMA_COEFFICIENTS[@]}; do - for SCHEDULER_TIME_DISCRETIZATION in ${SCHEDULER_TIME_DISCRETIZATIONS[@]}; do - for DAG_AWARE in ${DAG_AWARENESS[@]}; do - if [[ ${SCHEDULER} == EDF && ( "${SCHEDULER_TIME_DISCRETIZATION}" -ne "${SCHEDULER_TIME_DISCRETIZATIONS[0]}" || "${DAG_AWARE}" -ne "${DAG_AWARENESS[0]}" ) ]]; then - continue - fi - - LOG_BASE=${REPLAY_TRACE}_scheduler_${SCHEDULER}_release_policy_${RELEASE_POLICY}_max_deadline_var_${MAX_DEADLINE_VARIANCE}_dag_aware_${DAG_AWARE}_poisson_arrival_rate_${POISSON_ARRIVAL_RATE}_gamma_coefficient_${GAMMA_COEFFICIENT} - - if [[ ${SCHEDULER} != EDF ]]; then - LOG_BASE+="_scheduler_discretization_${SCHEDULER_TIME_DISCRETIZATION}" - fi - - mkdir -p ${LOG_DIR}/${LOG_BASE} - execute_experiment ${LOG_DIR} ${LOG_BASE} & - # sleep 0.5 - if [[ $(jobs -r -p | wc -l) -ge $PARALLEL_FACTOR ]]; then - echo "[x] Waiting for a job to terminate because $PARALLEL_FACTOR jobs are running." - wait -n - fi - done - done - done - done - done - done -done -wait -echo "[x] Finished executing all experiments." \ No newline at end of file diff --git a/scripts/run_alibaba_experiments_osdi_12_1_evening_jayne.sh b/scripts/run_alibaba_experiments_osdi_12_1_evening_jayne.sh deleted file mode 100755 index 340cfd86..00000000 --- a/scripts/run_alibaba_experiments_osdi_12_1_evening_jayne.sh +++ /dev/null @@ -1,136 +0,0 @@ -#!/bin/bash -# $1 directory where to save the logs. - -# Scheduler runtimes in us.TetriSched -SCHEDULERS=(EDF TetriSched) -# MAX_DEADLINE_VARIANCES=(15 25 50 100 200) -# MAX_DEADLINE_VARIANCES=(200 400 800) -MAX_DEADLINE_VARIANCES=(300 400 500) -SCHEDULER_TIME_DISCRETIZATIONS=(1) -# RELEASE_POLICIES=(fixed poisson gamma) -RELEASE_POLICIES=(gamma) -# POISSON_ARRIVAL_RATES=(0.2 0.5 1 2) -POISSON_ARRIVAL_RATES=(0.02) -GAMMA_COEFFICIENTS=(9 10 12 14) #cv2 -DAG_AWARENESS=(1) # False True -TASK_CPU_DIVISOR=25 - -ERDOS_SIMULATOR_DIR="." # Change this to the directory where the simulator is located. -MIN_DEADLINE_VARIANCE=10 -NUM_INVOCATIONS=200 -SCHEDULER_LOG_TIMES=10 -SCHEDULER_RUNTIME=0 -LOG_LEVEL=info -REPLAY_TRACE=alibaba -WORKLOAD_PROFILE_PATH=./traces/alibaba-cluster-trace-v2018/alibaba_set_0_6600_dags.pkl -EXECUTION_MODE=replay -WORKER_CONFIG=alibaba_cluster - -PARALLEL_FACTOR=2 -# Move to the simulator directory. -if [[ -z ${ERDOS_SIMULATOR_DIR} ]]; then - echo "[x] ERRROR: ERDOS_SIMULATOR_DIR is not set" - exit 1 -fi -cd ${ERDOS_SIMULATOR_DIR} - -LOG_DIR=$1 -if [[ -z ${LOG_DIR} ]]; then - echo "[x] ERROR: Please provide a directory to output results to as the first argument." - exit 2 -fi - -execute_experiment () { - LOG_DIR=$1 - LOG_BASE=$2 - echo "[x] Initiating the execution of ${LOG_BASE}" - if [ ! 
-f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then -# --log_dir=${LOG_DIR}/${LOG_BASE} -# --scheduler_log_to_file - MYCONF="\ ---log_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.log ---csv_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv ---log_level=${LOG_LEVEL} ---execution_mode=${EXECUTION_MODE} ---replay_trace=${REPLAY_TRACE} ---max_deadline_variance=${MAX_DEADLINE_VARIANCE} ---min_deadline_variance=${MIN_DEADLINE_VARIANCE} ---workload_profile_path=${WORKLOAD_PROFILE_PATH} ---override_num_invocations=${NUM_INVOCATIONS} ---randomize_start_time_max=100 ---worker_profile_path=profiles/workers/${WORKER_CONFIG}.yaml ---scheduler_runtime=${SCHEDULER_RUNTIME} ---override_release_policy=${RELEASE_POLICY} ---scheduler=${SCHEDULER} ---alibaba_loader_task_cpu_divisor=${TASK_CPU_DIVISOR} -" - if [[ ${RELEASE_POLICY} == fixed ]]; then - MYCONF+="--override_arrival_period=10 -" - else - MYCONF+="--override_poisson_arrival_rate=${POISSON_ARRIVAL_RATE} -" - if [[ ${RELEASE_POLICY} == gamma ]]; then - MYCONF+="--override_gamma_coefficient=${GAMMA_COEFFICIENT} -" - fi - fi - - if [[ ${DAG_AWARE} == 1 ]]; then - MYCONF+="--release_taskgraphs -" - fi - - if [[ ${SCHEDULER} != EDF ]]; then - MYCONF+=" ---enforce_deadlines ---retract_schedules ---drop_skipped_tasks ---scheduler_log_times=${SCHEDULER_LOG_TIMES} ---scheduler_time_discretization=${SCHEDULER_TIME_DISCRETIZATION} - " - fi - echo "${MYCONF}" > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf - if ! time python3 main.py --flagfile=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.output; then - echo "[x] Failed in the execution of ${LOG_BASE}. Exiting." - exit 3 - fi - else - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - fi - echo "[x] Finished execution of ${LOG_BASE}." -} - -for MAX_DEADLINE_VARIANCE in ${MAX_DEADLINE_VARIANCES[@]}; do - for SCHEDULER in ${SCHEDULERS[@]}; do - for RELEASE_POLICY in ${RELEASE_POLICIES[@]}; do - for POISSON_ARRIVAL_RATE in ${POISSON_ARRIVAL_RATES[@]}; do - for GAMMA_COEFFICIENT in ${GAMMA_COEFFICIENTS[@]}; do - for SCHEDULER_TIME_DISCRETIZATION in ${SCHEDULER_TIME_DISCRETIZATIONS[@]}; do - for DAG_AWARE in ${DAG_AWARENESS[@]}; do - if [[ ${SCHEDULER} == EDF && ( "${SCHEDULER_TIME_DISCRETIZATION}" -ne "${SCHEDULER_TIME_DISCRETIZATIONS[0]}" || "${DAG_AWARE}" -ne "${DAG_AWARENESS[0]}" ) ]]; then - continue - fi - - LOG_BASE=${REPLAY_TRACE}_scheduler_${SCHEDULER}_release_policy_${RELEASE_POLICY}_max_deadline_var_${MAX_DEADLINE_VARIANCE}_dag_aware_${DAG_AWARE}_poisson_arrival_rate_${POISSON_ARRIVAL_RATE}_gamma_coefficient_${GAMMA_COEFFICIENT} - - if [[ ${SCHEDULER} != EDF ]]; then - LOG_BASE+="_scheduler_discretization_${SCHEDULER_TIME_DISCRETIZATION}" - fi - - mkdir -p ${LOG_DIR}/${LOG_BASE} - execute_experiment ${LOG_DIR} ${LOG_BASE} & - # sleep 0.5 - if [[ $(jobs -r -p | wc -l) -ge $PARALLEL_FACTOR ]]; then - echo "[x] Waiting for a job to terminate because $PARALLEL_FACTOR jobs are running." - wait -n - fi - done - done - done - done - done - done -done -wait -echo "[x] Finished executing all experiments." \ No newline at end of file diff --git a/scripts/run_alibaba_experiments_osdi_12_1_noon.sh b/scripts/run_alibaba_experiments_osdi_12_1_noon.sh deleted file mode 100755 index 57200c4e..00000000 --- a/scripts/run_alibaba_experiments_osdi_12_1_noon.sh +++ /dev/null @@ -1,134 +0,0 @@ -#!/bin/bash -# $1 directory where to save the logs. 
- -# Scheduler runtimes in us.TetriSched -SCHEDULERS=(EDF TetriSched) -# MAX_DEADLINE_VARIANCES=(15 25 50 100 200) -# MAX_DEADLINE_VARIANCES=(200 400 800) -MAX_DEADLINE_VARIANCES=(100 200 400) -SCHEDULER_TIME_DISCRETIZATIONS=(1 10 20) -# RELEASE_POLICIES=(fixed poisson gamma) -RELEASE_POLICIES=(gamma) -# POISSON_ARRIVAL_RATES=(0.2 0.5 1 2) -POISSON_ARRIVAL_RATES=(0.01 0.02 0.05) -GAMMA_COEFFICIENTS=(1 2 4) #cv2 -DAG_AWARENESS=(0 1) # False True - -ERDOS_SIMULATOR_DIR="." # Change this to the directory where the simulator is located. -MIN_DEADLINE_VARIANCE=10 -NUM_INVOCATIONS=100 -SCHEDULER_LOG_TIMES=10 -SCHEDULER_RUNTIME=0 -LOG_LEVEL=info -REPLAY_TRACE=alibaba -WORKLOAD_PROFILE_PATH=./traces/alibaba-cluster-trace-v2018/alibaba_set_0_6600_dags.pkl -EXECUTION_MODE=replay -WORKER_CONFIG=alibaba_cluster - -PARALLEL_FACTOR=2 -# Move to the simulator directory. -if [[ -z ${ERDOS_SIMULATOR_DIR} ]]; then - echo "[x] ERRROR: ERDOS_SIMULATOR_DIR is not set" - exit 1 -fi -cd ${ERDOS_SIMULATOR_DIR} - -LOG_DIR=$1 -if [[ -z ${LOG_DIR} ]]; then - echo "[x] ERROR: Please provide a directory to output results to as the first argument." - exit 2 -fi - -execute_experiment () { - LOG_DIR=$1 - LOG_BASE=$2 - echo "[x] Initiating the execution of ${LOG_BASE}" - if [ ! -f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then -# --log_dir=${LOG_DIR}/${LOG_BASE} -# --scheduler_log_to_file - MYCONF="\ ---log_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.log ---csv_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv ---log_level=${LOG_LEVEL} ---execution_mode=${EXECUTION_MODE} ---replay_trace=${REPLAY_TRACE} ---max_deadline_variance=${MAX_DEADLINE_VARIANCE} ---min_deadline_variance=${MIN_DEADLINE_VARIANCE} ---workload_profile_path=${WORKLOAD_PROFILE_PATH} ---override_num_invocations=${NUM_INVOCATIONS} ---randomize_start_time_max=100 ---worker_profile_path=profiles/workers/${WORKER_CONFIG}.yaml ---scheduler_runtime=${SCHEDULER_RUNTIME} ---override_release_policy=${RELEASE_POLICY} ---scheduler=${SCHEDULER} -" - if [[ ${RELEASE_POLICY} == fixed ]]; then - MYCONF+="--override_arrival_period=10 -" - else - MYCONF+="--override_poisson_arrival_rate=${POISSON_ARRIVAL_RATE} -" - if [[ ${RELEASE_POLICY} == gamma ]]; then - MYCONF+="--override_gamma_coefficient=${GAMMA_COEFFICIENT} -" - fi - fi - - if [[ ${DAG_AWARE} == 1 ]]; then - MYCONF+="--release_taskgraphs -" - fi - - if [[ ${SCHEDULER} != EDF ]]; then - MYCONF+=" ---enforce_deadlines ---retract_schedules ---drop_skipped_tasks ---scheduler_log_times=${SCHEDULER_LOG_TIMES} ---scheduler_time_discretization=${SCHEDULER_TIME_DISCRETIZATION} - " - fi - echo "${MYCONF}" > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf - if ! python3 main.py --flagfile=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.output; then - echo "[x] Failed in the execution of ${LOG_BASE}. Exiting." - exit 3 - fi - else - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - fi - echo "[x] Finished execution of ${LOG_BASE}." 
-} - -for MAX_DEADLINE_VARIANCE in ${MAX_DEADLINE_VARIANCES[@]}; do - for SCHEDULER in ${SCHEDULERS[@]}; do - for RELEASE_POLICY in ${RELEASE_POLICIES[@]}; do - for POISSON_ARRIVAL_RATE in ${POISSON_ARRIVAL_RATES[@]}; do - for GAMMA_COEFFICIENT in ${GAMMA_COEFFICIENTS[@]}; do - for SCHEDULER_TIME_DISCRETIZATION in ${SCHEDULER_TIME_DISCRETIZATIONS[@]}; do - for DAG_AWARE in ${DAG_AWARENESS[@]}; do - if [[ ${SCHEDULER} == EDF && ( "${SCHEDULER_TIME_DISCRETIZATION}" -ne "${SCHEDULER_TIME_DISCRETIZATIONS[0]}" || "${DAG_AWARE}" -ne "${DAG_AWARENESS[0]}" ) ]]; then - continue - fi - - LOG_BASE=${REPLAY_TRACE}_scheduler_${SCHEDULER}_release_policy_${RELEASE_POLICY}_max_deadline_var_${MAX_DEADLINE_VARIANCE}_dag_aware_${DAG_AWARE}_poisson_arrival_rate_${POISSON_ARRIVAL_RATE}_gamma_coefficient_${GAMMA_COEFFICIENT} - - if [[ ${SCHEDULER} != EDF ]]; then - LOG_BASE+="_scheduler_discretization_${SCHEDULER_TIME_DISCRETIZATION}" - fi - - mkdir -p ${LOG_DIR}/${LOG_BASE} - execute_experiment ${LOG_DIR} ${LOG_BASE} & - sleep 1 - if [[ $(jobs -r -p | wc -l) -ge $PARALLEL_FACTOR ]]; then - echo "[x] Waiting for a job to terminate because $PARALLEL_FACTOR jobs are running." - wait -n - fi - done - done - done - done - done - done -done -wait -echo "[x] Finished executing all experiments." \ No newline at end of file diff --git a/scripts/run_alibaba_experiments_osdi_12_1_noon_jayne.sh b/scripts/run_alibaba_experiments_osdi_12_1_noon_jayne.sh deleted file mode 100755 index 91c72a7d..00000000 --- a/scripts/run_alibaba_experiments_osdi_12_1_noon_jayne.sh +++ /dev/null @@ -1,134 +0,0 @@ -#!/bin/bash -# $1 directory where to save the logs. - -# Scheduler runtimes in us.TetriSched -SCHEDULERS=(EDF TetriSched) -# MAX_DEADLINE_VARIANCES=(15 25 50 100 200) -# MAX_DEADLINE_VARIANCES=(200 400 800) -MAX_DEADLINE_VARIANCES=(100 200 400) -SCHEDULER_TIME_DISCRETIZATIONS=(1 10 20) -# RELEASE_POLICIES=(fixed poisson gamma) -RELEASE_POLICIES=(gamma) -# POISSON_ARRIVAL_RATES=(0.2 0.5 1 2) -POISSON_ARRIVAL_RATES=(0.01 0.02 0.05) -GAMMA_COEFFICIENTS=(6 8 10) #cv2 -DAG_AWARENESS=(0 1) # False True - -ERDOS_SIMULATOR_DIR="." # Change this to the directory where the simulator is located. -MIN_DEADLINE_VARIANCE=10 -NUM_INVOCATIONS=100 -SCHEDULER_LOG_TIMES=10 -SCHEDULER_RUNTIME=0 -LOG_LEVEL=info -REPLAY_TRACE=alibaba -WORKLOAD_PROFILE_PATH=./traces/alibaba-cluster-trace-v2018/alibaba_set_0_6600_dags.pkl -EXECUTION_MODE=replay -WORKER_CONFIG=alibaba_cluster - -PARALLEL_FACTOR=2 -# Move to the simulator directory. -if [[ -z ${ERDOS_SIMULATOR_DIR} ]]; then - echo "[x] ERRROR: ERDOS_SIMULATOR_DIR is not set" - exit 1 -fi -cd ${ERDOS_SIMULATOR_DIR} - -LOG_DIR=$1 -if [[ -z ${LOG_DIR} ]]; then - echo "[x] ERROR: Please provide a directory to output results to as the first argument." - exit 2 -fi - -execute_experiment () { - LOG_DIR=$1 - LOG_BASE=$2 - echo "[x] Initiating the execution of ${LOG_BASE}" - if [ ! 
-f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then -# --log_dir=${LOG_DIR}/${LOG_BASE} -# --scheduler_log_to_file - MYCONF="\ ---log_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.log ---csv_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv ---log_level=${LOG_LEVEL} ---execution_mode=${EXECUTION_MODE} ---replay_trace=${REPLAY_TRACE} ---max_deadline_variance=${MAX_DEADLINE_VARIANCE} ---min_deadline_variance=${MIN_DEADLINE_VARIANCE} ---workload_profile_path=${WORKLOAD_PROFILE_PATH} ---override_num_invocations=${NUM_INVOCATIONS} ---randomize_start_time_max=100 ---worker_profile_path=profiles/workers/${WORKER_CONFIG}.yaml ---scheduler_runtime=${SCHEDULER_RUNTIME} ---override_release_policy=${RELEASE_POLICY} ---scheduler=${SCHEDULER} -" - if [[ ${RELEASE_POLICY} == fixed ]]; then - MYCONF+="--override_arrival_period=10 -" - else - MYCONF+="--override_poisson_arrival_rate=${POISSON_ARRIVAL_RATE} -" - if [[ ${RELEASE_POLICY} == gamma ]]; then - MYCONF+="--override_gamma_coefficient=${GAMMA_COEFFICIENT} -" - fi - fi - - if [[ ${DAG_AWARE} == 1 ]]; then - MYCONF+="--release_taskgraphs -" - fi - - if [[ ${SCHEDULER} != EDF ]]; then - MYCONF+=" ---enforce_deadlines ---retract_schedules ---drop_skipped_tasks ---scheduler_log_times=${SCHEDULER_LOG_TIMES} ---scheduler_time_discretization=${SCHEDULER_TIME_DISCRETIZATION} - " - fi - echo "${MYCONF}" > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf - if ! python3 main.py --flagfile=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.output; then - echo "[x] Failed in the execution of ${LOG_BASE}. Exiting." - exit 3 - fi - else - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - fi - echo "[x] Finished execution of ${LOG_BASE}." -} - -for MAX_DEADLINE_VARIANCE in ${MAX_DEADLINE_VARIANCES[@]}; do - for SCHEDULER in ${SCHEDULERS[@]}; do - for RELEASE_POLICY in ${RELEASE_POLICIES[@]}; do - for POISSON_ARRIVAL_RATE in ${POISSON_ARRIVAL_RATES[@]}; do - for GAMMA_COEFFICIENT in ${GAMMA_COEFFICIENTS[@]}; do - for SCHEDULER_TIME_DISCRETIZATION in ${SCHEDULER_TIME_DISCRETIZATIONS[@]}; do - for DAG_AWARE in ${DAG_AWARENESS[@]}; do - if [[ ${SCHEDULER} == EDF && ( "${SCHEDULER_TIME_DISCRETIZATION}" -ne "${SCHEDULER_TIME_DISCRETIZATIONS[0]}" || "${DAG_AWARE}" -ne "${DAG_AWARENESS[0]}" ) ]]; then - continue - fi - - LOG_BASE=${REPLAY_TRACE}_scheduler_${SCHEDULER}_release_policy_${RELEASE_POLICY}_max_deadline_var_${MAX_DEADLINE_VARIANCE}_dag_aware_${DAG_AWARE}_poisson_arrival_rate_${POISSON_ARRIVAL_RATE}_gamma_coefficient_${GAMMA_COEFFICIENT} - - if [[ ${SCHEDULER} != EDF ]]; then - LOG_BASE+="_scheduler_discretization_${SCHEDULER_TIME_DISCRETIZATION}" - fi - - mkdir -p ${LOG_DIR}/${LOG_BASE} - execute_experiment ${LOG_DIR} ${LOG_BASE} & - sleep 1 - if [[ $(jobs -r -p | wc -l) -ge $PARALLEL_FACTOR ]]; then - echo "[x] Waiting for a job to terminate because $PARALLEL_FACTOR jobs are running." - wait -n - fi - done - done - done - done - done - done -done -wait -echo "[x] Finished executing all experiments." \ No newline at end of file diff --git a/scripts/run_alibaba_experiments_osdi_12_2_afternoon.sh b/scripts/run_alibaba_experiments_osdi_12_2_afternoon.sh deleted file mode 100755 index 5df3b06b..00000000 --- a/scripts/run_alibaba_experiments_osdi_12_2_afternoon.sh +++ /dev/null @@ -1,136 +0,0 @@ -#!/bin/bash -# $1 directory where to save the logs. 
- -# Scheduler runtimes in us.TetriSched -SCHEDULERS=(TetriSched) -# MAX_DEADLINE_VARIANCES=(15 25 50 100 200) -# MAX_DEADLINE_VARIANCES=(200 400 800) -MAX_DEADLINE_VARIANCES=(25 50 100 200) # Keep deadline tight. Don't change this -SCHEDULER_TIME_DISCRETIZATIONS=(1) -# RELEASE_POLICIES=(fixed poisson gamma) -RELEASE_POLICIES=(gamma) -# POISSON_ARRIVAL_RATES=(0.2 0.5 1 2) -POISSON_ARRIVAL_RATES=(0.5 1 2 4) # Tune this -GAMMA_COEFFICIENTS=(1 2 4) #cv2 don't change this -DAG_AWARENESS=(1) # False True -TASK_CPU_DIVISOR=25 - -ERDOS_SIMULATOR_DIR="." # Change this to the directory where the simulator is located. -MIN_DEADLINE_VARIANCE=10 -NUM_INVOCATIONS=50 -SCHEDULER_LOG_TIMES=10 -SCHEDULER_RUNTIME=0 -LOG_LEVEL=info -REPLAY_TRACE=alibaba -WORKLOAD_PROFILE_PATH=./traces/alibaba-cluster-trace-v2018/alibaba_set_0_6600_dags.pkl -EXECUTION_MODE=replay -WORKER_CONFIG=alibaba_cluster - -PARALLEL_FACTOR=2 -# Move to the simulator directory. -if [[ -z ${ERDOS_SIMULATOR_DIR} ]]; then - echo "[x] ERRROR: ERDOS_SIMULATOR_DIR is not set" - exit 1 -fi -cd ${ERDOS_SIMULATOR_DIR} - -LOG_DIR=$1 -if [[ -z ${LOG_DIR} ]]; then - echo "[x] ERROR: Please provide a directory to output results to as the first argument." - exit 2 -fi - -execute_experiment () { - LOG_DIR=$1 - LOG_BASE=$2 - echo "[x] Initiating the execution of ${LOG_BASE}" - if [ ! -f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then -# --log_dir=${LOG_DIR}/${LOG_BASE} -# --scheduler_log_to_file - MYCONF="\ ---log_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.log ---csv_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv ---log_level=${LOG_LEVEL} ---execution_mode=${EXECUTION_MODE} ---replay_trace=${REPLAY_TRACE} ---max_deadline_variance=${MAX_DEADLINE_VARIANCE} ---min_deadline_variance=${MIN_DEADLINE_VARIANCE} ---workload_profile_path=${WORKLOAD_PROFILE_PATH} ---override_num_invocations=${NUM_INVOCATIONS} ---randomize_start_time_max=100 ---worker_profile_path=profiles/workers/${WORKER_CONFIG}.yaml ---scheduler_runtime=${SCHEDULER_RUNTIME} ---override_release_policy=${RELEASE_POLICY} ---scheduler=${SCHEDULER} ---alibaba_loader_task_cpu_divisor=${TASK_CPU_DIVISOR} -" - if [[ ${RELEASE_POLICY} == fixed ]]; then - MYCONF+="--override_arrival_period=10 -" - else - MYCONF+="--override_poisson_arrival_rate=${POISSON_ARRIVAL_RATE} -" - if [[ ${RELEASE_POLICY} == gamma ]]; then - MYCONF+="--override_gamma_coefficient=${GAMMA_COEFFICIENT} -" - fi - fi - - if [[ ${DAG_AWARE} == 1 ]]; then - MYCONF+="--release_taskgraphs -" - fi - - if [[ ${SCHEDULER} != EDF ]]; then - MYCONF+=" ---enforce_deadlines ---retract_schedules ---drop_skipped_tasks ---scheduler_log_times=${SCHEDULER_LOG_TIMES} ---scheduler_time_discretization=${SCHEDULER_TIME_DISCRETIZATION} - " - fi - echo "${MYCONF}" > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf - if ! time python3 main.py --flagfile=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.output; then - echo "[x] Failed in the execution of ${LOG_BASE}. Exiting." - exit 3 - fi - else - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - fi - echo "[x] Finished execution of ${LOG_BASE}." 
-} - -for MAX_DEADLINE_VARIANCE in ${MAX_DEADLINE_VARIANCES[@]}; do - for SCHEDULER in ${SCHEDULERS[@]}; do - for RELEASE_POLICY in ${RELEASE_POLICIES[@]}; do - for POISSON_ARRIVAL_RATE in ${POISSON_ARRIVAL_RATES[@]}; do - for GAMMA_COEFFICIENT in ${GAMMA_COEFFICIENTS[@]}; do - for SCHEDULER_TIME_DISCRETIZATION in ${SCHEDULER_TIME_DISCRETIZATIONS[@]}; do - for DAG_AWARE in ${DAG_AWARENESS[@]}; do - if [[ ${SCHEDULER} == EDF && ( "${SCHEDULER_TIME_DISCRETIZATION}" -ne "${SCHEDULER_TIME_DISCRETIZATIONS[0]}" || "${DAG_AWARE}" -ne "${DAG_AWARENESS[0]}" ) ]]; then - continue - fi - - LOG_BASE=${REPLAY_TRACE}_scheduler_${SCHEDULER}_release_policy_${RELEASE_POLICY}_max_deadline_var_${MAX_DEADLINE_VARIANCE}_dag_aware_${DAG_AWARE}_poisson_arrival_rate_${POISSON_ARRIVAL_RATE}_gamma_coefficient_${GAMMA_COEFFICIENT} - - if [[ ${SCHEDULER} != EDF ]]; then - LOG_BASE+="_scheduler_discretization_${SCHEDULER_TIME_DISCRETIZATION}" - fi - - mkdir -p ${LOG_DIR}/${LOG_BASE} - execute_experiment ${LOG_DIR} ${LOG_BASE} & - sleep 0.5 - if [[ $(jobs -r -p | wc -l) -ge $PARALLEL_FACTOR ]]; then - echo "[x] Waiting for a job to terminate because $PARALLEL_FACTOR jobs are running." - wait -n - fi - done - done - done - done - done - done -done -wait -echo "[x] Finished executing all experiments." \ No newline at end of file diff --git a/scripts/run_alibaba_experiments_osdi_12_2_afternoon_jayne.sh b/scripts/run_alibaba_experiments_osdi_12_2_afternoon_jayne.sh deleted file mode 100755 index ec170f75..00000000 --- a/scripts/run_alibaba_experiments_osdi_12_2_afternoon_jayne.sh +++ /dev/null @@ -1,136 +0,0 @@ -#!/bin/bash -# $1 directory where to save the logs. - -# Scheduler runtimes in us.TetriSched -SCHEDULERS=(TetriSched) -# MAX_DEADLINE_VARIANCES=(15 25 50 100 200) -# MAX_DEADLINE_VARIANCES=(200 400 800) -MAX_DEADLINE_VARIANCES=(25 50 100 200) # Keep deadline tight. Don't change this -SCHEDULER_TIME_DISCRETIZATIONS=(1) -# RELEASE_POLICIES=(fixed poisson gamma) -RELEASE_POLICIES=(gamma) -# POISSON_ARRIVAL_RATES=(0.2 0.5 1 2) -POISSON_ARRIVAL_RATES=(0.05 0.1 0.25) # Tune this -GAMMA_COEFFICIENTS=(1 2 4) #cv2 don't change this -DAG_AWARENESS=(1) # False True -TASK_CPU_DIVISOR=25 - -ERDOS_SIMULATOR_DIR="." # Change this to the directory where the simulator is located. -MIN_DEADLINE_VARIANCE=10 -NUM_INVOCATIONS=50 -SCHEDULER_LOG_TIMES=10 -SCHEDULER_RUNTIME=0 -LOG_LEVEL=info -REPLAY_TRACE=alibaba -WORKLOAD_PROFILE_PATH=./traces/alibaba-cluster-trace-v2018/alibaba_set_0_6600_dags.pkl -EXECUTION_MODE=replay -WORKER_CONFIG=alibaba_cluster - -PARALLEL_FACTOR=2 -# Move to the simulator directory. -if [[ -z ${ERDOS_SIMULATOR_DIR} ]]; then - echo "[x] ERRROR: ERDOS_SIMULATOR_DIR is not set" - exit 1 -fi -cd ${ERDOS_SIMULATOR_DIR} - -LOG_DIR=$1 -if [[ -z ${LOG_DIR} ]]; then - echo "[x] ERROR: Please provide a directory to output results to as the first argument." - exit 2 -fi - -execute_experiment () { - LOG_DIR=$1 - LOG_BASE=$2 - echo "[x] Initiating the execution of ${LOG_BASE}" - if [ ! 
-f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then -# --log_dir=${LOG_DIR}/${LOG_BASE} -# --scheduler_log_to_file - MYCONF="\ ---log_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.log ---csv_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv ---log_level=${LOG_LEVEL} ---execution_mode=${EXECUTION_MODE} ---replay_trace=${REPLAY_TRACE} ---max_deadline_variance=${MAX_DEADLINE_VARIANCE} ---min_deadline_variance=${MIN_DEADLINE_VARIANCE} ---workload_profile_path=${WORKLOAD_PROFILE_PATH} ---override_num_invocations=${NUM_INVOCATIONS} ---randomize_start_time_max=100 ---worker_profile_path=profiles/workers/${WORKER_CONFIG}.yaml ---scheduler_runtime=${SCHEDULER_RUNTIME} ---override_release_policy=${RELEASE_POLICY} ---scheduler=${SCHEDULER} ---alibaba_loader_task_cpu_divisor=${TASK_CPU_DIVISOR} -" - if [[ ${RELEASE_POLICY} == fixed ]]; then - MYCONF+="--override_arrival_period=10 -" - else - MYCONF+="--override_poisson_arrival_rate=${POISSON_ARRIVAL_RATE} -" - if [[ ${RELEASE_POLICY} == gamma ]]; then - MYCONF+="--override_gamma_coefficient=${GAMMA_COEFFICIENT} -" - fi - fi - - if [[ ${DAG_AWARE} == 1 ]]; then - MYCONF+="--release_taskgraphs -" - fi - - if [[ ${SCHEDULER} != EDF ]]; then - MYCONF+=" ---enforce_deadlines ---retract_schedules ---drop_skipped_tasks ---scheduler_log_times=${SCHEDULER_LOG_TIMES} ---scheduler_time_discretization=${SCHEDULER_TIME_DISCRETIZATION} - " - fi - echo "${MYCONF}" > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf - if ! time python3 main.py --flagfile=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.output; then - echo "[x] Failed in the execution of ${LOG_BASE}. Exiting." - exit 3 - fi - else - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - fi - echo "[x] Finished execution of ${LOG_BASE}." -} - -for MAX_DEADLINE_VARIANCE in ${MAX_DEADLINE_VARIANCES[@]}; do - for SCHEDULER in ${SCHEDULERS[@]}; do - for RELEASE_POLICY in ${RELEASE_POLICIES[@]}; do - for POISSON_ARRIVAL_RATE in ${POISSON_ARRIVAL_RATES[@]}; do - for GAMMA_COEFFICIENT in ${GAMMA_COEFFICIENTS[@]}; do - for SCHEDULER_TIME_DISCRETIZATION in ${SCHEDULER_TIME_DISCRETIZATIONS[@]}; do - for DAG_AWARE in ${DAG_AWARENESS[@]}; do - if [[ ${SCHEDULER} == EDF && ( "${SCHEDULER_TIME_DISCRETIZATION}" -ne "${SCHEDULER_TIME_DISCRETIZATIONS[0]}" || "${DAG_AWARE}" -ne "${DAG_AWARENESS[0]}" ) ]]; then - continue - fi - - LOG_BASE=${REPLAY_TRACE}_scheduler_${SCHEDULER}_release_policy_${RELEASE_POLICY}_max_deadline_var_${MAX_DEADLINE_VARIANCE}_dag_aware_${DAG_AWARE}_poisson_arrival_rate_${POISSON_ARRIVAL_RATE}_gamma_coefficient_${GAMMA_COEFFICIENT} - - if [[ ${SCHEDULER} != EDF ]]; then - LOG_BASE+="_scheduler_discretization_${SCHEDULER_TIME_DISCRETIZATION}" - fi - - mkdir -p ${LOG_DIR}/${LOG_BASE} - execute_experiment ${LOG_DIR} ${LOG_BASE} & - sleep 0.5 - if [[ $(jobs -r -p | wc -l) -ge $PARALLEL_FACTOR ]]; then - echo "[x] Waiting for a job to terminate because $PARALLEL_FACTOR jobs are running." - wait -n - fi - done - done - done - done - done - done -done -wait -echo "[x] Finished executing all experiments." \ No newline at end of file diff --git a/scripts/run_alibaba_experiments_osdi_12_2_morning.sh b/scripts/run_alibaba_experiments_osdi_12_2_morning.sh deleted file mode 100755 index a314f781..00000000 --- a/scripts/run_alibaba_experiments_osdi_12_2_morning.sh +++ /dev/null @@ -1,136 +0,0 @@ -#!/bin/bash -# $1 directory where to save the logs. 
- -# Scheduler runtimes in us.TetriSched -SCHEDULERS=(EDF TetriSched) -# MAX_DEADLINE_VARIANCES=(15 25 50 100 200) -# MAX_DEADLINE_VARIANCES=(200 400 800) -MAX_DEADLINE_VARIANCES=(250 300 400) -SCHEDULER_TIME_DISCRETIZATIONS=(1 10 20) -# RELEASE_POLICIES=(fixed poisson gamma) -RELEASE_POLICIES=(gamma) -# POISSON_ARRIVAL_RATES=(0.2 0.5 1 2) -POISSON_ARRIVAL_RATES=(0.005 0.0075) -GAMMA_COEFFICIENTS=(8 9 10) #cv2 -DAG_AWARENESS=(0 1) # False True -TASK_CPU_DIVISOR=25 - -ERDOS_SIMULATOR_DIR="." # Change this to the directory where the simulator is located. -MIN_DEADLINE_VARIANCE=10 -NUM_INVOCATIONS=300 -SCHEDULER_LOG_TIMES=10 -SCHEDULER_RUNTIME=0 -LOG_LEVEL=info -REPLAY_TRACE=alibaba -WORKLOAD_PROFILE_PATH=./traces/alibaba-cluster-trace-v2018/alibaba_set_0_6600_dags.pkl -EXECUTION_MODE=replay -WORKER_CONFIG=alibaba_cluster - -PARALLEL_FACTOR=2 -# Move to the simulator directory. -if [[ -z ${ERDOS_SIMULATOR_DIR} ]]; then - echo "[x] ERRROR: ERDOS_SIMULATOR_DIR is not set" - exit 1 -fi -cd ${ERDOS_SIMULATOR_DIR} - -LOG_DIR=$1 -if [[ -z ${LOG_DIR} ]]; then - echo "[x] ERROR: Please provide a directory to output results to as the first argument." - exit 2 -fi - -execute_experiment () { - LOG_DIR=$1 - LOG_BASE=$2 - echo "[x] Initiating the execution of ${LOG_BASE}" - if [ ! -f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then -# --log_dir=${LOG_DIR}/${LOG_BASE} -# --scheduler_log_to_file - MYCONF="\ ---log_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.log ---csv_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv ---log_level=${LOG_LEVEL} ---execution_mode=${EXECUTION_MODE} ---replay_trace=${REPLAY_TRACE} ---max_deadline_variance=${MAX_DEADLINE_VARIANCE} ---min_deadline_variance=${MIN_DEADLINE_VARIANCE} ---workload_profile_path=${WORKLOAD_PROFILE_PATH} ---override_num_invocations=${NUM_INVOCATIONS} ---randomize_start_time_max=100 ---worker_profile_path=profiles/workers/${WORKER_CONFIG}.yaml ---scheduler_runtime=${SCHEDULER_RUNTIME} ---override_release_policy=${RELEASE_POLICY} ---scheduler=${SCHEDULER} ---alibaba_loader_task_cpu_divisor=${TASK_CPU_DIVISOR} -" - if [[ ${RELEASE_POLICY} == fixed ]]; then - MYCONF+="--override_arrival_period=10 -" - else - MYCONF+="--override_poisson_arrival_rate=${POISSON_ARRIVAL_RATE} -" - if [[ ${RELEASE_POLICY} == gamma ]]; then - MYCONF+="--override_gamma_coefficient=${GAMMA_COEFFICIENT} -" - fi - fi - - if [[ ${DAG_AWARE} == 1 ]]; then - MYCONF+="--release_taskgraphs -" - fi - - if [[ ${SCHEDULER} != EDF ]]; then - MYCONF+=" ---enforce_deadlines ---retract_schedules ---drop_skipped_tasks ---scheduler_log_times=${SCHEDULER_LOG_TIMES} ---scheduler_time_discretization=${SCHEDULER_TIME_DISCRETIZATION} - " - fi - echo "${MYCONF}" > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf - if ! time python3 main.py --flagfile=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.output; then - echo "[x] Failed in the execution of ${LOG_BASE}. Exiting." - exit 3 - fi - else - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - fi - echo "[x] Finished execution of ${LOG_BASE}." 
-} - -for MAX_DEADLINE_VARIANCE in ${MAX_DEADLINE_VARIANCES[@]}; do - for SCHEDULER in ${SCHEDULERS[@]}; do - for RELEASE_POLICY in ${RELEASE_POLICIES[@]}; do - for POISSON_ARRIVAL_RATE in ${POISSON_ARRIVAL_RATES[@]}; do - for GAMMA_COEFFICIENT in ${GAMMA_COEFFICIENTS[@]}; do - for SCHEDULER_TIME_DISCRETIZATION in ${SCHEDULER_TIME_DISCRETIZATIONS[@]}; do - for DAG_AWARE in ${DAG_AWARENESS[@]}; do - if [[ ${SCHEDULER} == EDF && ( "${SCHEDULER_TIME_DISCRETIZATION}" -ne "${SCHEDULER_TIME_DISCRETIZATIONS[0]}" || "${DAG_AWARE}" -ne "${DAG_AWARENESS[0]}" ) ]]; then - continue - fi - - LOG_BASE=${REPLAY_TRACE}_scheduler_${SCHEDULER}_release_policy_${RELEASE_POLICY}_max_deadline_var_${MAX_DEADLINE_VARIANCE}_dag_aware_${DAG_AWARE}_poisson_arrival_rate_${POISSON_ARRIVAL_RATE}_gamma_coefficient_${GAMMA_COEFFICIENT} - - if [[ ${SCHEDULER} != EDF ]]; then - LOG_BASE+="_scheduler_discretization_${SCHEDULER_TIME_DISCRETIZATION}" - fi - - mkdir -p ${LOG_DIR}/${LOG_BASE} - execute_experiment ${LOG_DIR} ${LOG_BASE} & - sleep 1 - if [[ $(jobs -r -p | wc -l) -ge $PARALLEL_FACTOR ]]; then - echo "[x] Waiting for a job to terminate because $PARALLEL_FACTOR jobs are running." - wait -n - fi - done - done - done - done - done - done -done -wait -echo "[x] Finished executing all experiments." \ No newline at end of file diff --git a/scripts/run_alibaba_experiments_osdi_12_2_morning_jayne.sh b/scripts/run_alibaba_experiments_osdi_12_2_morning_jayne.sh deleted file mode 100755 index af6bc1d1..00000000 --- a/scripts/run_alibaba_experiments_osdi_12_2_morning_jayne.sh +++ /dev/null @@ -1,136 +0,0 @@ -#!/bin/bash -# $1 directory where to save the logs. - -# Scheduler runtimes in us.TetriSched -SCHEDULERS=(EDF TetriSched) -# MAX_DEADLINE_VARIANCES=(15 25 50 100 200) -# MAX_DEADLINE_VARIANCES=(200 400 800) -MAX_DEADLINE_VARIANCES=(200 300 400) -SCHEDULER_TIME_DISCRETIZATIONS=(1 10 20) -# RELEASE_POLICIES=(fixed poisson gamma) -RELEASE_POLICIES=(gamma) -# POISSON_ARRIVAL_RATES=(0.2 0.5 1 2) -POISSON_ARRIVAL_RATES=(0.01 0.015 0.02) -GAMMA_COEFFICIENTS=(1 2 4) #cv2 -DAG_AWARENESS=(0 1) # False True -TASK_CPU_DIVISOR=20 - -ERDOS_SIMULATOR_DIR="." # Change this to the directory where the simulator is located. -MIN_DEADLINE_VARIANCE=10 -NUM_INVOCATIONS=200 -SCHEDULER_LOG_TIMES=10 -SCHEDULER_RUNTIME=0 -LOG_LEVEL=info -REPLAY_TRACE=alibaba -WORKLOAD_PROFILE_PATH=./traces/alibaba-cluster-trace-v2018/alibaba_set_0_6600_dags.pkl -EXECUTION_MODE=replay -WORKER_CONFIG=alibaba_cluster - -PARALLEL_FACTOR=2 -# Move to the simulator directory. -if [[ -z ${ERDOS_SIMULATOR_DIR} ]]; then - echo "[x] ERRROR: ERDOS_SIMULATOR_DIR is not set" - exit 1 -fi -cd ${ERDOS_SIMULATOR_DIR} - -LOG_DIR=$1 -if [[ -z ${LOG_DIR} ]]; then - echo "[x] ERROR: Please provide a directory to output results to as the first argument." - exit 2 -fi - -execute_experiment () { - LOG_DIR=$1 - LOG_BASE=$2 - echo "[x] Initiating the execution of ${LOG_BASE}" - if [ ! 
-f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then -# --log_dir=${LOG_DIR}/${LOG_BASE} -# --scheduler_log_to_file - MYCONF="\ ---log_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.log ---csv_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv ---log_level=${LOG_LEVEL} ---execution_mode=${EXECUTION_MODE} ---replay_trace=${REPLAY_TRACE} ---max_deadline_variance=${MAX_DEADLINE_VARIANCE} ---min_deadline_variance=${MIN_DEADLINE_VARIANCE} ---workload_profile_path=${WORKLOAD_PROFILE_PATH} ---override_num_invocations=${NUM_INVOCATIONS} ---randomize_start_time_max=100 ---worker_profile_path=profiles/workers/${WORKER_CONFIG}.yaml ---scheduler_runtime=${SCHEDULER_RUNTIME} ---override_release_policy=${RELEASE_POLICY} ---scheduler=${SCHEDULER} ---alibaba_loader_task_cpu_divisor=${TASK_CPU_DIVISOR} -" - if [[ ${RELEASE_POLICY} == fixed ]]; then - MYCONF+="--override_arrival_period=10 -" - else - MYCONF+="--override_poisson_arrival_rate=${POISSON_ARRIVAL_RATE} -" - if [[ ${RELEASE_POLICY} == gamma ]]; then - MYCONF+="--override_gamma_coefficient=${GAMMA_COEFFICIENT} -" - fi - fi - - if [[ ${DAG_AWARE} == 1 ]]; then - MYCONF+="--release_taskgraphs -" - fi - - if [[ ${SCHEDULER} != EDF ]]; then - MYCONF+=" ---enforce_deadlines ---retract_schedules ---drop_skipped_tasks ---scheduler_log_times=${SCHEDULER_LOG_TIMES} ---scheduler_time_discretization=${SCHEDULER_TIME_DISCRETIZATION} - " - fi - echo "${MYCONF}" > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf - if ! time python3 main.py --flagfile=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.output; then - echo "[x] Failed in the execution of ${LOG_BASE}. Exiting." - exit 3 - fi - else - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - fi - echo "[x] Finished execution of ${LOG_BASE}." -} - -for MAX_DEADLINE_VARIANCE in ${MAX_DEADLINE_VARIANCES[@]}; do - for SCHEDULER in ${SCHEDULERS[@]}; do - for RELEASE_POLICY in ${RELEASE_POLICIES[@]}; do - for POISSON_ARRIVAL_RATE in ${POISSON_ARRIVAL_RATES[@]}; do - for GAMMA_COEFFICIENT in ${GAMMA_COEFFICIENTS[@]}; do - for SCHEDULER_TIME_DISCRETIZATION in ${SCHEDULER_TIME_DISCRETIZATIONS[@]}; do - for DAG_AWARE in ${DAG_AWARENESS[@]}; do - if [[ ${SCHEDULER} == EDF && ( "${SCHEDULER_TIME_DISCRETIZATION}" -ne "${SCHEDULER_TIME_DISCRETIZATIONS[0]}" || "${DAG_AWARE}" -ne "${DAG_AWARENESS[0]}" ) ]]; then - continue - fi - - LOG_BASE=${REPLAY_TRACE}_scheduler_${SCHEDULER}_release_policy_${RELEASE_POLICY}_max_deadline_var_${MAX_DEADLINE_VARIANCE}_dag_aware_${DAG_AWARE}_poisson_arrival_rate_${POISSON_ARRIVAL_RATE}_gamma_coefficient_${GAMMA_COEFFICIENT} - - if [[ ${SCHEDULER} != EDF ]]; then - LOG_BASE+="_scheduler_discretization_${SCHEDULER_TIME_DISCRETIZATION}" - fi - - mkdir -p ${LOG_DIR}/${LOG_BASE} - execute_experiment ${LOG_DIR} ${LOG_BASE} & - # sleep 0.5 - if [[ $(jobs -r -p | wc -l) -ge $PARALLEL_FACTOR ]]; then - echo "[x] Waiting for a job to terminate because $PARALLEL_FACTOR jobs are running." - wait -n - fi - done - done - done - done - done - done -done -wait -echo "[x] Finished executing all experiments." \ No newline at end of file diff --git a/scripts/run_alibaba_experiments_osdi_12_2_night.sh b/scripts/run_alibaba_experiments_osdi_12_2_night.sh deleted file mode 100755 index bc36ae47..00000000 --- a/scripts/run_alibaba_experiments_osdi_12_2_night.sh +++ /dev/null @@ -1,136 +0,0 @@ -#!/bin/bash -# $1 directory where to save the logs. 
- -# Scheduler runtimes in us.TetriSched -SCHEDULERS=(EDF TetriSched) -# MAX_DEADLINE_VARIANCES=(15 25 50 100 200) -# MAX_DEADLINE_VARIANCES=(200 400 800) -MAX_DEADLINE_VARIANCES=(25 50 100 200) # Keep deadline tight. Don't change this -SCHEDULER_TIME_DISCRETIZATIONS=(1) -# RELEASE_POLICIES=(fixed poisson gamma) -RELEASE_POLICIES=(gamma) -# POISSON_ARRIVAL_RATES=(0.2 0.5 1 2) -POISSON_ARRIVAL_RATES=(0.005 0.012 0.025) # Tune this -GAMMA_COEFFICIENTS=(1 2 4) #cv2 don't change this -DAG_AWARENESS=(1) # False True -TASK_CPU_DIVISOR=25 - -ERDOS_SIMULATOR_DIR="." # Change this to the directory where the simulator is located. -MIN_DEADLINE_VARIANCE=10 -NUM_INVOCATIONS=50 -SCHEDULER_LOG_TIMES=10 -SCHEDULER_RUNTIME=0 -LOG_LEVEL=info -REPLAY_TRACE=alibaba -WORKLOAD_PROFILE_PATH=./traces/alibaba-cluster-trace-v2018/alibaba_set_0_6600_dags.pkl -EXECUTION_MODE=replay -WORKER_CONFIG=alibaba_cluster - -PARALLEL_FACTOR=2 -# Move to the simulator directory. -if [[ -z ${ERDOS_SIMULATOR_DIR} ]]; then - echo "[x] ERRROR: ERDOS_SIMULATOR_DIR is not set" - exit 1 -fi -cd ${ERDOS_SIMULATOR_DIR} - -LOG_DIR=$1 -if [[ -z ${LOG_DIR} ]]; then - echo "[x] ERROR: Please provide a directory to output results to as the first argument." - exit 2 -fi - -execute_experiment () { - LOG_DIR=$1 - LOG_BASE=$2 - echo "[x] Initiating the execution of ${LOG_BASE}" - if [ ! -f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then -# --log_dir=${LOG_DIR}/${LOG_BASE} -# --scheduler_log_to_file - MYCONF="\ ---log_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.log ---csv_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv ---log_level=${LOG_LEVEL} ---execution_mode=${EXECUTION_MODE} ---replay_trace=${REPLAY_TRACE} ---max_deadline_variance=${MAX_DEADLINE_VARIANCE} ---min_deadline_variance=${MIN_DEADLINE_VARIANCE} ---workload_profile_path=${WORKLOAD_PROFILE_PATH} ---override_num_invocations=${NUM_INVOCATIONS} ---randomize_start_time_max=100 ---worker_profile_path=profiles/workers/${WORKER_CONFIG}.yaml ---scheduler_runtime=${SCHEDULER_RUNTIME} ---override_release_policy=${RELEASE_POLICY} ---scheduler=${SCHEDULER} ---alibaba_loader_task_cpu_divisor=${TASK_CPU_DIVISOR} -" - if [[ ${RELEASE_POLICY} == fixed ]]; then - MYCONF+="--override_arrival_period=10 -" - else - MYCONF+="--override_poisson_arrival_rate=${POISSON_ARRIVAL_RATE} -" - if [[ ${RELEASE_POLICY} == gamma ]]; then - MYCONF+="--override_gamma_coefficient=${GAMMA_COEFFICIENT} -" - fi - fi - - if [[ ${DAG_AWARE} == 1 ]]; then - MYCONF+="--release_taskgraphs -" - fi - - if [[ ${SCHEDULER} != EDF ]]; then - MYCONF+=" ---enforce_deadlines ---retract_schedules ---drop_skipped_tasks ---scheduler_log_times=${SCHEDULER_LOG_TIMES} ---scheduler_time_discretization=${SCHEDULER_TIME_DISCRETIZATION} - " - fi - echo "${MYCONF}" > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf - if ! time python3 main.py --flagfile=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.output; then - echo "[x] Failed in the execution of ${LOG_BASE}. Exiting." - exit 3 - fi - else - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - fi - echo "[x] Finished execution of ${LOG_BASE}." 
-} - -for MAX_DEADLINE_VARIANCE in ${MAX_DEADLINE_VARIANCES[@]}; do - for SCHEDULER in ${SCHEDULERS[@]}; do - for RELEASE_POLICY in ${RELEASE_POLICIES[@]}; do - for POISSON_ARRIVAL_RATE in ${POISSON_ARRIVAL_RATES[@]}; do - for GAMMA_COEFFICIENT in ${GAMMA_COEFFICIENTS[@]}; do - for SCHEDULER_TIME_DISCRETIZATION in ${SCHEDULER_TIME_DISCRETIZATIONS[@]}; do - for DAG_AWARE in ${DAG_AWARENESS[@]}; do - if [[ ${SCHEDULER} == EDF && ( "${SCHEDULER_TIME_DISCRETIZATION}" -ne "${SCHEDULER_TIME_DISCRETIZATIONS[0]}" || "${DAG_AWARE}" -ne "${DAG_AWARENESS[0]}" ) ]]; then - continue - fi - - LOG_BASE=${REPLAY_TRACE}_scheduler_${SCHEDULER}_release_policy_${RELEASE_POLICY}_max_deadline_var_${MAX_DEADLINE_VARIANCE}_dag_aware_${DAG_AWARE}_poisson_arrival_rate_${POISSON_ARRIVAL_RATE}_gamma_coefficient_${GAMMA_COEFFICIENT} - - if [[ ${SCHEDULER} != EDF ]]; then - LOG_BASE+="_scheduler_discretization_${SCHEDULER_TIME_DISCRETIZATION}" - fi - - mkdir -p ${LOG_DIR}/${LOG_BASE} - execute_experiment ${LOG_DIR} ${LOG_BASE} & - sleep 0.5 - if [[ $(jobs -r -p | wc -l) -ge $PARALLEL_FACTOR ]]; then - echo "[x] Waiting for a job to terminate because $PARALLEL_FACTOR jobs are running." - wait -n - fi - done - done - done - done - done - done -done -wait -echo "[x] Finished executing all experiments." \ No newline at end of file diff --git a/scripts/run_alibaba_experiments_osdi_12_2_night_2.5.sh b/scripts/run_alibaba_experiments_osdi_12_2_night_2.5.sh deleted file mode 100755 index 14598296..00000000 --- a/scripts/run_alibaba_experiments_osdi_12_2_night_2.5.sh +++ /dev/null @@ -1,141 +0,0 @@ -#!/bin/bash -# $1 directory where to save the logs. - -# Scheduler runtimes in us.TetriSched -SCHEDULERS=(EDF) -# MAX_DEADLINE_VARIANCES=(15 25 50 100 200) -# MAX_DEADLINE_VARIANCES=(200 400 800) -MAX_DEADLINE_VARIANCES=(25 50 100 200) # Keep deadline tight. Don't change this -SCHEDULER_TIME_DISCRETIZATIONS=(1) -# RELEASE_POLICIES=(fixed poisson gamma) -RELEASE_POLICIES=(gamma) -# POISSON_ARRIVAL_RATES=(0.2 0.5 1 2) -POISSON_ARRIVAL_RATES=(0.012 0.025 0.04) # Tune this -GAMMA_COEFFICIENTS=(1 2 4) #cv2 don't change this -DAG_AWARENESS=(0) # False True -TASK_CPU_DIVISOR=25 - -ERDOS_SIMULATOR_DIR="." # Change this to the directory where the simulator is located. -MIN_DEADLINE_VARIANCE=10 -NUM_INVOCATIONS=300 -SCHEDULER_LOG_TIMES=10 -SCHEDULER_RUNTIME=0 -LOG_LEVEL=info -REPLAY_TRACE=alibaba -WORKLOAD_PROFILE_PATH=./traces/alibaba-cluster-trace-v2018/alibaba_set_0_6600_dags.pkl -EXECUTION_MODE=replay -WORKER_CONFIG=alibaba_cluster - -PARALLEL_FACTOR=2 -# Move to the simulator directory. -if [[ -z ${ERDOS_SIMULATOR_DIR} ]]; then - echo "[x] ERRROR: ERDOS_SIMULATOR_DIR is not set" - exit 1 -fi -cd ${ERDOS_SIMULATOR_DIR} - -LOG_DIR=$1 -if [[ -z ${LOG_DIR} ]]; then - echo "[x] ERROR: Please provide a directory to output results to as the first argument." - exit 2 -fi - -execute_experiment () { - LOG_DIR=$1 - LOG_BASE=$2 - echo "[x] Initiating the execution of ${LOG_BASE}" - if [ ! 
-f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then -# --log_dir=${LOG_DIR}/${LOG_BASE} -# --scheduler_log_to_file - MYCONF="\ ---log_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.log ---csv_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv ---log_level=${LOG_LEVEL} ---execution_mode=${EXECUTION_MODE} ---replay_trace=${REPLAY_TRACE} ---max_deadline_variance=${MAX_DEADLINE_VARIANCE} ---min_deadline_variance=${MIN_DEADLINE_VARIANCE} ---workload_profile_path=${WORKLOAD_PROFILE_PATH} ---override_num_invocations=${NUM_INVOCATIONS} ---randomize_start_time_max=100 ---worker_profile_path=profiles/workers/${WORKER_CONFIG}.yaml ---scheduler_runtime=${SCHEDULER_RUNTIME} ---override_release_policy=${RELEASE_POLICY} ---scheduler=${SCHEDULER} ---alibaba_loader_task_cpu_divisor=${TASK_CPU_DIVISOR} -" - if [[ ${RELEASE_POLICY} == fixed ]]; then - MYCONF+="--override_arrival_period=10 -" - else - MYCONF+="--override_poisson_arrival_rate=${POISSON_ARRIVAL_RATE} -" - if [[ ${RELEASE_POLICY} == gamma ]]; then - MYCONF+="--override_gamma_coefficient=${GAMMA_COEFFICIENT} -" - fi - fi - - if [[ ${DAG_AWARE} == 1 ]]; then - MYCONF+="--release_taskgraphs -" - fi - - if [[ ${SCHEDULER} != EDF ]]; then - MYCONF+=" ---enforce_deadlines ---retract_schedules ---drop_skipped_tasks ---scheduler_log_times=${SCHEDULER_LOG_TIMES} ---scheduler_time_discretization=${SCHEDULER_TIME_DISCRETIZATION} - " - fi - echo "${MYCONF}" > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf - if ! time python3 main.py --flagfile=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.output; then - echo "[x] Failed in the execution of ${LOG_BASE}. Exiting." - exit 3 - fi - else - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - fi - echo "[x] Finished execution of ${LOG_BASE}." -} - -for MAX_DEADLINE_VARIANCE in ${MAX_DEADLINE_VARIANCES[@]}; do - for SCHEDULER in ${SCHEDULERS[@]}; do - for RELEASE_POLICY in ${RELEASE_POLICIES[@]}; do - for POISSON_ARRIVAL_RATE in ${POISSON_ARRIVAL_RATES[@]}; do - for GAMMA_COEFFICIENT in ${GAMMA_COEFFICIENTS[@]}; do - for SCHEDULER_TIME_DISCRETIZATION in ${SCHEDULER_TIME_DISCRETIZATIONS[@]}; do - for DAG_AWARE in ${DAG_AWARENESS[@]}; do - if [[ ${SCHEDULER} == EDF && ( "${SCHEDULER_TIME_DISCRETIZATION}" -ne "${SCHEDULER_TIME_DISCRETIZATIONS[0]}" || ${DAG_AWARE} == 1 ) ]]; then - continue - fi - - LOG_BASE=${REPLAY_TRACE}_scheduler_${SCHEDULER}_release_policy_${RELEASE_POLICY}_max_deadline_var_${MAX_DEADLINE_VARIANCE}_dag_aware_${DAG_AWARE}_poisson_arrival_rate_${POISSON_ARRIVAL_RATE}_gamma_coefficient_${GAMMA_COEFFICIENT} - - if [[ ${SCHEDULER} != EDF ]]; then - LOG_BASE+="_scheduler_discretization_${SCHEDULER_TIME_DISCRETIZATION}" - fi - - if [ -f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - continue - fi - - mkdir -p ${LOG_DIR}/${LOG_BASE} - execute_experiment ${LOG_DIR} ${LOG_BASE} & - sleep 0.5 - if [[ $(jobs -r -p | wc -l) -ge $PARALLEL_FACTOR ]]; then - echo "[x] Waiting for a job to terminate because $PARALLEL_FACTOR jobs are running." - wait -n - fi - done - done - done - done - done - done -done -wait -echo "[x] Finished executing all experiments." 
\ No newline at end of file diff --git a/scripts/run_alibaba_experiments_osdi_12_2_night_2.5_heter_simontam.sh b/scripts/run_alibaba_experiments_osdi_12_2_night_2.5_heter_simontam.sh deleted file mode 100755 index d3a44e3b..00000000 --- a/scripts/run_alibaba_experiments_osdi_12_2_night_2.5_heter_simontam.sh +++ /dev/null @@ -1,149 +0,0 @@ -#!/bin/bash -# $1 directory where to save the logs. - -# Scheduler runtimes in us.TetriSched -SCHEDULERS=(EDF TetriSched) -# MAX_DEADLINE_VARIANCES=(15 25 50 100 200) -# MAX_DEADLINE_VARIANCES=(200 400 800) -MAX_DEADLINE_VARIANCES=(25 50 100 200) # Keep deadline tight. Don't change this -SCHEDULER_TIME_DISCRETIZATIONS=(1 10 20) -# RELEASE_POLICIES=(fixed poisson gamma) -RELEASE_POLICIES=(gamma) -# POISSON_ARRIVAL_RATES=(0.2 0.5 1 2) -POISSON_ARRIVAL_RATES=(0.012 0.02) # Tune this -GAMMA_COEFFICIENTS=(1 2 4) #cv2 don't change this -DAG_AWARENESS=(0 1) # False True -TASK_CPU_DIVISOR=25 - -HETEROGENEOUS_RESOURCE=1 -WORKER_CONFIG=alibaba_cluster_heterogeneous - -ERDOS_SIMULATOR_DIR="." # Change this to the directory where the simulator is located. -MIN_DEADLINE_VARIANCE=10 -NUM_INVOCATIONS=300 -SCHEDULER_LOG_TIMES=10 -SCHEDULER_RUNTIME=0 -LOG_LEVEL=info -REPLAY_TRACE=alibaba -WORKLOAD_PROFILE_PATH=./traces/alibaba-cluster-trace-v2018/alibaba_set_0_6600_dags.pkl -EXECUTION_MODE=replay - - -PARALLEL_FACTOR=2 -# Move to the simulator directory. -if [[ -z ${ERDOS_SIMULATOR_DIR} ]]; then - echo "[x] ERRROR: ERDOS_SIMULATOR_DIR is not set" - exit 1 -fi -cd ${ERDOS_SIMULATOR_DIR} - -LOG_DIR=$1 -if [[ -z ${LOG_DIR} ]]; then - echo "[x] ERROR: Please provide a directory to output results to as the first argument." - exit 2 -fi - -execute_experiment () { - LOG_DIR=$1 - LOG_BASE=$2 - echo "[x] Initiating the execution of ${LOG_BASE}" - if [ ! -f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then -# --log_dir=${LOG_DIR}/${LOG_BASE} -# --scheduler_log_to_file - MYCONF="\ ---log_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.log ---csv_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv ---log_level=${LOG_LEVEL} ---execution_mode=${EXECUTION_MODE} ---replay_trace=${REPLAY_TRACE} ---max_deadline_variance=${MAX_DEADLINE_VARIANCE} ---min_deadline_variance=${MIN_DEADLINE_VARIANCE} ---workload_profile_path=${WORKLOAD_PROFILE_PATH} ---override_num_invocations=${NUM_INVOCATIONS} ---randomize_start_time_max=100 ---worker_profile_path=profiles/workers/${WORKER_CONFIG}.yaml ---scheduler_runtime=${SCHEDULER_RUNTIME} ---override_release_policy=${RELEASE_POLICY} ---scheduler=${SCHEDULER} ---alibaba_loader_task_cpu_divisor=${TASK_CPU_DIVISOR} -" - if [[ ${RELEASE_POLICY} == fixed ]]; then - MYCONF+="--override_arrival_period=10 -" - else - MYCONF+="--override_poisson_arrival_rate=${POISSON_ARRIVAL_RATE} -" - if [[ ${RELEASE_POLICY} == gamma ]]; then - MYCONF+="--override_gamma_coefficient=${GAMMA_COEFFICIENT} -" - fi - fi - - if [[ ${DAG_AWARE} == 1 ]]; then - MYCONF+="--release_taskgraphs -" - fi - - if [[ ${HETEROGENEOUS_RESOURCE} == 1 ]]; then - MYCONF+="--alibaba_enable_heterogeneous_resource_type -" - fi - - if [[ ${SCHEDULER} != EDF ]]; then - MYCONF+=" ---enforce_deadlines ---retract_schedules ---drop_skipped_tasks ---scheduler_log_times=${SCHEDULER_LOG_TIMES} ---scheduler_time_discretization=${SCHEDULER_TIME_DISCRETIZATION} - " - fi - echo "${MYCONF}" > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf - if ! time python3 main.py --flagfile=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.output; then - echo "[x] Failed in the execution of ${LOG_BASE}. Exiting." 
- exit 3 - fi - else - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - fi - echo "[x] Finished execution of ${LOG_BASE}." -} - -for MAX_DEADLINE_VARIANCE in ${MAX_DEADLINE_VARIANCES[@]}; do - for SCHEDULER in ${SCHEDULERS[@]}; do - for RELEASE_POLICY in ${RELEASE_POLICIES[@]}; do - for POISSON_ARRIVAL_RATE in ${POISSON_ARRIVAL_RATES[@]}; do - for GAMMA_COEFFICIENT in ${GAMMA_COEFFICIENTS[@]}; do - for SCHEDULER_TIME_DISCRETIZATION in ${SCHEDULER_TIME_DISCRETIZATIONS[@]}; do - for DAG_AWARE in ${DAG_AWARENESS[@]}; do - if [[ ${SCHEDULER} == EDF && ( "${SCHEDULER_TIME_DISCRETIZATION}" -ne "${SCHEDULER_TIME_DISCRETIZATIONS[0]}" || "${DAG_AWARE}" -ne "${DAG_AWARENESS[0]}" ) ]]; then - continue - fi - - LOG_BASE=${REPLAY_TRACE}_scheduler_${SCHEDULER}_release_policy_${RELEASE_POLICY}_max_deadline_var_${MAX_DEADLINE_VARIANCE}_dag_aware_${DAG_AWARE}_poisson_arrival_rate_${POISSON_ARRIVAL_RATE}_gamma_coefficient_${GAMMA_COEFFICIENT} - - if [[ ${SCHEDULER} != EDF ]]; then - LOG_BASE+="_scheduler_discretization_${SCHEDULER_TIME_DISCRETIZATION}" - fi - - if [ -f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - continue - fi - - mkdir -p ${LOG_DIR}/${LOG_BASE} - execute_experiment ${LOG_DIR} ${LOG_BASE} & - sleep 0.5 - if [[ $(jobs -r -p | wc -l) -ge $PARALLEL_FACTOR ]]; then - echo "[x] Waiting for a job to terminate because $PARALLEL_FACTOR jobs are running." - wait -n - fi - done - done - done - done - done - done -done -wait -echo "[x] Finished executing all experiments." \ No newline at end of file diff --git a/scripts/run_alibaba_experiments_osdi_12_2_night_2.sh b/scripts/run_alibaba_experiments_osdi_12_2_night_2.sh deleted file mode 100755 index 20c139f8..00000000 --- a/scripts/run_alibaba_experiments_osdi_12_2_night_2.sh +++ /dev/null @@ -1,136 +0,0 @@ -#!/bin/bash -# $1 directory where to save the logs. - -# Scheduler runtimes in us.TetriSched -SCHEDULERS=(EDF TetriSched) -# MAX_DEADLINE_VARIANCES=(15 25 50 100 200) -# MAX_DEADLINE_VARIANCES=(200 400 800) -MAX_DEADLINE_VARIANCES=(25 50 100 200) # Keep deadline tight. Don't change this -SCHEDULER_TIME_DISCRETIZATIONS=(1) -# RELEASE_POLICIES=(fixed poisson gamma) -RELEASE_POLICIES=(gamma) -# POISSON_ARRIVAL_RATES=(0.2 0.5 1 2) -POISSON_ARRIVAL_RATES=(0.012 0.025 0.04) # Tune this -GAMMA_COEFFICIENTS=(1 2 4) #cv2 don't change this -DAG_AWARENESS=(1) # False True -TASK_CPU_DIVISOR=25 - -ERDOS_SIMULATOR_DIR="." # Change this to the directory where the simulator is located. -MIN_DEADLINE_VARIANCE=10 -NUM_INVOCATIONS=300 -SCHEDULER_LOG_TIMES=10 -SCHEDULER_RUNTIME=0 -LOG_LEVEL=info -REPLAY_TRACE=alibaba -WORKLOAD_PROFILE_PATH=./traces/alibaba-cluster-trace-v2018/alibaba_set_0_6600_dags.pkl -EXECUTION_MODE=replay -WORKER_CONFIG=alibaba_cluster - -PARALLEL_FACTOR=2 -# Move to the simulator directory. -if [[ -z ${ERDOS_SIMULATOR_DIR} ]]; then - echo "[x] ERRROR: ERDOS_SIMULATOR_DIR is not set" - exit 1 -fi -cd ${ERDOS_SIMULATOR_DIR} - -LOG_DIR=$1 -if [[ -z ${LOG_DIR} ]]; then - echo "[x] ERROR: Please provide a directory to output results to as the first argument." - exit 2 -fi - -execute_experiment () { - LOG_DIR=$1 - LOG_BASE=$2 - echo "[x] Initiating the execution of ${LOG_BASE}" - if [ ! 
-f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then -# --log_dir=${LOG_DIR}/${LOG_BASE} -# --scheduler_log_to_file - MYCONF="\ ---log_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.log ---csv_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv ---log_level=${LOG_LEVEL} ---execution_mode=${EXECUTION_MODE} ---replay_trace=${REPLAY_TRACE} ---max_deadline_variance=${MAX_DEADLINE_VARIANCE} ---min_deadline_variance=${MIN_DEADLINE_VARIANCE} ---workload_profile_path=${WORKLOAD_PROFILE_PATH} ---override_num_invocations=${NUM_INVOCATIONS} ---randomize_start_time_max=100 ---worker_profile_path=profiles/workers/${WORKER_CONFIG}.yaml ---scheduler_runtime=${SCHEDULER_RUNTIME} ---override_release_policy=${RELEASE_POLICY} ---scheduler=${SCHEDULER} ---alibaba_loader_task_cpu_divisor=${TASK_CPU_DIVISOR} -" - if [[ ${RELEASE_POLICY} == fixed ]]; then - MYCONF+="--override_arrival_period=10 -" - else - MYCONF+="--override_poisson_arrival_rate=${POISSON_ARRIVAL_RATE} -" - if [[ ${RELEASE_POLICY} == gamma ]]; then - MYCONF+="--override_gamma_coefficient=${GAMMA_COEFFICIENT} -" - fi - fi - - if [[ ${DAG_AWARE} == 1 ]]; then - MYCONF+="--release_taskgraphs -" - fi - - if [[ ${SCHEDULER} != EDF ]]; then - MYCONF+=" ---enforce_deadlines ---retract_schedules ---drop_skipped_tasks ---scheduler_log_times=${SCHEDULER_LOG_TIMES} ---scheduler_time_discretization=${SCHEDULER_TIME_DISCRETIZATION} - " - fi - echo "${MYCONF}" > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf - if ! time python3 main.py --flagfile=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.output; then - echo "[x] Failed in the execution of ${LOG_BASE}. Exiting." - exit 3 - fi - else - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - fi - echo "[x] Finished execution of ${LOG_BASE}." -} - -for MAX_DEADLINE_VARIANCE in ${MAX_DEADLINE_VARIANCES[@]}; do - for SCHEDULER in ${SCHEDULERS[@]}; do - for RELEASE_POLICY in ${RELEASE_POLICIES[@]}; do - for POISSON_ARRIVAL_RATE in ${POISSON_ARRIVAL_RATES[@]}; do - for GAMMA_COEFFICIENT in ${GAMMA_COEFFICIENTS[@]}; do - for SCHEDULER_TIME_DISCRETIZATION in ${SCHEDULER_TIME_DISCRETIZATIONS[@]}; do - for DAG_AWARE in ${DAG_AWARENESS[@]}; do - if [[ ${SCHEDULER} == EDF && ( "${SCHEDULER_TIME_DISCRETIZATION}" -ne "${SCHEDULER_TIME_DISCRETIZATIONS[0]}" || ${DAG_AWARE} == 1 ) ]]; then - continue - fi - - LOG_BASE=${REPLAY_TRACE}_scheduler_${SCHEDULER}_release_policy_${RELEASE_POLICY}_max_deadline_var_${MAX_DEADLINE_VARIANCE}_dag_aware_${DAG_AWARE}_poisson_arrival_rate_${POISSON_ARRIVAL_RATE}_gamma_coefficient_${GAMMA_COEFFICIENT} - - if [[ ${SCHEDULER} != EDF ]]; then - LOG_BASE+="_scheduler_discretization_${SCHEDULER_TIME_DISCRETIZATION}" - fi - - mkdir -p ${LOG_DIR}/${LOG_BASE} - execute_experiment ${LOG_DIR} ${LOG_BASE} & - sleep 0.5 - if [[ $(jobs -r -p | wc -l) -ge $PARALLEL_FACTOR ]]; then - echo "[x] Waiting for a job to terminate because $PARALLEL_FACTOR jobs are running." - wait -n - fi - done - done - done - done - done - done -done -wait -echo "[x] Finished executing all experiments." \ No newline at end of file diff --git a/scripts/run_alibaba_experiments_osdi_12_2_night_2_heter_simontam.sh b/scripts/run_alibaba_experiments_osdi_12_2_night_2_heter_simontam.sh deleted file mode 100755 index fb1eac22..00000000 --- a/scripts/run_alibaba_experiments_osdi_12_2_night_2_heter_simontam.sh +++ /dev/null @@ -1,144 +0,0 @@ -#!/bin/bash -# $1 directory where to save the logs. 
- -# Scheduler runtimes in us.TetriSched -SCHEDULERS=(EDF TetriSched) -# MAX_DEADLINE_VARIANCES=(15 25 50 100 200) -# MAX_DEADLINE_VARIANCES=(200 400 800) -MAX_DEADLINE_VARIANCES=(25 50 100 200) # Keep deadline tight. Don't change this -SCHEDULER_TIME_DISCRETIZATIONS=(1 10 20) -# RELEASE_POLICIES=(fixed poisson gamma) -RELEASE_POLICIES=(gamma) -# POISSON_ARRIVAL_RATES=(0.2 0.5 1 2) -POISSON_ARRIVAL_RATES=(0.08 0.012 0.02) # Tune this -GAMMA_COEFFICIENTS=(1 2 4) #cv2 don't change this -DAG_AWARENESS=(0 1) # False True -TASK_CPU_DIVISOR=25 - -HETEROGENEOUS_RESOURCE=1 -WORKER_CONFIG=alibaba_cluster_heterogeneous - -ERDOS_SIMULATOR_DIR="." # Change this to the directory where the simulator is located. -MIN_DEADLINE_VARIANCE=10 -NUM_INVOCATIONS=300 -SCHEDULER_LOG_TIMES=10 -SCHEDULER_RUNTIME=0 -LOG_LEVEL=info -REPLAY_TRACE=alibaba -WORKLOAD_PROFILE_PATH=./traces/alibaba-cluster-trace-v2018/alibaba_set_0_6600_dags.pkl -EXECUTION_MODE=replay - - -PARALLEL_FACTOR=2 -# Move to the simulator directory. -if [[ -z ${ERDOS_SIMULATOR_DIR} ]]; then - echo "[x] ERRROR: ERDOS_SIMULATOR_DIR is not set" - exit 1 -fi -cd ${ERDOS_SIMULATOR_DIR} - -LOG_DIR=$1 -if [[ -z ${LOG_DIR} ]]; then - echo "[x] ERROR: Please provide a directory to output results to as the first argument." - exit 2 -fi - -execute_experiment () { - LOG_DIR=$1 - LOG_BASE=$2 - echo "[x] Initiating the execution of ${LOG_BASE}" - if [ ! -f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then -# --log_dir=${LOG_DIR}/${LOG_BASE} -# --scheduler_log_to_file - MYCONF="\ ---log_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.log ---csv_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv ---log_level=${LOG_LEVEL} ---execution_mode=${EXECUTION_MODE} ---replay_trace=${REPLAY_TRACE} ---max_deadline_variance=${MAX_DEADLINE_VARIANCE} ---min_deadline_variance=${MIN_DEADLINE_VARIANCE} ---workload_profile_path=${WORKLOAD_PROFILE_PATH} ---override_num_invocations=${NUM_INVOCATIONS} ---randomize_start_time_max=100 ---worker_profile_path=profiles/workers/${WORKER_CONFIG}.yaml ---scheduler_runtime=${SCHEDULER_RUNTIME} ---override_release_policy=${RELEASE_POLICY} ---scheduler=${SCHEDULER} ---alibaba_loader_task_cpu_divisor=${TASK_CPU_DIVISOR} -" - if [[ ${RELEASE_POLICY} == fixed ]]; then - MYCONF+="--override_arrival_period=10 -" - else - MYCONF+="--override_poisson_arrival_rate=${POISSON_ARRIVAL_RATE} -" - if [[ ${RELEASE_POLICY} == gamma ]]; then - MYCONF+="--override_gamma_coefficient=${GAMMA_COEFFICIENT} -" - fi - fi - - if [[ ${DAG_AWARE} == 1 ]]; then - MYCONF+="--release_taskgraphs -" - fi - - if [[ ${HETEROGENEOUS_RESOURCE} == 1 ]]; then - MYCONF+="--alibaba_enable_heterogeneous_resource_type -" - fi - - if [[ ${SCHEDULER} != EDF ]]; then - MYCONF+=" ---enforce_deadlines ---retract_schedules ---drop_skipped_tasks ---scheduler_log_times=${SCHEDULER_LOG_TIMES} ---scheduler_time_discretization=${SCHEDULER_TIME_DISCRETIZATION} - " - fi - echo "${MYCONF}" > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf - if ! time python3 main.py --flagfile=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.output; then - echo "[x] Failed in the execution of ${LOG_BASE}. Exiting." - exit 3 - fi - else - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - fi - echo "[x] Finished execution of ${LOG_BASE}." 
-} - -for MAX_DEADLINE_VARIANCE in ${MAX_DEADLINE_VARIANCES[@]}; do - for SCHEDULER in ${SCHEDULERS[@]}; do - for RELEASE_POLICY in ${RELEASE_POLICIES[@]}; do - for POISSON_ARRIVAL_RATE in ${POISSON_ARRIVAL_RATES[@]}; do - for GAMMA_COEFFICIENT in ${GAMMA_COEFFICIENTS[@]}; do - for SCHEDULER_TIME_DISCRETIZATION in ${SCHEDULER_TIME_DISCRETIZATIONS[@]}; do - for DAG_AWARE in ${DAG_AWARENESS[@]}; do - if [[ ${SCHEDULER} == EDF && ( "${SCHEDULER_TIME_DISCRETIZATION}" -ne "${SCHEDULER_TIME_DISCRETIZATIONS[0]}" || "${DAG_AWARE}" -ne "${DAG_AWARENESS[0]}" ) ]]; then - continue - fi - - LOG_BASE=${REPLAY_TRACE}_scheduler_${SCHEDULER}_release_policy_${RELEASE_POLICY}_max_deadline_var_${MAX_DEADLINE_VARIANCE}_dag_aware_${DAG_AWARE}_poisson_arrival_rate_${POISSON_ARRIVAL_RATE}_gamma_coefficient_${GAMMA_COEFFICIENT} - - if [[ ${SCHEDULER} != EDF ]]; then - LOG_BASE+="_scheduler_discretization_${SCHEDULER_TIME_DISCRETIZATION}" - fi - - mkdir -p ${LOG_DIR}/${LOG_BASE} - execute_experiment ${LOG_DIR} ${LOG_BASE} & - sleep 0.5 - if [[ $(jobs -r -p | wc -l) -ge $PARALLEL_FACTOR ]]; then - echo "[x] Waiting for a job to terminate because $PARALLEL_FACTOR jobs are running." - wait -n - fi - done - done - done - done - done - done -done -wait -echo "[x] Finished executing all experiments." \ No newline at end of file diff --git a/scripts/run_alibaba_experiments_osdi_12_2_night_heter_simontam.sh b/scripts/run_alibaba_experiments_osdi_12_2_night_heter_simontam.sh deleted file mode 100755 index d58c8927..00000000 --- a/scripts/run_alibaba_experiments_osdi_12_2_night_heter_simontam.sh +++ /dev/null @@ -1,144 +0,0 @@ -#!/bin/bash -# $1 directory where to save the logs. - -# Scheduler runtimes in us.TetriSched -SCHEDULERS=(EDF TetriSched) -# MAX_DEADLINE_VARIANCES=(15 25 50 100 200) -# MAX_DEADLINE_VARIANCES=(200 400 800) -MAX_DEADLINE_VARIANCES=(25 50 100 200) # Keep deadline tight. Don't change this -SCHEDULER_TIME_DISCRETIZATIONS=(1 10 20) -# RELEASE_POLICIES=(fixed poisson gamma) -RELEASE_POLICIES=(gamma) -# POISSON_ARRIVAL_RATES=(0.2 0.5 1 2) -POISSON_ARRIVAL_RATES=(0.012 0.025) # Tune this -GAMMA_COEFFICIENTS=(1 2 4) #cv2 don't change this -DAG_AWARENESS=(1) # False True -TASK_CPU_DIVISOR=25 - -HETEROGENEOUS_RESOURCE=1 -WORKER_CONFIG=alibaba_cluster_heterogeneous - -ERDOS_SIMULATOR_DIR="." # Change this to the directory where the simulator is located. -MIN_DEADLINE_VARIANCE=10 -NUM_INVOCATIONS=50 -SCHEDULER_LOG_TIMES=10 -SCHEDULER_RUNTIME=0 -LOG_LEVEL=info -REPLAY_TRACE=alibaba -WORKLOAD_PROFILE_PATH=./traces/alibaba-cluster-trace-v2018/alibaba_set_0_6600_dags.pkl -EXECUTION_MODE=replay - - -PARALLEL_FACTOR=2 -# Move to the simulator directory. -if [[ -z ${ERDOS_SIMULATOR_DIR} ]]; then - echo "[x] ERRROR: ERDOS_SIMULATOR_DIR is not set" - exit 1 -fi -cd ${ERDOS_SIMULATOR_DIR} - -LOG_DIR=$1 -if [[ -z ${LOG_DIR} ]]; then - echo "[x] ERROR: Please provide a directory to output results to as the first argument." - exit 2 -fi - -execute_experiment () { - LOG_DIR=$1 - LOG_BASE=$2 - echo "[x] Initiating the execution of ${LOG_BASE}" - if [ ! 
-f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then -# --log_dir=${LOG_DIR}/${LOG_BASE} -# --scheduler_log_to_file - MYCONF="\ ---log_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.log ---csv_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv ---log_level=${LOG_LEVEL} ---execution_mode=${EXECUTION_MODE} ---replay_trace=${REPLAY_TRACE} ---max_deadline_variance=${MAX_DEADLINE_VARIANCE} ---min_deadline_variance=${MIN_DEADLINE_VARIANCE} ---workload_profile_path=${WORKLOAD_PROFILE_PATH} ---override_num_invocations=${NUM_INVOCATIONS} ---randomize_start_time_max=100 ---worker_profile_path=profiles/workers/${WORKER_CONFIG}.yaml ---scheduler_runtime=${SCHEDULER_RUNTIME} ---override_release_policy=${RELEASE_POLICY} ---scheduler=${SCHEDULER} ---alibaba_loader_task_cpu_divisor=${TASK_CPU_DIVISOR} -" - if [[ ${RELEASE_POLICY} == fixed ]]; then - MYCONF+="--override_arrival_period=10 -" - else - MYCONF+="--override_poisson_arrival_rate=${POISSON_ARRIVAL_RATE} -" - if [[ ${RELEASE_POLICY} == gamma ]]; then - MYCONF+="--override_gamma_coefficient=${GAMMA_COEFFICIENT} -" - fi - fi - - if [[ ${DAG_AWARE} == 1 ]]; then - MYCONF+="--release_taskgraphs -" - fi - - if [[ ${HETEROGENEOUS_RESOURCE} == 1 ]]; then - MYCONF+="--alibaba_enable_heterogeneous_resource_type -" - fi - - if [[ ${SCHEDULER} != EDF ]]; then - MYCONF+=" ---enforce_deadlines ---retract_schedules ---drop_skipped_tasks ---scheduler_log_times=${SCHEDULER_LOG_TIMES} ---scheduler_time_discretization=${SCHEDULER_TIME_DISCRETIZATION} - " - fi - echo "${MYCONF}" > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf - if ! time python3 main.py --flagfile=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.output; then - echo "[x] Failed in the execution of ${LOG_BASE}. Exiting." - exit 3 - fi - else - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - fi - echo "[x] Finished execution of ${LOG_BASE}." -} - -for MAX_DEADLINE_VARIANCE in ${MAX_DEADLINE_VARIANCES[@]}; do - for SCHEDULER in ${SCHEDULERS[@]}; do - for RELEASE_POLICY in ${RELEASE_POLICIES[@]}; do - for POISSON_ARRIVAL_RATE in ${POISSON_ARRIVAL_RATES[@]}; do - for GAMMA_COEFFICIENT in ${GAMMA_COEFFICIENTS[@]}; do - for SCHEDULER_TIME_DISCRETIZATION in ${SCHEDULER_TIME_DISCRETIZATIONS[@]}; do - for DAG_AWARE in ${DAG_AWARENESS[@]}; do - if [[ ${SCHEDULER} == EDF && ( "${SCHEDULER_TIME_DISCRETIZATION}" -ne "${SCHEDULER_TIME_DISCRETIZATIONS[0]}" || "${DAG_AWARE}" -ne "${DAG_AWARENESS[0]}" ) ]]; then - continue - fi - - LOG_BASE=${REPLAY_TRACE}_scheduler_${SCHEDULER}_release_policy_${RELEASE_POLICY}_max_deadline_var_${MAX_DEADLINE_VARIANCE}_dag_aware_${DAG_AWARE}_poisson_arrival_rate_${POISSON_ARRIVAL_RATE}_gamma_coefficient_${GAMMA_COEFFICIENT} - - if [[ ${SCHEDULER} != EDF ]]; then - LOG_BASE+="_scheduler_discretization_${SCHEDULER_TIME_DISCRETIZATION}" - fi - - mkdir -p ${LOG_DIR}/${LOG_BASE} - execute_experiment ${LOG_DIR} ${LOG_BASE} & - sleep 0.5 - if [[ $(jobs -r -p | wc -l) -ge $PARALLEL_FACTOR ]]; then - echo "[x] Waiting for a job to terminate because $PARALLEL_FACTOR jobs are running." - wait -n - fi - done - done - done - done - done - done -done -wait -echo "[x] Finished executing all experiments." \ No newline at end of file diff --git a/scripts/run_alibaba_experiments_osdi_12_2_night_jayne.sh b/scripts/run_alibaba_experiments_osdi_12_2_night_jayne.sh deleted file mode 100755 index 45b01277..00000000 --- a/scripts/run_alibaba_experiments_osdi_12_2_night_jayne.sh +++ /dev/null @@ -1,141 +0,0 @@ -#!/bin/bash -# $1 directory where to save the logs. 
- -# Scheduler runtimes in us.TetriSched -SCHEDULERS=(TetriSched) -# MAX_DEADLINE_VARIANCES=(15 25 50 100 200) -# MAX_DEADLINE_VARIANCES=(200 400 800) -MAX_DEADLINE_VARIANCES=(25 50 100 200) # Keep deadline tight. Don't change this -SCHEDULER_TIME_DISCRETIZATIONS=(1 10 20) -# RELEASE_POLICIES=(fixed poisson gamma) -RELEASE_POLICIES=(gamma) -# POISSON_ARRIVAL_RATES=(0.2 0.5 1 2) -POISSON_ARRIVAL_RATES=(0.012 0.025 0.04) # Tune this -GAMMA_COEFFICIENTS=(1 2 4) #cv2 don't change this -DAG_AWARENESS=(0 1) # False True -TASK_CPU_DIVISOR=25 - -ERDOS_SIMULATOR_DIR="." # Change this to the directory where the simulator is located. -MIN_DEADLINE_VARIANCE=10 -NUM_INVOCATIONS=300 -SCHEDULER_LOG_TIMES=10 -SCHEDULER_RUNTIME=0 -LOG_LEVEL=info -REPLAY_TRACE=alibaba -WORKLOAD_PROFILE_PATH=./traces/alibaba-cluster-trace-v2018/alibaba_set_0_6600_dags.pkl -EXECUTION_MODE=replay -WORKER_CONFIG=alibaba_cluster - -PARALLEL_FACTOR=2 -# Move to the simulator directory. -if [[ -z ${ERDOS_SIMULATOR_DIR} ]]; then - echo "[x] ERRROR: ERDOS_SIMULATOR_DIR is not set" - exit 1 -fi -cd ${ERDOS_SIMULATOR_DIR} - -LOG_DIR=$1 -if [[ -z ${LOG_DIR} ]]; then - echo "[x] ERROR: Please provide a directory to output results to as the first argument." - exit 2 -fi - -execute_experiment () { - LOG_DIR=$1 - LOG_BASE=$2 - echo "[x] Initiating the execution of ${LOG_BASE}" - if [ ! -f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then -# --log_dir=${LOG_DIR}/${LOG_BASE} -# --scheduler_log_to_file - MYCONF="\ ---log_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.log ---csv_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv ---log_level=${LOG_LEVEL} ---execution_mode=${EXECUTION_MODE} ---replay_trace=${REPLAY_TRACE} ---max_deadline_variance=${MAX_DEADLINE_VARIANCE} ---min_deadline_variance=${MIN_DEADLINE_VARIANCE} ---workload_profile_path=${WORKLOAD_PROFILE_PATH} ---override_num_invocations=${NUM_INVOCATIONS} ---randomize_start_time_max=100 ---worker_profile_path=profiles/workers/${WORKER_CONFIG}.yaml ---scheduler_runtime=${SCHEDULER_RUNTIME} ---override_release_policy=${RELEASE_POLICY} ---scheduler=${SCHEDULER} ---alibaba_loader_task_cpu_divisor=${TASK_CPU_DIVISOR} -" - if [[ ${RELEASE_POLICY} == fixed ]]; then - MYCONF+="--override_arrival_period=10 -" - else - MYCONF+="--override_poisson_arrival_rate=${POISSON_ARRIVAL_RATE} -" - if [[ ${RELEASE_POLICY} == gamma ]]; then - MYCONF+="--override_gamma_coefficient=${GAMMA_COEFFICIENT} -" - fi - fi - - if [[ ${DAG_AWARE} == 1 ]]; then - MYCONF+="--release_taskgraphs -" - fi - - if [[ ${SCHEDULER} != EDF ]]; then - MYCONF+=" ---enforce_deadlines ---retract_schedules ---drop_skipped_tasks ---scheduler_log_times=${SCHEDULER_LOG_TIMES} ---scheduler_time_discretization=${SCHEDULER_TIME_DISCRETIZATION} - " - fi - echo "${MYCONF}" > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf - if ! time python3 main.py --flagfile=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.output; then - echo "[x] Failed in the execution of ${LOG_BASE}. Exiting." - exit 3 - fi - else - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - fi - echo "[x] Finished execution of ${LOG_BASE}." 
-} - -for MAX_DEADLINE_VARIANCE in ${MAX_DEADLINE_VARIANCES[@]}; do - for SCHEDULER in ${SCHEDULERS[@]}; do - for RELEASE_POLICY in ${RELEASE_POLICIES[@]}; do - for POISSON_ARRIVAL_RATE in ${POISSON_ARRIVAL_RATES[@]}; do - for GAMMA_COEFFICIENT in ${GAMMA_COEFFICIENTS[@]}; do - for SCHEDULER_TIME_DISCRETIZATION in ${SCHEDULER_TIME_DISCRETIZATIONS[@]}; do - for DAG_AWARE in ${DAG_AWARENESS[@]}; do - if [[ ${SCHEDULER} == EDF && ( "${SCHEDULER_TIME_DISCRETIZATION}" -ne "${SCHEDULER_TIME_DISCRETIZATIONS[0]}" || "${DAG_AWARE}" -ne "${DAG_AWARENESS[0]}" ) ]]; then - continue - fi - - # No need to run TetriSched dist 1 and DAG aware - if [[ ${SCHEDULER} == TetriSched && ( "${SCHEDULER_TIME_DISCRETIZATION}" == "${SCHEDULER_TIME_DISCRETIZATIONS[0]}" || ${DAG_AWARE} == 1 ) ]]; then - continue - fi - - LOG_BASE=${REPLAY_TRACE}_scheduler_${SCHEDULER}_release_policy_${RELEASE_POLICY}_max_deadline_var_${MAX_DEADLINE_VARIANCE}_dag_aware_${DAG_AWARE}_poisson_arrival_rate_${POISSON_ARRIVAL_RATE}_gamma_coefficient_${GAMMA_COEFFICIENT} - - if [[ ${SCHEDULER} != EDF ]]; then - LOG_BASE+="_scheduler_discretization_${SCHEDULER_TIME_DISCRETIZATION}" - fi - - mkdir -p ${LOG_DIR}/${LOG_BASE} - execute_experiment ${LOG_DIR} ${LOG_BASE} & - sleep 0.5 - if [[ $(jobs -r -p | wc -l) -ge $PARALLEL_FACTOR ]]; then - echo "[x] Waiting for a job to terminate because $PARALLEL_FACTOR jobs are running." - wait -n - fi - done - done - done - done - done - done -done -wait -echo "[x] Finished executing all experiments." \ No newline at end of file diff --git a/scripts/run_alibaba_experiments_osdi_12_2_noon_jayne.sh b/scripts/run_alibaba_experiments_osdi_12_2_noon_jayne.sh deleted file mode 100755 index b561937b..00000000 --- a/scripts/run_alibaba_experiments_osdi_12_2_noon_jayne.sh +++ /dev/null @@ -1,137 +0,0 @@ -#!/bin/bash -# $1 directory where to save the logs. - -# Scheduler runtimes in us.TetriSched -SCHEDULERS=(EDF TetriSched) -# MAX_DEADLINE_VARIANCES=(15 25 50 100 200) -# MAX_DEADLINE_VARIANCES=(200 400 800) -MAX_DEADLINE_VARIANCES=(100 200 400) -SCHEDULER_TIME_DISCRETIZATIONS=(1) -# RELEASE_POLICIES=(fixed poisson gamma) -RELEASE_POLICIES=(gamma) -POISSON_ARRIVAL_RATES=(0.01 0.02 0.05) -GAMMA_COEFFICIENTS=(6 8 10) #cv2 -DAG_AWARENESS=(1) # False True -TASK_CPU_DIVISOR=25 -TASK_DURATION_MULTIPLER=2 - -ERDOS_SIMULATOR_DIR="." # Change this to the directory where the simulator is located. -MIN_DEADLINE_VARIANCE=10 -NUM_INVOCATIONS=100 -SCHEDULER_LOG_TIMES=10 -SCHEDULER_RUNTIME=0 -LOG_LEVEL=info -REPLAY_TRACE=alibaba -WORKLOAD_PROFILE_PATH=./traces/alibaba-cluster-trace-v2018/alibaba_set_0_6600_dags.pkl -EXECUTION_MODE=replay -WORKER_CONFIG=alibaba_cluster - -PARALLEL_FACTOR=2 -# Move to the simulator directory. -if [[ -z ${ERDOS_SIMULATOR_DIR} ]]; then - echo "[x] ERRROR: ERDOS_SIMULATOR_DIR is not set" - exit 1 -fi -cd ${ERDOS_SIMULATOR_DIR} - -LOG_DIR=$1 -if [[ -z ${LOG_DIR} ]]; then - echo "[x] ERROR: Please provide a directory to output results to as the first argument." - exit 2 -fi - -execute_experiment () { - LOG_DIR=$1 - LOG_BASE=$2 - echo "[x] Initiating the execution of ${LOG_BASE}" - if [ ! 
-f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then -# --log_dir=${LOG_DIR}/${LOG_BASE} -# --scheduler_log_to_file - MYCONF="\ ---log_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.log ---csv_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv ---log_level=${LOG_LEVEL} ---execution_mode=${EXECUTION_MODE} ---replay_trace=${REPLAY_TRACE} ---max_deadline_variance=${MAX_DEADLINE_VARIANCE} ---min_deadline_variance=${MIN_DEADLINE_VARIANCE} ---workload_profile_path=${WORKLOAD_PROFILE_PATH} ---override_num_invocations=${NUM_INVOCATIONS} ---randomize_start_time_max=100 ---worker_profile_path=profiles/workers/${WORKER_CONFIG}.yaml ---scheduler_runtime=${SCHEDULER_RUNTIME} ---override_release_policy=${RELEASE_POLICY} ---scheduler=${SCHEDULER} ---alibaba_loader_task_cpu_divisor=${TASK_CPU_DIVISOR} ---alibaba_task_duration_multiplier=${TASK_DURATION_MULTIPLER} -" - if [[ ${RELEASE_POLICY} == fixed ]]; then - MYCONF+="--override_arrival_period=10 -" - else - MYCONF+="--override_poisson_arrival_rate=${POISSON_ARRIVAL_RATE} -" - if [[ ${RELEASE_POLICY} == gamma ]]; then - MYCONF+="--override_gamma_coefficient=${GAMMA_COEFFICIENT} -" - fi - fi - - if [[ ${DAG_AWARE} == 1 ]]; then - MYCONF+="--release_taskgraphs -" - fi - - if [[ ${SCHEDULER} != EDF ]]; then - MYCONF+=" ---enforce_deadlines ---retract_schedules ---drop_skipped_tasks ---scheduler_log_times=${SCHEDULER_LOG_TIMES} ---scheduler_time_discretization=${SCHEDULER_TIME_DISCRETIZATION} - " - fi - echo "${MYCONF}" > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf - if ! time python3 main.py --flagfile=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.output; then - echo "[x] Failed in the execution of ${LOG_BASE}. Exiting." - exit 3 - fi - else - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - fi - echo "[x] Finished execution of ${LOG_BASE}." -} - -for MAX_DEADLINE_VARIANCE in ${MAX_DEADLINE_VARIANCES[@]}; do - for SCHEDULER in ${SCHEDULERS[@]}; do - for RELEASE_POLICY in ${RELEASE_POLICIES[@]}; do - for POISSON_ARRIVAL_RATE in ${POISSON_ARRIVAL_RATES[@]}; do - for GAMMA_COEFFICIENT in ${GAMMA_COEFFICIENTS[@]}; do - for SCHEDULER_TIME_DISCRETIZATION in ${SCHEDULER_TIME_DISCRETIZATIONS[@]}; do - for DAG_AWARE in ${DAG_AWARENESS[@]}; do - if [[ ${SCHEDULER} == EDF && ( "${SCHEDULER_TIME_DISCRETIZATION}" -ne "${SCHEDULER_TIME_DISCRETIZATIONS[0]}" || "${DAG_AWARE}" -ne "${DAG_AWARENESS[0]}" ) ]]; then - continue - fi - - LOG_BASE=${REPLAY_TRACE}_scheduler_${SCHEDULER}_release_policy_${RELEASE_POLICY}_max_deadline_var_${MAX_DEADLINE_VARIANCE}_dag_aware_${DAG_AWARE}_poisson_arrival_rate_${POISSON_ARRIVAL_RATE}_gamma_coefficient_${GAMMA_COEFFICIENT} - - if [[ ${SCHEDULER} != EDF ]]; then - LOG_BASE+="_scheduler_discretization_${SCHEDULER_TIME_DISCRETIZATION}" - fi - - mkdir -p ${LOG_DIR}/${LOG_BASE} - execute_experiment ${LOG_DIR} ${LOG_BASE} & - # sleep 0.5 - if [[ $(jobs -r -p | wc -l) -ge $PARALLEL_FACTOR ]]; then - echo "[x] Waiting for a job to terminate because $PARALLEL_FACTOR jobs are running." - wait -n - fi - done - done - done - done - done - done -done -wait -echo "[x] Finished executing all experiments." \ No newline at end of file diff --git a/scripts/run_alibaba_experiments_osdi_12_3_morning_heter_simontam.sh b/scripts/run_alibaba_experiments_osdi_12_3_morning_heter_simontam.sh deleted file mode 100755 index 62dab3b3..00000000 --- a/scripts/run_alibaba_experiments_osdi_12_3_morning_heter_simontam.sh +++ /dev/null @@ -1,149 +0,0 @@ -#!/bin/bash -# $1 directory where to save the logs. 
- -# Scheduler runtimes in us.TetriSched -SCHEDULERS=(EDF TetriSched) -# MAX_DEADLINE_VARIANCES=(15 25 50 100 200) -# MAX_DEADLINE_VARIANCES=(200 400 800) -MAX_DEADLINE_VARIANCES=(25 50 100 200) # Keep deadline tight. Don't change this -SCHEDULER_TIME_DISCRETIZATIONS=(1) -# RELEASE_POLICIES=(fixed poisson gamma) -RELEASE_POLICIES=(gamma) -# POISSON_ARRIVAL_RATES=(0.2 0.5 1 2) -POISSON_ARRIVAL_RATES=(0.012 0.025 0.04) # Tune this -GAMMA_COEFFICIENTS=(1 2 4) #cv2 don't change this -DAG_AWARENESS=(0 1) # False True -TASK_CPU_DIVISOR=25 - -HETEROGENEOUS_RESOURCE=1 -WORKER_CONFIG=alibaba_cluster_heterogeneous - -ERDOS_SIMULATOR_DIR="." # Change this to the directory where the simulator is located. -MIN_DEADLINE_VARIANCE=10 -NUM_INVOCATIONS=50 -SCHEDULER_LOG_TIMES=10 -SCHEDULER_RUNTIME=0 -LOG_LEVEL=info -REPLAY_TRACE=alibaba -WORKLOAD_PROFILE_PATH=./traces/alibaba-cluster-trace-v2018/alibaba_set_0_6600_dags.pkl -EXECUTION_MODE=replay - - -PARALLEL_FACTOR=2 -# Move to the simulator directory. -if [[ -z ${ERDOS_SIMULATOR_DIR} ]]; then - echo "[x] ERRROR: ERDOS_SIMULATOR_DIR is not set" - exit 1 -fi -cd ${ERDOS_SIMULATOR_DIR} - -LOG_DIR=$1 -if [[ -z ${LOG_DIR} ]]; then - echo "[x] ERROR: Please provide a directory to output results to as the first argument." - exit 2 -fi - -execute_experiment () { - LOG_DIR=$1 - LOG_BASE=$2 - echo "[x] Initiating the execution of ${LOG_BASE}" - if [ ! -f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then -# --log_dir=${LOG_DIR}/${LOG_BASE} -# --scheduler_log_to_file - MYCONF="\ ---log_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.log ---csv_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv ---log_level=${LOG_LEVEL} ---execution_mode=${EXECUTION_MODE} ---replay_trace=${REPLAY_TRACE} ---max_deadline_variance=${MAX_DEADLINE_VARIANCE} ---min_deadline_variance=${MIN_DEADLINE_VARIANCE} ---workload_profile_path=${WORKLOAD_PROFILE_PATH} ---override_num_invocations=${NUM_INVOCATIONS} ---randomize_start_time_max=100 ---worker_profile_path=profiles/workers/${WORKER_CONFIG}.yaml ---scheduler_runtime=${SCHEDULER_RUNTIME} ---override_release_policy=${RELEASE_POLICY} ---scheduler=${SCHEDULER} ---alibaba_loader_task_cpu_divisor=${TASK_CPU_DIVISOR} -" - if [[ ${RELEASE_POLICY} == fixed ]]; then - MYCONF+="--override_arrival_period=10 -" - else - MYCONF+="--override_poisson_arrival_rate=${POISSON_ARRIVAL_RATE} -" - if [[ ${RELEASE_POLICY} == gamma ]]; then - MYCONF+="--override_gamma_coefficient=${GAMMA_COEFFICIENT} -" - fi - fi - - if [[ ${DAG_AWARE} == 1 ]]; then - MYCONF+="--release_taskgraphs -" - fi - - if [[ ${HETEROGENEOUS_RESOURCE} == 1 ]]; then - MYCONF+="--alibaba_enable_heterogeneous_resource_type -" - fi - - if [[ ${SCHEDULER} != EDF ]]; then - MYCONF+=" ---enforce_deadlines ---retract_schedules ---drop_skipped_tasks ---scheduler_log_times=${SCHEDULER_LOG_TIMES} ---scheduler_time_discretization=${SCHEDULER_TIME_DISCRETIZATION} - " - fi - echo "${MYCONF}" > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf - if ! time python3 main.py --flagfile=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.output; then - echo "[x] Failed in the execution of ${LOG_BASE}. Exiting." - exit 3 - fi - else - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - fi - echo "[x] Finished execution of ${LOG_BASE}." 
-} - -for MAX_DEADLINE_VARIANCE in ${MAX_DEADLINE_VARIANCES[@]}; do - for SCHEDULER in ${SCHEDULERS[@]}; do - for RELEASE_POLICY in ${RELEASE_POLICIES[@]}; do - for POISSON_ARRIVAL_RATE in ${POISSON_ARRIVAL_RATES[@]}; do - for GAMMA_COEFFICIENT in ${GAMMA_COEFFICIENTS[@]}; do - for SCHEDULER_TIME_DISCRETIZATION in ${SCHEDULER_TIME_DISCRETIZATIONS[@]}; do - for DAG_AWARE in ${DAG_AWARENESS[@]}; do - if [[ ${SCHEDULER} == EDF && ( "${SCHEDULER_TIME_DISCRETIZATION}" -ne "${SCHEDULER_TIME_DISCRETIZATIONS[0]}" || "${DAG_AWARE}" -ne "${DAG_AWARENESS[0]}" ) ]]; then - continue - fi - - LOG_BASE=${REPLAY_TRACE}_scheduler_${SCHEDULER}_release_policy_${RELEASE_POLICY}_max_deadline_var_${MAX_DEADLINE_VARIANCE}_dag_aware_${DAG_AWARE}_poisson_arrival_rate_${POISSON_ARRIVAL_RATE}_gamma_coefficient_${GAMMA_COEFFICIENT} - - if [[ ${SCHEDULER} != EDF ]]; then - LOG_BASE+="_scheduler_discretization_${SCHEDULER_TIME_DISCRETIZATION}" - fi - - if [ -f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - continue - fi - - mkdir -p ${LOG_DIR}/${LOG_BASE} - execute_experiment ${LOG_DIR} ${LOG_BASE} & - sleep 0.5 - if [[ $(jobs -r -p | wc -l) -ge $PARALLEL_FACTOR ]]; then - echo "[x] Waiting for a job to terminate because $PARALLEL_FACTOR jobs are running." - wait -n - fi - done - done - done - done - done - done -done -wait -echo "[x] Finished executing all experiments." \ No newline at end of file diff --git a/scripts/run_alibaba_experiments_osdi_12_3_night_jayne.sh b/scripts/run_alibaba_experiments_osdi_12_3_night_jayne.sh deleted file mode 100755 index 72e8fa71..00000000 --- a/scripts/run_alibaba_experiments_osdi_12_3_night_jayne.sh +++ /dev/null @@ -1,142 +0,0 @@ -#!/bin/bash -# $1 directory where to save the logs. - -# Scheduler runtimes in us.TetriSched -SCHEDULERS=(EDF TetriSched) -# MAX_DEADLINE_VARIANCES=(15 25 50 100 200) -# MAX_DEADLINE_VARIANCES=(200 400 800) -MAX_DEADLINE_VARIANCES=(25 50 100 200) # Keep deadline tight. Don't change this -SCHEDULER_TIME_DISCRETIZATIONS=(1) -GAMMA_COEFFICIENTS=(1 2 4) #cv2 don't change this -RELEASE_POLICIES=(gamma) -# POISSON_ARRIVAL_RATES=(0.2 0.5 1 2) -POISSON_ARRIVAL_RATES=(0.025 0.035) # Tune this -DAG_AWARENESS=(1) # False True -TASK_CPU_DIVISOR=25 - -ERDOS_SIMULATOR_DIR="." # Change this to the directory where the simulator is located. -MIN_DEADLINE_VARIANCE=10 -NUM_INVOCATIONS=150 -SCHEDULER_LOG_TIMES=10 -SCHEDULER_RUNTIME=0 -LOG_LEVEL=info -REPLAY_TRACE=alibaba -WORKLOAD_PROFILE_PATH=./traces/alibaba-cluster-trace-v2018/alibaba_set_0_6600_dags.pkl -EXECUTION_MODE=replay -WORKER_CONFIG=alibaba_cluster - -PARALLEL_FACTOR=2 -# Move to the simulator directory. -if [[ -z ${ERDOS_SIMULATOR_DIR} ]]; then - echo "[x] ERRROR: ERDOS_SIMULATOR_DIR is not set" - exit 1 -fi -cd ${ERDOS_SIMULATOR_DIR} - -LOG_DIR=$1 -if [[ -z ${LOG_DIR} ]]; then - echo "[x] ERROR: Please provide a directory to output results to as the first argument." - exit 2 -fi - -execute_experiment () { - LOG_DIR=$1 - LOG_BASE=$2 - echo "[x] Initiating the execution of ${LOG_BASE}" - if [ ! 
-f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then -# --log_dir=${LOG_DIR}/${LOG_BASE} -# --scheduler_log_to_file - MYCONF="\ ---log_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.log ---csv_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv ---log_level=${LOG_LEVEL} ---execution_mode=${EXECUTION_MODE} ---replay_trace=${REPLAY_TRACE} ---max_deadline_variance=${MAX_DEADLINE_VARIANCE} ---min_deadline_variance=${MIN_DEADLINE_VARIANCE} ---workload_profile_path=${WORKLOAD_PROFILE_PATH} ---override_num_invocations=${NUM_INVOCATIONS} ---randomize_start_time_max=100 ---worker_profile_path=profiles/workers/${WORKER_CONFIG}.yaml ---scheduler_runtime=${SCHEDULER_RUNTIME} ---override_release_policy=${RELEASE_POLICY} ---scheduler=${SCHEDULER} ---alibaba_loader_task_cpu_divisor=${TASK_CPU_DIVISOR} -" - if [[ ${RELEASE_POLICY} == fixed ]]; then - MYCONF+="--override_arrival_period=10 -" - else - MYCONF+="--override_poisson_arrival_rate=${POISSON_ARRIVAL_RATE} -" - if [[ ${RELEASE_POLICY} == gamma ]]; then - MYCONF+="--override_gamma_coefficient=${GAMMA_COEFFICIENT} -" - fi - fi - - if [[ ${DAG_AWARE} == 1 ]]; then - MYCONF+="--release_taskgraphs -" - fi - - if [[ ${SCHEDULER} != EDF ]]; then - MYCONF+=" ---enforce_deadlines ---retract_schedules ---drop_skipped_tasks ---scheduler_log_times=${SCHEDULER_LOG_TIMES} ---scheduler_time_discretization=${SCHEDULER_TIME_DISCRETIZATION} - " - fi - echo "${MYCONF}" > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf - if ! time python3 main.py --flagfile=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.output; then - echo "[x] Failed in the execution of ${LOG_BASE}. Exiting." - exit 3 - fi - else - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - fi - echo "[x] Finished execution of ${LOG_BASE}." -} - - -for POISSON_ARRIVAL_RATE in ${POISSON_ARRIVAL_RATES[@]}; do - for SCHEDULER in ${SCHEDULERS[@]}; do - for RELEASE_POLICY in ${RELEASE_POLICIES[@]}; do - for GAMMA_COEFFICIENT in ${GAMMA_COEFFICIENTS[@]}; do - for MAX_DEADLINE_VARIANCE in ${MAX_DEADLINE_VARIANCES[@]}; do - for SCHEDULER_TIME_DISCRETIZATION in ${SCHEDULER_TIME_DISCRETIZATIONS[@]}; do - for DAG_AWARE in ${DAG_AWARENESS[@]}; do - if [[ ${SCHEDULER} == EDF ]]; then - if [[ "${SCHEDULER_TIME_DISCRETIZATION}" -ne "${SCHEDULER_TIME_DISCRETIZATIONS[0]}" ]]; then - continue - fi - DAG_AWARE=0 - fi - - LOG_BASE=${REPLAY_TRACE}_scheduler_${SCHEDULER}_release_policy_${RELEASE_POLICY}_max_deadline_var_${MAX_DEADLINE_VARIANCE}_dag_aware_${DAG_AWARE}_poisson_arrival_rate_${POISSON_ARRIVAL_RATE}_gamma_coefficient_${GAMMA_COEFFICIENT} - if [[ ${SCHEDULER} == TetriSched ]]; then - LOG_BASE+="_scheduler_discretization_${SCHEDULER_TIME_DISCRETIZATION}" - fi - - if [ -f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - continue - fi - - mkdir -p ${LOG_DIR}/${LOG_BASE} - execute_experiment ${LOG_DIR} ${LOG_BASE} & - if [[ $(jobs -r -p | wc -l) -ge $PARALLEL_FACTOR ]]; then - echo "[x] Waiting for a job to terminate because $PARALLEL_FACTOR jobs are running." - wait -n - fi - done - done - done - done - done - done -done -wait -echo "[x] Finished executing all experiments." 
\ No newline at end of file diff --git a/scripts/run_alibaba_experiments_osdi_12_3_night_rivertam.sh b/scripts/run_alibaba_experiments_osdi_12_3_night_rivertam.sh deleted file mode 100755 index 4967e167..00000000 --- a/scripts/run_alibaba_experiments_osdi_12_3_night_rivertam.sh +++ /dev/null @@ -1,142 +0,0 @@ -#!/bin/bash -# $1 directory where to save the logs. - -# Scheduler runtimes in us.TetriSched -SCHEDULERS=(EDF TetriSched) -# MAX_DEADLINE_VARIANCES=(15 25 50 100 200) -# MAX_DEADLINE_VARIANCES=(200 400 800) -MAX_DEADLINE_VARIANCES=(25 50 100 200) # Keep deadline tight. Don't change this -SCHEDULER_TIME_DISCRETIZATIONS=(1) -GAMMA_COEFFICIENTS=(1 2 4) #cv2 don't change this -RELEASE_POLICIES=(gamma) -# POISSON_ARRIVAL_RATES=(0.2 0.5 1 2) -POISSON_ARRIVAL_RATES=(0.045 0.055) # Tune this -DAG_AWARENESS=(1) # False True -TASK_CPU_DIVISOR=25 - -ERDOS_SIMULATOR_DIR="." # Change this to the directory where the simulator is located. -MIN_DEADLINE_VARIANCE=10 -NUM_INVOCATIONS=150 -SCHEDULER_LOG_TIMES=10 -SCHEDULER_RUNTIME=0 -LOG_LEVEL=info -REPLAY_TRACE=alibaba -WORKLOAD_PROFILE_PATH=./traces/alibaba-cluster-trace-v2018/alibaba_set_0_6600_dags.pkl -EXECUTION_MODE=replay -WORKER_CONFIG=alibaba_cluster_20_slots - -PARALLEL_FACTOR=2 -# Move to the simulator directory. -if [[ -z ${ERDOS_SIMULATOR_DIR} ]]; then - echo "[x] ERRROR: ERDOS_SIMULATOR_DIR is not set" - exit 1 -fi -cd ${ERDOS_SIMULATOR_DIR} - -LOG_DIR=$1 -if [[ -z ${LOG_DIR} ]]; then - echo "[x] ERROR: Please provide a directory to output results to as the first argument." - exit 2 -fi - -execute_experiment () { - LOG_DIR=$1 - LOG_BASE=$2 - echo "[x] Initiating the execution of ${LOG_BASE}" - if [ ! -f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then -# --log_dir=${LOG_DIR}/${LOG_BASE} -# --scheduler_log_to_file - MYCONF="\ ---log_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.log ---csv_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv ---log_level=${LOG_LEVEL} ---execution_mode=${EXECUTION_MODE} ---replay_trace=${REPLAY_TRACE} ---max_deadline_variance=${MAX_DEADLINE_VARIANCE} ---min_deadline_variance=${MIN_DEADLINE_VARIANCE} ---workload_profile_path=${WORKLOAD_PROFILE_PATH} ---override_num_invocations=${NUM_INVOCATIONS} ---randomize_start_time_max=100 ---worker_profile_path=profiles/workers/${WORKER_CONFIG}.yaml ---scheduler_runtime=${SCHEDULER_RUNTIME} ---override_release_policy=${RELEASE_POLICY} ---scheduler=${SCHEDULER} ---alibaba_loader_task_cpu_divisor=${TASK_CPU_DIVISOR} -" - if [[ ${RELEASE_POLICY} == fixed ]]; then - MYCONF+="--override_arrival_period=10 -" - else - MYCONF+="--override_poisson_arrival_rate=${POISSON_ARRIVAL_RATE} -" - if [[ ${RELEASE_POLICY} == gamma ]]; then - MYCONF+="--override_gamma_coefficient=${GAMMA_COEFFICIENT} -" - fi - fi - - if [[ ${DAG_AWARE} == 1 ]]; then - MYCONF+="--release_taskgraphs -" - fi - - if [[ ${SCHEDULER} != EDF ]]; then - MYCONF+=" ---enforce_deadlines ---retract_schedules ---drop_skipped_tasks ---scheduler_log_times=${SCHEDULER_LOG_TIMES} ---scheduler_time_discretization=${SCHEDULER_TIME_DISCRETIZATION} - " - fi - echo "${MYCONF}" > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf - if ! time python3 main.py --flagfile=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.output; then - echo "[x] Failed in the execution of ${LOG_BASE}. Exiting." - exit 3 - fi - else - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - fi - echo "[x] Finished execution of ${LOG_BASE}." 
-} - - -for POISSON_ARRIVAL_RATE in ${POISSON_ARRIVAL_RATES[@]}; do - for SCHEDULER in ${SCHEDULERS[@]}; do - for RELEASE_POLICY in ${RELEASE_POLICIES[@]}; do - for GAMMA_COEFFICIENT in ${GAMMA_COEFFICIENTS[@]}; do - for MAX_DEADLINE_VARIANCE in ${MAX_DEADLINE_VARIANCES[@]}; do - for SCHEDULER_TIME_DISCRETIZATION in ${SCHEDULER_TIME_DISCRETIZATIONS[@]}; do - for DAG_AWARE in ${DAG_AWARENESS[@]}; do - if [[ ${SCHEDULER} == EDF ]]; then - if [[ "${SCHEDULER_TIME_DISCRETIZATION}" -ne "${SCHEDULER_TIME_DISCRETIZATIONS[0]}" ]]; then - continue - fi - DAG_AWARE=0 - fi - - LOG_BASE=${REPLAY_TRACE}_scheduler_${SCHEDULER}_release_policy_${RELEASE_POLICY}_max_deadline_var_${MAX_DEADLINE_VARIANCE}_dag_aware_${DAG_AWARE}_poisson_arrival_rate_${POISSON_ARRIVAL_RATE}_gamma_coefficient_${GAMMA_COEFFICIENT} - if [[ ${SCHEDULER} == TetriSched ]]; then - LOG_BASE+="_scheduler_discretization_${SCHEDULER_TIME_DISCRETIZATION}" - fi - - if [ -f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - continue - fi - - mkdir -p ${LOG_DIR}/${LOG_BASE} - execute_experiment ${LOG_DIR} ${LOG_BASE} & - if [[ $(jobs -r -p | wc -l) -ge $PARALLEL_FACTOR ]]; then - echo "[x] Waiting for a job to terminate because $PARALLEL_FACTOR jobs are running." - wait -n - fi - done - done - done - done - done - done -done -wait -echo "[x] Finished executing all experiments." \ No newline at end of file diff --git a/scripts/run_alibaba_experiments_osdi_12_3_night_simontam.sh b/scripts/run_alibaba_experiments_osdi_12_3_night_simontam.sh deleted file mode 100755 index 747a499b..00000000 --- a/scripts/run_alibaba_experiments_osdi_12_3_night_simontam.sh +++ /dev/null @@ -1,142 +0,0 @@ -#!/bin/bash -# $1 directory where to save the logs. - -# Scheduler runtimes in us.TetriSched -SCHEDULERS=(EDF TetriSched) -# MAX_DEADLINE_VARIANCES=(15 25 50 100 200) -# MAX_DEADLINE_VARIANCES=(200 400 800) -MAX_DEADLINE_VARIANCES=(25 50 100 200) # Keep deadline tight. Don't change this -SCHEDULER_TIME_DISCRETIZATIONS=(1) -GAMMA_COEFFICIENTS=(1 2 4) #cv2 don't change this -RELEASE_POLICIES=(gamma) -# POISSON_ARRIVAL_RATES=(0.2 0.5 1 2) -POISSON_ARRIVAL_RATES=(0.04 0.05) # Tune this -DAG_AWARENESS=(1) # False True -TASK_CPU_DIVISOR=25 - -ERDOS_SIMULATOR_DIR="." # Change this to the directory where the simulator is located. -MIN_DEADLINE_VARIANCE=10 -NUM_INVOCATIONS=150 -SCHEDULER_LOG_TIMES=10 -SCHEDULER_RUNTIME=0 -LOG_LEVEL=info -REPLAY_TRACE=alibaba -WORKLOAD_PROFILE_PATH=./traces/alibaba-cluster-trace-v2018/alibaba_set_0_6600_dags.pkl -EXECUTION_MODE=replay -WORKER_CONFIG=alibaba_cluster_20_slots - -PARALLEL_FACTOR=2 -# Move to the simulator directory. -if [[ -z ${ERDOS_SIMULATOR_DIR} ]]; then - echo "[x] ERRROR: ERDOS_SIMULATOR_DIR is not set" - exit 1 -fi -cd ${ERDOS_SIMULATOR_DIR} - -LOG_DIR=$1 -if [[ -z ${LOG_DIR} ]]; then - echo "[x] ERROR: Please provide a directory to output results to as the first argument." - exit 2 -fi - -execute_experiment () { - LOG_DIR=$1 - LOG_BASE=$2 - echo "[x] Initiating the execution of ${LOG_BASE}" - if [ ! 
-f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then -# --log_dir=${LOG_DIR}/${LOG_BASE} -# --scheduler_log_to_file - MYCONF="\ ---log_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.log ---csv_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv ---log_level=${LOG_LEVEL} ---execution_mode=${EXECUTION_MODE} ---replay_trace=${REPLAY_TRACE} ---max_deadline_variance=${MAX_DEADLINE_VARIANCE} ---min_deadline_variance=${MIN_DEADLINE_VARIANCE} ---workload_profile_path=${WORKLOAD_PROFILE_PATH} ---override_num_invocations=${NUM_INVOCATIONS} ---randomize_start_time_max=100 ---worker_profile_path=profiles/workers/${WORKER_CONFIG}.yaml ---scheduler_runtime=${SCHEDULER_RUNTIME} ---override_release_policy=${RELEASE_POLICY} ---scheduler=${SCHEDULER} ---alibaba_loader_task_cpu_divisor=${TASK_CPU_DIVISOR} -" - if [[ ${RELEASE_POLICY} == fixed ]]; then - MYCONF+="--override_arrival_period=10 -" - else - MYCONF+="--override_poisson_arrival_rate=${POISSON_ARRIVAL_RATE} -" - if [[ ${RELEASE_POLICY} == gamma ]]; then - MYCONF+="--override_gamma_coefficient=${GAMMA_COEFFICIENT} -" - fi - fi - - if [[ ${DAG_AWARE} == 1 ]]; then - MYCONF+="--release_taskgraphs -" - fi - - if [[ ${SCHEDULER} != EDF ]]; then - MYCONF+=" ---enforce_deadlines ---retract_schedules ---drop_skipped_tasks ---scheduler_log_times=${SCHEDULER_LOG_TIMES} ---scheduler_time_discretization=${SCHEDULER_TIME_DISCRETIZATION} - " - fi - echo "${MYCONF}" > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf - if ! time python3 main.py --flagfile=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.output; then - echo "[x] Failed in the execution of ${LOG_BASE}. Exiting." - exit 3 - fi - else - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - fi - echo "[x] Finished execution of ${LOG_BASE}." -} - - -for POISSON_ARRIVAL_RATE in ${POISSON_ARRIVAL_RATES[@]}; do - for SCHEDULER in ${SCHEDULERS[@]}; do - for RELEASE_POLICY in ${RELEASE_POLICIES[@]}; do - for GAMMA_COEFFICIENT in ${GAMMA_COEFFICIENTS[@]}; do - for MAX_DEADLINE_VARIANCE in ${MAX_DEADLINE_VARIANCES[@]}; do - for SCHEDULER_TIME_DISCRETIZATION in ${SCHEDULER_TIME_DISCRETIZATIONS[@]}; do - for DAG_AWARE in ${DAG_AWARENESS[@]}; do - if [[ ${SCHEDULER} == EDF ]]; then - if [[ "${SCHEDULER_TIME_DISCRETIZATION}" -ne "${SCHEDULER_TIME_DISCRETIZATIONS[0]}" ]]; then - continue - fi - DAG_AWARE=0 - fi - - LOG_BASE=${REPLAY_TRACE}_scheduler_${SCHEDULER}_release_policy_${RELEASE_POLICY}_max_deadline_var_${MAX_DEADLINE_VARIANCE}_dag_aware_${DAG_AWARE}_poisson_arrival_rate_${POISSON_ARRIVAL_RATE}_gamma_coefficient_${GAMMA_COEFFICIENT} - if [[ ${SCHEDULER} == TetriSched ]]; then - LOG_BASE+="_scheduler_discretization_${SCHEDULER_TIME_DISCRETIZATION}" - fi - - if [ -f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - continue - fi - - mkdir -p ${LOG_DIR}/${LOG_BASE} - execute_experiment ${LOG_DIR} ${LOG_BASE} & - if [[ $(jobs -r -p | wc -l) -ge $PARALLEL_FACTOR ]]; then - echo "[x] Waiting for a job to terminate because $PARALLEL_FACTOR jobs are running." - wait -n - fi - done - done - done - done - done - done -done -wait -echo "[x] Finished executing all experiments." \ No newline at end of file diff --git a/scripts/run_alibaba_experiments_osdi_12_3_night_sysml.sh b/scripts/run_alibaba_experiments_osdi_12_3_night_sysml.sh deleted file mode 100755 index 02e58c78..00000000 --- a/scripts/run_alibaba_experiments_osdi_12_3_night_sysml.sh +++ /dev/null @@ -1,142 +0,0 @@ -#!/bin/bash -# $1 directory where to save the logs. 
- -# Scheduler runtimes in us.TetriSched -SCHEDULERS=(EDF TetriSched) -# MAX_DEADLINE_VARIANCES=(15 25 50 100 200) -# MAX_DEADLINE_VARIANCES=(200 400 800) -MAX_DEADLINE_VARIANCES=(25 50 100 200) # Keep deadline tight. Don't change this -SCHEDULER_TIME_DISCRETIZATIONS=(1) -GAMMA_COEFFICIENTS=(1 2 4) #cv2 don't change this -RELEASE_POLICIES=(gamma) -# POISSON_ARRIVAL_RATES=(0.2 0.5 1 2) -POISSON_ARRIVAL_RATES=(0.03 0.02) # Tune this -DAG_AWARENESS=(1) # False True -TASK_CPU_DIVISOR=25 - -ERDOS_SIMULATOR_DIR="." # Change this to the directory where the simulator is located. -MIN_DEADLINE_VARIANCE=10 -NUM_INVOCATIONS=150 -SCHEDULER_LOG_TIMES=10 -SCHEDULER_RUNTIME=0 -LOG_LEVEL=info -REPLAY_TRACE=alibaba -WORKLOAD_PROFILE_PATH=./traces/alibaba-cluster-trace-v2018/alibaba_set_0_6600_dags.pkl -EXECUTION_MODE=replay -WORKER_CONFIG=alibaba_cluster - -PARALLEL_FACTOR=2 -# Move to the simulator directory. -if [[ -z ${ERDOS_SIMULATOR_DIR} ]]; then - echo "[x] ERRROR: ERDOS_SIMULATOR_DIR is not set" - exit 1 -fi -cd ${ERDOS_SIMULATOR_DIR} - -LOG_DIR=$1 -if [[ -z ${LOG_DIR} ]]; then - echo "[x] ERROR: Please provide a directory to output results to as the first argument." - exit 2 -fi - -execute_experiment () { - LOG_DIR=$1 - LOG_BASE=$2 - echo "[x] Initiating the execution of ${LOG_BASE}" - if [ ! -f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then -# --log_dir=${LOG_DIR}/${LOG_BASE} -# --scheduler_log_to_file - MYCONF="\ ---log_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.log ---csv_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv ---log_level=${LOG_LEVEL} ---execution_mode=${EXECUTION_MODE} ---replay_trace=${REPLAY_TRACE} ---max_deadline_variance=${MAX_DEADLINE_VARIANCE} ---min_deadline_variance=${MIN_DEADLINE_VARIANCE} ---workload_profile_path=${WORKLOAD_PROFILE_PATH} ---override_num_invocations=${NUM_INVOCATIONS} ---randomize_start_time_max=100 ---worker_profile_path=profiles/workers/${WORKER_CONFIG}.yaml ---scheduler_runtime=${SCHEDULER_RUNTIME} ---override_release_policy=${RELEASE_POLICY} ---scheduler=${SCHEDULER} ---alibaba_loader_task_cpu_divisor=${TASK_CPU_DIVISOR} -" - if [[ ${RELEASE_POLICY} == fixed ]]; then - MYCONF+="--override_arrival_period=10 -" - else - MYCONF+="--override_poisson_arrival_rate=${POISSON_ARRIVAL_RATE} -" - if [[ ${RELEASE_POLICY} == gamma ]]; then - MYCONF+="--override_gamma_coefficient=${GAMMA_COEFFICIENT} -" - fi - fi - - if [[ ${DAG_AWARE} == 1 ]]; then - MYCONF+="--release_taskgraphs -" - fi - - if [[ ${SCHEDULER} != EDF ]]; then - MYCONF+=" ---enforce_deadlines ---retract_schedules ---drop_skipped_tasks ---scheduler_log_times=${SCHEDULER_LOG_TIMES} ---scheduler_time_discretization=${SCHEDULER_TIME_DISCRETIZATION} - " - fi - echo "${MYCONF}" > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf - if ! time python3 main.py --flagfile=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.output; then - echo "[x] Failed in the execution of ${LOG_BASE}. Exiting." - exit 3 - fi - else - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - fi - echo "[x] Finished execution of ${LOG_BASE}." 
-} - - -for POISSON_ARRIVAL_RATE in ${POISSON_ARRIVAL_RATES[@]}; do - for SCHEDULER in ${SCHEDULERS[@]}; do - for RELEASE_POLICY in ${RELEASE_POLICIES[@]}; do - for GAMMA_COEFFICIENT in ${GAMMA_COEFFICIENTS[@]}; do - for MAX_DEADLINE_VARIANCE in ${MAX_DEADLINE_VARIANCES[@]}; do - for SCHEDULER_TIME_DISCRETIZATION in ${SCHEDULER_TIME_DISCRETIZATIONS[@]}; do - for DAG_AWARE in ${DAG_AWARENESS[@]}; do - if [[ ${SCHEDULER} == EDF ]]; then - if [[ "${SCHEDULER_TIME_DISCRETIZATION}" -ne "${SCHEDULER_TIME_DISCRETIZATIONS[0]}" ]]; then - continue - fi - DAG_AWARE=0 - fi - - LOG_BASE=${REPLAY_TRACE}_scheduler_${SCHEDULER}_release_policy_${RELEASE_POLICY}_max_deadline_var_${MAX_DEADLINE_VARIANCE}_dag_aware_${DAG_AWARE}_poisson_arrival_rate_${POISSON_ARRIVAL_RATE}_gamma_coefficient_${GAMMA_COEFFICIENT} - if [[ ${SCHEDULER} == TetriSched ]]; then - LOG_BASE+="_scheduler_discretization_${SCHEDULER_TIME_DISCRETIZATION}" - fi - - if [ -f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - continue - fi - - mkdir -p ${LOG_DIR}/${LOG_BASE} - execute_experiment ${LOG_DIR} ${LOG_BASE} & - if [[ $(jobs -r -p | wc -l) -ge $PARALLEL_FACTOR ]]; then - echo "[x] Waiting for a job to terminate because $PARALLEL_FACTOR jobs are running." - wait -n - fi - done - done - done - done - done - done -done -wait -echo "[x] Finished executing all experiments." \ No newline at end of file diff --git a/scripts/run_alibaba_experiments_osdi_12_4_evening_sysml_test_optimization_passes.sh b/scripts/run_alibaba_experiments_osdi_12_4_evening_sysml_test_optimization_passes.sh deleted file mode 100755 index 35928272..00000000 --- a/scripts/run_alibaba_experiments_osdi_12_4_evening_sysml_test_optimization_passes.sh +++ /dev/null @@ -1,148 +0,0 @@ -#!/bin/bash -# $1 directory where to save the logs. - -# Scheduler runtimes in us.TetriSched -SCHEDULERS=(TetriSched) -# MAX_DEADLINE_VARIANCES=(15 25 50 100 200) -# MAX_DEADLINE_VARIANCES=(200 400 800) -MAX_DEADLINE_VARIANCES=(200) # Keep deadline tight. Don't change this -SCHEDULER_TIME_DISCRETIZATIONS=(1) -GAMMA_COEFFICIENTS=(1 2 4) #cv2 don't change this -RELEASE_POLICIES=(gamma) -# POISSON_ARRIVAL_RATES=(0.2 0.5 1 2) -POISSON_ARRIVAL_RATES=(0.03 0.02) # Tune this -DAG_AWARENESS=(1) # False True -TASK_CPU_DIVISOR=25 -OPTIMIZATION_PASS=1 - -ERDOS_SIMULATOR_DIR="." # Change this to the directory where the simulator is located. -MIN_DEADLINE_VARIANCE=10 -NUM_INVOCATIONS=150 -SCHEDULER_LOG_TIMES=10 -SCHEDULER_RUNTIME=0 -LOG_LEVEL=info -REPLAY_TRACE=alibaba -WORKLOAD_PROFILE_PATH=./traces/alibaba-cluster-trace-v2018/alibaba_set_0_6600_dags.pkl -EXECUTION_MODE=replay -WORKER_CONFIG=alibaba_cluster - -PARALLEL_FACTOR=2 -# Move to the simulator directory. -if [[ -z ${ERDOS_SIMULATOR_DIR} ]]; then - echo "[x] ERRROR: ERDOS_SIMULATOR_DIR is not set" - exit 1 -fi -cd ${ERDOS_SIMULATOR_DIR} - -LOG_DIR=$1 -if [[ -z ${LOG_DIR} ]]; then - echo "[x] ERROR: Please provide a directory to output results to as the first argument." - exit 2 -fi - -execute_experiment () { - LOG_DIR=$1 - LOG_BASE=$2 - echo "[x] Initiating the execution of ${LOG_BASE}" - if [ ! 
-f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then -# --log_dir=${LOG_DIR}/${LOG_BASE} -# --scheduler_log_to_file - MYCONF="\ ---log_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.log ---csv_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv ---log_level=${LOG_LEVEL} ---execution_mode=${EXECUTION_MODE} ---replay_trace=${REPLAY_TRACE} ---max_deadline_variance=${MAX_DEADLINE_VARIANCE} ---min_deadline_variance=${MIN_DEADLINE_VARIANCE} ---workload_profile_path=${WORKLOAD_PROFILE_PATH} ---override_num_invocations=${NUM_INVOCATIONS} ---randomize_start_time_max=100 ---worker_profile_path=profiles/workers/${WORKER_CONFIG}.yaml ---scheduler_runtime=${SCHEDULER_RUNTIME} ---override_release_policy=${RELEASE_POLICY} ---scheduler=${SCHEDULER} ---alibaba_loader_task_cpu_divisor=${TASK_CPU_DIVISOR} -" - if [[ ${RELEASE_POLICY} == fixed ]]; then - MYCONF+="--override_arrival_period=10 -" - else - MYCONF+="--override_poisson_arrival_rate=${POISSON_ARRIVAL_RATE} -" - if [[ ${RELEASE_POLICY} == gamma ]]; then - MYCONF+="--override_gamma_coefficient=${GAMMA_COEFFICIENT} -" - fi - fi - - if [[ ${DAG_AWARE} == 1 ]]; then - MYCONF+="--release_taskgraphs -" - fi - - if [[ ${OPTIMIZATION_PASS} == 1 ]]; then - MYCONF+="--scheduler_enable_optimization_pass -" - fi - - if [[ ${SCHEDULER} != EDF ]]; then - MYCONF+=" ---enforce_deadlines ---retract_schedules ---drop_skipped_tasks ---scheduler_log_times=${SCHEDULER_LOG_TIMES} ---scheduler_time_discretization=${SCHEDULER_TIME_DISCRETIZATION} - " - fi - echo "${MYCONF}" > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf - if ! time python3 main.py --flagfile=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.output; then - echo "[x] Failed in the execution of ${LOG_BASE}. Exiting." - exit 3 - fi - else - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - fi - echo "[x] Finished execution of ${LOG_BASE}." -} - - -for POISSON_ARRIVAL_RATE in ${POISSON_ARRIVAL_RATES[@]}; do - for SCHEDULER in ${SCHEDULERS[@]}; do - for RELEASE_POLICY in ${RELEASE_POLICIES[@]}; do - for GAMMA_COEFFICIENT in ${GAMMA_COEFFICIENTS[@]}; do - for MAX_DEADLINE_VARIANCE in ${MAX_DEADLINE_VARIANCES[@]}; do - for SCHEDULER_TIME_DISCRETIZATION in ${SCHEDULER_TIME_DISCRETIZATIONS[@]}; do - for DAG_AWARE in ${DAG_AWARENESS[@]}; do - if [[ ${SCHEDULER} == EDF ]]; then - if [[ "${SCHEDULER_TIME_DISCRETIZATION}" -ne "${SCHEDULER_TIME_DISCRETIZATIONS[0]}" ]]; then - continue - fi - DAG_AWARE=0 - fi - - LOG_BASE=${REPLAY_TRACE}_scheduler_${SCHEDULER}_release_policy_${RELEASE_POLICY}_max_deadline_var_${MAX_DEADLINE_VARIANCE}_dag_aware_${DAG_AWARE}_poisson_arrival_rate_${POISSON_ARRIVAL_RATE}_gamma_coefficient_${GAMMA_COEFFICIENT} - if [[ ${SCHEDULER} == TetriSched ]]; then - LOG_BASE+="_scheduler_discretization_${SCHEDULER_TIME_DISCRETIZATION}" - fi - - if [ -f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - continue - fi - - mkdir -p ${LOG_DIR}/${LOG_BASE} - execute_experiment ${LOG_DIR} ${LOG_BASE} & - if [[ $(jobs -r -p | wc -l) -ge $PARALLEL_FACTOR ]]; then - echo "[x] Waiting for a job to terminate because $PARALLEL_FACTOR jobs are running." - wait -n - fi - done - done - done - done - done - done -done -wait -echo "[x] Finished executing all experiments." 
\ No newline at end of file diff --git a/scripts/run_alibaba_experiments_osdi_12_4_evening_sysml_test_optimization_passes_2.sh b/scripts/run_alibaba_experiments_osdi_12_4_evening_sysml_test_optimization_passes_2.sh deleted file mode 100755 index 0b94c46c..00000000 --- a/scripts/run_alibaba_experiments_osdi_12_4_evening_sysml_test_optimization_passes_2.sh +++ /dev/null @@ -1,149 +0,0 @@ -#!/bin/bash -# $1 directory where to save the logs. - -# Scheduler runtimes in us.TetriSched -SCHEDULERS=(TetriSched) -# MAX_DEADLINE_VARIANCES=(15 25 50 100 200) -# MAX_DEADLINE_VARIANCES=(200 400 800) -MAX_DEADLINE_VARIANCES=(25 50 100 200) # Keep deadline tight. Don't change this -SCHEDULER_TIME_DISCRETIZATIONS=(20 10) -GAMMA_COEFFICIENTS=(1 2 4) #cv2 don't change this -RELEASE_POLICIES=(gamma) -# POISSON_ARRIVAL_RATES=(0.2 0.5 1 2) -POISSON_ARRIVAL_RATES=(0.03 0.02) # Tune this -DAG_AWARENESS=(1) # False True -TASK_CPU_DIVISOR=25 -OPTIMIZATION_PASS=1 - -ERDOS_SIMULATOR_DIR="." # Change this to the directory where the simulator is located. -MIN_DEADLINE_VARIANCE=10 -NUM_INVOCATIONS=150 -SCHEDULER_LOG_TIMES=10 -SCHEDULER_RUNTIME=0 -LOG_LEVEL=debug -REPLAY_TRACE=alibaba -WORKLOAD_PROFILE_PATH=./traces/alibaba-cluster-trace-v2018/alibaba_set_0_6600_dags.pkl -EXECUTION_MODE=replay -WORKER_CONFIG=alibaba_cluster - -PARALLEL_FACTOR=2 -# Move to the simulator directory. -if [[ -z ${ERDOS_SIMULATOR_DIR} ]]; then - echo "[x] ERRROR: ERDOS_SIMULATOR_DIR is not set" - exit 1 -fi -cd ${ERDOS_SIMULATOR_DIR} - -LOG_DIR=$1 -if [[ -z ${LOG_DIR} ]]; then - echo "[x] ERROR: Please provide a directory to output results to as the first argument." - exit 2 -fi - -execute_experiment () { - LOG_DIR=$1 - LOG_BASE=$2 - echo "[x] Initiating the execution of ${LOG_BASE}" - if [ ! -f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then - - MYCONF="\ ---log_dir=${LOG_DIR}/${LOG_BASE} ---scheduler_log_to_file ---log_file_name=${LOG_BASE}.log ---csv_file_name=${LOG_BASE}.csv ---log_level=${LOG_LEVEL} ---execution_mode=${EXECUTION_MODE} ---replay_trace=${REPLAY_TRACE} ---max_deadline_variance=${MAX_DEADLINE_VARIANCE} ---min_deadline_variance=${MIN_DEADLINE_VARIANCE} ---workload_profile_path=${WORKLOAD_PROFILE_PATH} ---override_num_invocations=${NUM_INVOCATIONS} ---randomize_start_time_max=100 ---worker_profile_path=profiles/workers/${WORKER_CONFIG}.yaml ---scheduler_runtime=${SCHEDULER_RUNTIME} ---override_release_policy=${RELEASE_POLICY} ---scheduler=${SCHEDULER} ---alibaba_loader_task_cpu_divisor=${TASK_CPU_DIVISOR} -" - if [[ ${RELEASE_POLICY} == fixed ]]; then - MYCONF+="--override_arrival_period=10 -" - else - MYCONF+="--override_poisson_arrival_rate=${POISSON_ARRIVAL_RATE} -" - if [[ ${RELEASE_POLICY} == gamma ]]; then - MYCONF+="--override_gamma_coefficient=${GAMMA_COEFFICIENT} -" - fi - fi - - if [[ ${DAG_AWARE} == 1 ]]; then - MYCONF+="--release_taskgraphs -" - fi - - if [[ ${OPTIMIZATION_PASS} == 1 ]]; then - MYCONF+="--scheduler_enable_optimization_pass -" - fi - - if [[ ${SCHEDULER} != EDF ]]; then - MYCONF+=" ---enforce_deadlines ---retract_schedules ---drop_skipped_tasks ---scheduler_log_times=${SCHEDULER_LOG_TIMES} ---scheduler_time_discretization=${SCHEDULER_TIME_DISCRETIZATION} - " - fi - echo "${MYCONF}" > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf - if ! time python3 main.py --flagfile=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.output; then - echo "[x] Failed in the execution of ${LOG_BASE}. Exiting." - exit 3 - fi - else - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." 
- fi - echo "[x] Finished execution of ${LOG_BASE}." -} - - -for POISSON_ARRIVAL_RATE in ${POISSON_ARRIVAL_RATES[@]}; do - for SCHEDULER in ${SCHEDULERS[@]}; do - for RELEASE_POLICY in ${RELEASE_POLICIES[@]}; do - for GAMMA_COEFFICIENT in ${GAMMA_COEFFICIENTS[@]}; do - for MAX_DEADLINE_VARIANCE in ${MAX_DEADLINE_VARIANCES[@]}; do - for SCHEDULER_TIME_DISCRETIZATION in ${SCHEDULER_TIME_DISCRETIZATIONS[@]}; do - for DAG_AWARE in ${DAG_AWARENESS[@]}; do - if [[ ${SCHEDULER} == EDF ]]; then - if [[ "${SCHEDULER_TIME_DISCRETIZATION}" -ne "${SCHEDULER_TIME_DISCRETIZATIONS[0]}" ]]; then - continue - fi - DAG_AWARE=0 - fi - - LOG_BASE=${REPLAY_TRACE}_scheduler_${SCHEDULER}_release_policy_${RELEASE_POLICY}_max_deadline_var_${MAX_DEADLINE_VARIANCE}_dag_aware_${DAG_AWARE}_poisson_arrival_rate_${POISSON_ARRIVAL_RATE}_gamma_coefficient_${GAMMA_COEFFICIENT} - if [[ ${SCHEDULER} == TetriSched ]]; then - LOG_BASE+="_scheduler_discretization_${SCHEDULER_TIME_DISCRETIZATION}" - fi - - if [ -f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - continue - fi - - mkdir -p ${LOG_DIR}/${LOG_BASE} - execute_experiment ${LOG_DIR} ${LOG_BASE} & - if [[ $(jobs -r -p | wc -l) -ge $PARALLEL_FACTOR ]]; then - echo "[x] Waiting for a job to terminate because $PARALLEL_FACTOR jobs are running." - wait -n - fi - done - done - done - done - done - done -done -wait -echo "[x] Finished executing all experiments." \ No newline at end of file diff --git a/scripts/run_alibaba_experiments_osdi_12_4_evening_sysml_test_optimization_passes_2_no_optimization.sh b/scripts/run_alibaba_experiments_osdi_12_4_evening_sysml_test_optimization_passes_2_no_optimization.sh deleted file mode 100755 index 52c107b3..00000000 --- a/scripts/run_alibaba_experiments_osdi_12_4_evening_sysml_test_optimization_passes_2_no_optimization.sh +++ /dev/null @@ -1,148 +0,0 @@ -#!/bin/bash -# $1 directory where to save the logs. - -# Scheduler runtimes in us.TetriSched -SCHEDULERS=(TetriSched) -# MAX_DEADLINE_VARIANCES=(15 25 50 100 200) -# MAX_DEADLINE_VARIANCES=(200 400 800) -MAX_DEADLINE_VARIANCES=(25 50 100 200) # Keep deadline tight. Don't change this -SCHEDULER_TIME_DISCRETIZATIONS=(20 10) -GAMMA_COEFFICIENTS=(1 2 4) #cv2 don't change this -RELEASE_POLICIES=(gamma) -# POISSON_ARRIVAL_RATES=(0.2 0.5 1 2) -POISSON_ARRIVAL_RATES=(0.03 0.02) # Tune this -DAG_AWARENESS=(1) # False True -TASK_CPU_DIVISOR=25 - -ERDOS_SIMULATOR_DIR="." # Change this to the directory where the simulator is located. -MIN_DEADLINE_VARIANCE=10 -NUM_INVOCATIONS=150 -SCHEDULER_LOG_TIMES=10 -SCHEDULER_RUNTIME=0 -LOG_LEVEL=debug -REPLAY_TRACE=alibaba -WORKLOAD_PROFILE_PATH=./traces/alibaba-cluster-trace-v2018/alibaba_set_0_6600_dags.pkl -EXECUTION_MODE=replay -WORKER_CONFIG=alibaba_cluster - -PARALLEL_FACTOR=2 -# Move to the simulator directory. -if [[ -z ${ERDOS_SIMULATOR_DIR} ]]; then - echo "[x] ERRROR: ERDOS_SIMULATOR_DIR is not set" - exit 1 -fi -cd ${ERDOS_SIMULATOR_DIR} - -LOG_DIR=$1 -if [[ -z ${LOG_DIR} ]]; then - echo "[x] ERROR: Please provide a directory to output results to as the first argument." - exit 2 -fi - -execute_experiment () { - LOG_DIR=$1 - LOG_BASE=$2 - echo "[x] Initiating the execution of ${LOG_BASE}" - if [ ! 
-f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then - - MYCONF="\ ---log_dir=${LOG_DIR}/${LOG_BASE} ---scheduler_log_to_file ---log_file_name=${LOG_BASE}.log ---csv_file_name=${LOG_BASE}.csv ---log_level=${LOG_LEVEL} ---execution_mode=${EXECUTION_MODE} ---replay_trace=${REPLAY_TRACE} ---max_deadline_variance=${MAX_DEADLINE_VARIANCE} ---min_deadline_variance=${MIN_DEADLINE_VARIANCE} ---workload_profile_path=${WORKLOAD_PROFILE_PATH} ---override_num_invocations=${NUM_INVOCATIONS} ---randomize_start_time_max=100 ---worker_profile_path=profiles/workers/${WORKER_CONFIG}.yaml ---scheduler_runtime=${SCHEDULER_RUNTIME} ---override_release_policy=${RELEASE_POLICY} ---scheduler=${SCHEDULER} ---alibaba_loader_task_cpu_divisor=${TASK_CPU_DIVISOR} -" - if [[ ${RELEASE_POLICY} == fixed ]]; then - MYCONF+="--override_arrival_period=10 -" - else - MYCONF+="--override_poisson_arrival_rate=${POISSON_ARRIVAL_RATE} -" - if [[ ${RELEASE_POLICY} == gamma ]]; then - MYCONF+="--override_gamma_coefficient=${GAMMA_COEFFICIENT} -" - fi - fi - - if [[ ${DAG_AWARE} == 1 ]]; then - MYCONF+="--release_taskgraphs -" - fi - - if [[ ${OPTIMIZATION_PASS} == 1 ]]; then - MYCONF+="--scheduler_enable_optimization_pass -" - fi - - if [[ ${SCHEDULER} != EDF ]]; then - MYCONF+=" ---enforce_deadlines ---retract_schedules ---drop_skipped_tasks ---scheduler_log_times=${SCHEDULER_LOG_TIMES} ---scheduler_time_discretization=${SCHEDULER_TIME_DISCRETIZATION} - " - fi - echo "${MYCONF}" > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf - if ! time python3 main.py --flagfile=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.output; then - echo "[x] Failed in the execution of ${LOG_BASE}. Exiting." - exit 3 - fi - else - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - fi - echo "[x] Finished execution of ${LOG_BASE}." -} - - -for POISSON_ARRIVAL_RATE in ${POISSON_ARRIVAL_RATES[@]}; do - for SCHEDULER in ${SCHEDULERS[@]}; do - for RELEASE_POLICY in ${RELEASE_POLICIES[@]}; do - for GAMMA_COEFFICIENT in ${GAMMA_COEFFICIENTS[@]}; do - for MAX_DEADLINE_VARIANCE in ${MAX_DEADLINE_VARIANCES[@]}; do - for SCHEDULER_TIME_DISCRETIZATION in ${SCHEDULER_TIME_DISCRETIZATIONS[@]}; do - for DAG_AWARE in ${DAG_AWARENESS[@]}; do - if [[ ${SCHEDULER} == EDF ]]; then - if [[ "${SCHEDULER_TIME_DISCRETIZATION}" -ne "${SCHEDULER_TIME_DISCRETIZATIONS[0]}" ]]; then - continue - fi - DAG_AWARE=0 - fi - - LOG_BASE=${REPLAY_TRACE}_scheduler_${SCHEDULER}_release_policy_${RELEASE_POLICY}_max_deadline_var_${MAX_DEADLINE_VARIANCE}_dag_aware_${DAG_AWARE}_poisson_arrival_rate_${POISSON_ARRIVAL_RATE}_gamma_coefficient_${GAMMA_COEFFICIENT} - if [[ ${SCHEDULER} == TetriSched ]]; then - LOG_BASE+="_scheduler_discretization_${SCHEDULER_TIME_DISCRETIZATION}" - fi - - if [ -f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - continue - fi - - mkdir -p ${LOG_DIR}/${LOG_BASE} - execute_experiment ${LOG_DIR} ${LOG_BASE} & - if [[ $(jobs -r -p | wc -l) -ge $PARALLEL_FACTOR ]]; then - echo "[x] Waiting for a job to terminate because $PARALLEL_FACTOR jobs are running." - wait -n - fi - done - done - done - done - done - done -done -wait -echo "[x] Finished executing all experiments." 
\ No newline at end of file diff --git a/scripts/run_alibaba_experiments_osdi_12_4_night_alind_homo_dynamic_discretization.sh b/scripts/run_alibaba_experiments_osdi_12_4_night_alind_homo_dynamic_discretization.sh deleted file mode 100755 index 7d7b83cb..00000000 --- a/scripts/run_alibaba_experiments_osdi_12_4_night_alind_homo_dynamic_discretization.sh +++ /dev/null @@ -1,168 +0,0 @@ -#!/bin/bash -# $1 directory where to save the logs. - -# Scheduler runtimes in us.TetriSched -SCHEDULERS=(EDF TetriSched) -# MAX_DEADLINE_VARIANCES=(15 25 50 100 200) -# MAX_DEADLINE_VARIANCES=(200 400 800) -MAX_DEADLINE_VARIANCES=(25 200 50 100) # Keep deadline tight. Don't change this -SCHEDULER_TIME_DISCRETIZATIONS=(1) -GAMMA_COEFFICIENTS=(1 4 2) #cv2 don't change this -RELEASE_POLICIES=(gamma) -# POISSON_ARRIVAL_RATES=(0.2 0.5 1 2) -POISSON_ARRIVAL_RATES=(0.055 0.05) # Tune this -DAG_AWARENESS=(1) # False True -TASK_CPU_DIVISOR=25 - -WORKER_CONFIG=alibaba_cluster_20_slots - -DYNAMIC_DISCRETIZATION=1 -MAX_OCCUPANCY_THRESHOLD=0.7 -MAX_TIME_DISCRETIZATION=5 - -ERDOS_SIMULATOR_DIR="." # Change this to the directory where the simulator is located. -MIN_DEADLINE_VARIANCE=10 -NUM_INVOCATIONS=500 -SCHEDULER_LOG_TIMES=10 -SCHEDULER_RUNTIME=0 -LOG_LEVEL=info -REPLAY_TRACE=alibaba -WORKLOAD_PROFILE_PATH=./traces/alibaba-cluster-trace-v2018/alibaba_set_0_6600_dags.pkl -EXECUTION_MODE=replay - -PARALLEL_FACTOR=2 -# Move to the simulator directory. -if [[ -z ${ERDOS_SIMULATOR_DIR} ]]; then - echo "[x] ERRROR: ERDOS_SIMULATOR_DIR is not set" - exit 1 -fi -cd ${ERDOS_SIMULATOR_DIR} - -LOG_DIR=$1 -if [[ -z ${LOG_DIR} ]]; then - echo "[x] ERROR: Please provide a directory to output results to as the first argument." - exit 2 -fi - -execute_experiment () { - LOG_DIR=$1 - LOG_BASE=$2 - echo "[x] Initiating the execution of ${LOG_BASE}" - if [ ! 
-f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then -# --log_dir=${LOG_DIR}/${LOG_BASE} -# --scheduler_log_to_file - MYCONF="\ ---log_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.log ---csv_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv ---log_level=${LOG_LEVEL} ---execution_mode=${EXECUTION_MODE} ---replay_trace=${REPLAY_TRACE} ---max_deadline_variance=${MAX_DEADLINE_VARIANCE} ---min_deadline_variance=${MIN_DEADLINE_VARIANCE} ---workload_profile_path=${WORKLOAD_PROFILE_PATH} ---override_num_invocations=${NUM_INVOCATIONS} ---randomize_start_time_max=100 ---worker_profile_path=profiles/workers/${WORKER_CONFIG}.yaml ---scheduler_runtime=${SCHEDULER_RUNTIME} ---override_release_policy=${RELEASE_POLICY} ---scheduler=${SCHEDULER} ---alibaba_loader_task_cpu_divisor=${TASK_CPU_DIVISOR} -" - if [[ ${RELEASE_POLICY} == fixed ]]; then - MYCONF+="--override_arrival_period=10 -" - else - MYCONF+="--override_poisson_arrival_rate=${POISSON_ARRIVAL_RATE} -" - if [[ ${RELEASE_POLICY} == gamma ]]; then - MYCONF+="--override_gamma_coefficient=${GAMMA_COEFFICIENT} -" - fi - fi - - if [[ ${DAG_AWARE} == 1 ]]; then - MYCONF+="--release_taskgraphs -" - fi - - if [[ ${HETEROGENEOUS_RESOURCE} == 1 ]]; then - MYCONF+="--alibaba_enable_heterogeneous_resource_type -" - fi - - if [[ ${OPTIMIZATION_PASS} == 1 ]]; then - MYCONF+="--scheduler_enable_optimization_pass -" - fi - - if [[ ${DYNAMIC_DISCRETIZATION} == 1 ]]; then - MYCONF+=" ---scheduler_dynamic_discretization ---scheduler_max_time_discretization=${MAX_TIME_DISCRETIZATION} ---scheduler_max_occupancy_threshold=${MAX_OCCUPANCY_THRESHOLD} -" - fi - - if [[ ${SCHEDULER} != EDF ]]; then - MYCONF+=" ---enforce_deadlines ---retract_schedules ---drop_skipped_tasks ---scheduler_log_times=${SCHEDULER_LOG_TIMES} ---scheduler_time_discretization=${SCHEDULER_TIME_DISCRETIZATION} - " - fi - echo "${MYCONF}" > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf - if ! time python3 main.py --flagfile=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.output; then - echo "[x] Failed in the execution of ${LOG_BASE}. Exiting." - exit 3 - fi - else - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - fi - echo "[x] Finished execution of ${LOG_BASE}." -} - - -for POISSON_ARRIVAL_RATE in ${POISSON_ARRIVAL_RATES[@]}; do - for SCHEDULER in ${SCHEDULERS[@]}; do - for RELEASE_POLICY in ${RELEASE_POLICIES[@]}; do - for GAMMA_COEFFICIENT in ${GAMMA_COEFFICIENTS[@]}; do - for MAX_DEADLINE_VARIANCE in ${MAX_DEADLINE_VARIANCES[@]}; do - for SCHEDULER_TIME_DISCRETIZATION in ${SCHEDULER_TIME_DISCRETIZATIONS[@]}; do - for DAG_AWARE in ${DAG_AWARENESS[@]}; do - if [[ ${SCHEDULER} == EDF ]]; then - if [[ "${SCHEDULER_TIME_DISCRETIZATION}" -ne "${SCHEDULER_TIME_DISCRETIZATIONS[0]}" ]]; then - continue - fi - DAG_AWARE=0 - fi - - LOG_BASE=${REPLAY_TRACE}_scheduler_${SCHEDULER}_release_policy_${RELEASE_POLICY}_max_deadline_var_${MAX_DEADLINE_VARIANCE}_dag_aware_${DAG_AWARE}_poisson_arrival_rate_${POISSON_ARRIVAL_RATE}_gamma_coefficient_${GAMMA_COEFFICIENT} - if [[ ${SCHEDULER} == TetriSched ]]; then - LOG_BASE+="_scheduler_discretization_${SCHEDULER_TIME_DISCRETIZATION}" - fi - if [[ ${DYNAMIC_DISCRETIZATION} == 1 ]]; then - LOG_BASE+="_dynamic_max_occupancy_threshold_${MAX_OCCUPANCY_THRESHOLD}" - fi - - if [ -f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." 
- continue - fi - - mkdir -p ${LOG_DIR}/${LOG_BASE} - execute_experiment ${LOG_DIR} ${LOG_BASE} & - if [[ $(jobs -r -p | wc -l) -ge $PARALLEL_FACTOR ]]; then - echo "[x] Waiting for a job to terminate because $PARALLEL_FACTOR jobs are running." - wait -n - fi - done - done - done - done - done - done -done -wait -echo "[x] Finished executing all experiments." \ No newline at end of file diff --git a/scripts/run_alibaba_experiments_osdi_12_4_night_jayne_hetero.sh b/scripts/run_alibaba_experiments_osdi_12_4_night_jayne_hetero.sh deleted file mode 100755 index aa73024d..00000000 --- a/scripts/run_alibaba_experiments_osdi_12_4_night_jayne_hetero.sh +++ /dev/null @@ -1,154 +0,0 @@ -#!/bin/bash -# $1 directory where to save the logs. - -# Scheduler runtimes in us.TetriSched -SCHEDULERS=(EDF TetriSched) -# MAX_DEADLINE_VARIANCES=(15 25 50 100 200) -# MAX_DEADLINE_VARIANCES=(200 400 800) -MAX_DEADLINE_VARIANCES=(25 200 50 100) # Keep deadline tight. Don't change this -SCHEDULER_TIME_DISCRETIZATIONS=(1) -GAMMA_COEFFICIENTS=(1 4 2) #cv2 don't change this -RELEASE_POLICIES=(gamma) -# POISSON_ARRIVAL_RATES=(0.2 0.5 1 2) -POISSON_ARRIVAL_RATES=(0.055 0.05) # Tune this -DAG_AWARENESS=(1) # False True -TASK_CPU_DIVISOR=25 - -HETEROGENEOUS_RESOURCE=1 -WORKER_CONFIG=alibaba_cluster_heterogeneous_20_slots - -ERDOS_SIMULATOR_DIR="." # Change this to the directory where the simulator is located. -MIN_DEADLINE_VARIANCE=10 -NUM_INVOCATIONS=250 -SCHEDULER_LOG_TIMES=10 -SCHEDULER_RUNTIME=0 -LOG_LEVEL=info -REPLAY_TRACE=alibaba -WORKLOAD_PROFILE_PATH=./traces/alibaba-cluster-trace-v2018/alibaba_set_0_6600_dags.pkl -EXECUTION_MODE=replay - -PARALLEL_FACTOR=2 -# Move to the simulator directory. -if [[ -z ${ERDOS_SIMULATOR_DIR} ]]; then - echo "[x] ERRROR: ERDOS_SIMULATOR_DIR is not set" - exit 1 -fi -cd ${ERDOS_SIMULATOR_DIR} - -LOG_DIR=$1 -if [[ -z ${LOG_DIR} ]]; then - echo "[x] ERROR: Please provide a directory to output results to as the first argument." - exit 2 -fi - -execute_experiment () { - LOG_DIR=$1 - LOG_BASE=$2 - echo "[x] Initiating the execution of ${LOG_BASE}" - if [ ! 
-f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then -# --log_dir=${LOG_DIR}/${LOG_BASE} -# --scheduler_log_to_file - MYCONF="\ ---log_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.log ---csv_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv ---log_level=${LOG_LEVEL} ---execution_mode=${EXECUTION_MODE} ---replay_trace=${REPLAY_TRACE} ---max_deadline_variance=${MAX_DEADLINE_VARIANCE} ---min_deadline_variance=${MIN_DEADLINE_VARIANCE} ---workload_profile_path=${WORKLOAD_PROFILE_PATH} ---override_num_invocations=${NUM_INVOCATIONS} ---randomize_start_time_max=100 ---worker_profile_path=profiles/workers/${WORKER_CONFIG}.yaml ---scheduler_runtime=${SCHEDULER_RUNTIME} ---override_release_policy=${RELEASE_POLICY} ---scheduler=${SCHEDULER} ---alibaba_loader_task_cpu_divisor=${TASK_CPU_DIVISOR} -" - if [[ ${RELEASE_POLICY} == fixed ]]; then - MYCONF+="--override_arrival_period=10 -" - else - MYCONF+="--override_poisson_arrival_rate=${POISSON_ARRIVAL_RATE} -" - if [[ ${RELEASE_POLICY} == gamma ]]; then - MYCONF+="--override_gamma_coefficient=${GAMMA_COEFFICIENT} -" - fi - fi - - if [[ ${DAG_AWARE} == 1 ]]; then - MYCONF+="--release_taskgraphs -" - fi - - if [[ ${HETEROGENEOUS_RESOURCE} == 1 ]]; then - MYCONF+="--alibaba_enable_heterogeneous_resource_type -" - fi - - if [[ ${OPTIMIZATION_PASS} == 1 ]]; then - MYCONF+="--scheduler_enable_optimization_pass -" - fi - - if [[ ${SCHEDULER} != EDF ]]; then - MYCONF+=" ---enforce_deadlines ---retract_schedules ---drop_skipped_tasks ---scheduler_log_times=${SCHEDULER_LOG_TIMES} ---scheduler_time_discretization=${SCHEDULER_TIME_DISCRETIZATION} - " - fi - echo "${MYCONF}" > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf - if ! time python3 main.py --flagfile=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.output; then - echo "[x] Failed in the execution of ${LOG_BASE}. Exiting." - exit 3 - fi - else - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - fi - echo "[x] Finished execution of ${LOG_BASE}." -} - - -for POISSON_ARRIVAL_RATE in ${POISSON_ARRIVAL_RATES[@]}; do - for SCHEDULER in ${SCHEDULERS[@]}; do - for RELEASE_POLICY in ${RELEASE_POLICIES[@]}; do - for GAMMA_COEFFICIENT in ${GAMMA_COEFFICIENTS[@]}; do - for MAX_DEADLINE_VARIANCE in ${MAX_DEADLINE_VARIANCES[@]}; do - for SCHEDULER_TIME_DISCRETIZATION in ${SCHEDULER_TIME_DISCRETIZATIONS[@]}; do - for DAG_AWARE in ${DAG_AWARENESS[@]}; do - if [[ ${SCHEDULER} == EDF ]]; then - if [[ "${SCHEDULER_TIME_DISCRETIZATION}" -ne "${SCHEDULER_TIME_DISCRETIZATIONS[0]}" ]]; then - continue - fi - DAG_AWARE=0 - fi - - LOG_BASE=${REPLAY_TRACE}_scheduler_${SCHEDULER}_release_policy_${RELEASE_POLICY}_max_deadline_var_${MAX_DEADLINE_VARIANCE}_dag_aware_${DAG_AWARE}_poisson_arrival_rate_${POISSON_ARRIVAL_RATE}_gamma_coefficient_${GAMMA_COEFFICIENT} - if [[ ${SCHEDULER} == TetriSched ]]; then - LOG_BASE+="_scheduler_discretization_${SCHEDULER_TIME_DISCRETIZATION}" - fi - - if [ -f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - continue - fi - - mkdir -p ${LOG_DIR}/${LOG_BASE} - execute_experiment ${LOG_DIR} ${LOG_BASE} & - if [[ $(jobs -r -p | wc -l) -ge $PARALLEL_FACTOR ]]; then - echo "[x] Waiting for a job to terminate because $PARALLEL_FACTOR jobs are running." - wait -n - fi - done - done - done - done - done - done -done -wait -echo "[x] Finished executing all experiments." 
\ No newline at end of file diff --git a/scripts/run_alibaba_experiments_osdi_12_4_night_rivertam_hetero.sh b/scripts/run_alibaba_experiments_osdi_12_4_night_rivertam_hetero.sh deleted file mode 100755 index 1334554d..00000000 --- a/scripts/run_alibaba_experiments_osdi_12_4_night_rivertam_hetero.sh +++ /dev/null @@ -1,154 +0,0 @@ -#!/bin/bash -# $1 directory where to save the logs. - -# Scheduler runtimes in us.TetriSched -SCHEDULERS=(EDF TetriSched) -# MAX_DEADLINE_VARIANCES=(15 25 50 100 200) -# MAX_DEADLINE_VARIANCES=(200 400 800) -MAX_DEADLINE_VARIANCES=(25 200 50 100) # Keep deadline tight. Don't change this -SCHEDULER_TIME_DISCRETIZATIONS=(1) -GAMMA_COEFFICIENTS=(1 4 2) #cv2 don't change this -RELEASE_POLICIES=(gamma) -# POISSON_ARRIVAL_RATES=(0.2 0.5 1 2) -POISSON_ARRIVAL_RATES=(0.04 0.045) # Tune this -DAG_AWARENESS=(1) # False True -TASK_CPU_DIVISOR=25 - -HETEROGENEOUS_RESOURCE=1 -WORKER_CONFIG=alibaba_cluster_heterogeneous_20_slots - -ERDOS_SIMULATOR_DIR="." # Change this to the directory where the simulator is located. -MIN_DEADLINE_VARIANCE=10 -NUM_INVOCATIONS=250 -SCHEDULER_LOG_TIMES=10 -SCHEDULER_RUNTIME=0 -LOG_LEVEL=info -REPLAY_TRACE=alibaba -WORKLOAD_PROFILE_PATH=./traces/alibaba-cluster-trace-v2018/alibaba_set_0_6600_dags.pkl -EXECUTION_MODE=replay - -PARALLEL_FACTOR=2 -# Move to the simulator directory. -if [[ -z ${ERDOS_SIMULATOR_DIR} ]]; then - echo "[x] ERRROR: ERDOS_SIMULATOR_DIR is not set" - exit 1 -fi -cd ${ERDOS_SIMULATOR_DIR} - -LOG_DIR=$1 -if [[ -z ${LOG_DIR} ]]; then - echo "[x] ERROR: Please provide a directory to output results to as the first argument." - exit 2 -fi - -execute_experiment () { - LOG_DIR=$1 - LOG_BASE=$2 - echo "[x] Initiating the execution of ${LOG_BASE}" - if [ ! -f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then -# --log_dir=${LOG_DIR}/${LOG_BASE} -# --scheduler_log_to_file - MYCONF="\ ---log_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.log ---csv_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv ---log_level=${LOG_LEVEL} ---execution_mode=${EXECUTION_MODE} ---replay_trace=${REPLAY_TRACE} ---max_deadline_variance=${MAX_DEADLINE_VARIANCE} ---min_deadline_variance=${MIN_DEADLINE_VARIANCE} ---workload_profile_path=${WORKLOAD_PROFILE_PATH} ---override_num_invocations=${NUM_INVOCATIONS} ---randomize_start_time_max=100 ---worker_profile_path=profiles/workers/${WORKER_CONFIG}.yaml ---scheduler_runtime=${SCHEDULER_RUNTIME} ---override_release_policy=${RELEASE_POLICY} ---scheduler=${SCHEDULER} ---alibaba_loader_task_cpu_divisor=${TASK_CPU_DIVISOR} -" - if [[ ${RELEASE_POLICY} == fixed ]]; then - MYCONF+="--override_arrival_period=10 -" - else - MYCONF+="--override_poisson_arrival_rate=${POISSON_ARRIVAL_RATE} -" - if [[ ${RELEASE_POLICY} == gamma ]]; then - MYCONF+="--override_gamma_coefficient=${GAMMA_COEFFICIENT} -" - fi - fi - - if [[ ${DAG_AWARE} == 1 ]]; then - MYCONF+="--release_taskgraphs -" - fi - - if [[ ${HETEROGENEOUS_RESOURCE} == 1 ]]; then - MYCONF+="--alibaba_enable_heterogeneous_resource_type -" - fi - - if [[ ${OPTIMIZATION_PASS} == 1 ]]; then - MYCONF+="--scheduler_enable_optimization_pass -" - fi - - if [[ ${SCHEDULER} != EDF ]]; then - MYCONF+=" ---enforce_deadlines ---retract_schedules ---drop_skipped_tasks ---scheduler_log_times=${SCHEDULER_LOG_TIMES} ---scheduler_time_discretization=${SCHEDULER_TIME_DISCRETIZATION} - " - fi - echo "${MYCONF}" > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf - if ! 
time python3 main.py --flagfile=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.output; then - echo "[x] Failed in the execution of ${LOG_BASE}. Exiting." - exit 3 - fi - else - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - fi - echo "[x] Finished execution of ${LOG_BASE}." -} - - -for POISSON_ARRIVAL_RATE in ${POISSON_ARRIVAL_RATES[@]}; do - for SCHEDULER in ${SCHEDULERS[@]}; do - for RELEASE_POLICY in ${RELEASE_POLICIES[@]}; do - for GAMMA_COEFFICIENT in ${GAMMA_COEFFICIENTS[@]}; do - for MAX_DEADLINE_VARIANCE in ${MAX_DEADLINE_VARIANCES[@]}; do - for SCHEDULER_TIME_DISCRETIZATION in ${SCHEDULER_TIME_DISCRETIZATIONS[@]}; do - for DAG_AWARE in ${DAG_AWARENESS[@]}; do - if [[ ${SCHEDULER} == EDF ]]; then - if [[ "${SCHEDULER_TIME_DISCRETIZATION}" -ne "${SCHEDULER_TIME_DISCRETIZATIONS[0]}" ]]; then - continue - fi - DAG_AWARE=0 - fi - - LOG_BASE=${REPLAY_TRACE}_scheduler_${SCHEDULER}_release_policy_${RELEASE_POLICY}_max_deadline_var_${MAX_DEADLINE_VARIANCE}_dag_aware_${DAG_AWARE}_poisson_arrival_rate_${POISSON_ARRIVAL_RATE}_gamma_coefficient_${GAMMA_COEFFICIENT} - if [[ ${SCHEDULER} == TetriSched ]]; then - LOG_BASE+="_scheduler_discretization_${SCHEDULER_TIME_DISCRETIZATION}" - fi - - if [ -f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - continue - fi - - mkdir -p ${LOG_DIR}/${LOG_BASE} - execute_experiment ${LOG_DIR} ${LOG_BASE} & - if [[ $(jobs -r -p | wc -l) -ge $PARALLEL_FACTOR ]]; then - echo "[x] Waiting for a job to terminate because $PARALLEL_FACTOR jobs are running." - wait -n - fi - done - done - done - done - done - done -done -wait -echo "[x] Finished executing all experiments." \ No newline at end of file diff --git a/scripts/run_alibaba_experiments_osdi_12_4_night_sheperd_hetero.sh b/scripts/run_alibaba_experiments_osdi_12_4_night_sheperd_hetero.sh deleted file mode 100755 index 04c208f3..00000000 --- a/scripts/run_alibaba_experiments_osdi_12_4_night_sheperd_hetero.sh +++ /dev/null @@ -1,154 +0,0 @@ -#!/bin/bash -# $1 directory where to save the logs. - -# Scheduler runtimes in us.TetriSched -SCHEDULERS=(EDF TetriSched) -# MAX_DEADLINE_VARIANCES=(15 25 50 100 200) -# MAX_DEADLINE_VARIANCES=(200 400 800) -MAX_DEADLINE_VARIANCES=(25 200 50 100) # Keep deadline tight. Don't change this -SCHEDULER_TIME_DISCRETIZATIONS=(1) -GAMMA_COEFFICIENTS=(1 4 2) #cv2 don't change this -RELEASE_POLICIES=(gamma) -# POISSON_ARRIVAL_RATES=(0.2 0.5 1 2) -POISSON_ARRIVAL_RATES=(0.08 0.11) # Tune this -DAG_AWARENESS=(1) # False True -TASK_CPU_DIVISOR=25 - -HETEROGENEOUS_RESOURCE=1 -WORKER_CONFIG=alibaba_cluster_heterogeneous_40_slots - -ERDOS_SIMULATOR_DIR="." # Change this to the directory where the simulator is located. -MIN_DEADLINE_VARIANCE=10 -NUM_INVOCATIONS=250 -SCHEDULER_LOG_TIMES=10 -SCHEDULER_RUNTIME=0 -LOG_LEVEL=info -REPLAY_TRACE=alibaba -WORKLOAD_PROFILE_PATH=./traces/alibaba-cluster-trace-v2018/alibaba_set_0_6600_dags.pkl -EXECUTION_MODE=replay - -PARALLEL_FACTOR=2 -# Move to the simulator directory. -if [[ -z ${ERDOS_SIMULATOR_DIR} ]]; then - echo "[x] ERRROR: ERDOS_SIMULATOR_DIR is not set" - exit 1 -fi -cd ${ERDOS_SIMULATOR_DIR} - -LOG_DIR=$1 -if [[ -z ${LOG_DIR} ]]; then - echo "[x] ERROR: Please provide a directory to output results to as the first argument." - exit 2 -fi - -execute_experiment () { - LOG_DIR=$1 - LOG_BASE=$2 - echo "[x] Initiating the execution of ${LOG_BASE}" - if [ ! 
-f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then -# --log_dir=${LOG_DIR}/${LOG_BASE} -# --scheduler_log_to_file - MYCONF="\ ---log_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.log ---csv_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv ---log_level=${LOG_LEVEL} ---execution_mode=${EXECUTION_MODE} ---replay_trace=${REPLAY_TRACE} ---max_deadline_variance=${MAX_DEADLINE_VARIANCE} ---min_deadline_variance=${MIN_DEADLINE_VARIANCE} ---workload_profile_path=${WORKLOAD_PROFILE_PATH} ---override_num_invocations=${NUM_INVOCATIONS} ---randomize_start_time_max=100 ---worker_profile_path=profiles/workers/${WORKER_CONFIG}.yaml ---scheduler_runtime=${SCHEDULER_RUNTIME} ---override_release_policy=${RELEASE_POLICY} ---scheduler=${SCHEDULER} ---alibaba_loader_task_cpu_divisor=${TASK_CPU_DIVISOR} -" - if [[ ${RELEASE_POLICY} == fixed ]]; then - MYCONF+="--override_arrival_period=10 -" - else - MYCONF+="--override_poisson_arrival_rate=${POISSON_ARRIVAL_RATE} -" - if [[ ${RELEASE_POLICY} == gamma ]]; then - MYCONF+="--override_gamma_coefficient=${GAMMA_COEFFICIENT} -" - fi - fi - - if [[ ${DAG_AWARE} == 1 ]]; then - MYCONF+="--release_taskgraphs -" - fi - - if [[ ${HETEROGENEOUS_RESOURCE} == 1 ]]; then - MYCONF+="--alibaba_enable_heterogeneous_resource_type -" - fi - - if [[ ${OPTIMIZATION_PASS} == 1 ]]; then - MYCONF+="--scheduler_enable_optimization_pass -" - fi - - if [[ ${SCHEDULER} != EDF ]]; then - MYCONF+=" ---enforce_deadlines ---retract_schedules ---drop_skipped_tasks ---scheduler_log_times=${SCHEDULER_LOG_TIMES} ---scheduler_time_discretization=${SCHEDULER_TIME_DISCRETIZATION} - " - fi - echo "${MYCONF}" > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf - if ! time python3 main.py --flagfile=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.output; then - echo "[x] Failed in the execution of ${LOG_BASE}. Exiting." - exit 3 - fi - else - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - fi - echo "[x] Finished execution of ${LOG_BASE}." -} - - -for POISSON_ARRIVAL_RATE in ${POISSON_ARRIVAL_RATES[@]}; do - for SCHEDULER in ${SCHEDULERS[@]}; do - for RELEASE_POLICY in ${RELEASE_POLICIES[@]}; do - for GAMMA_COEFFICIENT in ${GAMMA_COEFFICIENTS[@]}; do - for MAX_DEADLINE_VARIANCE in ${MAX_DEADLINE_VARIANCES[@]}; do - for SCHEDULER_TIME_DISCRETIZATION in ${SCHEDULER_TIME_DISCRETIZATIONS[@]}; do - for DAG_AWARE in ${DAG_AWARENESS[@]}; do - if [[ ${SCHEDULER} == EDF ]]; then - if [[ "${SCHEDULER_TIME_DISCRETIZATION}" -ne "${SCHEDULER_TIME_DISCRETIZATIONS[0]}" ]]; then - continue - fi - DAG_AWARE=0 - fi - - LOG_BASE=${REPLAY_TRACE}_scheduler_${SCHEDULER}_release_policy_${RELEASE_POLICY}_max_deadline_var_${MAX_DEADLINE_VARIANCE}_dag_aware_${DAG_AWARE}_poisson_arrival_rate_${POISSON_ARRIVAL_RATE}_gamma_coefficient_${GAMMA_COEFFICIENT} - if [[ ${SCHEDULER} == TetriSched ]]; then - LOG_BASE+="_scheduler_discretization_${SCHEDULER_TIME_DISCRETIZATION}" - fi - - if [ -f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - continue - fi - - mkdir -p ${LOG_DIR}/${LOG_BASE} - execute_experiment ${LOG_DIR} ${LOG_BASE} & - if [[ $(jobs -r -p | wc -l) -ge $PARALLEL_FACTOR ]]; then - echo "[x] Waiting for a job to terminate because $PARALLEL_FACTOR jobs are running." - wait -n - fi - done - done - done - done - done - done -done -wait -echo "[x] Finished executing all experiments." 
\ No newline at end of file diff --git a/scripts/run_alibaba_experiments_osdi_12_4_night_simontam_homo.sh b/scripts/run_alibaba_experiments_osdi_12_4_night_simontam_homo.sh deleted file mode 100755 index 04ecd47d..00000000 --- a/scripts/run_alibaba_experiments_osdi_12_4_night_simontam_homo.sh +++ /dev/null @@ -1,153 +0,0 @@ -#!/bin/bash -# $1 directory where to save the logs. - -# Scheduler runtimes in us.TetriSched -SCHEDULERS=(EDF TetriSched) -# MAX_DEADLINE_VARIANCES=(15 25 50 100 200) -# MAX_DEADLINE_VARIANCES=(200 400 800) -MAX_DEADLINE_VARIANCES=(25 200 50 100) # Keep deadline tight. Don't change this -SCHEDULER_TIME_DISCRETIZATIONS=(1) -GAMMA_COEFFICIENTS=(1 4 2) #cv2 don't change this -RELEASE_POLICIES=(gamma) -# POISSON_ARRIVAL_RATES=(0.2 0.5 1 2) -POISSON_ARRIVAL_RATES=(0.04 0.045) # Tune this -DAG_AWARENESS=(1) # False True -TASK_CPU_DIVISOR=25 - -WORKER_CONFIG=alibaba_cluster_20_slots - -ERDOS_SIMULATOR_DIR="." # Change this to the directory where the simulator is located. -MIN_DEADLINE_VARIANCE=10 -NUM_INVOCATIONS=250 -SCHEDULER_LOG_TIMES=10 -SCHEDULER_RUNTIME=0 -LOG_LEVEL=info -REPLAY_TRACE=alibaba -WORKLOAD_PROFILE_PATH=./traces/alibaba-cluster-trace-v2018/alibaba_set_0_6600_dags.pkl -EXECUTION_MODE=replay - -PARALLEL_FACTOR=2 -# Move to the simulator directory. -if [[ -z ${ERDOS_SIMULATOR_DIR} ]]; then - echo "[x] ERRROR: ERDOS_SIMULATOR_DIR is not set" - exit 1 -fi -cd ${ERDOS_SIMULATOR_DIR} - -LOG_DIR=$1 -if [[ -z ${LOG_DIR} ]]; then - echo "[x] ERROR: Please provide a directory to output results to as the first argument." - exit 2 -fi - -execute_experiment () { - LOG_DIR=$1 - LOG_BASE=$2 - echo "[x] Initiating the execution of ${LOG_BASE}" - if [ ! -f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then -# --log_dir=${LOG_DIR}/${LOG_BASE} -# --scheduler_log_to_file - MYCONF="\ ---log_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.log ---csv_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv ---log_level=${LOG_LEVEL} ---execution_mode=${EXECUTION_MODE} ---replay_trace=${REPLAY_TRACE} ---max_deadline_variance=${MAX_DEADLINE_VARIANCE} ---min_deadline_variance=${MIN_DEADLINE_VARIANCE} ---workload_profile_path=${WORKLOAD_PROFILE_PATH} ---override_num_invocations=${NUM_INVOCATIONS} ---randomize_start_time_max=100 ---worker_profile_path=profiles/workers/${WORKER_CONFIG}.yaml ---scheduler_runtime=${SCHEDULER_RUNTIME} ---override_release_policy=${RELEASE_POLICY} ---scheduler=${SCHEDULER} ---alibaba_loader_task_cpu_divisor=${TASK_CPU_DIVISOR} -" - if [[ ${RELEASE_POLICY} == fixed ]]; then - MYCONF+="--override_arrival_period=10 -" - else - MYCONF+="--override_poisson_arrival_rate=${POISSON_ARRIVAL_RATE} -" - if [[ ${RELEASE_POLICY} == gamma ]]; then - MYCONF+="--override_gamma_coefficient=${GAMMA_COEFFICIENT} -" - fi - fi - - if [[ ${DAG_AWARE} == 1 ]]; then - MYCONF+="--release_taskgraphs -" - fi - - if [[ ${HETEROGENEOUS_RESOURCE} == 1 ]]; then - MYCONF+="--alibaba_enable_heterogeneous_resource_type -" - fi - - if [[ ${OPTIMIZATION_PASS} == 1 ]]; then - MYCONF+="--scheduler_enable_optimization_pass -" - fi - - if [[ ${SCHEDULER} != EDF ]]; then - MYCONF+=" ---enforce_deadlines ---retract_schedules ---drop_skipped_tasks ---scheduler_log_times=${SCHEDULER_LOG_TIMES} ---scheduler_time_discretization=${SCHEDULER_TIME_DISCRETIZATION} - " - fi - echo "${MYCONF}" > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf - if ! time python3 main.py --flagfile=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.output; then - echo "[x] Failed in the execution of ${LOG_BASE}. Exiting." 
- exit 3 - fi - else - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - fi - echo "[x] Finished execution of ${LOG_BASE}." -} - - -for POISSON_ARRIVAL_RATE in ${POISSON_ARRIVAL_RATES[@]}; do - for SCHEDULER in ${SCHEDULERS[@]}; do - for RELEASE_POLICY in ${RELEASE_POLICIES[@]}; do - for GAMMA_COEFFICIENT in ${GAMMA_COEFFICIENTS[@]}; do - for MAX_DEADLINE_VARIANCE in ${MAX_DEADLINE_VARIANCES[@]}; do - for SCHEDULER_TIME_DISCRETIZATION in ${SCHEDULER_TIME_DISCRETIZATIONS[@]}; do - for DAG_AWARE in ${DAG_AWARENESS[@]}; do - if [[ ${SCHEDULER} == EDF ]]; then - if [[ "${SCHEDULER_TIME_DISCRETIZATION}" -ne "${SCHEDULER_TIME_DISCRETIZATIONS[0]}" ]]; then - continue - fi - DAG_AWARE=0 - fi - - LOG_BASE=${REPLAY_TRACE}_scheduler_${SCHEDULER}_release_policy_${RELEASE_POLICY}_max_deadline_var_${MAX_DEADLINE_VARIANCE}_dag_aware_${DAG_AWARE}_poisson_arrival_rate_${POISSON_ARRIVAL_RATE}_gamma_coefficient_${GAMMA_COEFFICIENT} - if [[ ${SCHEDULER} == TetriSched ]]; then - LOG_BASE+="_scheduler_discretization_${SCHEDULER_TIME_DISCRETIZATION}" - fi - - if [ -f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - continue - fi - - mkdir -p ${LOG_DIR}/${LOG_BASE} - execute_experiment ${LOG_DIR} ${LOG_BASE} & - if [[ $(jobs -r -p | wc -l) -ge $PARALLEL_FACTOR ]]; then - echo "[x] Waiting for a job to terminate because $PARALLEL_FACTOR jobs are running." - wait -n - fi - done - done - done - done - done - done -done -wait -echo "[x] Finished executing all experiments." \ No newline at end of file diff --git a/scripts/run_alibaba_experiments_osdi_12_4_night_sysml_homo.sh b/scripts/run_alibaba_experiments_osdi_12_4_night_sysml_homo.sh deleted file mode 100755 index 49dc9ab0..00000000 --- a/scripts/run_alibaba_experiments_osdi_12_4_night_sysml_homo.sh +++ /dev/null @@ -1,153 +0,0 @@ -#!/bin/bash -# $1 directory where to save the logs. - -# Scheduler runtimes in us.TetriSched -SCHEDULERS=(EDF TetriSched) -# MAX_DEADLINE_VARIANCES=(15 25 50 100 200) -# MAX_DEADLINE_VARIANCES=(200 400 800) -MAX_DEADLINE_VARIANCES=(25 200 50 100) # Keep deadline tight. Don't change this -SCHEDULER_TIME_DISCRETIZATIONS=(1) -GAMMA_COEFFICIENTS=(1 4 2) #cv2 don't change this -RELEASE_POLICIES=(gamma) -# POISSON_ARRIVAL_RATES=(0.2 0.5 1 2) -POISSON_ARRIVAL_RATES=(0.055 0.05) # Tune this -DAG_AWARENESS=(1) # False True -TASK_CPU_DIVISOR=25 - -WORKER_CONFIG=alibaba_cluster_20_slots - -ERDOS_SIMULATOR_DIR="." # Change this to the directory where the simulator is located. -MIN_DEADLINE_VARIANCE=10 -NUM_INVOCATIONS=250 -SCHEDULER_LOG_TIMES=10 -SCHEDULER_RUNTIME=0 -LOG_LEVEL=info -REPLAY_TRACE=alibaba -WORKLOAD_PROFILE_PATH=./traces/alibaba-cluster-trace-v2018/alibaba_set_0_6600_dags.pkl -EXECUTION_MODE=replay - -PARALLEL_FACTOR=2 -# Move to the simulator directory. -if [[ -z ${ERDOS_SIMULATOR_DIR} ]]; then - echo "[x] ERRROR: ERDOS_SIMULATOR_DIR is not set" - exit 1 -fi -cd ${ERDOS_SIMULATOR_DIR} - -LOG_DIR=$1 -if [[ -z ${LOG_DIR} ]]; then - echo "[x] ERROR: Please provide a directory to output results to as the first argument." - exit 2 -fi - -execute_experiment () { - LOG_DIR=$1 - LOG_BASE=$2 - echo "[x] Initiating the execution of ${LOG_BASE}" - if [ ! 
-f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then -# --log_dir=${LOG_DIR}/${LOG_BASE} -# --scheduler_log_to_file - MYCONF="\ ---log_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.log ---csv_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv ---log_level=${LOG_LEVEL} ---execution_mode=${EXECUTION_MODE} ---replay_trace=${REPLAY_TRACE} ---max_deadline_variance=${MAX_DEADLINE_VARIANCE} ---min_deadline_variance=${MIN_DEADLINE_VARIANCE} ---workload_profile_path=${WORKLOAD_PROFILE_PATH} ---override_num_invocations=${NUM_INVOCATIONS} ---randomize_start_time_max=100 ---worker_profile_path=profiles/workers/${WORKER_CONFIG}.yaml ---scheduler_runtime=${SCHEDULER_RUNTIME} ---override_release_policy=${RELEASE_POLICY} ---scheduler=${SCHEDULER} ---alibaba_loader_task_cpu_divisor=${TASK_CPU_DIVISOR} -" - if [[ ${RELEASE_POLICY} == fixed ]]; then - MYCONF+="--override_arrival_period=10 -" - else - MYCONF+="--override_poisson_arrival_rate=${POISSON_ARRIVAL_RATE} -" - if [[ ${RELEASE_POLICY} == gamma ]]; then - MYCONF+="--override_gamma_coefficient=${GAMMA_COEFFICIENT} -" - fi - fi - - if [[ ${DAG_AWARE} == 1 ]]; then - MYCONF+="--release_taskgraphs -" - fi - - if [[ ${HETEROGENEOUS_RESOURCE} == 1 ]]; then - MYCONF+="--alibaba_enable_heterogeneous_resource_type -" - fi - - if [[ ${OPTIMIZATION_PASS} == 1 ]]; then - MYCONF+="--scheduler_enable_optimization_pass -" - fi - - if [[ ${SCHEDULER} != EDF ]]; then - MYCONF+=" ---enforce_deadlines ---retract_schedules ---drop_skipped_tasks ---scheduler_log_times=${SCHEDULER_LOG_TIMES} ---scheduler_time_discretization=${SCHEDULER_TIME_DISCRETIZATION} - " - fi - echo "${MYCONF}" > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf - if ! time python3 main.py --flagfile=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.output; then - echo "[x] Failed in the execution of ${LOG_BASE}. Exiting." - exit 3 - fi - else - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - fi - echo "[x] Finished execution of ${LOG_BASE}." -} - - -for POISSON_ARRIVAL_RATE in ${POISSON_ARRIVAL_RATES[@]}; do - for SCHEDULER in ${SCHEDULERS[@]}; do - for RELEASE_POLICY in ${RELEASE_POLICIES[@]}; do - for GAMMA_COEFFICIENT in ${GAMMA_COEFFICIENTS[@]}; do - for MAX_DEADLINE_VARIANCE in ${MAX_DEADLINE_VARIANCES[@]}; do - for SCHEDULER_TIME_DISCRETIZATION in ${SCHEDULER_TIME_DISCRETIZATIONS[@]}; do - for DAG_AWARE in ${DAG_AWARENESS[@]}; do - if [[ ${SCHEDULER} == EDF ]]; then - if [[ "${SCHEDULER_TIME_DISCRETIZATION}" -ne "${SCHEDULER_TIME_DISCRETIZATIONS[0]}" ]]; then - continue - fi - DAG_AWARE=0 - fi - - LOG_BASE=${REPLAY_TRACE}_scheduler_${SCHEDULER}_release_policy_${RELEASE_POLICY}_max_deadline_var_${MAX_DEADLINE_VARIANCE}_dag_aware_${DAG_AWARE}_poisson_arrival_rate_${POISSON_ARRIVAL_RATE}_gamma_coefficient_${GAMMA_COEFFICIENT} - if [[ ${SCHEDULER} == TetriSched ]]; then - LOG_BASE+="_scheduler_discretization_${SCHEDULER_TIME_DISCRETIZATION}" - fi - - if [ -f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - continue - fi - - mkdir -p ${LOG_DIR}/${LOG_BASE} - execute_experiment ${LOG_DIR} ${LOG_BASE} & - if [[ $(jobs -r -p | wc -l) -ge $PARALLEL_FACTOR ]]; then - echo "[x] Waiting for a job to terminate because $PARALLEL_FACTOR jobs are running." - wait -n - fi - done - done - done - done - done - done -done -wait -echo "[x] Finished executing all experiments." 
\ No newline at end of file diff --git a/scripts/run_alibaba_experiments_osdi_12_5_afternoon_jayne_hetero.sh b/scripts/run_alibaba_experiments_osdi_12_5_afternoon_jayne_hetero.sh deleted file mode 100755 index f8c1f126..00000000 --- a/scripts/run_alibaba_experiments_osdi_12_5_afternoon_jayne_hetero.sh +++ /dev/null @@ -1,154 +0,0 @@ -#!/bin/bash -# $1 directory where to save the logs. - -# Scheduler runtimes in us.TetriSched -SCHEDULERS=(EDF TetriSched) -# MAX_DEADLINE_VARIANCES=(15 25 50 100 200) -# MAX_DEADLINE_VARIANCES=(200 400 800) -MAX_DEADLINE_VARIANCES=(200 25 50 100) # Keep deadline tight. Don't change this -SCHEDULER_TIME_DISCRETIZATIONS=(1) -GAMMA_COEFFICIENTS=(1 4 2) #cv2 don't change this -RELEASE_POLICIES=(gamma) -# POISSON_ARRIVAL_RATES=(0.2 0.5 1 2) -POISSON_ARRIVAL_RATES=(0.1 0.2 0.4) # Tune this -DAG_AWARENESS=(1) # False True -TASK_CPU_DIVISOR=25 - -HETEROGENEOUS_RESOURCE=1 -WORKER_CONFIG=alibaba_cluster_heterogeneous_80_slots - -ERDOS_SIMULATOR_DIR="." # Change this to the directory where the simulator is located. -MIN_DEADLINE_VARIANCE=10 -NUM_INVOCATIONS=150 -SCHEDULER_LOG_TIMES=10 -SCHEDULER_RUNTIME=0 -LOG_LEVEL=info -REPLAY_TRACE=alibaba -WORKLOAD_PROFILE_PATH=./traces/alibaba-cluster-trace-v2018/alibaba_set_0_6600_dags.pkl -EXECUTION_MODE=replay - -PARALLEL_FACTOR=2 -# Move to the simulator directory. -if [[ -z ${ERDOS_SIMULATOR_DIR} ]]; then - echo "[x] ERRROR: ERDOS_SIMULATOR_DIR is not set" - exit 1 -fi -cd ${ERDOS_SIMULATOR_DIR} - -LOG_DIR=$1 -if [[ -z ${LOG_DIR} ]]; then - echo "[x] ERROR: Please provide a directory to output results to as the first argument." - exit 2 -fi - -execute_experiment () { - LOG_DIR=$1 - LOG_BASE=$2 - echo "[x] Initiating the execution of ${LOG_BASE}" - if [ ! -f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then -# --log_dir=${LOG_DIR}/${LOG_BASE} -# --scheduler_log_to_file - MYCONF="\ ---log_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.log ---csv_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv ---log_level=${LOG_LEVEL} ---execution_mode=${EXECUTION_MODE} ---replay_trace=${REPLAY_TRACE} ---max_deadline_variance=${MAX_DEADLINE_VARIANCE} ---min_deadline_variance=${MIN_DEADLINE_VARIANCE} ---workload_profile_path=${WORKLOAD_PROFILE_PATH} ---override_num_invocations=${NUM_INVOCATIONS} ---randomize_start_time_max=100 ---worker_profile_path=profiles/workers/${WORKER_CONFIG}.yaml ---scheduler_runtime=${SCHEDULER_RUNTIME} ---override_release_policy=${RELEASE_POLICY} ---scheduler=${SCHEDULER} ---alibaba_loader_task_cpu_divisor=${TASK_CPU_DIVISOR} -" - if [[ ${RELEASE_POLICY} == fixed ]]; then - MYCONF+="--override_arrival_period=10 -" - else - MYCONF+="--override_poisson_arrival_rate=${POISSON_ARRIVAL_RATE} -" - if [[ ${RELEASE_POLICY} == gamma ]]; then - MYCONF+="--override_gamma_coefficient=${GAMMA_COEFFICIENT} -" - fi - fi - - if [[ ${DAG_AWARE} == 1 ]]; then - MYCONF+="--release_taskgraphs -" - fi - - if [[ ${HETEROGENEOUS_RESOURCE} == 1 ]]; then - MYCONF+="--alibaba_enable_heterogeneous_resource_type -" - fi - - if [[ ${OPTIMIZATION_PASS} == 1 ]]; then - MYCONF+="--scheduler_enable_optimization_pass -" - fi - - if [[ ${SCHEDULER} != EDF ]]; then - MYCONF+=" ---enforce_deadlines ---retract_schedules ---drop_skipped_tasks ---scheduler_log_times=${SCHEDULER_LOG_TIMES} ---scheduler_time_discretization=${SCHEDULER_TIME_DISCRETIZATION} - " - fi - echo "${MYCONF}" > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf - if ! 
time python3 main.py --dry_run --flagfile=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.output; then - echo "[x] Failed in the execution of ${LOG_BASE}. Exiting." - exit 3 - fi - else - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - fi - echo "[x] Finished execution of ${LOG_BASE}." -} - - -for POISSON_ARRIVAL_RATE in ${POISSON_ARRIVAL_RATES[@]}; do - for SCHEDULER in ${SCHEDULERS[@]}; do - for RELEASE_POLICY in ${RELEASE_POLICIES[@]}; do - for GAMMA_COEFFICIENT in ${GAMMA_COEFFICIENTS[@]}; do - for MAX_DEADLINE_VARIANCE in ${MAX_DEADLINE_VARIANCES[@]}; do - for SCHEDULER_TIME_DISCRETIZATION in ${SCHEDULER_TIME_DISCRETIZATIONS[@]}; do - for DAG_AWARE in ${DAG_AWARENESS[@]}; do - if [[ ${SCHEDULER} == EDF ]]; then - if [[ "${SCHEDULER_TIME_DISCRETIZATION}" -ne "${SCHEDULER_TIME_DISCRETIZATIONS[0]}" ]]; then - continue - fi - DAG_AWARE=0 - fi - - LOG_BASE=${REPLAY_TRACE}_scheduler_${SCHEDULER}_release_policy_${RELEASE_POLICY}_max_deadline_var_${MAX_DEADLINE_VARIANCE}_dag_aware_${DAG_AWARE}_poisson_arrival_rate_${POISSON_ARRIVAL_RATE}_gamma_coefficient_${GAMMA_COEFFICIENT} - if [[ ${SCHEDULER} == TetriSched ]]; then - LOG_BASE+="_scheduler_discretization_${SCHEDULER_TIME_DISCRETIZATION}" - fi - - if [ -f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - continue - fi - - mkdir -p ${LOG_DIR}/${LOG_BASE} - execute_experiment ${LOG_DIR} ${LOG_BASE} & - if [[ $(jobs -r -p | wc -l) -ge $PARALLEL_FACTOR ]]; then - echo "[x] Waiting for a job to terminate because $PARALLEL_FACTOR jobs are running." - wait -n - fi - done - done - done - done - done - done -done -wait -echo "[x] Finished executing all experiments." \ No newline at end of file diff --git a/scripts/run_alibaba_experiments_osdi_12_5_afternoon_sysml_homo.sh b/scripts/run_alibaba_experiments_osdi_12_5_afternoon_sysml_homo.sh deleted file mode 100755 index db183217..00000000 --- a/scripts/run_alibaba_experiments_osdi_12_5_afternoon_sysml_homo.sh +++ /dev/null @@ -1,153 +0,0 @@ -#!/bin/bash -# $1 directory where to save the logs. - -# Scheduler runtimes in us.TetriSched -SCHEDULERS=(EDF TetriSched) -# MAX_DEADLINE_VARIANCES=(15 25 50 100 200) -# MAX_DEADLINE_VARIANCES=(200 400 800) -MAX_DEADLINE_VARIANCES=(200 25 50 100) # Keep deadline tight. Don't change this -SCHEDULER_TIME_DISCRETIZATIONS=(1) -GAMMA_COEFFICIENTS=(1 4 2) #cv2 don't change this -RELEASE_POLICIES=(gamma) -# POISSON_ARRIVAL_RATES=(0.2 0.5 1 2) -POISSON_ARRIVAL_RATES=(0.025) # Tune this -DAG_AWARENESS=(1 0) # False True -TASK_CPU_DIVISOR=25 - -WORKER_CONFIG=alibaba_cluster - -ERDOS_SIMULATOR_DIR="." # Change this to the directory where the simulator is located. -MIN_DEADLINE_VARIANCE=10 -NUM_INVOCATIONS=500 -SCHEDULER_LOG_TIMES=10 -SCHEDULER_RUNTIME=0 -LOG_LEVEL=info -REPLAY_TRACE=alibaba -WORKLOAD_PROFILE_PATH=./traces/alibaba-cluster-trace-v2018/alibaba_set_0_6600_dags.pkl -EXECUTION_MODE=replay - -PARALLEL_FACTOR=2 -# Move to the simulator directory. -if [[ -z ${ERDOS_SIMULATOR_DIR} ]]; then - echo "[x] ERRROR: ERDOS_SIMULATOR_DIR is not set" - exit 1 -fi -cd ${ERDOS_SIMULATOR_DIR} - -LOG_DIR=$1 -if [[ -z ${LOG_DIR} ]]; then - echo "[x] ERROR: Please provide a directory to output results to as the first argument." - exit 2 -fi - -execute_experiment () { - LOG_DIR=$1 - LOG_BASE=$2 - echo "[x] Initiating the execution of ${LOG_BASE}" - if [ ! 
-f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then -# --log_dir=${LOG_DIR}/${LOG_BASE} -# --scheduler_log_to_file - MYCONF="\ ---log_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.log ---csv_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv ---log_level=${LOG_LEVEL} ---execution_mode=${EXECUTION_MODE} ---replay_trace=${REPLAY_TRACE} ---max_deadline_variance=${MAX_DEADLINE_VARIANCE} ---min_deadline_variance=${MIN_DEADLINE_VARIANCE} ---workload_profile_path=${WORKLOAD_PROFILE_PATH} ---override_num_invocations=${NUM_INVOCATIONS} ---randomize_start_time_max=100 ---worker_profile_path=profiles/workers/${WORKER_CONFIG}.yaml ---scheduler_runtime=${SCHEDULER_RUNTIME} ---override_release_policy=${RELEASE_POLICY} ---scheduler=${SCHEDULER} ---alibaba_loader_task_cpu_divisor=${TASK_CPU_DIVISOR} -" - if [[ ${RELEASE_POLICY} == fixed ]]; then - MYCONF+="--override_arrival_period=10 -" - else - MYCONF+="--override_poisson_arrival_rate=${POISSON_ARRIVAL_RATE} -" - if [[ ${RELEASE_POLICY} == gamma ]]; then - MYCONF+="--override_gamma_coefficient=${GAMMA_COEFFICIENT} -" - fi - fi - - if [[ ${DAG_AWARE} == 1 ]]; then - MYCONF+="--release_taskgraphs -" - fi - - if [[ ${HETEROGENEOUS_RESOURCE} == 1 ]]; then - MYCONF+="--alibaba_enable_heterogeneous_resource_type -" - fi - - if [[ ${OPTIMIZATION_PASS} == 1 ]]; then - MYCONF+="--scheduler_enable_optimization_pass -" - fi - - if [[ ${SCHEDULER} != EDF ]]; then - MYCONF+=" ---enforce_deadlines ---retract_schedules ---drop_skipped_tasks ---scheduler_log_times=${SCHEDULER_LOG_TIMES} ---scheduler_time_discretization=${SCHEDULER_TIME_DISCRETIZATION} - " - fi - echo "${MYCONF}" > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf - if ! time python3 main.py --flagfile=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.output; then - echo "[x] Failed in the execution of ${LOG_BASE}. Exiting." - exit 3 - fi - else - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - fi - echo "[x] Finished execution of ${LOG_BASE}." -} - - -for POISSON_ARRIVAL_RATE in ${POISSON_ARRIVAL_RATES[@]}; do - for SCHEDULER in ${SCHEDULERS[@]}; do - for RELEASE_POLICY in ${RELEASE_POLICIES[@]}; do - for GAMMA_COEFFICIENT in ${GAMMA_COEFFICIENTS[@]}; do - for MAX_DEADLINE_VARIANCE in ${MAX_DEADLINE_VARIANCES[@]}; do - for SCHEDULER_TIME_DISCRETIZATION in ${SCHEDULER_TIME_DISCRETIZATIONS[@]}; do - for DAG_AWARE in ${DAG_AWARENESS[@]}; do - if [[ ${SCHEDULER} == EDF ]]; then - if [[ "${SCHEDULER_TIME_DISCRETIZATION}" -ne "${SCHEDULER_TIME_DISCRETIZATIONS[0]}" ]]; then - continue - fi - DAG_AWARE=0 - fi - - LOG_BASE=${REPLAY_TRACE}_scheduler_${SCHEDULER}_release_policy_${RELEASE_POLICY}_max_deadline_var_${MAX_DEADLINE_VARIANCE}_dag_aware_${DAG_AWARE}_poisson_arrival_rate_${POISSON_ARRIVAL_RATE}_gamma_coefficient_${GAMMA_COEFFICIENT} - if [[ ${SCHEDULER} == TetriSched ]]; then - LOG_BASE+="_scheduler_discretization_${SCHEDULER_TIME_DISCRETIZATION}" - fi - - if [ -f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - continue - fi - - mkdir -p ${LOG_DIR}/${LOG_BASE} - execute_experiment ${LOG_DIR} ${LOG_BASE} & - if [[ $(jobs -r -p | wc -l) -ge $PARALLEL_FACTOR ]]; then - echo "[x] Waiting for a job to terminate because $PARALLEL_FACTOR jobs are running." - wait -n - fi - done - done - done - done - done - done -done -wait -echo "[x] Finished executing all experiments." 
\ No newline at end of file diff --git a/scripts/run_alibaba_experiments_osdi_12_5_evening_jayne_hetero.sh b/scripts/run_alibaba_experiments_osdi_12_5_evening_jayne_hetero.sh deleted file mode 100755 index a98f4102..00000000 --- a/scripts/run_alibaba_experiments_osdi_12_5_evening_jayne_hetero.sh +++ /dev/null @@ -1,154 +0,0 @@ -#!/bin/bash -# $1 directory where to save the logs. - -# Scheduler runtimes in us.TetriSched -SCHEDULERS=(EDF TetriSched) -# MAX_DEADLINE_VARIANCES=(15 25 50 100 200) -# MAX_DEADLINE_VARIANCES=(200 400 800) -MAX_DEADLINE_VARIANCES=(200) # Keep deadline tight. Don't change this -SCHEDULER_TIME_DISCRETIZATIONS=(1) -GAMMA_COEFFICIENTS=(1 4) #cv2 don't change this -RELEASE_POLICIES=(gamma) -# POISSON_ARRIVAL_RATES=(0.2 0.5 1 2) -POISSON_ARRIVAL_RATES=(0.4 0.5 0.7 1 1.5) # Tune this -DAG_AWARENESS=(1) # False True -TASK_CPU_DIVISOR=25 - -HETEROGENEOUS_RESOURCE=1 -WORKER_CONFIG=alibaba_cluster_heterogeneous_80_slots - -ERDOS_SIMULATOR_DIR="." # Change this to the directory where the simulator is located. -MIN_DEADLINE_VARIANCE=10 -NUM_INVOCATIONS=50 -SCHEDULER_LOG_TIMES=10 -SCHEDULER_RUNTIME=0 -LOG_LEVEL=info -REPLAY_TRACE=alibaba -WORKLOAD_PROFILE_PATH=./traces/alibaba-cluster-trace-v2018/alibaba_set_0_6600_dags.pkl -EXECUTION_MODE=replay - -PARALLEL_FACTOR=2 -# Move to the simulator directory. -if [[ -z ${ERDOS_SIMULATOR_DIR} ]]; then - echo "[x] ERRROR: ERDOS_SIMULATOR_DIR is not set" - exit 1 -fi -cd ${ERDOS_SIMULATOR_DIR} - -LOG_DIR=$1 -if [[ -z ${LOG_DIR} ]]; then - echo "[x] ERROR: Please provide a directory to output results to as the first argument." - exit 2 -fi - -execute_experiment () { - LOG_DIR=$1 - LOG_BASE=$2 - echo "[x] Initiating the execution of ${LOG_BASE}" - if [ ! -f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then -# --log_dir=${LOG_DIR}/${LOG_BASE} -# --scheduler_log_to_file - MYCONF="\ ---log_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.log ---csv_file_name=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv ---log_level=${LOG_LEVEL} ---execution_mode=${EXECUTION_MODE} ---replay_trace=${REPLAY_TRACE} ---max_deadline_variance=${MAX_DEADLINE_VARIANCE} ---min_deadline_variance=${MIN_DEADLINE_VARIANCE} ---workload_profile_path=${WORKLOAD_PROFILE_PATH} ---override_num_invocations=${NUM_INVOCATIONS} ---randomize_start_time_max=100 ---worker_profile_path=profiles/workers/${WORKER_CONFIG}.yaml ---scheduler_runtime=${SCHEDULER_RUNTIME} ---override_release_policy=${RELEASE_POLICY} ---scheduler=${SCHEDULER} ---alibaba_loader_task_cpu_divisor=${TASK_CPU_DIVISOR} -" - if [[ ${RELEASE_POLICY} == fixed ]]; then - MYCONF+="--override_arrival_period=10 -" - else - MYCONF+="--override_poisson_arrival_rate=${POISSON_ARRIVAL_RATE} -" - if [[ ${RELEASE_POLICY} == gamma ]]; then - MYCONF+="--override_gamma_coefficient=${GAMMA_COEFFICIENT} -" - fi - fi - - if [[ ${DAG_AWARE} == 1 ]]; then - MYCONF+="--release_taskgraphs -" - fi - - if [[ ${HETEROGENEOUS_RESOURCE} == 1 ]]; then - MYCONF+="--alibaba_enable_heterogeneous_resource_type -" - fi - - if [[ ${OPTIMIZATION_PASS} == 1 ]]; then - MYCONF+="--scheduler_enable_optimization_pass -" - fi - - if [[ ${SCHEDULER} != EDF ]]; then - MYCONF+=" ---enforce_deadlines ---retract_schedules ---drop_skipped_tasks ---scheduler_log_times=${SCHEDULER_LOG_TIMES} ---scheduler_time_discretization=${SCHEDULER_TIME_DISCRETIZATION} - " - fi - echo "${MYCONF}" > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf - if ! 
time python3 main.py --flagfile=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.output; then - echo "[x] Failed in the execution of ${LOG_BASE}. Exiting." - exit 3 - fi - else - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - fi - echo "[x] Finished execution of ${LOG_BASE}." -} - - -for POISSON_ARRIVAL_RATE in ${POISSON_ARRIVAL_RATES[@]}; do - for SCHEDULER in ${SCHEDULERS[@]}; do - for RELEASE_POLICY in ${RELEASE_POLICIES[@]}; do - for GAMMA_COEFFICIENT in ${GAMMA_COEFFICIENTS[@]}; do - for MAX_DEADLINE_VARIANCE in ${MAX_DEADLINE_VARIANCES[@]}; do - for SCHEDULER_TIME_DISCRETIZATION in ${SCHEDULER_TIME_DISCRETIZATIONS[@]}; do - for DAG_AWARE in ${DAG_AWARENESS[@]}; do - if [[ ${SCHEDULER} == EDF ]]; then - if [[ "${SCHEDULER_TIME_DISCRETIZATION}" -ne "${SCHEDULER_TIME_DISCRETIZATIONS[0]}" ]]; then - continue - fi - DAG_AWARE=0 - fi - - LOG_BASE=${REPLAY_TRACE}_scheduler_${SCHEDULER}_release_policy_${RELEASE_POLICY}_max_deadline_var_${MAX_DEADLINE_VARIANCE}_dag_aware_${DAG_AWARE}_poisson_arrival_rate_${POISSON_ARRIVAL_RATE}_gamma_coefficient_${GAMMA_COEFFICIENT} - if [[ ${SCHEDULER} == TetriSched ]]; then - LOG_BASE+="_scheduler_discretization_${SCHEDULER_TIME_DISCRETIZATION}" - fi - - if [ -f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - continue - fi - - mkdir -p ${LOG_DIR}/${LOG_BASE} - execute_experiment ${LOG_DIR} ${LOG_BASE} & - if [[ $(jobs -r -p | wc -l) -ge $PARALLEL_FACTOR ]]; then - echo "[x] Waiting for a job to terminate because $PARALLEL_FACTOR jobs are running." - wait -n - fi - done - done - done - done - done - done -done -wait -echo "[x] Finished executing all experiments." \ No newline at end of file diff --git a/scripts/run_alibaba_experiments_osdi_12_5_evening_sysml_hetero_fix_gamma.sh b/scripts/run_alibaba_experiments_osdi_12_5_evening_sysml_hetero_fix_gamma.sh deleted file mode 100755 index 4e72dc84..00000000 --- a/scripts/run_alibaba_experiments_osdi_12_5_evening_sysml_hetero_fix_gamma.sh +++ /dev/null @@ -1,159 +0,0 @@ -#!/bin/bash -# $1 directory where to save the logs. - -# Scheduler runtimes in us.TetriSched -SCHEDULERS=(EDF TetriSched) -# MAX_DEADLINE_VARIANCES=(25 50 100 200) -# MAX_DEADLINE_VARIANCES=(200 400 800) -MAX_DEADLINE_VARIANCES=(25 100 200) # Keep deadline tight. Don't change this -SCHEDULER_TIME_DISCRETIZATIONS=(1) -GAMMA_COEFFICIENTS=(1 4) #cv2 don't change this -RELEASE_POLICIES=(fixed_gamma) -# POISSON_ARRIVAL_RATES=(0.2 0.5 1 2) -POISSON_ARRIVAL_RATES=(3) # Tune this -BASE_ARRIVAL_RATES=(0.5) # Tune this -DAG_AWARENESS=(1) # False True -TASK_CPU_DIVISOR=25 - -HETEROGENEOUS_RESOURCE=1 -WORKER_CONFIG=alibaba_cluster_heterogeneous_40_slots - -ERDOS_SIMULATOR_DIR="." # Change this to the directory where the simulator is located. -MIN_DEADLINE_VARIANCE=10 -NUM_INVOCATIONS=50 -SCHEDULER_LOG_TIMES=10 -SCHEDULER_RUNTIME=0 -LOG_LEVEL=info -REPLAY_TRACE=alibaba -WORKLOAD_PROFILE_PATH=./traces/alibaba-cluster-trace-v2018/alibaba_set_0_6600_dags.pkl -EXECUTION_MODE=replay - -PARALLEL_FACTOR=2 -# Move to the simulator directory. -if [[ -z ${ERDOS_SIMULATOR_DIR} ]]; then - echo "[x] ERRROR: ERDOS_SIMULATOR_DIR is not set" - exit 1 -fi -cd ${ERDOS_SIMULATOR_DIR} - -LOG_DIR=$1 -if [[ -z ${LOG_DIR} ]]; then - echo "[x] ERROR: Please provide a directory to output results to as the first argument." - exit 2 -fi - -execute_experiment () { - LOG_DIR=$1 - LOG_BASE=$2 - echo "[x] Initiating the execution of ${LOG_BASE}" - if [ ! 
-f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then - -# --scheduler_log_to_file - MYCONF="\ ---log_dir=${LOG_DIR}/${LOG_BASE} ---log_file_name=${LOG_BASE}.log ---csv_file_name=${LOG_BASE}.csv ---log_level=${LOG_LEVEL} ---execution_mode=${EXECUTION_MODE} ---replay_trace=${REPLAY_TRACE} ---max_deadline_variance=${MAX_DEADLINE_VARIANCE} ---min_deadline_variance=${MIN_DEADLINE_VARIANCE} ---workload_profile_path=${WORKLOAD_PROFILE_PATH} ---override_num_invocations=${NUM_INVOCATIONS} ---override_base_arrival_rate=${BASE_ARRIVAL_RATE} ---randomize_start_time_max=100 ---worker_profile_path=profiles/workers/${WORKER_CONFIG}.yaml ---scheduler_runtime=${SCHEDULER_RUNTIME} ---override_release_policy=${RELEASE_POLICY} ---scheduler=${SCHEDULER} ---alibaba_loader_task_cpu_divisor=${TASK_CPU_DIVISOR} -" - if [[ ${RELEASE_POLICY} == fixed ]]; then - MYCONF+="--override_arrival_period=10 -" - else - MYCONF+="--override_poisson_arrival_rate=${POISSON_ARRIVAL_RATE} -" - if [[ ${RELEASE_POLICY} == gamma || ${RELEASE_POLICY} == fixed_gamma ]]; then - MYCONF+="--override_gamma_coefficient=${GAMMA_COEFFICIENT} -" - fi - fi - - if [[ ${DAG_AWARE} == 1 ]]; then - MYCONF+="--release_taskgraphs -" - fi - - if [[ ${HETEROGENEOUS_RESOURCE} == 1 ]]; then - MYCONF+="--alibaba_enable_heterogeneous_resource_type -" - fi - - if [[ ${OPTIMIZATION_PASS} == 1 ]]; then - MYCONF+="--scheduler_enable_optimization_pass -" - fi - - if [[ ${SCHEDULER} != EDF ]]; then - MYCONF+=" ---enforce_deadlines ---retract_schedules ---drop_skipped_tasks ---scheduler_log_times=${SCHEDULER_LOG_TIMES} ---scheduler_time_discretization=${SCHEDULER_TIME_DISCRETIZATION} - " - fi - echo "${MYCONF}" > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf - if ! time python3 main.py --flagfile=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.output; then - echo "[x] Failed in the execution of ${LOG_BASE}. Exiting." - exit 3 - fi - else - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - fi - echo "[x] Finished execution of ${LOG_BASE}." -} - - -for POISSON_ARRIVAL_RATE in ${POISSON_ARRIVAL_RATES[@]}; do - for BASE_ARRIVAL_RATE in ${BASE_ARRIVAL_RATES[@]}; do - for SCHEDULER in ${SCHEDULERS[@]}; do - for RELEASE_POLICY in ${RELEASE_POLICIES[@]}; do - for GAMMA_COEFFICIENT in ${GAMMA_COEFFICIENTS[@]}; do - for MAX_DEADLINE_VARIANCE in ${MAX_DEADLINE_VARIANCES[@]}; do - for SCHEDULER_TIME_DISCRETIZATION in ${SCHEDULER_TIME_DISCRETIZATIONS[@]}; do - for DAG_AWARE in ${DAG_AWARENESS[@]}; do - if [[ ${SCHEDULER} == EDF ]]; then - if [[ "${SCHEDULER_TIME_DISCRETIZATION}" -ne "${SCHEDULER_TIME_DISCRETIZATIONS[0]}" ]]; then - continue - fi - DAG_AWARE=0 - fi - - LOG_BASE=${REPLAY_TRACE}_scheduler_${SCHEDULER}_release_policy_${RELEASE_POLICY}_max_deadline_var_${MAX_DEADLINE_VARIANCE}_dag_aware_${DAG_AWARE}_poisson_arrival_rate_${POISSON_ARRIVAL_RATE}_gamma_coefficient_${GAMMA_COEFFICIENT}_base_arrival_rate_${BASE_ARRIVAL_RATE} - if [[ ${SCHEDULER} == TetriSched ]]; then - LOG_BASE+="_scheduler_discretization_${SCHEDULER_TIME_DISCRETIZATION}" - fi - - if [ -f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - continue - fi - - mkdir -p ${LOG_DIR}/${LOG_BASE} - execute_experiment ${LOG_DIR} ${LOG_BASE} & - if [[ $(jobs -r -p | wc -l) -ge $PARALLEL_FACTOR ]]; then - echo "[x] Waiting for a job to terminate because $PARALLEL_FACTOR jobs are running." 
- wait -n - fi - done - done - done - done - done - done - done -done -wait -echo "[x] Finished executing all experiments." \ No newline at end of file diff --git a/scripts/run_alibaba_experiments_osdi_12_5_evening_sysml_hetero_fix_gamma_w_dynamic.sh b/scripts/run_alibaba_experiments_osdi_12_5_evening_sysml_hetero_fix_gamma_w_dynamic.sh deleted file mode 100755 index 20f680cc..00000000 --- a/scripts/run_alibaba_experiments_osdi_12_5_evening_sysml_hetero_fix_gamma_w_dynamic.sh +++ /dev/null @@ -1,165 +0,0 @@ -#!/bin/bash -# $1 directory where to save the logs. - -# Scheduler runtimes in us.TetriSched -SCHEDULERS=(EDF TetriSched) -# MAX_DEADLINE_VARIANCES=(25 50 100 200) -# MAX_DEADLINE_VARIANCES=(200 400 800) -MAX_DEADLINE_VARIANCES=(25 100 200) # Keep deadline tight. Don't change this -SCHEDULER_TIME_DISCRETIZATIONS=(1) -GAMMA_COEFFICIENTS=(1 4) #cv2 don't change this -RELEASE_POLICIES=(fixed_gamma) -# POISSON_ARRIVAL_RATES=(0.2 0.5 1 2) -POISSON_ARRIVAL_RATES=(3) # Tune this -BASE_ARRIVAL_RATES=(0.5) # Tune this -DAG_AWARENESS=(1) # False True -TASK_CPU_DIVISOR=25 - -HETEROGENEOUS_RESOURCE=1 -WORKER_CONFIG=alibaba_cluster_heterogeneous_40_slots - -ERDOS_SIMULATOR_DIR="." # Change this to the directory where the simulator is located. -MIN_DEADLINE_VARIANCE=10 -NUM_INVOCATIONS=100 -SCHEDULER_LOG_TIMES=10 -SCHEDULER_RUNTIME=0 -LOG_LEVEL=info -REPLAY_TRACE=alibaba -WORKLOAD_PROFILE_PATH=./traces/alibaba-cluster-trace-v2018/alibaba_set_0_6600_dags.pkl -EXECUTION_MODE=replay - -PARALLEL_FACTOR=2 -# Move to the simulator directory. -if [[ -z ${ERDOS_SIMULATOR_DIR} ]]; then - echo "[x] ERRROR: ERDOS_SIMULATOR_DIR is not set" - exit 1 -fi -cd ${ERDOS_SIMULATOR_DIR} - -LOG_DIR=$1 -if [[ -z ${LOG_DIR} ]]; then - echo "[x] ERROR: Please provide a directory to output results to as the first argument." - exit 2 -fi - -execute_experiment () { - LOG_DIR=$1 - LOG_BASE=$2 - echo "[x] Initiating the execution of ${LOG_BASE}" - if [ ! 
-f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then - -# --scheduler_log_to_file - MYCONF="\ ---log_dir=${LOG_DIR}/${LOG_BASE} ---log_file_name=${LOG_BASE}.log ---csv_file_name=${LOG_BASE}.csv ---log_level=${LOG_LEVEL} ---execution_mode=${EXECUTION_MODE} ---replay_trace=${REPLAY_TRACE} ---max_deadline_variance=${MAX_DEADLINE_VARIANCE} ---min_deadline_variance=${MIN_DEADLINE_VARIANCE} ---workload_profile_path=${WORKLOAD_PROFILE_PATH} ---override_num_invocations=${NUM_INVOCATIONS} ---override_base_arrival_rate=${BASE_ARRIVAL_RATE} ---randomize_start_time_max=100 ---worker_profile_path=profiles/workers/${WORKER_CONFIG}.yaml ---scheduler_runtime=${SCHEDULER_RUNTIME} ---override_release_policy=${RELEASE_POLICY} ---scheduler=${SCHEDULER} ---alibaba_loader_task_cpu_divisor=${TASK_CPU_DIVISOR} -" - if [[ ${RELEASE_POLICY} == fixed ]]; then - MYCONF+="--override_arrival_period=10 -" - else - MYCONF+="--override_poisson_arrival_rate=${POISSON_ARRIVAL_RATE} -" - if [[ ${RELEASE_POLICY} == gamma || ${RELEASE_POLICY} == fixed_gamma ]]; then - MYCONF+="--override_gamma_coefficient=${GAMMA_COEFFICIENT} -" - fi - fi - - if [[ ${DAG_AWARE} == 1 ]]; then - MYCONF+="--release_taskgraphs -" - fi - - if [[ ${HETEROGENEOUS_RESOURCE} == 1 ]]; then - MYCONF+="--alibaba_enable_heterogeneous_resource_type -" - fi - - if [[ ${OPTIMIZATION_PASS} == 1 ]]; then - MYCONF+="--scheduler_enable_optimization_pass -" - fi - - if [[ ${SCHEDULER} != EDF ]]; then - MYCONF+=" ---enforce_deadlines ---retract_schedules ---drop_skipped_tasks ---scheduler_log_times=${SCHEDULER_LOG_TIMES} ---scheduler_time_discretization=${SCHEDULER_TIME_DISCRETIZATION} ---scheduler_dynamic_discretization ---scheduler_max_time_discretization=5 ---scheduler_max_occupancy_threshold=0.7 - " - fi - echo "${MYCONF}" > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf - if ! time python3 main.py --flagfile=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.output; then - echo "[x] Failed in the execution of ${LOG_BASE}. Exiting." - exit 3 - fi - else - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - fi - echo "[x] Finished execution of ${LOG_BASE}." -} - - -for POISSON_ARRIVAL_RATE in ${POISSON_ARRIVAL_RATES[@]}; do - for BASE_ARRIVAL_RATE in ${BASE_ARRIVAL_RATES[@]}; do - for SCHEDULER in ${SCHEDULERS[@]}; do - for RELEASE_POLICY in ${RELEASE_POLICIES[@]}; do - for GAMMA_COEFFICIENT in ${GAMMA_COEFFICIENTS[@]}; do - for MAX_DEADLINE_VARIANCE in ${MAX_DEADLINE_VARIANCES[@]}; do - for SCHEDULER_TIME_DISCRETIZATION in ${SCHEDULER_TIME_DISCRETIZATIONS[@]}; do - for DAG_AWARE in ${DAG_AWARENESS[@]}; do - if [[ ${SCHEDULER} == EDF ]]; then - if [[ "${SCHEDULER_TIME_DISCRETIZATION}" -ne "${SCHEDULER_TIME_DISCRETIZATIONS[0]}" ]]; then - continue - fi - DAG_AWARE=0 - fi - - LOG_BASE=${REPLAY_TRACE}_scheduler_${SCHEDULER}_release_policy_${RELEASE_POLICY}_max_deadline_var_${MAX_DEADLINE_VARIANCE}_dag_aware_${DAG_AWARE}_poisson_arrival_rate_${POISSON_ARRIVAL_RATE}_gamma_coefficient_${GAMMA_COEFFICIENT}_base_arrival_rate_${BASE_ARRIVAL_RATE} - if [[ ${SCHEDULER} == TetriSched ]]; then - LOG_BASE+="_scheduler_discretization_${SCHEDULER_TIME_DISCRETIZATION}" - if [[ ${DYNAMIC_DISCRETIZATION} == 1 ]]; then - LOG_BASE+="_dynamic_max_occupancy_threshold_0.7_noCPOptPass" - fi - fi - - if [ -f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." 
- continue - fi - - mkdir -p ${LOG_DIR}/${LOG_BASE} - execute_experiment ${LOG_DIR} ${LOG_BASE} & - if [[ $(jobs -r -p | wc -l) -ge $PARALLEL_FACTOR ]]; then - echo "[x] Waiting for a job to terminate because $PARALLEL_FACTOR jobs are running." - wait -n - fi - done - done - done - done - done - done - done -done -wait -echo "[x] Finished executing all experiments." \ No newline at end of file diff --git a/scripts/run_alibaba_experiments_osdi_12_5_night_jayne_hetero_fix_gamma_w_dynamic.sh b/scripts/run_alibaba_experiments_osdi_12_5_night_jayne_hetero_fix_gamma_w_dynamic.sh deleted file mode 100755 index 7400dd17..00000000 --- a/scripts/run_alibaba_experiments_osdi_12_5_night_jayne_hetero_fix_gamma_w_dynamic.sh +++ /dev/null @@ -1,171 +0,0 @@ -#!/bin/bash -# $1 directory where to save the logs. - -# Scheduler runtimes in us.TetriSched -SCHEDULERS=(EDF TetriSched) -# MAX_DEADLINE_VARIANCES=(25 50 100 200) -# MAX_DEADLINE_VARIANCES=(200 400 800) -MAX_DEADLINE_VARIANCES=(200 25 50 100) # Keep deadline tight. Don't change this -SCHEDULER_TIME_DISCRETIZATIONS=(1) -GAMMA_COEFFICIENTS=(1 2 4) #cv2 don't change this -RELEASE_POLICIES=(fixed_gamma) -# POISSON_ARRIVAL_RATES=(0.2 0.5 1 2) -POISSON_ARRIVAL_RATES=(0.48 0.72) # Tune this -BASE_ARRIVAL_RATES=(0.24 0.36) # Tune this -DAG_AWARENESS=(1) # False True -TASK_CPU_DIVISOR=25 - -HETEROGENEOUS_RESOURCE=1 -WORKER_CONFIG=alibaba_cluster_heterogeneous_40_slots - -ERDOS_SIMULATOR_DIR="." # Change this to the directory where the simulator is located. -MIN_DEADLINE_VARIANCE=10 -NUM_INVOCATIONS=400 -SCHEDULER_LOG_TIMES=10 -SCHEDULER_RUNTIME=0 -LOG_LEVEL=info -REPLAY_TRACE=alibaba -WORKLOAD_PROFILE_PATH=./traces/alibaba-cluster-trace-v2018/alibaba_set_0_6600_dags.pkl -EXECUTION_MODE=replay - -PARALLEL_FACTOR=2 -# Move to the simulator directory. -if [[ -z ${ERDOS_SIMULATOR_DIR} ]]; then - echo "[x] ERRROR: ERDOS_SIMULATOR_DIR is not set" - exit 1 -fi -cd ${ERDOS_SIMULATOR_DIR} - -LOG_DIR=$1 -if [[ -z ${LOG_DIR} ]]; then - echo "[x] ERROR: Please provide a directory to output results to as the first argument." - exit 2 -fi - -execute_experiment () { - LOG_DIR=$1 - LOG_BASE=$2 - echo "[x] Initiating the execution of ${LOG_BASE}" - if [ ! 
-f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then - -# --scheduler_log_to_file - MYCONF="\ ---log_dir=${LOG_DIR}/${LOG_BASE} ---log_file_name=${LOG_BASE}.log ---csv_file_name=${LOG_BASE}.csv ---log_level=${LOG_LEVEL} ---execution_mode=${EXECUTION_MODE} ---replay_trace=${REPLAY_TRACE} ---max_deadline_variance=${MAX_DEADLINE_VARIANCE} ---min_deadline_variance=${MIN_DEADLINE_VARIANCE} ---workload_profile_path=${WORKLOAD_PROFILE_PATH} ---override_num_invocations=${NUM_INVOCATIONS} ---override_base_arrival_rate=${BASE_ARRIVAL_RATE} ---randomize_start_time_max=100 ---worker_profile_path=profiles/workers/${WORKER_CONFIG}.yaml ---scheduler_runtime=${SCHEDULER_RUNTIME} ---override_release_policy=${RELEASE_POLICY} ---scheduler=${SCHEDULER} ---alibaba_loader_task_cpu_divisor=${TASK_CPU_DIVISOR} -" - if [[ ${RELEASE_POLICY} == fixed ]]; then - MYCONF+="--override_arrival_period=10 -" - else - MYCONF+="--override_poisson_arrival_rate=${POISSON_ARRIVAL_RATE} -" - if [[ ${RELEASE_POLICY} == gamma || ${RELEASE_POLICY} == fixed_gamma ]]; then - MYCONF+="--override_gamma_coefficient=${GAMMA_COEFFICIENT} -" - fi - fi - - if [[ ${DAG_AWARE} == 1 ]]; then - MYCONF+="--release_taskgraphs -" - fi - - if [[ ${HETEROGENEOUS_RESOURCE} == 1 ]]; then - MYCONF+="--alibaba_enable_heterogeneous_resource_type -" - fi - - if [[ ${OPTIMIZATION_PASS} == 1 ]]; then - MYCONF+="--scheduler_enable_optimization_pass -" - fi - - if [[ ${SCHEDULER} != EDF ]]; then - MYCONF+=" ---enforce_deadlines ---retract_schedules ---drop_skipped_tasks ---scheduler_log_times=${SCHEDULER_LOG_TIMES} ---scheduler_time_discretization=${SCHEDULER_TIME_DISCRETIZATION} ---scheduler_dynamic_discretization ---scheduler_max_time_discretization=5 ---scheduler_max_occupancy_threshold=0.7 - " - fi - echo "${MYCONF}" > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf - if ! time python3 main.py --flagfile=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.output; then - echo "[x] Failed in the execution of ${LOG_BASE}. Exiting." - exit 3 - fi - else - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - fi - echo "[x] Finished execution of ${LOG_BASE}." -} - - -for POISSON_ARRIVAL_RATE in ${POISSON_ARRIVAL_RATES[@]}; do - for SCHEDULER in ${SCHEDULERS[@]}; do - for RELEASE_POLICY in ${RELEASE_POLICIES[@]}; do - for GAMMA_COEFFICIENT in ${GAMMA_COEFFICIENTS[@]}; do - for MAX_DEADLINE_VARIANCE in ${MAX_DEADLINE_VARIANCES[@]}; do - for SCHEDULER_TIME_DISCRETIZATION in ${SCHEDULER_TIME_DISCRETIZATIONS[@]}; do - for DAG_AWARE in ${DAG_AWARENESS[@]}; do - if [[ ${SCHEDULER} == EDF ]]; then - if [[ "${SCHEDULER_TIME_DISCRETIZATION}" -ne "${SCHEDULER_TIME_DISCRETIZATIONS[0]}" ]]; then - continue - fi - DAG_AWARE=0 - fi - - # TODO: Make this more elegant. 
- if [[ ${POISSON_ARRIVAL_RATE} == ${POISSON_ARRIVAL_RATES[0]} ]]; then - BASE_ARRIVAL_RATE=${BASE_ARRIVAL_RATES[0]} - fi - if [[ ${POISSON_ARRIVAL_RATE} == ${POISSON_ARRIVAL_RATES[1]} ]]; then - BASE_ARRIVAL_RATE=${BASE_ARRIVAL_RATES[1]} - fi - - LOG_BASE=${REPLAY_TRACE}_scheduler_${SCHEDULER}_release_policy_${RELEASE_POLICY}_max_deadline_var_${MAX_DEADLINE_VARIANCE}_dag_aware_${DAG_AWARE}_poisson_arrival_rate_${POISSON_ARRIVAL_RATE}_gamma_coefficient_${GAMMA_COEFFICIENT}_base_arrival_rate_${BASE_ARRIVAL_RATE} - if [[ ${SCHEDULER} == TetriSched ]]; then - LOG_BASE+="_scheduler_discretization_${SCHEDULER_TIME_DISCRETIZATION}" - if [[ ${DYNAMIC_DISCRETIZATION} == 1 ]]; then - LOG_BASE+="_dynamic_max_occupancy_threshold_0.7_noCPOptPass" - fi - fi - - if [ -f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - continue - fi - - mkdir -p ${LOG_DIR}/${LOG_BASE} - execute_experiment ${LOG_DIR} ${LOG_BASE} & - if [[ $(jobs -r -p | wc -l) -ge $PARALLEL_FACTOR ]]; then - echo "[x] Waiting for a job to terminate because $PARALLEL_FACTOR jobs are running." - wait -n - fi - done - done - done - done - done - done -done -wait -echo "[x] Finished executing all experiments." \ No newline at end of file diff --git a/scripts/run_alibaba_experiments_osdi_12_5_night_rivertam_hetero_fix_gamma_w_dynamic.sh b/scripts/run_alibaba_experiments_osdi_12_5_night_rivertam_hetero_fix_gamma_w_dynamic.sh deleted file mode 100755 index b2a6ce6c..00000000 --- a/scripts/run_alibaba_experiments_osdi_12_5_night_rivertam_hetero_fix_gamma_w_dynamic.sh +++ /dev/null @@ -1,171 +0,0 @@ -#!/bin/bash -# $1 directory where to save the logs. - -# Scheduler runtimes in us.TetriSched -SCHEDULERS=(EDF TetriSched) -# MAX_DEADLINE_VARIANCES=(25 50 100 200) -# MAX_DEADLINE_VARIANCES=(200 400 800) -MAX_DEADLINE_VARIANCES=(200 25 50 100) # Keep deadline tight. Don't change this -SCHEDULER_TIME_DISCRETIZATIONS=(1) -GAMMA_COEFFICIENTS=(1 2 4) #cv2 don't change this -RELEASE_POLICIES=(fixed_gamma) -# POISSON_ARRIVAL_RATES=(0.2 0.5 1 2) -POISSON_ARRIVAL_RATES=(0.06 0.08) # Tune this -BASE_ARRIVAL_RATES=(0.03 0.04) # Tune this -DAG_AWARENESS=(1) # False True -TASK_CPU_DIVISOR=25 - -HETEROGENEOUS_RESOURCE=1 -WORKER_CONFIG=alibaba_cluster_heterogeneous_20_slots - -ERDOS_SIMULATOR_DIR="." # Change this to the directory where the simulator is located. -MIN_DEADLINE_VARIANCE=10 -NUM_INVOCATIONS=400 -SCHEDULER_LOG_TIMES=10 -SCHEDULER_RUNTIME=0 -LOG_LEVEL=info -REPLAY_TRACE=alibaba -WORKLOAD_PROFILE_PATH=./traces/alibaba-cluster-trace-v2018/alibaba_set_0_6600_dags.pkl -EXECUTION_MODE=replay - -PARALLEL_FACTOR=2 -# Move to the simulator directory. -if [[ -z ${ERDOS_SIMULATOR_DIR} ]]; then - echo "[x] ERRROR: ERDOS_SIMULATOR_DIR is not set" - exit 1 -fi -cd ${ERDOS_SIMULATOR_DIR} - -LOG_DIR=$1 -if [[ -z ${LOG_DIR} ]]; then - echo "[x] ERROR: Please provide a directory to output results to as the first argument." - exit 2 -fi - -execute_experiment () { - LOG_DIR=$1 - LOG_BASE=$2 - echo "[x] Initiating the execution of ${LOG_BASE}" - if [ ! 
-f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then - -# --scheduler_log_to_file - MYCONF="\ ---log_dir=${LOG_DIR}/${LOG_BASE} ---log_file_name=${LOG_BASE}.log ---csv_file_name=${LOG_BASE}.csv ---log_level=${LOG_LEVEL} ---execution_mode=${EXECUTION_MODE} ---replay_trace=${REPLAY_TRACE} ---max_deadline_variance=${MAX_DEADLINE_VARIANCE} ---min_deadline_variance=${MIN_DEADLINE_VARIANCE} ---workload_profile_path=${WORKLOAD_PROFILE_PATH} ---override_num_invocations=${NUM_INVOCATIONS} ---override_base_arrival_rate=${BASE_ARRIVAL_RATE} ---randomize_start_time_max=100 ---worker_profile_path=profiles/workers/${WORKER_CONFIG}.yaml ---scheduler_runtime=${SCHEDULER_RUNTIME} ---override_release_policy=${RELEASE_POLICY} ---scheduler=${SCHEDULER} ---alibaba_loader_task_cpu_divisor=${TASK_CPU_DIVISOR} -" - if [[ ${RELEASE_POLICY} == fixed ]]; then - MYCONF+="--override_arrival_period=10 -" - else - MYCONF+="--override_poisson_arrival_rate=${POISSON_ARRIVAL_RATE} -" - if [[ ${RELEASE_POLICY} == gamma || ${RELEASE_POLICY} == fixed_gamma ]]; then - MYCONF+="--override_gamma_coefficient=${GAMMA_COEFFICIENT} -" - fi - fi - - if [[ ${DAG_AWARE} == 1 ]]; then - MYCONF+="--release_taskgraphs -" - fi - - if [[ ${HETEROGENEOUS_RESOURCE} == 1 ]]; then - MYCONF+="--alibaba_enable_heterogeneous_resource_type -" - fi - - if [[ ${OPTIMIZATION_PASS} == 1 ]]; then - MYCONF+="--scheduler_enable_optimization_pass -" - fi - - if [[ ${SCHEDULER} != EDF ]]; then - MYCONF+=" ---enforce_deadlines ---retract_schedules ---drop_skipped_tasks ---scheduler_log_times=${SCHEDULER_LOG_TIMES} ---scheduler_time_discretization=${SCHEDULER_TIME_DISCRETIZATION} ---scheduler_dynamic_discretization ---scheduler_max_time_discretization=5 ---scheduler_max_occupancy_threshold=0.7 - " - fi - echo "${MYCONF}" > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf - if ! time python3 main.py --flagfile=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.output; then - echo "[x] Failed in the execution of ${LOG_BASE}. Exiting." - exit 3 - fi - else - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - fi - echo "[x] Finished execution of ${LOG_BASE}." -} - - -for POISSON_ARRIVAL_RATE in ${POISSON_ARRIVAL_RATES[@]}; do - for SCHEDULER in ${SCHEDULERS[@]}; do - for RELEASE_POLICY in ${RELEASE_POLICIES[@]}; do - for GAMMA_COEFFICIENT in ${GAMMA_COEFFICIENTS[@]}; do - for MAX_DEADLINE_VARIANCE in ${MAX_DEADLINE_VARIANCES[@]}; do - for SCHEDULER_TIME_DISCRETIZATION in ${SCHEDULER_TIME_DISCRETIZATIONS[@]}; do - for DAG_AWARE in ${DAG_AWARENESS[@]}; do - if [[ ${SCHEDULER} == EDF ]]; then - if [[ "${SCHEDULER_TIME_DISCRETIZATION}" -ne "${SCHEDULER_TIME_DISCRETIZATIONS[0]}" ]]; then - continue - fi - DAG_AWARE=0 - fi - - # TODO: Make this more elegant. 
- if [[ ${POISSON_ARRIVAL_RATE} == ${POISSON_ARRIVAL_RATES[0]} ]]; then - BASE_ARRIVAL_RATE=${BASE_ARRIVAL_RATES[0]} - fi - if [[ ${POISSON_ARRIVAL_RATE} == ${POISSON_ARRIVAL_RATES[1]} ]]; then - BASE_ARRIVAL_RATE=${BASE_ARRIVAL_RATES[1]} - fi - - LOG_BASE=${REPLAY_TRACE}_scheduler_${SCHEDULER}_release_policy_${RELEASE_POLICY}_max_deadline_var_${MAX_DEADLINE_VARIANCE}_dag_aware_${DAG_AWARE}_poisson_arrival_rate_${POISSON_ARRIVAL_RATE}_gamma_coefficient_${GAMMA_COEFFICIENT}_base_arrival_rate_${BASE_ARRIVAL_RATE} - if [[ ${SCHEDULER} == TetriSched ]]; then - LOG_BASE+="_scheduler_discretization_${SCHEDULER_TIME_DISCRETIZATION}" - if [[ ${DYNAMIC_DISCRETIZATION} == 1 ]]; then - LOG_BASE+="_dynamic_max_occupancy_threshold_0.7_noCPOptPass" - fi - fi - - if [ -f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - continue - fi - - mkdir -p ${LOG_DIR}/${LOG_BASE} - execute_experiment ${LOG_DIR} ${LOG_BASE} & - if [[ $(jobs -r -p | wc -l) -ge $PARALLEL_FACTOR ]]; then - echo "[x] Waiting for a job to terminate because $PARALLEL_FACTOR jobs are running." - wait -n - fi - done - done - done - done - done - done -done -wait -echo "[x] Finished executing all experiments." \ No newline at end of file diff --git a/scripts/run_alibaba_experiments_osdi_12_5_night_sheperd_homo_fix_gamma_w_dynamic.sh b/scripts/run_alibaba_experiments_osdi_12_5_night_sheperd_homo_fix_gamma_w_dynamic.sh deleted file mode 100755 index 74856b3f..00000000 --- a/scripts/run_alibaba_experiments_osdi_12_5_night_sheperd_homo_fix_gamma_w_dynamic.sh +++ /dev/null @@ -1,171 +0,0 @@ -#!/bin/bash -# $1 directory where to save the logs. - -# Scheduler runtimes in us.TetriSched -SCHEDULERS=(EDF TetriSched) -# MAX_DEADLINE_VARIANCES=(25 50 100 200) -# MAX_DEADLINE_VARIANCES=(200 400 800) -MAX_DEADLINE_VARIANCES=(200 25 50 100) # Keep deadline tight. Don't change this -SCHEDULER_TIME_DISCRETIZATIONS=(1) -GAMMA_COEFFICIENTS=(1 2 4) #cv2 don't change this -RELEASE_POLICIES=(fixed_gamma) -# POISSON_ARRIVAL_RATES=(0.2 0.5 1 2) -POISSON_ARRIVAL_RATES=(0.48 0.72) # Tune this -BASE_ARRIVAL_RATES=(0.24 0.36) # Tune this -DAG_AWARENESS=(1) # False True -TASK_CPU_DIVISOR=25 - -HETEROGENEOUS_RESOURCE=0 -WORKER_CONFIG=alibaba_cluster_40_slots - -ERDOS_SIMULATOR_DIR="." # Change this to the directory where the simulator is located. -MIN_DEADLINE_VARIANCE=10 -NUM_INVOCATIONS=400 -SCHEDULER_LOG_TIMES=10 -SCHEDULER_RUNTIME=0 -LOG_LEVEL=info -REPLAY_TRACE=alibaba -WORKLOAD_PROFILE_PATH=./traces/alibaba-cluster-trace-v2018/alibaba_set_0_6600_dags.pkl -EXECUTION_MODE=replay - -PARALLEL_FACTOR=2 -# Move to the simulator directory. -if [[ -z ${ERDOS_SIMULATOR_DIR} ]]; then - echo "[x] ERRROR: ERDOS_SIMULATOR_DIR is not set" - exit 1 -fi -cd ${ERDOS_SIMULATOR_DIR} - -LOG_DIR=$1 -if [[ -z ${LOG_DIR} ]]; then - echo "[x] ERROR: Please provide a directory to output results to as the first argument." - exit 2 -fi - -execute_experiment () { - LOG_DIR=$1 - LOG_BASE=$2 - echo "[x] Initiating the execution of ${LOG_BASE}" - if [ ! 
-f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then - -# --scheduler_log_to_file - MYCONF="\ ---log_dir=${LOG_DIR}/${LOG_BASE} ---log_file_name=${LOG_BASE}.log ---csv_file_name=${LOG_BASE}.csv ---log_level=${LOG_LEVEL} ---execution_mode=${EXECUTION_MODE} ---replay_trace=${REPLAY_TRACE} ---max_deadline_variance=${MAX_DEADLINE_VARIANCE} ---min_deadline_variance=${MIN_DEADLINE_VARIANCE} ---workload_profile_path=${WORKLOAD_PROFILE_PATH} ---override_num_invocations=${NUM_INVOCATIONS} ---override_base_arrival_rate=${BASE_ARRIVAL_RATE} ---randomize_start_time_max=100 ---worker_profile_path=profiles/workers/${WORKER_CONFIG}.yaml ---scheduler_runtime=${SCHEDULER_RUNTIME} ---override_release_policy=${RELEASE_POLICY} ---scheduler=${SCHEDULER} ---alibaba_loader_task_cpu_divisor=${TASK_CPU_DIVISOR} -" - if [[ ${RELEASE_POLICY} == fixed ]]; then - MYCONF+="--override_arrival_period=10 -" - else - MYCONF+="--override_poisson_arrival_rate=${POISSON_ARRIVAL_RATE} -" - if [[ ${RELEASE_POLICY} == gamma || ${RELEASE_POLICY} == fixed_gamma ]]; then - MYCONF+="--override_gamma_coefficient=${GAMMA_COEFFICIENT} -" - fi - fi - - if [[ ${DAG_AWARE} == 1 ]]; then - MYCONF+="--release_taskgraphs -" - fi - - if [[ ${HETEROGENEOUS_RESOURCE} == 1 ]]; then - MYCONF+="--alibaba_enable_heterogeneous_resource_type -" - fi - - if [[ ${OPTIMIZATION_PASS} == 1 ]]; then - MYCONF+="--scheduler_enable_optimization_pass -" - fi - - if [[ ${SCHEDULER} != EDF ]]; then - MYCONF+=" ---enforce_deadlines ---retract_schedules ---drop_skipped_tasks ---scheduler_log_times=${SCHEDULER_LOG_TIMES} ---scheduler_time_discretization=${SCHEDULER_TIME_DISCRETIZATION} ---scheduler_dynamic_discretization ---scheduler_max_time_discretization=5 ---scheduler_max_occupancy_threshold=0.7 - " - fi - echo "${MYCONF}" > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf - if ! time python3 main.py --flagfile=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.output; then - echo "[x] Failed in the execution of ${LOG_BASE}. Exiting." - exit 3 - fi - else - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - fi - echo "[x] Finished execution of ${LOG_BASE}." -} - - -for POISSON_ARRIVAL_RATE in ${POISSON_ARRIVAL_RATES[@]}; do - for SCHEDULER in ${SCHEDULERS[@]}; do - for RELEASE_POLICY in ${RELEASE_POLICIES[@]}; do - for GAMMA_COEFFICIENT in ${GAMMA_COEFFICIENTS[@]}; do - for MAX_DEADLINE_VARIANCE in ${MAX_DEADLINE_VARIANCES[@]}; do - for SCHEDULER_TIME_DISCRETIZATION in ${SCHEDULER_TIME_DISCRETIZATIONS[@]}; do - for DAG_AWARE in ${DAG_AWARENESS[@]}; do - if [[ ${SCHEDULER} == EDF ]]; then - if [[ "${SCHEDULER_TIME_DISCRETIZATION}" -ne "${SCHEDULER_TIME_DISCRETIZATIONS[0]}" ]]; then - continue - fi - DAG_AWARE=0 - fi - - # TODO: Make this more elegant. 
- if [[ ${POISSON_ARRIVAL_RATE} == ${POISSON_ARRIVAL_RATES[0]} ]]; then - BASE_ARRIVAL_RATE=${BASE_ARRIVAL_RATES[0]} - fi - if [[ ${POISSON_ARRIVAL_RATE} == ${POISSON_ARRIVAL_RATES[1]} ]]; then - BASE_ARRIVAL_RATE=${BASE_ARRIVAL_RATES[1]} - fi - - LOG_BASE=${REPLAY_TRACE}_scheduler_${SCHEDULER}_release_policy_${RELEASE_POLICY}_max_deadline_var_${MAX_DEADLINE_VARIANCE}_dag_aware_${DAG_AWARE}_poisson_arrival_rate_${POISSON_ARRIVAL_RATE}_gamma_coefficient_${GAMMA_COEFFICIENT}_base_arrival_rate_${BASE_ARRIVAL_RATE} - if [[ ${SCHEDULER} == TetriSched ]]; then - LOG_BASE+="_scheduler_discretization_${SCHEDULER_TIME_DISCRETIZATION}" - if [[ ${DYNAMIC_DISCRETIZATION} == 1 ]]; then - LOG_BASE+="_dynamic_max_occupancy_threshold_0.7_noCPOptPass" - fi - fi - - if [ -f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - continue - fi - - mkdir -p ${LOG_DIR}/${LOG_BASE} - execute_experiment ${LOG_DIR} ${LOG_BASE} & - if [[ $(jobs -r -p | wc -l) -ge $PARALLEL_FACTOR ]]; then - echo "[x] Waiting for a job to terminate because $PARALLEL_FACTOR jobs are running." - wait -n - fi - done - done - done - done - done - done -done -wait -echo "[x] Finished executing all experiments." \ No newline at end of file diff --git a/scripts/run_alibaba_experiments_osdi_12_5_night_simontam_homo_fix_gamma_w_dynamic.sh b/scripts/run_alibaba_experiments_osdi_12_5_night_simontam_homo_fix_gamma_w_dynamic.sh deleted file mode 100755 index 62f5e369..00000000 --- a/scripts/run_alibaba_experiments_osdi_12_5_night_simontam_homo_fix_gamma_w_dynamic.sh +++ /dev/null @@ -1,171 +0,0 @@ -#!/bin/bash -# $1 directory where to save the logs. - -# Scheduler runtimes in us.TetriSched -SCHEDULERS=(EDF TetriSched) -# MAX_DEADLINE_VARIANCES=(25 50 100 200) -# MAX_DEADLINE_VARIANCES=(200 400 800) -MAX_DEADLINE_VARIANCES=(200 25 50 100) # Keep deadline tight. Don't change this -SCHEDULER_TIME_DISCRETIZATIONS=(1) -GAMMA_COEFFICIENTS=(1 2 4) #cv2 don't change this -RELEASE_POLICIES=(fixed_gamma) -# POISSON_ARRIVAL_RATES=(0.2 0.5 1 2) -POISSON_ARRIVAL_RATES=(0.06 0.08) # Tune this -BASE_ARRIVAL_RATES=(0.03 0.04) # Tune this -DAG_AWARENESS=(1) # False True -TASK_CPU_DIVISOR=25 - -HETEROGENEOUS_RESOURCE=0 -WORKER_CONFIG=alibaba_cluster_20_slots - -ERDOS_SIMULATOR_DIR="." # Change this to the directory where the simulator is located. -MIN_DEADLINE_VARIANCE=10 -NUM_INVOCATIONS=400 -SCHEDULER_LOG_TIMES=10 -SCHEDULER_RUNTIME=0 -LOG_LEVEL=info -REPLAY_TRACE=alibaba -WORKLOAD_PROFILE_PATH=./traces/alibaba-cluster-trace-v2018/alibaba_set_0_6600_dags.pkl -EXECUTION_MODE=replay - -PARALLEL_FACTOR=2 -# Move to the simulator directory. -if [[ -z ${ERDOS_SIMULATOR_DIR} ]]; then - echo "[x] ERRROR: ERDOS_SIMULATOR_DIR is not set" - exit 1 -fi -cd ${ERDOS_SIMULATOR_DIR} - -LOG_DIR=$1 -if [[ -z ${LOG_DIR} ]]; then - echo "[x] ERROR: Please provide a directory to output results to as the first argument." - exit 2 -fi - -execute_experiment () { - LOG_DIR=$1 - LOG_BASE=$2 - echo "[x] Initiating the execution of ${LOG_BASE}" - if [ ! 
-f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then - -# --scheduler_log_to_file - MYCONF="\ ---log_dir=${LOG_DIR}/${LOG_BASE} ---log_file_name=${LOG_BASE}.log ---csv_file_name=${LOG_BASE}.csv ---log_level=${LOG_LEVEL} ---execution_mode=${EXECUTION_MODE} ---replay_trace=${REPLAY_TRACE} ---max_deadline_variance=${MAX_DEADLINE_VARIANCE} ---min_deadline_variance=${MIN_DEADLINE_VARIANCE} ---workload_profile_path=${WORKLOAD_PROFILE_PATH} ---override_num_invocations=${NUM_INVOCATIONS} ---override_base_arrival_rate=${BASE_ARRIVAL_RATE} ---randomize_start_time_max=100 ---worker_profile_path=profiles/workers/${WORKER_CONFIG}.yaml ---scheduler_runtime=${SCHEDULER_RUNTIME} ---override_release_policy=${RELEASE_POLICY} ---scheduler=${SCHEDULER} ---alibaba_loader_task_cpu_divisor=${TASK_CPU_DIVISOR} -" - if [[ ${RELEASE_POLICY} == fixed ]]; then - MYCONF+="--override_arrival_period=10 -" - else - MYCONF+="--override_poisson_arrival_rate=${POISSON_ARRIVAL_RATE} -" - if [[ ${RELEASE_POLICY} == gamma || ${RELEASE_POLICY} == fixed_gamma ]]; then - MYCONF+="--override_gamma_coefficient=${GAMMA_COEFFICIENT} -" - fi - fi - - if [[ ${DAG_AWARE} == 1 ]]; then - MYCONF+="--release_taskgraphs -" - fi - - if [[ ${HETEROGENEOUS_RESOURCE} == 1 ]]; then - MYCONF+="--alibaba_enable_heterogeneous_resource_type -" - fi - - if [[ ${OPTIMIZATION_PASS} == 1 ]]; then - MYCONF+="--scheduler_enable_optimization_pass -" - fi - - if [[ ${SCHEDULER} != EDF ]]; then - MYCONF+=" ---enforce_deadlines ---retract_schedules ---drop_skipped_tasks ---scheduler_log_times=${SCHEDULER_LOG_TIMES} ---scheduler_time_discretization=${SCHEDULER_TIME_DISCRETIZATION} ---scheduler_dynamic_discretization ---scheduler_max_time_discretization=5 ---scheduler_max_occupancy_threshold=0.7 - " - fi - echo "${MYCONF}" > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf - if ! time python3 main.py --flagfile=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.output; then - echo "[x] Failed in the execution of ${LOG_BASE}. Exiting." - exit 3 - fi - else - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - fi - echo "[x] Finished execution of ${LOG_BASE}." -} - - -for POISSON_ARRIVAL_RATE in ${POISSON_ARRIVAL_RATES[@]}; do - for SCHEDULER in ${SCHEDULERS[@]}; do - for RELEASE_POLICY in ${RELEASE_POLICIES[@]}; do - for GAMMA_COEFFICIENT in ${GAMMA_COEFFICIENTS[@]}; do - for MAX_DEADLINE_VARIANCE in ${MAX_DEADLINE_VARIANCES[@]}; do - for SCHEDULER_TIME_DISCRETIZATION in ${SCHEDULER_TIME_DISCRETIZATIONS[@]}; do - for DAG_AWARE in ${DAG_AWARENESS[@]}; do - if [[ ${SCHEDULER} == EDF ]]; then - if [[ "${SCHEDULER_TIME_DISCRETIZATION}" -ne "${SCHEDULER_TIME_DISCRETIZATIONS[0]}" ]]; then - continue - fi - DAG_AWARE=0 - fi - - # TODO: Make this more elegant. 
- if [[ ${POISSON_ARRIVAL_RATE} == ${POISSON_ARRIVAL_RATES[0]} ]]; then - BASE_ARRIVAL_RATE=${BASE_ARRIVAL_RATES[0]} - fi - if [[ ${POISSON_ARRIVAL_RATE} == ${POISSON_ARRIVAL_RATES[1]} ]]; then - BASE_ARRIVAL_RATE=${BASE_ARRIVAL_RATES[1]} - fi - - LOG_BASE=${REPLAY_TRACE}_scheduler_${SCHEDULER}_release_policy_${RELEASE_POLICY}_max_deadline_var_${MAX_DEADLINE_VARIANCE}_dag_aware_${DAG_AWARE}_poisson_arrival_rate_${POISSON_ARRIVAL_RATE}_gamma_coefficient_${GAMMA_COEFFICIENT}_base_arrival_rate_${BASE_ARRIVAL_RATE} - if [[ ${SCHEDULER} == TetriSched ]]; then - LOG_BASE+="_scheduler_discretization_${SCHEDULER_TIME_DISCRETIZATION}" - if [[ ${DYNAMIC_DISCRETIZATION} == 1 ]]; then - LOG_BASE+="_dynamic_max_occupancy_threshold_0.7_noCPOptPass" - fi - fi - - if [ -f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - continue - fi - - mkdir -p ${LOG_DIR}/${LOG_BASE} - execute_experiment ${LOG_DIR} ${LOG_BASE} & - if [[ $(jobs -r -p | wc -l) -ge $PARALLEL_FACTOR ]]; then - echo "[x] Waiting for a job to terminate because $PARALLEL_FACTOR jobs are running." - wait -n - fi - done - done - done - done - done - done -done -wait -echo "[x] Finished executing all experiments." \ No newline at end of file diff --git a/scripts/run_alibaba_experiments_osdi_12_5_night_sysml_02_homo_fix_gamma_w_dynamic.sh b/scripts/run_alibaba_experiments_osdi_12_5_night_sysml_02_homo_fix_gamma_w_dynamic.sh deleted file mode 100755 index 7d00c2fa..00000000 --- a/scripts/run_alibaba_experiments_osdi_12_5_night_sysml_02_homo_fix_gamma_w_dynamic.sh +++ /dev/null @@ -1,171 +0,0 @@ -#!/bin/bash -# $1 directory where to save the logs. - -# Scheduler runtimes in us.TetriSched -SCHEDULERS=(EDF TetriSched) -# MAX_DEADLINE_VARIANCES=(25 50 100 200) -# MAX_DEADLINE_VARIANCES=(200 400 800) -MAX_DEADLINE_VARIANCES=(200 25 50 100) # Keep deadline tight. Don't change this -SCHEDULER_TIME_DISCRETIZATIONS=(1) -GAMMA_COEFFICIENTS=(1 2 4) #cv2 don't change this -RELEASE_POLICIES=(fixed_gamma) -# POISSON_ARRIVAL_RATES=(0.2 0.5 1 2) -POISSON_ARRIVAL_RATES=(6 8 10) # Tune this -BASE_ARRIVAL_RATES=(3 4 5) # Tune this -DAG_AWARENESS=(1) # False True -TASK_CPU_DIVISOR=25 - -HETEROGENEOUS_RESOURCE=0 -WORKER_CONFIG=alibaba_cluster_80_slots - -ERDOS_SIMULATOR_DIR="." # Change this to the directory where the simulator is located. -MIN_DEADLINE_VARIANCE=10 -NUM_INVOCATIONS=400 -SCHEDULER_LOG_TIMES=10 -SCHEDULER_RUNTIME=0 -LOG_LEVEL=info -REPLAY_TRACE=alibaba -WORKLOAD_PROFILE_PATH=./traces/alibaba-cluster-trace-v2018/alibaba_set_0_6600_dags.pkl -EXECUTION_MODE=replay - -PARALLEL_FACTOR=2 -# Move to the simulator directory. -if [[ -z ${ERDOS_SIMULATOR_DIR} ]]; then - echo "[x] ERRROR: ERDOS_SIMULATOR_DIR is not set" - exit 1 -fi -cd ${ERDOS_SIMULATOR_DIR} - -LOG_DIR=$1 -if [[ -z ${LOG_DIR} ]]; then - echo "[x] ERROR: Please provide a directory to output results to as the first argument." - exit 2 -fi - -execute_experiment () { - LOG_DIR=$1 - LOG_BASE=$2 - echo "[x] Initiating the execution of ${LOG_BASE}" - if [ ! 
-f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then - -# --scheduler_log_to_file - MYCONF="\ ---log_dir=${LOG_DIR}/${LOG_BASE} ---log_file_name=${LOG_BASE}.log ---csv_file_name=${LOG_BASE}.csv ---log_level=${LOG_LEVEL} ---execution_mode=${EXECUTION_MODE} ---replay_trace=${REPLAY_TRACE} ---max_deadline_variance=${MAX_DEADLINE_VARIANCE} ---min_deadline_variance=${MIN_DEADLINE_VARIANCE} ---workload_profile_path=${WORKLOAD_PROFILE_PATH} ---override_num_invocations=${NUM_INVOCATIONS} ---override_base_arrival_rate=${BASE_ARRIVAL_RATE} ---randomize_start_time_max=100 ---worker_profile_path=profiles/workers/${WORKER_CONFIG}.yaml ---scheduler_runtime=${SCHEDULER_RUNTIME} ---override_release_policy=${RELEASE_POLICY} ---scheduler=${SCHEDULER} ---alibaba_loader_task_cpu_divisor=${TASK_CPU_DIVISOR} -" - if [[ ${RELEASE_POLICY} == fixed ]]; then - MYCONF+="--override_arrival_period=10 -" - else - MYCONF+="--override_poisson_arrival_rate=${POISSON_ARRIVAL_RATE} -" - if [[ ${RELEASE_POLICY} == gamma || ${RELEASE_POLICY} == fixed_gamma ]]; then - MYCONF+="--override_gamma_coefficient=${GAMMA_COEFFICIENT} -" - fi - fi - - if [[ ${DAG_AWARE} == 1 ]]; then - MYCONF+="--release_taskgraphs -" - fi - - if [[ ${HETEROGENEOUS_RESOURCE} == 1 ]]; then - MYCONF+="--alibaba_enable_heterogeneous_resource_type -" - fi - - if [[ ${OPTIMIZATION_PASS} == 1 ]]; then - MYCONF+="--scheduler_enable_optimization_pass -" - fi - - if [[ ${SCHEDULER} != EDF ]]; then - MYCONF+=" ---enforce_deadlines ---retract_schedules ---drop_skipped_tasks ---scheduler_log_times=${SCHEDULER_LOG_TIMES} ---scheduler_time_discretization=${SCHEDULER_TIME_DISCRETIZATION} ---scheduler_dynamic_discretization ---scheduler_max_time_discretization=5 ---scheduler_max_occupancy_threshold=0.7 - " - fi - echo "${MYCONF}" > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf - if ! time python3 main.py --flagfile=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.output; then - echo "[x] Failed in the execution of ${LOG_BASE}. Exiting." - exit 3 - fi - else - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - fi - echo "[x] Finished execution of ${LOG_BASE}." -} - - -for POISSON_ARRIVAL_RATE in ${POISSON_ARRIVAL_RATES[@]}; do - for SCHEDULER in ${SCHEDULERS[@]}; do - for RELEASE_POLICY in ${RELEASE_POLICIES[@]}; do - for GAMMA_COEFFICIENT in ${GAMMA_COEFFICIENTS[@]}; do - for MAX_DEADLINE_VARIANCE in ${MAX_DEADLINE_VARIANCES[@]}; do - for SCHEDULER_TIME_DISCRETIZATION in ${SCHEDULER_TIME_DISCRETIZATIONS[@]}; do - for DAG_AWARE in ${DAG_AWARENESS[@]}; do - if [[ ${SCHEDULER} == EDF ]]; then - if [[ "${SCHEDULER_TIME_DISCRETIZATION}" -ne "${SCHEDULER_TIME_DISCRETIZATIONS[0]}" ]]; then - continue - fi - DAG_AWARE=0 - fi - - # TODO: Make this more elegant. 
- if [[ ${POISSON_ARRIVAL_RATE} == ${POISSON_ARRIVAL_RATES[0]} ]]; then - BASE_ARRIVAL_RATE=${BASE_ARRIVAL_RATES[0]} - fi - if [[ ${POISSON_ARRIVAL_RATE} == ${POISSON_ARRIVAL_RATES[1]} ]]; then - BASE_ARRIVAL_RATE=${BASE_ARRIVAL_RATES[1]} - fi - - LOG_BASE=${REPLAY_TRACE}_scheduler_${SCHEDULER}_release_policy_${RELEASE_POLICY}_max_deadline_var_${MAX_DEADLINE_VARIANCE}_dag_aware_${DAG_AWARE}_poisson_arrival_rate_${POISSON_ARRIVAL_RATE}_gamma_coefficient_${GAMMA_COEFFICIENT}_base_arrival_rate_${BASE_ARRIVAL_RATE} - if [[ ${SCHEDULER} == TetriSched ]]; then - LOG_BASE+="_scheduler_discretization_${SCHEDULER_TIME_DISCRETIZATION}" - if [[ ${DYNAMIC_DISCRETIZATION} == 1 ]]; then - LOG_BASE+="_dynamic_max_occupancy_threshold_0.7_noCPOptPass" - fi - fi - - if [ -f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - continue - fi - - mkdir -p ${LOG_DIR}/${LOG_BASE} - execute_experiment ${LOG_DIR} ${LOG_BASE} & - if [[ $(jobs -r -p | wc -l) -ge $PARALLEL_FACTOR ]]; then - echo "[x] Waiting for a job to terminate because $PARALLEL_FACTOR jobs are running." - wait -n - fi - done - done - done - done - done - done -done -wait -echo "[x] Finished executing all experiments." \ No newline at end of file diff --git a/scripts/run_alibaba_experiments_osdi_12_5_night_sysml_05_hetero_fix_gamma_w_dynamic.sh b/scripts/run_alibaba_experiments_osdi_12_5_night_sysml_05_hetero_fix_gamma_w_dynamic.sh deleted file mode 100755 index f183ddc4..00000000 --- a/scripts/run_alibaba_experiments_osdi_12_5_night_sysml_05_hetero_fix_gamma_w_dynamic.sh +++ /dev/null @@ -1,171 +0,0 @@ -#!/bin/bash -# $1 directory where to save the logs. - -# Scheduler runtimes in us.TetriSched -SCHEDULERS=(EDF TetriSched) -# MAX_DEADLINE_VARIANCES=(25 50 100 200) -# MAX_DEADLINE_VARIANCES=(200 400 800) -MAX_DEADLINE_VARIANCES=(200 25 50 100) # Keep deadline tight. Don't change this -SCHEDULER_TIME_DISCRETIZATIONS=(1) -GAMMA_COEFFICIENTS=(1 2 4) #cv2 don't change this -RELEASE_POLICIES=(fixed_gamma) -# POISSON_ARRIVAL_RATES=(0.2 0.5 1 2) -POISSON_ARRIVAL_RATES=(6 8 10) # Tune this -BASE_ARRIVAL_RATES=(3 4 5) # Tune this -DAG_AWARENESS=(1) # False True -TASK_CPU_DIVISOR=25 - -HETEROGENEOUS_RESOURCE=1 -WORKER_CONFIG=alibaba_cluster_heterogeneous_80_slots - -ERDOS_SIMULATOR_DIR="." # Change this to the directory where the simulator is located. -MIN_DEADLINE_VARIANCE=10 -NUM_INVOCATIONS=400 -SCHEDULER_LOG_TIMES=10 -SCHEDULER_RUNTIME=0 -LOG_LEVEL=info -REPLAY_TRACE=alibaba -WORKLOAD_PROFILE_PATH=./traces/alibaba-cluster-trace-v2018/alibaba_set_0_6600_dags.pkl -EXECUTION_MODE=replay - -PARALLEL_FACTOR=2 -# Move to the simulator directory. -if [[ -z ${ERDOS_SIMULATOR_DIR} ]]; then - echo "[x] ERRROR: ERDOS_SIMULATOR_DIR is not set" - exit 1 -fi -cd ${ERDOS_SIMULATOR_DIR} - -LOG_DIR=$1 -if [[ -z ${LOG_DIR} ]]; then - echo "[x] ERROR: Please provide a directory to output results to as the first argument." - exit 2 -fi - -execute_experiment () { - LOG_DIR=$1 - LOG_BASE=$2 - echo "[x] Initiating the execution of ${LOG_BASE}" - if [ ! 
-f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then - -# --scheduler_log_to_file - MYCONF="\ ---log_dir=${LOG_DIR}/${LOG_BASE} ---log_file_name=${LOG_BASE}.log ---csv_file_name=${LOG_BASE}.csv ---log_level=${LOG_LEVEL} ---execution_mode=${EXECUTION_MODE} ---replay_trace=${REPLAY_TRACE} ---max_deadline_variance=${MAX_DEADLINE_VARIANCE} ---min_deadline_variance=${MIN_DEADLINE_VARIANCE} ---workload_profile_path=${WORKLOAD_PROFILE_PATH} ---override_num_invocations=${NUM_INVOCATIONS} ---override_base_arrival_rate=${BASE_ARRIVAL_RATE} ---randomize_start_time_max=100 ---worker_profile_path=profiles/workers/${WORKER_CONFIG}.yaml ---scheduler_runtime=${SCHEDULER_RUNTIME} ---override_release_policy=${RELEASE_POLICY} ---scheduler=${SCHEDULER} ---alibaba_loader_task_cpu_divisor=${TASK_CPU_DIVISOR} -" - if [[ ${RELEASE_POLICY} == fixed ]]; then - MYCONF+="--override_arrival_period=10 -" - else - MYCONF+="--override_poisson_arrival_rate=${POISSON_ARRIVAL_RATE} -" - if [[ ${RELEASE_POLICY} == gamma || ${RELEASE_POLICY} == fixed_gamma ]]; then - MYCONF+="--override_gamma_coefficient=${GAMMA_COEFFICIENT} -" - fi - fi - - if [[ ${DAG_AWARE} == 1 ]]; then - MYCONF+="--release_taskgraphs -" - fi - - if [[ ${HETEROGENEOUS_RESOURCE} == 1 ]]; then - MYCONF+="--alibaba_enable_heterogeneous_resource_type -" - fi - - if [[ ${OPTIMIZATION_PASS} == 1 ]]; then - MYCONF+="--scheduler_enable_optimization_pass -" - fi - - if [[ ${SCHEDULER} != EDF ]]; then - MYCONF+=" ---enforce_deadlines ---retract_schedules ---drop_skipped_tasks ---scheduler_log_times=${SCHEDULER_LOG_TIMES} ---scheduler_time_discretization=${SCHEDULER_TIME_DISCRETIZATION} ---scheduler_dynamic_discretization ---scheduler_max_time_discretization=5 ---scheduler_max_occupancy_threshold=0.7 - " - fi - echo "${MYCONF}" > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf - if ! time python3 main.py --flagfile=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.output; then - echo "[x] Failed in the execution of ${LOG_BASE}. Exiting." - exit 3 - fi - else - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - fi - echo "[x] Finished execution of ${LOG_BASE}." -} - - -for POISSON_ARRIVAL_RATE in ${POISSON_ARRIVAL_RATES[@]}; do - for SCHEDULER in ${SCHEDULERS[@]}; do - for RELEASE_POLICY in ${RELEASE_POLICIES[@]}; do - for GAMMA_COEFFICIENT in ${GAMMA_COEFFICIENTS[@]}; do - for MAX_DEADLINE_VARIANCE in ${MAX_DEADLINE_VARIANCES[@]}; do - for SCHEDULER_TIME_DISCRETIZATION in ${SCHEDULER_TIME_DISCRETIZATIONS[@]}; do - for DAG_AWARE in ${DAG_AWARENESS[@]}; do - if [[ ${SCHEDULER} == EDF ]]; then - if [[ "${SCHEDULER_TIME_DISCRETIZATION}" -ne "${SCHEDULER_TIME_DISCRETIZATIONS[0]}" ]]; then - continue - fi - DAG_AWARE=0 - fi - - # TODO: Make this more elegant. 
- if [[ ${POISSON_ARRIVAL_RATE} == ${POISSON_ARRIVAL_RATES[0]} ]]; then - BASE_ARRIVAL_RATE=${BASE_ARRIVAL_RATES[0]} - fi - if [[ ${POISSON_ARRIVAL_RATE} == ${POISSON_ARRIVAL_RATES[1]} ]]; then - BASE_ARRIVAL_RATE=${BASE_ARRIVAL_RATES[1]} - fi - - LOG_BASE=${REPLAY_TRACE}_scheduler_${SCHEDULER}_release_policy_${RELEASE_POLICY}_max_deadline_var_${MAX_DEADLINE_VARIANCE}_dag_aware_${DAG_AWARE}_poisson_arrival_rate_${POISSON_ARRIVAL_RATE}_gamma_coefficient_${GAMMA_COEFFICIENT}_base_arrival_rate_${BASE_ARRIVAL_RATE} - if [[ ${SCHEDULER} == TetriSched ]]; then - LOG_BASE+="_scheduler_discretization_${SCHEDULER_TIME_DISCRETIZATION}" - if [[ ${DYNAMIC_DISCRETIZATION} == 1 ]]; then - LOG_BASE+="_dynamic_max_occupancy_threshold_0.7_noCPOptPass" - fi - fi - - if [ -f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - continue - fi - - mkdir -p ${LOG_DIR}/${LOG_BASE} - execute_experiment ${LOG_DIR} ${LOG_BASE} & - if [[ $(jobs -r -p | wc -l) -ge $PARALLEL_FACTOR ]]; then - echo "[x] Waiting for a job to terminate because $PARALLEL_FACTOR jobs are running." - wait -n - fi - done - done - done - done - done - done -done -wait -echo "[x] Finished executing all experiments." \ No newline at end of file diff --git a/scripts/run_alibaba_experiments_osdi_12_6_day_rivertam_hetero_fix_gamma_w_dynamic_max10.sh b/scripts/run_alibaba_experiments_osdi_12_6_day_rivertam_hetero_fix_gamma_w_dynamic_max10.sh deleted file mode 100755 index 82192523..00000000 --- a/scripts/run_alibaba_experiments_osdi_12_6_day_rivertam_hetero_fix_gamma_w_dynamic_max10.sh +++ /dev/null @@ -1,172 +0,0 @@ -#!/bin/bash -# $1 directory where to save the logs. - -# Scheduler runtimes in us.TetriSched -SCHEDULERS=(EDF TetriSched) -# MAX_DEADLINE_VARIANCES=(25 50 100 200) -# MAX_DEADLINE_VARIANCES=(200 400 800) -MAX_DEADLINE_VARIANCES=(200 25 50 100) # Keep deadline tight. Don't change this -SCHEDULER_TIME_DISCRETIZATIONS=(1) -GAMMA_COEFFICIENTS=(1 2 4) #cv2 don't change this -RELEASE_POLICIES=(fixed_gamma) -# POISSON_ARRIVAL_RATES=(0.2 0.5 1 2) -POISSON_ARRIVAL_RATES=(0.06 0.08) # Tune this -BASE_ARRIVAL_RATES=(0.03 0.04) # Tune this -DAG_AWARENESS=(1) # False True -TASK_CPU_DIVISOR=25 -DYNAMIC_DISCRETIZATION=(1) - -HETEROGENEOUS_RESOURCE=1 -WORKER_CONFIG=alibaba_cluster_heterogeneous_20_slots - -ERDOS_SIMULATOR_DIR="." # Change this to the directory where the simulator is located. -MIN_DEADLINE_VARIANCE=10 -NUM_INVOCATIONS=400 -SCHEDULER_LOG_TIMES=10 -SCHEDULER_RUNTIME=0 -LOG_LEVEL=info -REPLAY_TRACE=alibaba -WORKLOAD_PROFILE_PATH=./traces/alibaba-cluster-trace-v2018/alibaba_set_0_6600_dags.pkl -EXECUTION_MODE=replay - -PARALLEL_FACTOR=2 -# Move to the simulator directory. -if [[ -z ${ERDOS_SIMULATOR_DIR} ]]; then - echo "[x] ERRROR: ERDOS_SIMULATOR_DIR is not set" - exit 1 -fi -cd ${ERDOS_SIMULATOR_DIR} - -LOG_DIR=$1 -if [[ -z ${LOG_DIR} ]]; then - echo "[x] ERROR: Please provide a directory to output results to as the first argument." - exit 2 -fi - -execute_experiment () { - LOG_DIR=$1 - LOG_BASE=$2 - echo "[x] Initiating the execution of ${LOG_BASE}" - if [ ! 
-f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then - -# --scheduler_log_to_file - MYCONF="\ ---log_dir=${LOG_DIR}/${LOG_BASE} ---log_file_name=${LOG_BASE}.log ---csv_file_name=${LOG_BASE}.csv ---log_level=${LOG_LEVEL} ---execution_mode=${EXECUTION_MODE} ---replay_trace=${REPLAY_TRACE} ---max_deadline_variance=${MAX_DEADLINE_VARIANCE} ---min_deadline_variance=${MIN_DEADLINE_VARIANCE} ---workload_profile_path=${WORKLOAD_PROFILE_PATH} ---override_num_invocations=${NUM_INVOCATIONS} ---override_base_arrival_rate=${BASE_ARRIVAL_RATE} ---randomize_start_time_max=100 ---worker_profile_path=profiles/workers/${WORKER_CONFIG}.yaml ---scheduler_runtime=${SCHEDULER_RUNTIME} ---override_release_policy=${RELEASE_POLICY} ---scheduler=${SCHEDULER} ---alibaba_loader_task_cpu_divisor=${TASK_CPU_DIVISOR} -" - if [[ ${RELEASE_POLICY} == fixed ]]; then - MYCONF+="--override_arrival_period=10 -" - else - MYCONF+="--override_poisson_arrival_rate=${POISSON_ARRIVAL_RATE} -" - if [[ ${RELEASE_POLICY} == gamma || ${RELEASE_POLICY} == fixed_gamma ]]; then - MYCONF+="--override_gamma_coefficient=${GAMMA_COEFFICIENT} -" - fi - fi - - if [[ ${DAG_AWARE} == 1 ]]; then - MYCONF+="--release_taskgraphs -" - fi - - if [[ ${HETEROGENEOUS_RESOURCE} == 1 ]]; then - MYCONF+="--alibaba_enable_heterogeneous_resource_type -" - fi - - if [[ ${OPTIMIZATION_PASS} == 1 ]]; then - MYCONF+="--scheduler_enable_optimization_pass -" - fi - - if [[ ${SCHEDULER} != EDF ]]; then - MYCONF+=" ---enforce_deadlines ---retract_schedules ---drop_skipped_tasks ---scheduler_log_times=${SCHEDULER_LOG_TIMES} ---scheduler_time_discretization=${SCHEDULER_TIME_DISCRETIZATION} ---scheduler_dynamic_discretization ---scheduler_max_time_discretization=10 ---scheduler_max_occupancy_threshold=0.7 - " - fi - echo "${MYCONF}" > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf - if ! time python3 main.py --flagfile=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.output; then - echo "[x] Failed in the execution of ${LOG_BASE}. Exiting." - exit 3 - fi - else - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - fi - echo "[x] Finished execution of ${LOG_BASE}." -} - - -for POISSON_ARRIVAL_RATE in ${POISSON_ARRIVAL_RATES[@]}; do - for SCHEDULER in ${SCHEDULERS[@]}; do - for RELEASE_POLICY in ${RELEASE_POLICIES[@]}; do - for GAMMA_COEFFICIENT in ${GAMMA_COEFFICIENTS[@]}; do - for MAX_DEADLINE_VARIANCE in ${MAX_DEADLINE_VARIANCES[@]}; do - for SCHEDULER_TIME_DISCRETIZATION in ${SCHEDULER_TIME_DISCRETIZATIONS[@]}; do - for DAG_AWARE in ${DAG_AWARENESS[@]}; do - if [[ ${SCHEDULER} == EDF ]]; then - if [[ "${SCHEDULER_TIME_DISCRETIZATION}" -ne "${SCHEDULER_TIME_DISCRETIZATIONS[0]}" ]]; then - continue - fi - DAG_AWARE=0 - fi - - # TODO: Make this more elegant. 
- if [[ ${POISSON_ARRIVAL_RATE} == ${POISSON_ARRIVAL_RATES[0]} ]]; then - BASE_ARRIVAL_RATE=${BASE_ARRIVAL_RATES[0]} - fi - if [[ ${POISSON_ARRIVAL_RATE} == ${POISSON_ARRIVAL_RATES[1]} ]]; then - BASE_ARRIVAL_RATE=${BASE_ARRIVAL_RATES[1]} - fi - - LOG_BASE=${REPLAY_TRACE}_scheduler_${SCHEDULER}_release_policy_${RELEASE_POLICY}_max_deadline_var_${MAX_DEADLINE_VARIANCE}_dag_aware_${DAG_AWARE}_poisson_arrival_rate_${POISSON_ARRIVAL_RATE}_gamma_coefficient_${GAMMA_COEFFICIENT}_base_arrival_rate_${BASE_ARRIVAL_RATE} - if [[ ${SCHEDULER} == TetriSched ]]; then - LOG_BASE+="_scheduler_discretization_${SCHEDULER_TIME_DISCRETIZATION}" - if [[ ${DYNAMIC_DISCRETIZATION} == 1 ]]; then - LOG_BASE+="_dynamic_max_occupancy_threshold_0.7_noCPOptPass" - fi - fi - - if [ -f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - continue - fi - - mkdir -p ${LOG_DIR}/${LOG_BASE} - execute_experiment ${LOG_DIR} ${LOG_BASE} & - if [[ $(jobs -r -p | wc -l) -ge $PARALLEL_FACTOR ]]; then - echo "[x] Waiting for a job to terminate because $PARALLEL_FACTOR jobs are running." - wait -n - fi - done - done - done - done - done - done -done -wait -echo "[x] Finished executing all experiments." \ No newline at end of file diff --git a/scripts/run_alibaba_experiments_osdi_12_6_day_sheperd_homo_fix_gamma_w_dynamic_10.sh b/scripts/run_alibaba_experiments_osdi_12_6_day_sheperd_homo_fix_gamma_w_dynamic_10.sh deleted file mode 100755 index 2d23afed..00000000 --- a/scripts/run_alibaba_experiments_osdi_12_6_day_sheperd_homo_fix_gamma_w_dynamic_10.sh +++ /dev/null @@ -1,172 +0,0 @@ -#!/bin/bash -# $1 directory where to save the logs. - -# Scheduler runtimes in us.TetriSched -SCHEDULERS=(EDF TetriSched) -# MAX_DEADLINE_VARIANCES=(25 50 100 200) -# MAX_DEADLINE_VARIANCES=(200 400 800) -MAX_DEADLINE_VARIANCES=(200 25 50 100) # Keep deadline tight. Don't change this -SCHEDULER_TIME_DISCRETIZATIONS=(1) -GAMMA_COEFFICIENTS=(1 2 4) #cv2 don't change this -RELEASE_POLICIES=(fixed_gamma) -# POISSON_ARRIVAL_RATES=(0.2 0.5 1 2) -POISSON_ARRIVAL_RATES=(0.48 0.72) # Tune this -BASE_ARRIVAL_RATES=(0.24 0.36) # Tune this -DAG_AWARENESS=(1) # False True -TASK_CPU_DIVISOR=25 -DYNAMIC_DISCRETIZATION=(1) - -HETEROGENEOUS_RESOURCE=0 -WORKER_CONFIG=alibaba_cluster_40_slots - -ERDOS_SIMULATOR_DIR="." # Change this to the directory where the simulator is located. -MIN_DEADLINE_VARIANCE=10 -NUM_INVOCATIONS=400 -SCHEDULER_LOG_TIMES=10 -SCHEDULER_RUNTIME=0 -LOG_LEVEL=info -REPLAY_TRACE=alibaba -WORKLOAD_PROFILE_PATH=./traces/alibaba-cluster-trace-v2018/alibaba_set_0_6600_dags.pkl -EXECUTION_MODE=replay - -PARALLEL_FACTOR=2 -# Move to the simulator directory. -if [[ -z ${ERDOS_SIMULATOR_DIR} ]]; then - echo "[x] ERRROR: ERDOS_SIMULATOR_DIR is not set" - exit 1 -fi -cd ${ERDOS_SIMULATOR_DIR} - -LOG_DIR=$1 -if [[ -z ${LOG_DIR} ]]; then - echo "[x] ERROR: Please provide a directory to output results to as the first argument." - exit 2 -fi - -execute_experiment () { - LOG_DIR=$1 - LOG_BASE=$2 - echo "[x] Initiating the execution of ${LOG_BASE}" - if [ ! 
-f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then - -# --scheduler_log_to_file - MYCONF="\ ---log_dir=${LOG_DIR}/${LOG_BASE} ---log_file_name=${LOG_BASE}.log ---csv_file_name=${LOG_BASE}.csv ---log_level=${LOG_LEVEL} ---execution_mode=${EXECUTION_MODE} ---replay_trace=${REPLAY_TRACE} ---max_deadline_variance=${MAX_DEADLINE_VARIANCE} ---min_deadline_variance=${MIN_DEADLINE_VARIANCE} ---workload_profile_path=${WORKLOAD_PROFILE_PATH} ---override_num_invocations=${NUM_INVOCATIONS} ---override_base_arrival_rate=${BASE_ARRIVAL_RATE} ---randomize_start_time_max=100 ---worker_profile_path=profiles/workers/${WORKER_CONFIG}.yaml ---scheduler_runtime=${SCHEDULER_RUNTIME} ---override_release_policy=${RELEASE_POLICY} ---scheduler=${SCHEDULER} ---alibaba_loader_task_cpu_divisor=${TASK_CPU_DIVISOR} -" - if [[ ${RELEASE_POLICY} == fixed ]]; then - MYCONF+="--override_arrival_period=10 -" - else - MYCONF+="--override_poisson_arrival_rate=${POISSON_ARRIVAL_RATE} -" - if [[ ${RELEASE_POLICY} == gamma || ${RELEASE_POLICY} == fixed_gamma ]]; then - MYCONF+="--override_gamma_coefficient=${GAMMA_COEFFICIENT} -" - fi - fi - - if [[ ${DAG_AWARE} == 1 ]]; then - MYCONF+="--release_taskgraphs -" - fi - - if [[ ${HETEROGENEOUS_RESOURCE} == 1 ]]; then - MYCONF+="--alibaba_enable_heterogeneous_resource_type -" - fi - - if [[ ${OPTIMIZATION_PASS} == 1 ]]; then - MYCONF+="--scheduler_enable_optimization_pass -" - fi - - if [[ ${SCHEDULER} != EDF ]]; then - MYCONF+=" ---enforce_deadlines ---retract_schedules ---drop_skipped_tasks ---scheduler_log_times=${SCHEDULER_LOG_TIMES} ---scheduler_time_discretization=${SCHEDULER_TIME_DISCRETIZATION} ---scheduler_dynamic_discretization ---scheduler_max_time_discretization=10 ---scheduler_max_occupancy_threshold=0.7 - " - fi - echo "${MYCONF}" > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf - if ! time python3 main.py --flagfile=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.output; then - echo "[x] Failed in the execution of ${LOG_BASE}. Exiting." - exit 3 - fi - else - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - fi - echo "[x] Finished execution of ${LOG_BASE}." -} - - -for POISSON_ARRIVAL_RATE in ${POISSON_ARRIVAL_RATES[@]}; do - for SCHEDULER in ${SCHEDULERS[@]}; do - for RELEASE_POLICY in ${RELEASE_POLICIES[@]}; do - for GAMMA_COEFFICIENT in ${GAMMA_COEFFICIENTS[@]}; do - for MAX_DEADLINE_VARIANCE in ${MAX_DEADLINE_VARIANCES[@]}; do - for SCHEDULER_TIME_DISCRETIZATION in ${SCHEDULER_TIME_DISCRETIZATIONS[@]}; do - for DAG_AWARE in ${DAG_AWARENESS[@]}; do - if [[ ${SCHEDULER} == EDF ]]; then - if [[ "${SCHEDULER_TIME_DISCRETIZATION}" -ne "${SCHEDULER_TIME_DISCRETIZATIONS[0]}" ]]; then - continue - fi - DAG_AWARE=0 - fi - - # TODO: Make this more elegant. 
- if [[ ${POISSON_ARRIVAL_RATE} == ${POISSON_ARRIVAL_RATES[0]} ]]; then - BASE_ARRIVAL_RATE=${BASE_ARRIVAL_RATES[0]} - fi - if [[ ${POISSON_ARRIVAL_RATE} == ${POISSON_ARRIVAL_RATES[1]} ]]; then - BASE_ARRIVAL_RATE=${BASE_ARRIVAL_RATES[1]} - fi - - LOG_BASE=${REPLAY_TRACE}_scheduler_${SCHEDULER}_release_policy_${RELEASE_POLICY}_max_deadline_var_${MAX_DEADLINE_VARIANCE}_dag_aware_${DAG_AWARE}_poisson_arrival_rate_${POISSON_ARRIVAL_RATE}_gamma_coefficient_${GAMMA_COEFFICIENT}_base_arrival_rate_${BASE_ARRIVAL_RATE} - if [[ ${SCHEDULER} == TetriSched ]]; then - LOG_BASE+="_scheduler_discretization_${SCHEDULER_TIME_DISCRETIZATION}" - if [[ ${DYNAMIC_DISCRETIZATION} == 1 ]]; then - LOG_BASE+="_dynamic_max_occupancy_threshold_0.7_noCPOptPass" - fi - fi - - if [ -f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - continue - fi - - mkdir -p ${LOG_DIR}/${LOG_BASE} - execute_experiment ${LOG_DIR} ${LOG_BASE} & - if [[ $(jobs -r -p | wc -l) -ge $PARALLEL_FACTOR ]]; then - echo "[x] Waiting for a job to terminate because $PARALLEL_FACTOR jobs are running." - wait -n - fi - done - done - done - done - done - done -done -wait -echo "[x] Finished executing all experiments." \ No newline at end of file diff --git a/scripts/run_alibaba_experiments_osdi_12_6_morning_sysml_02_homo_fix_gamma_w_dynamic_test.sh b/scripts/run_alibaba_experiments_osdi_12_6_morning_sysml_02_homo_fix_gamma_w_dynamic_test.sh deleted file mode 100755 index f4fdd63e..00000000 --- a/scripts/run_alibaba_experiments_osdi_12_6_morning_sysml_02_homo_fix_gamma_w_dynamic_test.sh +++ /dev/null @@ -1,171 +0,0 @@ -#!/bin/bash -# $1 directory where to save the logs. - -# Scheduler runtimes in us.TetriSched -SCHEDULERS=(EDF TetriSched) -# MAX_DEADLINE_VARIANCES=(25 50 100 200) -# MAX_DEADLINE_VARIANCES=(200 400 800) -MAX_DEADLINE_VARIANCES=(200 25 50 100) # Keep deadline tight. Don't change this -SCHEDULER_TIME_DISCRETIZATIONS=(1) -GAMMA_COEFFICIENTS=(1 2 4) #cv2 don't change this -RELEASE_POLICIES=(gamma) -# POISSON_ARRIVAL_RATES=(0.2 0.5 1 2) -POISSON_ARRIVAL_RATES=(0.025) # Tune this -BASE_ARRIVAL_RATES=(0) # Tune this -DAG_AWARENESS=(1) # False True -TASK_CPU_DIVISOR=25 - -HETEROGENEOUS_RESOURCE=0 -WORKER_CONFIG=alibaba_cluster - -ERDOS_SIMULATOR_DIR="." # Change this to the directory where the simulator is located. -MIN_DEADLINE_VARIANCE=10 -NUM_INVOCATIONS=50 -SCHEDULER_LOG_TIMES=10 -SCHEDULER_RUNTIME=0 -LOG_LEVEL=info -REPLAY_TRACE=alibaba -WORKLOAD_PROFILE_PATH=./traces/alibaba-cluster-trace-v2018/alibaba_set_0_6600_dags.pkl -EXECUTION_MODE=replay - -PARALLEL_FACTOR=2 -# Move to the simulator directory. -if [[ -z ${ERDOS_SIMULATOR_DIR} ]]; then - echo "[x] ERRROR: ERDOS_SIMULATOR_DIR is not set" - exit 1 -fi -cd ${ERDOS_SIMULATOR_DIR} - -LOG_DIR=$1 -if [[ -z ${LOG_DIR} ]]; then - echo "[x] ERROR: Please provide a directory to output results to as the first argument." - exit 2 -fi - -execute_experiment () { - LOG_DIR=$1 - LOG_BASE=$2 - echo "[x] Initiating the execution of ${LOG_BASE}" - if [ ! 
-f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then - -# --scheduler_log_to_file - MYCONF="\ ---log_dir=${LOG_DIR}/${LOG_BASE}/ ---log_file_name=${LOG_BASE}.log ---csv_file_name=${LOG_BASE}.csv ---log_level=${LOG_LEVEL} ---execution_mode=${EXECUTION_MODE} ---replay_trace=${REPLAY_TRACE} ---max_deadline_variance=${MAX_DEADLINE_VARIANCE} ---min_deadline_variance=${MIN_DEADLINE_VARIANCE} ---workload_profile_path=${WORKLOAD_PROFILE_PATH} ---override_num_invocations=${NUM_INVOCATIONS} ---override_base_arrival_rate=${BASE_ARRIVAL_RATE} ---randomize_start_time_max=100 ---worker_profile_path=profiles/workers/${WORKER_CONFIG}.yaml ---scheduler_runtime=${SCHEDULER_RUNTIME} ---override_release_policy=${RELEASE_POLICY} ---scheduler=${SCHEDULER} ---alibaba_loader_task_cpu_divisor=${TASK_CPU_DIVISOR} -" - if [[ ${RELEASE_POLICY} == fixed ]]; then - MYCONF+="--override_arrival_period=10 -" - else - MYCONF+="--override_poisson_arrival_rate=${POISSON_ARRIVAL_RATE} -" - if [[ ${RELEASE_POLICY} == gamma || ${RELEASE_POLICY} == fixed_gamma ]]; then - MYCONF+="--override_gamma_coefficient=${GAMMA_COEFFICIENT} -" - fi - fi - - if [[ ${DAG_AWARE} == 1 ]]; then - MYCONF+="--release_taskgraphs -" - fi - - if [[ ${HETEROGENEOUS_RESOURCE} == 1 ]]; then - MYCONF+="--alibaba_enable_heterogeneous_resource_type -" - fi - - if [[ ${OPTIMIZATION_PASS} == 1 ]]; then - MYCONF+="--scheduler_enable_optimization_pass -" - fi - - if [[ ${SCHEDULER} != EDF ]]; then - MYCONF+=" ---enforce_deadlines ---retract_schedules ---drop_skipped_tasks ---scheduler_log_times=${SCHEDULER_LOG_TIMES} ---scheduler_time_discretization=${SCHEDULER_TIME_DISCRETIZATION} ---scheduler_dynamic_discretization ---scheduler_max_time_discretization=5 ---scheduler_max_occupancy_threshold=0.7 - " - fi - echo "${MYCONF}" > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf - if ! time python3 main.py --flagfile=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.output; then - echo "[x] Failed in the execution of ${LOG_BASE}. Exiting." - exit 3 - fi - else - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - fi - echo "[x] Finished execution of ${LOG_BASE}." -} - - -for POISSON_ARRIVAL_RATE in ${POISSON_ARRIVAL_RATES[@]}; do - for SCHEDULER in ${SCHEDULERS[@]}; do - for RELEASE_POLICY in ${RELEASE_POLICIES[@]}; do - for GAMMA_COEFFICIENT in ${GAMMA_COEFFICIENTS[@]}; do - for MAX_DEADLINE_VARIANCE in ${MAX_DEADLINE_VARIANCES[@]}; do - for SCHEDULER_TIME_DISCRETIZATION in ${SCHEDULER_TIME_DISCRETIZATIONS[@]}; do - for DAG_AWARE in ${DAG_AWARENESS[@]}; do - if [[ ${SCHEDULER} == EDF ]]; then - if [[ "${SCHEDULER_TIME_DISCRETIZATION}" -ne "${SCHEDULER_TIME_DISCRETIZATIONS[0]}" ]]; then - continue - fi - DAG_AWARE=0 - fi - - # TODO: Make this more elegant. 
- if [[ ${POISSON_ARRIVAL_RATE} == ${POISSON_ARRIVAL_RATES[0]} ]]; then - BASE_ARRIVAL_RATE=${BASE_ARRIVAL_RATES[0]} - fi - if [[ ${POISSON_ARRIVAL_RATE} == ${POISSON_ARRIVAL_RATES[1]} ]]; then - BASE_ARRIVAL_RATE=${BASE_ARRIVAL_RATES[1]} - fi - - LOG_BASE=${REPLAY_TRACE}_scheduler_${SCHEDULER}_release_policy_${RELEASE_POLICY}_max_deadline_var_${MAX_DEADLINE_VARIANCE}_dag_aware_${DAG_AWARE}_poisson_arrival_rate_${POISSON_ARRIVAL_RATE}_gamma_coefficient_${GAMMA_COEFFICIENT}_base_arrival_rate_${BASE_ARRIVAL_RATE} - if [[ ${SCHEDULER} == TetriSched ]]; then - LOG_BASE+="_scheduler_discretization_${SCHEDULER_TIME_DISCRETIZATION}" - if [[ ${DYNAMIC_DISCRETIZATION} == 1 ]]; then - LOG_BASE+="_dynamic_max_occupancy_threshold_0.7_noCPOptPass" - fi - fi - - if [ -f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - continue - fi - - mkdir -p ${LOG_DIR}/${LOG_BASE} - execute_experiment ${LOG_DIR} ${LOG_BASE} & - if [[ $(jobs -r -p | wc -l) -ge $PARALLEL_FACTOR ]]; then - echo "[x] Waiting for a job to terminate because $PARALLEL_FACTOR jobs are running." - wait -n - fi - done - done - done - done - done - done -done -wait -echo "[x] Finished executing all experiments." \ No newline at end of file diff --git a/scripts/run_alibaba_experiments_osdi_12_7_night_jayne_hetero_fix_gamma_w_dynamic.sh b/scripts/run_alibaba_experiments_osdi_12_7_night_jayne_hetero_fix_gamma_w_dynamic.sh deleted file mode 100755 index f14b6786..00000000 --- a/scripts/run_alibaba_experiments_osdi_12_7_night_jayne_hetero_fix_gamma_w_dynamic.sh +++ /dev/null @@ -1,173 +0,0 @@ -#!/bin/bash -# $1 directory where to save the logs. - -# Scheduler runtimes in us.TetriSched -SCHEDULERS=(EDF TetriSched) -# MAX_DEADLINE_VARIANCES=(25 50 100 200) -# MAX_DEADLINE_VARIANCES=(200 400 800) -MAX_DEADLINE_VARIANCES=(25 200 50 100) # Keep deadline tight. Don't change this -SCHEDULER_TIME_DISCRETIZATIONS=(1) -GAMMA_COEFFICIENTS=(1 2 4) #cv2 don't change this -RELEASE_POLICIES=(fixed_gamma) -# POISSON_ARRIVAL_RATES=(0.2 0.5 1 2) -POISSON_ARRIVAL_RATES=(0.48 0.72) # Tune this -BASE_ARRIVAL_RATES=(0.24 0.36) # Tune this -DAG_AWARENESS=(1) # False True -TASK_CPU_DIVISOR=25 - -DYNAMIC_DISCRETIZATION=1 -HETEROGENEOUS_RESOURCE=1 -WORKER_CONFIG=alibaba_cluster_heterogeneous_40_slots - -ERDOS_SIMULATOR_DIR="." # Change this to the directory where the simulator is located. -MIN_DEADLINE_VARIANCE=10 -NUM_INVOCATIONS=200 -SCHEDULER_LOG_TIMES=10 -SCHEDULER_RUNTIME=0 -LOG_LEVEL=info -REPLAY_TRACE=alibaba -WORKLOAD_PROFILE_PATH=./traces/alibaba-cluster-trace-v2018/alibaba_set_0_6600_dags.pkl -EXECUTION_MODE=replay - -PARALLEL_FACTOR=2 -# Move to the simulator directory. -if [[ -z ${ERDOS_SIMULATOR_DIR} ]]; then - echo "[x] ERRROR: ERDOS_SIMULATOR_DIR is not set" - exit 1 -fi -cd ${ERDOS_SIMULATOR_DIR} - -LOG_DIR=$1 -if [[ -z ${LOG_DIR} ]]; then - echo "[x] ERROR: Please provide a directory to output results to as the first argument." - exit 2 -fi - -execute_experiment () { - LOG_DIR=$1 - LOG_BASE=$2 - echo "[x] Initiating the execution of ${LOG_BASE}" - if [ ! 
-f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then - -# --scheduler_log_to_file - export TETRISCHED_LOGGING_DIR="${LOG_DIR}/${LOG_BASE}/" - MYCONF="\ ---log_dir=${LOG_DIR}/${LOG_BASE} ---log_file_name=${LOG_BASE}.log ---csv_file_name=${LOG_BASE}.csv ---log_level=${LOG_LEVEL} ---execution_mode=${EXECUTION_MODE} ---replay_trace=${REPLAY_TRACE} ---max_deadline_variance=${MAX_DEADLINE_VARIANCE} ---min_deadline_variance=${MIN_DEADLINE_VARIANCE} ---workload_profile_path=${WORKLOAD_PROFILE_PATH} ---override_num_invocations=${NUM_INVOCATIONS} ---override_base_arrival_rate=${BASE_ARRIVAL_RATE} ---randomize_start_time_max=100 ---worker_profile_path=profiles/workers/${WORKER_CONFIG}.yaml ---scheduler_runtime=${SCHEDULER_RUNTIME} ---override_release_policy=${RELEASE_POLICY} ---scheduler=${SCHEDULER} ---alibaba_loader_task_cpu_divisor=${TASK_CPU_DIVISOR} -" - if [[ ${RELEASE_POLICY} == fixed ]]; then - MYCONF+="--override_arrival_period=10 -" - else - MYCONF+="--override_poisson_arrival_rate=${POISSON_ARRIVAL_RATE} -" - if [[ ${RELEASE_POLICY} == gamma || ${RELEASE_POLICY} == fixed_gamma ]]; then - MYCONF+="--override_gamma_coefficient=${GAMMA_COEFFICIENT} -" - fi - fi - - if [[ ${DAG_AWARE} == 1 ]]; then - MYCONF+="--release_taskgraphs -" - fi - - if [[ ${HETEROGENEOUS_RESOURCE} == 1 ]]; then - MYCONF+="--alibaba_enable_heterogeneous_resource_type -" - fi - - if [[ ${OPTIMIZATION_PASS} == 1 ]]; then - MYCONF+="--scheduler_enable_optimization_pass -" - fi - - if [[ ${SCHEDULER} != EDF ]]; then - MYCONF+=" ---enforce_deadlines ---retract_schedules ---drop_skipped_tasks ---scheduler_log_times=${SCHEDULER_LOG_TIMES} ---scheduler_time_discretization=${SCHEDULER_TIME_DISCRETIZATION} ---scheduler_dynamic_discretization ---scheduler_max_time_discretization=5 ---scheduler_max_occupancy_threshold=0.7 - " - fi - echo "${MYCONF}" > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf - if ! time python3 main.py --flagfile=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.output; then - echo "[x] Failed in the execution of ${LOG_BASE}. Exiting." - exit 3 - fi - else - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - fi - echo "[x] Finished execution of ${LOG_BASE}." -} - - -for POISSON_ARRIVAL_RATE in ${POISSON_ARRIVAL_RATES[@]}; do - for SCHEDULER in ${SCHEDULERS[@]}; do - for RELEASE_POLICY in ${RELEASE_POLICIES[@]}; do - for GAMMA_COEFFICIENT in ${GAMMA_COEFFICIENTS[@]}; do - for MAX_DEADLINE_VARIANCE in ${MAX_DEADLINE_VARIANCES[@]}; do - for SCHEDULER_TIME_DISCRETIZATION in ${SCHEDULER_TIME_DISCRETIZATIONS[@]}; do - for DAG_AWARE in ${DAG_AWARENESS[@]}; do - if [[ ${SCHEDULER} == EDF ]]; then - if [[ "${SCHEDULER_TIME_DISCRETIZATION}" -ne "${SCHEDULER_TIME_DISCRETIZATIONS[0]}" ]]; then - continue - fi - DAG_AWARE=0 - fi - - # TODO: Make this more elegant. 
- if [[ ${POISSON_ARRIVAL_RATE} == ${POISSON_ARRIVAL_RATES[0]} ]]; then - BASE_ARRIVAL_RATE=${BASE_ARRIVAL_RATES[0]} - fi - if [[ ${POISSON_ARRIVAL_RATE} == ${POISSON_ARRIVAL_RATES[1]} ]]; then - BASE_ARRIVAL_RATE=${BASE_ARRIVAL_RATES[1]} - fi - - LOG_BASE=${REPLAY_TRACE}_scheduler_${SCHEDULER}_release_policy_${RELEASE_POLICY}_max_deadline_var_${MAX_DEADLINE_VARIANCE}_dag_aware_${DAG_AWARE}_poisson_arrival_rate_${POISSON_ARRIVAL_RATE}_gamma_coefficient_${GAMMA_COEFFICIENT}_base_arrival_rate_${BASE_ARRIVAL_RATE} - if [[ ${SCHEDULER} == TetriSched ]]; then - LOG_BASE+="_scheduler_discretization_${SCHEDULER_TIME_DISCRETIZATION}" - if [[ ${DYNAMIC_DISCRETIZATION} == 1 ]]; then - LOG_BASE+="_dynamic_max_occupancy_threshold_0.7_noCPOptPass" - fi - fi - - if [ -f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - continue - fi - - mkdir -p ${LOG_DIR}/${LOG_BASE} - execute_experiment ${LOG_DIR} ${LOG_BASE} & - if [[ $(jobs -r -p | wc -l) -ge $PARALLEL_FACTOR ]]; then - echo "[x] Waiting for a job to terminate because $PARALLEL_FACTOR jobs are running." - wait -n - fi - done - done - done - done - done - done -done -wait -echo "[x] Finished executing all experiments." \ No newline at end of file diff --git a/scripts/run_alibaba_experiments_osdi_12_7_night_rivertam_hetero_fix_gamma_w_dynamic.sh b/scripts/run_alibaba_experiments_osdi_12_7_night_rivertam_hetero_fix_gamma_w_dynamic.sh deleted file mode 100755 index 50c631fa..00000000 --- a/scripts/run_alibaba_experiments_osdi_12_7_night_rivertam_hetero_fix_gamma_w_dynamic.sh +++ /dev/null @@ -1,173 +0,0 @@ -#!/bin/bash -# $1 directory where to save the logs. - -# Scheduler runtimes in us.TetriSched -SCHEDULERS=(EDF TetriSched) -# MAX_DEADLINE_VARIANCES=(25 50 100 200) -# MAX_DEADLINE_VARIANCES=(200 400 800) -MAX_DEADLINE_VARIANCES=(25 200 50 100) # Keep deadline tight. Don't change this -SCHEDULER_TIME_DISCRETIZATIONS=(1) -GAMMA_COEFFICIENTS=(1 2 4) #cv2 don't change this -RELEASE_POLICIES=(fixed_gamma) -# POISSON_ARRIVAL_RATES=(0.2 0.5 1 2) -POISSON_ARRIVAL_RATES=(0.06 0.08) # Tune this -BASE_ARRIVAL_RATES=(0.03 0.04) # Tune this -DAG_AWARENESS=(1) # False True -TASK_CPU_DIVISOR=25 - -DYNAMIC_DISCRETIZATION=1 -HETEROGENEOUS_RESOURCE=1 -WORKER_CONFIG=alibaba_cluster_heterogeneous_20_slots - -ERDOS_SIMULATOR_DIR="." # Change this to the directory where the simulator is located. -MIN_DEADLINE_VARIANCE=10 -NUM_INVOCATIONS=200 -SCHEDULER_LOG_TIMES=10 -SCHEDULER_RUNTIME=0 -LOG_LEVEL=info -REPLAY_TRACE=alibaba -WORKLOAD_PROFILE_PATH=./traces/alibaba-cluster-trace-v2018/alibaba_set_0_6600_dags.pkl -EXECUTION_MODE=replay - -PARALLEL_FACTOR=2 -# Move to the simulator directory. -if [[ -z ${ERDOS_SIMULATOR_DIR} ]]; then - echo "[x] ERRROR: ERDOS_SIMULATOR_DIR is not set" - exit 1 -fi -cd ${ERDOS_SIMULATOR_DIR} - -LOG_DIR=$1 -if [[ -z ${LOG_DIR} ]]; then - echo "[x] ERROR: Please provide a directory to output results to as the first argument." - exit 2 -fi - -execute_experiment () { - LOG_DIR=$1 - LOG_BASE=$2 - echo "[x] Initiating the execution of ${LOG_BASE}" - if [ ! 
-f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then - -# --scheduler_log_to_file - export TETRISCHED_LOGGING_DIR="${LOG_DIR}/${LOG_BASE}/" - MYCONF="\ ---log_dir=${LOG_DIR}/${LOG_BASE} ---log_file_name=${LOG_BASE}.log ---csv_file_name=${LOG_BASE}.csv ---log_level=${LOG_LEVEL} ---execution_mode=${EXECUTION_MODE} ---replay_trace=${REPLAY_TRACE} ---max_deadline_variance=${MAX_DEADLINE_VARIANCE} ---min_deadline_variance=${MIN_DEADLINE_VARIANCE} ---workload_profile_path=${WORKLOAD_PROFILE_PATH} ---override_num_invocations=${NUM_INVOCATIONS} ---override_base_arrival_rate=${BASE_ARRIVAL_RATE} ---randomize_start_time_max=100 ---worker_profile_path=profiles/workers/${WORKER_CONFIG}.yaml ---scheduler_runtime=${SCHEDULER_RUNTIME} ---override_release_policy=${RELEASE_POLICY} ---scheduler=${SCHEDULER} ---alibaba_loader_task_cpu_divisor=${TASK_CPU_DIVISOR} -" - if [[ ${RELEASE_POLICY} == fixed ]]; then - MYCONF+="--override_arrival_period=10 -" - else - MYCONF+="--override_poisson_arrival_rate=${POISSON_ARRIVAL_RATE} -" - if [[ ${RELEASE_POLICY} == gamma || ${RELEASE_POLICY} == fixed_gamma ]]; then - MYCONF+="--override_gamma_coefficient=${GAMMA_COEFFICIENT} -" - fi - fi - - if [[ ${DAG_AWARE} == 1 ]]; then - MYCONF+="--release_taskgraphs -" - fi - - if [[ ${HETEROGENEOUS_RESOURCE} == 1 ]]; then - MYCONF+="--alibaba_enable_heterogeneous_resource_type -" - fi - - if [[ ${OPTIMIZATION_PASS} == 1 ]]; then - MYCONF+="--scheduler_enable_optimization_pass -" - fi - - if [[ ${SCHEDULER} != EDF ]]; then - MYCONF+=" ---enforce_deadlines ---retract_schedules ---drop_skipped_tasks ---scheduler_log_times=${SCHEDULER_LOG_TIMES} ---scheduler_time_discretization=${SCHEDULER_TIME_DISCRETIZATION} ---scheduler_dynamic_discretization ---scheduler_max_time_discretization=5 ---scheduler_max_occupancy_threshold=0.7 - " - fi - echo "${MYCONF}" > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf - if ! time python3 main.py --flagfile=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.output; then - echo "[x] Failed in the execution of ${LOG_BASE}. Exiting." - exit 3 - fi - else - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - fi - echo "[x] Finished execution of ${LOG_BASE}." -} - - -for POISSON_ARRIVAL_RATE in ${POISSON_ARRIVAL_RATES[@]}; do - for SCHEDULER in ${SCHEDULERS[@]}; do - for RELEASE_POLICY in ${RELEASE_POLICIES[@]}; do - for GAMMA_COEFFICIENT in ${GAMMA_COEFFICIENTS[@]}; do - for MAX_DEADLINE_VARIANCE in ${MAX_DEADLINE_VARIANCES[@]}; do - for SCHEDULER_TIME_DISCRETIZATION in ${SCHEDULER_TIME_DISCRETIZATIONS[@]}; do - for DAG_AWARE in ${DAG_AWARENESS[@]}; do - if [[ ${SCHEDULER} == EDF ]]; then - if [[ "${SCHEDULER_TIME_DISCRETIZATION}" -ne "${SCHEDULER_TIME_DISCRETIZATIONS[0]}" ]]; then - continue - fi - DAG_AWARE=0 - fi - - # TODO: Make this more elegant. 
- if [[ ${POISSON_ARRIVAL_RATE} == ${POISSON_ARRIVAL_RATES[0]} ]]; then - BASE_ARRIVAL_RATE=${BASE_ARRIVAL_RATES[0]} - fi - if [[ ${POISSON_ARRIVAL_RATE} == ${POISSON_ARRIVAL_RATES[1]} ]]; then - BASE_ARRIVAL_RATE=${BASE_ARRIVAL_RATES[1]} - fi - - LOG_BASE=${REPLAY_TRACE}_scheduler_${SCHEDULER}_release_policy_${RELEASE_POLICY}_max_deadline_var_${MAX_DEADLINE_VARIANCE}_dag_aware_${DAG_AWARE}_poisson_arrival_rate_${POISSON_ARRIVAL_RATE}_gamma_coefficient_${GAMMA_COEFFICIENT}_base_arrival_rate_${BASE_ARRIVAL_RATE} - if [[ ${SCHEDULER} == TetriSched ]]; then - LOG_BASE+="_scheduler_discretization_${SCHEDULER_TIME_DISCRETIZATION}" - if [[ ${DYNAMIC_DISCRETIZATION} == 1 ]]; then - LOG_BASE+="_dynamic_max_occupancy_threshold_0.7_noCPOptPass" - fi - fi - - if [ -f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - continue - fi - - mkdir -p ${LOG_DIR}/${LOG_BASE} - execute_experiment ${LOG_DIR} ${LOG_BASE} & - if [[ $(jobs -r -p | wc -l) -ge $PARALLEL_FACTOR ]]; then - echo "[x] Waiting for a job to terminate because $PARALLEL_FACTOR jobs are running." - wait -n - fi - done - done - done - done - done - done -done -wait -echo "[x] Finished executing all experiments." \ No newline at end of file diff --git a/scripts/run_alibaba_experiments_osdi_12_7_night_sheperd_homo_fix_gamma_w_dynamic.sh b/scripts/run_alibaba_experiments_osdi_12_7_night_sheperd_homo_fix_gamma_w_dynamic.sh deleted file mode 100755 index 540967ae..00000000 --- a/scripts/run_alibaba_experiments_osdi_12_7_night_sheperd_homo_fix_gamma_w_dynamic.sh +++ /dev/null @@ -1,173 +0,0 @@ -#!/bin/bash -# $1 directory where to save the logs. - -# Scheduler runtimes in us.TetriSched -SCHEDULERS=(EDF TetriSched) -# MAX_DEADLINE_VARIANCES=(25 50 100 200) -# MAX_DEADLINE_VARIANCES=(200 400 800) -MAX_DEADLINE_VARIANCES=(25 200 50 100) # Keep deadline tight. Don't change this -SCHEDULER_TIME_DISCRETIZATIONS=(1) -GAMMA_COEFFICIENTS=(1 2 4) #cv2 don't change this -RELEASE_POLICIES=(fixed_gamma) -# POISSON_ARRIVAL_RATES=(0.2 0.5 1 2) -POISSON_ARRIVAL_RATES=(0.48 0.72) # Tune this -BASE_ARRIVAL_RATES=(0.24 0.36) # Tune this -DAG_AWARENESS=(1) # False True -TASK_CPU_DIVISOR=25 - -DYNAMIC_DISCRETIZATION=1 -HETEROGENEOUS_RESOURCE=0 -WORKER_CONFIG=alibaba_cluster_40_slots - -ERDOS_SIMULATOR_DIR="." # Change this to the directory where the simulator is located. -MIN_DEADLINE_VARIANCE=10 -NUM_INVOCATIONS=200 -SCHEDULER_LOG_TIMES=10 -SCHEDULER_RUNTIME=0 -LOG_LEVEL=info -REPLAY_TRACE=alibaba -WORKLOAD_PROFILE_PATH=./traces/alibaba-cluster-trace-v2018/alibaba_set_0_6600_dags.pkl -EXECUTION_MODE=replay - -PARALLEL_FACTOR=2 -# Move to the simulator directory. -if [[ -z ${ERDOS_SIMULATOR_DIR} ]]; then - echo "[x] ERRROR: ERDOS_SIMULATOR_DIR is not set" - exit 1 -fi -cd ${ERDOS_SIMULATOR_DIR} - -LOG_DIR=$1 -if [[ -z ${LOG_DIR} ]]; then - echo "[x] ERROR: Please provide a directory to output results to as the first argument." - exit 2 -fi - -execute_experiment () { - LOG_DIR=$1 - LOG_BASE=$2 - echo "[x] Initiating the execution of ${LOG_BASE}" - if [ ! 
-f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then - -# --scheduler_log_to_file - export TETRISCHED_LOGGING_DIR="${LOG_DIR}/${LOG_BASE}/" - MYCONF="\ ---log_dir=${LOG_DIR}/${LOG_BASE} ---log_file_name=${LOG_BASE}.log ---csv_file_name=${LOG_BASE}.csv ---log_level=${LOG_LEVEL} ---execution_mode=${EXECUTION_MODE} ---replay_trace=${REPLAY_TRACE} ---max_deadline_variance=${MAX_DEADLINE_VARIANCE} ---min_deadline_variance=${MIN_DEADLINE_VARIANCE} ---workload_profile_path=${WORKLOAD_PROFILE_PATH} ---override_num_invocations=${NUM_INVOCATIONS} ---override_base_arrival_rate=${BASE_ARRIVAL_RATE} ---randomize_start_time_max=100 ---worker_profile_path=profiles/workers/${WORKER_CONFIG}.yaml ---scheduler_runtime=${SCHEDULER_RUNTIME} ---override_release_policy=${RELEASE_POLICY} ---scheduler=${SCHEDULER} ---alibaba_loader_task_cpu_divisor=${TASK_CPU_DIVISOR} -" - if [[ ${RELEASE_POLICY} == fixed ]]; then - MYCONF+="--override_arrival_period=10 -" - else - MYCONF+="--override_poisson_arrival_rate=${POISSON_ARRIVAL_RATE} -" - if [[ ${RELEASE_POLICY} == gamma || ${RELEASE_POLICY} == fixed_gamma ]]; then - MYCONF+="--override_gamma_coefficient=${GAMMA_COEFFICIENT} -" - fi - fi - - if [[ ${DAG_AWARE} == 1 ]]; then - MYCONF+="--release_taskgraphs -" - fi - - if [[ ${HETEROGENEOUS_RESOURCE} == 1 ]]; then - MYCONF+="--alibaba_enable_heterogeneous_resource_type -" - fi - - if [[ ${OPTIMIZATION_PASS} == 1 ]]; then - MYCONF+="--scheduler_enable_optimization_pass -" - fi - - if [[ ${SCHEDULER} != EDF ]]; then - MYCONF+=" ---enforce_deadlines ---retract_schedules ---drop_skipped_tasks ---scheduler_log_times=${SCHEDULER_LOG_TIMES} ---scheduler_time_discretization=${SCHEDULER_TIME_DISCRETIZATION} ---scheduler_dynamic_discretization ---scheduler_max_time_discretization=5 ---scheduler_max_occupancy_threshold=0.7 - " - fi - echo "${MYCONF}" > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf - if ! time python3 main.py --flagfile=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.output; then - echo "[x] Failed in the execution of ${LOG_BASE}. Exiting." - exit 3 - fi - else - echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists." - fi - echo "[x] Finished execution of ${LOG_BASE}." -} - - -for POISSON_ARRIVAL_RATE in ${POISSON_ARRIVAL_RATES[@]}; do - for SCHEDULER in ${SCHEDULERS[@]}; do - for RELEASE_POLICY in ${RELEASE_POLICIES[@]}; do - for GAMMA_COEFFICIENT in ${GAMMA_COEFFICIENTS[@]}; do - for MAX_DEADLINE_VARIANCE in ${MAX_DEADLINE_VARIANCES[@]}; do - for SCHEDULER_TIME_DISCRETIZATION in ${SCHEDULER_TIME_DISCRETIZATIONS[@]}; do - for DAG_AWARE in ${DAG_AWARENESS[@]}; do - if [[ ${SCHEDULER} == EDF ]]; then - if [[ "${SCHEDULER_TIME_DISCRETIZATION}" -ne "${SCHEDULER_TIME_DISCRETIZATIONS[0]}" ]]; then - continue - fi - DAG_AWARE=0 - fi - - # TODO: Make this more elegant. 
- if [[ ${POISSON_ARRIVAL_RATE} == ${POISSON_ARRIVAL_RATES[0]} ]]; then
- BASE_ARRIVAL_RATE=${BASE_ARRIVAL_RATES[0]}
- fi
- if [[ ${POISSON_ARRIVAL_RATE} == ${POISSON_ARRIVAL_RATES[1]} ]]; then
- BASE_ARRIVAL_RATE=${BASE_ARRIVAL_RATES[1]}
- fi
-
- LOG_BASE=${REPLAY_TRACE}_scheduler_${SCHEDULER}_release_policy_${RELEASE_POLICY}_max_deadline_var_${MAX_DEADLINE_VARIANCE}_dag_aware_${DAG_AWARE}_poisson_arrival_rate_${POISSON_ARRIVAL_RATE}_gamma_coefficient_${GAMMA_COEFFICIENT}_base_arrival_rate_${BASE_ARRIVAL_RATE}
- if [[ ${SCHEDULER} == TetriSched ]]; then
- LOG_BASE+="_scheduler_discretization_${SCHEDULER_TIME_DISCRETIZATION}"
- if [[ ${DYNAMIC_DISCRETIZATION} == 1 ]]; then
- LOG_BASE+="_dynamic_max_occupancy_threshold_0.7_noCPOptPass"
- fi
- fi
-
- if [ -f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then
- echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists."
- continue
- fi
-
- mkdir -p ${LOG_DIR}/${LOG_BASE}
- execute_experiment ${LOG_DIR} ${LOG_BASE} &
- if [[ $(jobs -r -p | wc -l) -ge $PARALLEL_FACTOR ]]; then
- echo "[x] Waiting for a job to terminate because $PARALLEL_FACTOR jobs are running."
- wait -n
- fi
- done
- done
- done
- done
- done
- done
-done
-wait
-echo "[x] Finished executing all experiments."
\ No newline at end of file
diff --git a/scripts/run_alibaba_experiments_osdi_12_7_night_simontam_homo_fix_gamma_w_dynamic.sh b/scripts/run_alibaba_experiments_osdi_12_7_night_simontam_homo_fix_gamma_w_dynamic.sh
deleted file mode 100755
index 2eaa5745..00000000
--- a/scripts/run_alibaba_experiments_osdi_12_7_night_simontam_homo_fix_gamma_w_dynamic.sh
+++ /dev/null
@@ -1,173 +0,0 @@
-#!/bin/bash
-# $1 directory where to save the logs.
-
-# Scheduler runtimes in us.TetriSched
-SCHEDULERS=(EDF TetriSched)
-# MAX_DEADLINE_VARIANCES=(25 50 100 200)
-# MAX_DEADLINE_VARIANCES=(200 400 800)
-MAX_DEADLINE_VARIANCES=(25 200 50 100) # Keep deadline tight. Don't change this
-SCHEDULER_TIME_DISCRETIZATIONS=(1)
-GAMMA_COEFFICIENTS=(1 2 4) #cv2 don't change this
-RELEASE_POLICIES=(fixed_gamma)
-# POISSON_ARRIVAL_RATES=(0.2 0.5 1 2)
-POISSON_ARRIVAL_RATES=(0.06 0.08) # Tune this
-BASE_ARRIVAL_RATES=(0.03 0.04) # Tune this
-DAG_AWARENESS=(1) # False True
-TASK_CPU_DIVISOR=25
-
-DYNAMIC_DISCRETIZATION=1
-HETEROGENEOUS_RESOURCE=0
-WORKER_CONFIG=alibaba_cluster_20_slots
-
-ERDOS_SIMULATOR_DIR="." # Change this to the directory where the simulator is located.
-MIN_DEADLINE_VARIANCE=10
-NUM_INVOCATIONS=200
-SCHEDULER_LOG_TIMES=10
-SCHEDULER_RUNTIME=0
-LOG_LEVEL=info
-REPLAY_TRACE=alibaba
-WORKLOAD_PROFILE_PATH=./traces/alibaba-cluster-trace-v2018/alibaba_set_0_6600_dags.pkl
-EXECUTION_MODE=replay
-
-PARALLEL_FACTOR=2
-# Move to the simulator directory.
-if [[ -z ${ERDOS_SIMULATOR_DIR} ]]; then
- echo "[x] ERRROR: ERDOS_SIMULATOR_DIR is not set"
- exit 1
-fi
-cd ${ERDOS_SIMULATOR_DIR}
-
-LOG_DIR=$1
-if [[ -z ${LOG_DIR} ]]; then
- echo "[x] ERROR: Please provide a directory to output results to as the first argument."
- exit 2
-fi
-
-execute_experiment () {
- LOG_DIR=$1
- LOG_BASE=$2
- echo "[x] Initiating the execution of ${LOG_BASE}"
- if [ ! -f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then
-
-# --scheduler_log_to_file
- export TETRISCHED_LOGGING_DIR="${LOG_DIR}/${LOG_BASE}/"
- MYCONF="\
---log_dir=${LOG_DIR}/${LOG_BASE}
---log_file_name=${LOG_BASE}.log
---csv_file_name=${LOG_BASE}.csv
---log_level=${LOG_LEVEL}
---execution_mode=${EXECUTION_MODE}
---replay_trace=${REPLAY_TRACE}
---max_deadline_variance=${MAX_DEADLINE_VARIANCE}
---min_deadline_variance=${MIN_DEADLINE_VARIANCE}
---workload_profile_path=${WORKLOAD_PROFILE_PATH}
---override_num_invocations=${NUM_INVOCATIONS}
---override_base_arrival_rate=${BASE_ARRIVAL_RATE}
---randomize_start_time_max=100
---worker_profile_path=profiles/workers/${WORKER_CONFIG}.yaml
---scheduler_runtime=${SCHEDULER_RUNTIME}
---override_release_policy=${RELEASE_POLICY}
---scheduler=${SCHEDULER}
---alibaba_loader_task_cpu_divisor=${TASK_CPU_DIVISOR}
-"
- if [[ ${RELEASE_POLICY} == fixed ]]; then
- MYCONF+="--override_arrival_period=10
-"
- else
- MYCONF+="--override_poisson_arrival_rate=${POISSON_ARRIVAL_RATE}
-"
- if [[ ${RELEASE_POLICY} == gamma || ${RELEASE_POLICY} == fixed_gamma ]]; then
- MYCONF+="--override_gamma_coefficient=${GAMMA_COEFFICIENT}
-"
- fi
- fi
-
- if [[ ${DAG_AWARE} == 1 ]]; then
- MYCONF+="--release_taskgraphs
-"
- fi
-
- if [[ ${HETEROGENEOUS_RESOURCE} == 1 ]]; then
- MYCONF+="--alibaba_enable_heterogeneous_resource_type
-"
- fi
-
- if [[ ${OPTIMIZATION_PASS} == 1 ]]; then
- MYCONF+="--scheduler_enable_optimization_pass
-"
- fi
-
- if [[ ${SCHEDULER} != EDF ]]; then
- MYCONF+="
---enforce_deadlines
---retract_schedules
---drop_skipped_tasks
---scheduler_log_times=${SCHEDULER_LOG_TIMES}
---scheduler_time_discretization=${SCHEDULER_TIME_DISCRETIZATION}
---scheduler_dynamic_discretization
---scheduler_max_time_discretization=5
---scheduler_max_occupancy_threshold=0.7
- "
- fi
- echo "${MYCONF}" > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf
- if ! time python3 main.py --flagfile=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.output; then
- echo "[x] Failed in the execution of ${LOG_BASE}. Exiting."
- exit 3
- fi
- else
- echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists."
- fi
- echo "[x] Finished execution of ${LOG_BASE}."
-}
-
-
-for POISSON_ARRIVAL_RATE in ${POISSON_ARRIVAL_RATES[@]}; do
- for SCHEDULER in ${SCHEDULERS[@]}; do
- for RELEASE_POLICY in ${RELEASE_POLICIES[@]}; do
- for GAMMA_COEFFICIENT in ${GAMMA_COEFFICIENTS[@]}; do
- for MAX_DEADLINE_VARIANCE in ${MAX_DEADLINE_VARIANCES[@]}; do
- for SCHEDULER_TIME_DISCRETIZATION in ${SCHEDULER_TIME_DISCRETIZATIONS[@]}; do
- for DAG_AWARE in ${DAG_AWARENESS[@]}; do
- if [[ ${SCHEDULER} == EDF ]]; then
- if [[ "${SCHEDULER_TIME_DISCRETIZATION}" -ne "${SCHEDULER_TIME_DISCRETIZATIONS[0]}" ]]; then
- continue
- fi
- DAG_AWARE=0
- fi
-
- # TODO: Make this more elegant.
- if [[ ${POISSON_ARRIVAL_RATE} == ${POISSON_ARRIVAL_RATES[0]} ]]; then
- BASE_ARRIVAL_RATE=${BASE_ARRIVAL_RATES[0]}
- fi
- if [[ ${POISSON_ARRIVAL_RATE} == ${POISSON_ARRIVAL_RATES[1]} ]]; then
- BASE_ARRIVAL_RATE=${BASE_ARRIVAL_RATES[1]}
- fi
-
- LOG_BASE=${REPLAY_TRACE}_scheduler_${SCHEDULER}_release_policy_${RELEASE_POLICY}_max_deadline_var_${MAX_DEADLINE_VARIANCE}_dag_aware_${DAG_AWARE}_poisson_arrival_rate_${POISSON_ARRIVAL_RATE}_gamma_coefficient_${GAMMA_COEFFICIENT}_base_arrival_rate_${BASE_ARRIVAL_RATE}
- if [[ ${SCHEDULER} == TetriSched ]]; then
- LOG_BASE+="_scheduler_discretization_${SCHEDULER_TIME_DISCRETIZATION}"
- if [[ ${DYNAMIC_DISCRETIZATION} == 1 ]]; then
- LOG_BASE+="_dynamic_max_occupancy_threshold_0.7_noCPOptPass"
- fi
- fi
-
- if [ -f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then
- echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists."
- continue
- fi
-
- mkdir -p ${LOG_DIR}/${LOG_BASE}
- execute_experiment ${LOG_DIR} ${LOG_BASE} &
- if [[ $(jobs -r -p | wc -l) -ge $PARALLEL_FACTOR ]]; then
- echo "[x] Waiting for a job to terminate because $PARALLEL_FACTOR jobs are running."
- wait -n
- fi
- done
- done
- done
- done
- done
- done
-done
-wait
-echo "[x] Finished executing all experiments."
\ No newline at end of file
diff --git a/scripts/run_alibaba_experiments_osdi_12_7_night_sysml_02_homo_fix_gamma_w_dynamic.sh b/scripts/run_alibaba_experiments_osdi_12_7_night_sysml_02_homo_fix_gamma_w_dynamic.sh
deleted file mode 100755
index 7c1c15b3..00000000
--- a/scripts/run_alibaba_experiments_osdi_12_7_night_sysml_02_homo_fix_gamma_w_dynamic.sh
+++ /dev/null
@@ -1,176 +0,0 @@
-#!/bin/bash
-# $1 directory where to save the logs.
-
-# Scheduler runtimes in us.TetriSched
-SCHEDULERS=(EDF TetriSched)
-# MAX_DEADLINE_VARIANCES=(25 50 100 200)
-# MAX_DEADLINE_VARIANCES=(200 400 800)
-MAX_DEADLINE_VARIANCES=(25 200 50 100) # Keep deadline tight. Don't change this
-SCHEDULER_TIME_DISCRETIZATIONS=(1)
-GAMMA_COEFFICIENTS=(1 2 4) #cv2 don't change this
-RELEASE_POLICIES=(fixed_gamma)
-# POISSON_ARRIVAL_RATES=(0.2 0.5 1 2)
-POISSON_ARRIVAL_RATES=(6 8 10) # Tune this
-BASE_ARRIVAL_RATES=(3 4 5) # Tune this
-DAG_AWARENESS=(1) # False True
-TASK_CPU_DIVISOR=25
-
-DYNAMIC_DISCRETIZATION=1
-HETEROGENEOUS_RESOURCE=0
-WORKER_CONFIG=alibaba_cluster_80_slots
-
-ERDOS_SIMULATOR_DIR="." # Change this to the directory where the simulator is located.
-MIN_DEADLINE_VARIANCE=10
-NUM_INVOCATIONS=200
-SCHEDULER_LOG_TIMES=10
-SCHEDULER_RUNTIME=0
-LOG_LEVEL=info
-REPLAY_TRACE=alibaba
-WORKLOAD_PROFILE_PATH=./traces/alibaba-cluster-trace-v2018/alibaba_set_0_6600_dags.pkl
-EXECUTION_MODE=replay
-
-PARALLEL_FACTOR=2
-# Move to the simulator directory.
-if [[ -z ${ERDOS_SIMULATOR_DIR} ]]; then
- echo "[x] ERRROR: ERDOS_SIMULATOR_DIR is not set"
- exit 1
-fi
-cd ${ERDOS_SIMULATOR_DIR}
-
-LOG_DIR=$1
-if [[ -z ${LOG_DIR} ]]; then
- echo "[x] ERROR: Please provide a directory to output results to as the first argument."
- exit 2
-fi
-
-execute_experiment () {
- LOG_DIR=$1
- LOG_BASE=$2
- echo "[x] Initiating the execution of ${LOG_BASE}"
- if [ ! -f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then
-
-# --scheduler_log_to_file
- export TETRISCHED_LOGGING_DIR="${LOG_DIR}/${LOG_BASE}/"
- MYCONF="\
---log_dir=${LOG_DIR}/${LOG_BASE}
---log_file_name=${LOG_BASE}.log
---csv_file_name=${LOG_BASE}.csv
---log_level=${LOG_LEVEL}
---execution_mode=${EXECUTION_MODE}
---replay_trace=${REPLAY_TRACE}
---max_deadline_variance=${MAX_DEADLINE_VARIANCE}
---min_deadline_variance=${MIN_DEADLINE_VARIANCE}
---workload_profile_path=${WORKLOAD_PROFILE_PATH}
---override_num_invocations=${NUM_INVOCATIONS}
---override_base_arrival_rate=${BASE_ARRIVAL_RATE}
---randomize_start_time_max=100
---worker_profile_path=profiles/workers/${WORKER_CONFIG}.yaml
---scheduler_runtime=${SCHEDULER_RUNTIME}
---override_release_policy=${RELEASE_POLICY}
---scheduler=${SCHEDULER}
---alibaba_loader_task_cpu_divisor=${TASK_CPU_DIVISOR}
-"
- if [[ ${RELEASE_POLICY} == fixed ]]; then
- MYCONF+="--override_arrival_period=10
-"
- else
- MYCONF+="--override_poisson_arrival_rate=${POISSON_ARRIVAL_RATE}
-"
- if [[ ${RELEASE_POLICY} == gamma || ${RELEASE_POLICY} == fixed_gamma ]]; then
- MYCONF+="--override_gamma_coefficient=${GAMMA_COEFFICIENT}
-"
- fi
- fi
-
- if [[ ${DAG_AWARE} == 1 ]]; then
- MYCONF+="--release_taskgraphs
-"
- fi
-
- if [[ ${HETEROGENEOUS_RESOURCE} == 1 ]]; then
- MYCONF+="--alibaba_enable_heterogeneous_resource_type
-"
- fi
-
- if [[ ${OPTIMIZATION_PASS} == 1 ]]; then
- MYCONF+="--scheduler_enable_optimization_pass
-"
- fi
-
- if [[ ${SCHEDULER} != EDF ]]; then
- MYCONF+="
---enforce_deadlines
---retract_schedules
---drop_skipped_tasks
---scheduler_log_times=${SCHEDULER_LOG_TIMES}
---scheduler_time_discretization=${SCHEDULER_TIME_DISCRETIZATION}
---scheduler_dynamic_discretization
---scheduler_max_time_discretization=5
---scheduler_max_occupancy_threshold=0.7
- "
- fi
- echo "${MYCONF}" > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf
- if ! time python3 main.py --flagfile=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.output; then
- echo "[x] Failed in the execution of ${LOG_BASE}. Exiting."
- exit 3
- fi
- else
- echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists."
- fi
- echo "[x] Finished execution of ${LOG_BASE}."
-}
-
-
-for POISSON_ARRIVAL_RATE in ${POISSON_ARRIVAL_RATES[@]}; do
- for SCHEDULER in ${SCHEDULERS[@]}; do
- for RELEASE_POLICY in ${RELEASE_POLICIES[@]}; do
- for GAMMA_COEFFICIENT in ${GAMMA_COEFFICIENTS[@]}; do
- for MAX_DEADLINE_VARIANCE in ${MAX_DEADLINE_VARIANCES[@]}; do
- for SCHEDULER_TIME_DISCRETIZATION in ${SCHEDULER_TIME_DISCRETIZATIONS[@]}; do
- for DAG_AWARE in ${DAG_AWARENESS[@]}; do
- if [[ ${SCHEDULER} == EDF ]]; then
- if [[ "${SCHEDULER_TIME_DISCRETIZATION}" -ne "${SCHEDULER_TIME_DISCRETIZATIONS[0]}" ]]; then
- continue
- fi
- DAG_AWARE=0
- fi
-
- # TODO: Make this more elegant.
- if [[ ${POISSON_ARRIVAL_RATE} == ${POISSON_ARRIVAL_RATES[0]} ]]; then
- BASE_ARRIVAL_RATE=${BASE_ARRIVAL_RATES[0]}
- fi
- if [[ ${POISSON_ARRIVAL_RATE} == ${POISSON_ARRIVAL_RATES[1]} ]]; then
- BASE_ARRIVAL_RATE=${BASE_ARRIVAL_RATES[1]}
- fi
- if [[ ${POISSON_ARRIVAL_RATE} == ${POISSON_ARRIVAL_RATES[2]} ]]; then
- BASE_ARRIVAL_RATE=${BASE_ARRIVAL_RATES[2]}
- fi
-
- LOG_BASE=${REPLAY_TRACE}_scheduler_${SCHEDULER}_release_policy_${RELEASE_POLICY}_max_deadline_var_${MAX_DEADLINE_VARIANCE}_dag_aware_${DAG_AWARE}_poisson_arrival_rate_${POISSON_ARRIVAL_RATE}_gamma_coefficient_${GAMMA_COEFFICIENT}_base_arrival_rate_${BASE_ARRIVAL_RATE}
- if [[ ${SCHEDULER} == TetriSched ]]; then
- LOG_BASE+="_scheduler_discretization_${SCHEDULER_TIME_DISCRETIZATION}"
- if [[ ${DYNAMIC_DISCRETIZATION} == 1 ]]; then
- LOG_BASE+="_dynamic_max_occupancy_threshold_0.7_noCPOptPass"
- fi
- fi
-
- if [ -f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then
- echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists."
- continue
- fi
-
- mkdir -p ${LOG_DIR}/${LOG_BASE}
- execute_experiment ${LOG_DIR} ${LOG_BASE} &
- if [[ $(jobs -r -p | wc -l) -ge $PARALLEL_FACTOR ]]; then
- echo "[x] Waiting for a job to terminate because $PARALLEL_FACTOR jobs are running."
- wait -n
- fi
- done
- done
- done
- done
- done
- done
-done
-wait
-echo "[x] Finished executing all experiments."
\ No newline at end of file
diff --git a/scripts/run_alibaba_experiments_osdi_12_7_night_sysml_05_hetero_fix_gamma_w_dynamic.sh b/scripts/run_alibaba_experiments_osdi_12_7_night_sysml_05_hetero_fix_gamma_w_dynamic.sh
deleted file mode 100755
index 89973bb0..00000000
--- a/scripts/run_alibaba_experiments_osdi_12_7_night_sysml_05_hetero_fix_gamma_w_dynamic.sh
+++ /dev/null
@@ -1,176 +0,0 @@
-#!/bin/bash
-# $1 directory where to save the logs.
-
-# Scheduler runtimes in us.TetriSched
-SCHEDULERS=(EDF TetriSched)
-# MAX_DEADLINE_VARIANCES=(25 50 100 200)
-# MAX_DEADLINE_VARIANCES=(200 400 800)
-MAX_DEADLINE_VARIANCES=(25 200 50 100) # Keep deadline tight. Don't change this
-SCHEDULER_TIME_DISCRETIZATIONS=(1)
-GAMMA_COEFFICIENTS=(1 2 4) #cv2 don't change this
-RELEASE_POLICIES=(fixed_gamma)
-# POISSON_ARRIVAL_RATES=(0.2 0.5 1 2)
-POISSON_ARRIVAL_RATES=(6 8 10) # Tune this
-BASE_ARRIVAL_RATES=(3 4 5) # Tune this
-DAG_AWARENESS=(1) # False True
-TASK_CPU_DIVISOR=25
-
-DYNAMIC_DISCRETIZATION=1
-HETEROGENEOUS_RESOURCE=1
-WORKER_CONFIG=alibaba_cluster_heterogeneous_80_slots
-
-ERDOS_SIMULATOR_DIR="." # Change this to the directory where the simulator is located.
-MIN_DEADLINE_VARIANCE=10
-NUM_INVOCATIONS=200
-SCHEDULER_LOG_TIMES=10
-SCHEDULER_RUNTIME=0
-LOG_LEVEL=info
-REPLAY_TRACE=alibaba
-WORKLOAD_PROFILE_PATH=./traces/alibaba-cluster-trace-v2018/alibaba_set_0_6600_dags.pkl
-EXECUTION_MODE=replay
-
-PARALLEL_FACTOR=2
-# Move to the simulator directory.
-if [[ -z ${ERDOS_SIMULATOR_DIR} ]]; then
- echo "[x] ERRROR: ERDOS_SIMULATOR_DIR is not set"
- exit 1
-fi
-cd ${ERDOS_SIMULATOR_DIR}
-
-LOG_DIR=$1
-if [[ -z ${LOG_DIR} ]]; then
- echo "[x] ERROR: Please provide a directory to output results to as the first argument."
- exit 2
-fi
-
-execute_experiment () {
- LOG_DIR=$1
- LOG_BASE=$2
- echo "[x] Initiating the execution of ${LOG_BASE}"
- if [ ! -f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then
-
-# --scheduler_log_to_file
- export TETRISCHED_LOGGING_DIR="${LOG_DIR}/${LOG_BASE}/"
- MYCONF="\
---log_dir=${LOG_DIR}/${LOG_BASE}
---log_file_name=${LOG_BASE}.log
---csv_file_name=${LOG_BASE}.csv
---log_level=${LOG_LEVEL}
---execution_mode=${EXECUTION_MODE}
---replay_trace=${REPLAY_TRACE}
---max_deadline_variance=${MAX_DEADLINE_VARIANCE}
---min_deadline_variance=${MIN_DEADLINE_VARIANCE}
---workload_profile_path=${WORKLOAD_PROFILE_PATH}
---override_num_invocations=${NUM_INVOCATIONS}
---override_base_arrival_rate=${BASE_ARRIVAL_RATE}
---randomize_start_time_max=100
---worker_profile_path=profiles/workers/${WORKER_CONFIG}.yaml
---scheduler_runtime=${SCHEDULER_RUNTIME}
---override_release_policy=${RELEASE_POLICY}
---scheduler=${SCHEDULER}
---alibaba_loader_task_cpu_divisor=${TASK_CPU_DIVISOR}
-"
- if [[ ${RELEASE_POLICY} == fixed ]]; then
- MYCONF+="--override_arrival_period=10
-"
- else
- MYCONF+="--override_poisson_arrival_rate=${POISSON_ARRIVAL_RATE}
-"
- if [[ ${RELEASE_POLICY} == gamma || ${RELEASE_POLICY} == fixed_gamma ]]; then
- MYCONF+="--override_gamma_coefficient=${GAMMA_COEFFICIENT}
-"
- fi
- fi
-
- if [[ ${DAG_AWARE} == 1 ]]; then
- MYCONF+="--release_taskgraphs
-"
- fi
-
- if [[ ${HETEROGENEOUS_RESOURCE} == 1 ]]; then
- MYCONF+="--alibaba_enable_heterogeneous_resource_type
-"
- fi
-
- if [[ ${OPTIMIZATION_PASS} == 1 ]]; then
- MYCONF+="--scheduler_enable_optimization_pass
-"
- fi
-
- if [[ ${SCHEDULER} != EDF ]]; then
- MYCONF+="
---enforce_deadlines
---retract_schedules
---drop_skipped_tasks
---scheduler_log_times=${SCHEDULER_LOG_TIMES}
---scheduler_time_discretization=${SCHEDULER_TIME_DISCRETIZATION}
---scheduler_dynamic_discretization
---scheduler_max_time_discretization=5
---scheduler_max_occupancy_threshold=0.7
- "
- fi
- echo "${MYCONF}" > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf
- if ! time python3 main.py --flagfile=${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.conf > ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.output; then
- echo "[x] Failed in the execution of ${LOG_BASE}. Exiting."
- exit 3
- fi
- else
- echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists."
- fi
- echo "[x] Finished execution of ${LOG_BASE}."
-}
-
-
-for POISSON_ARRIVAL_RATE in ${POISSON_ARRIVAL_RATES[@]}; do
- for SCHEDULER in ${SCHEDULERS[@]}; do
- for RELEASE_POLICY in ${RELEASE_POLICIES[@]}; do
- for GAMMA_COEFFICIENT in ${GAMMA_COEFFICIENTS[@]}; do
- for MAX_DEADLINE_VARIANCE in ${MAX_DEADLINE_VARIANCES[@]}; do
- for SCHEDULER_TIME_DISCRETIZATION in ${SCHEDULER_TIME_DISCRETIZATIONS[@]}; do
- for DAG_AWARE in ${DAG_AWARENESS[@]}; do
- if [[ ${SCHEDULER} == EDF ]]; then
- if [[ "${SCHEDULER_TIME_DISCRETIZATION}" -ne "${SCHEDULER_TIME_DISCRETIZATIONS[0]}" ]]; then
- continue
- fi
- DAG_AWARE=0
- fi
-
- # TODO: Make this more elegant.
- if [[ ${POISSON_ARRIVAL_RATE} == ${POISSON_ARRIVAL_RATES[0]} ]]; then
- BASE_ARRIVAL_RATE=${BASE_ARRIVAL_RATES[0]}
- fi
- if [[ ${POISSON_ARRIVAL_RATE} == ${POISSON_ARRIVAL_RATES[1]} ]]; then
- BASE_ARRIVAL_RATE=${BASE_ARRIVAL_RATES[1]}
- fi
- if [[ ${POISSON_ARRIVAL_RATE} == ${POISSON_ARRIVAL_RATES[2]} ]]; then
- BASE_ARRIVAL_RATE=${BASE_ARRIVAL_RATES[2]}
- fi
-
- LOG_BASE=${REPLAY_TRACE}_scheduler_${SCHEDULER}_release_policy_${RELEASE_POLICY}_max_deadline_var_${MAX_DEADLINE_VARIANCE}_dag_aware_${DAG_AWARE}_poisson_arrival_rate_${POISSON_ARRIVAL_RATE}_gamma_coefficient_${GAMMA_COEFFICIENT}_base_arrival_rate_${BASE_ARRIVAL_RATE}
- if [[ ${SCHEDULER} == TetriSched ]]; then
- LOG_BASE+="_scheduler_discretization_${SCHEDULER_TIME_DISCRETIZATION}"
- if [[ ${DYNAMIC_DISCRETIZATION} == 1 ]]; then
- LOG_BASE+="_dynamic_max_occupancy_threshold_0.7_noCPOptPass"
- fi
- fi
-
- if [ -f "${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv" ]; then
- echo "[x] ${LOG_DIR}/${LOG_BASE}/${LOG_BASE}.csv already exists."
- continue
- fi
-
- mkdir -p ${LOG_DIR}/${LOG_BASE}
- execute_experiment ${LOG_DIR} ${LOG_BASE} &
- if [[ $(jobs -r -p | wc -l) -ge $PARALLEL_FACTOR ]]; then
- echo "[x] Waiting for a job to terminate because $PARALLEL_FACTOR jobs are running."
- wait -n
- fi
- done
- done
- done
- done
- done
- done
-done
-wait
-echo "[x] Finished executing all experiments."
\ No newline at end of file
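
All of the deleted scripts throttle concurrency the same way: each experiment is launched in the background, and once `jobs -r -p | wc -l` reports PARALLEL_FACTOR running jobs the loop blocks on `wait -n` until one of them exits. A minimal, self-contained sketch of that pattern follows (requires bash 4.3+ for `wait -n`; `run_one_experiment` is a hypothetical stand-in for the real execute_experiment function):

#!/bin/bash
# Sketch of the bounded-parallelism pattern used by the deleted scripts.
PARALLEL_FACTOR=2

run_one_experiment () {
    # Hypothetical placeholder workload; the real scripts invoke
    # `python3 main.py --flagfile=...` here.
    sleep "$1"
    echo "[x] Finished experiment that slept for $1 seconds."
}

for DURATION in 3 1 2 5 4; do
    run_one_experiment "${DURATION}" &
    if [[ $(jobs -r -p | wc -l) -ge ${PARALLEL_FACTOR} ]]; then
        # At most PARALLEL_FACTOR jobs run at once; block until any one exits.
        wait -n
    fi
done
wait
echo "[x] All experiments finished."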
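
The loops also carry a "TODO: Make this more elegant." next to the chain of if-blocks that pairs each POISSON_ARRIVAL_RATE with its BASE_ARRIVAL_RATE. One way to do that pairing, sketched here with the arrays' values used only as illustration, is to iterate over array indices so the two arrays stay in lockstep:

#!/bin/bash
# Sketch of index-based pairing of the two rate arrays (illustrative only).
POISSON_ARRIVAL_RATES=(6 8 10)
BASE_ARRIVAL_RATES=(3 4 5)

for i in "${!POISSON_ARRIVAL_RATES[@]}"; do
    # "${!ARRAY[@]}" expands to the indices, keeping the arrays aligned.
    POISSON_ARRIVAL_RATE=${POISSON_ARRIVAL_RATES[$i]}
    BASE_ARRIVAL_RATE=${BASE_ARRIVAL_RATES[$i]}
    echo "poisson_arrival_rate=${POISSON_ARRIVAL_RATE} base_arrival_rate=${BASE_ARRIVAL_RATE}"
done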