From eccf82bb8c05b5aad799402020d7422e91bfbe25 Mon Sep 17 00:00:00 2001 From: TaeYoon Date: Tue, 11 Feb 2025 23:23:52 +0900 Subject: [PATCH 01/38] Feat sps collect --- .../aws/collector/server/sps/collect_sps.py | 232 ++++++++++++++++++ .../aws/collector/server/sps/requirements.txt | 15 ++ .../aws/collector/server/sps/sps_query_api.py | 109 ++++++++ .../aws/collector/server/sps/start_query.sh | 9 + 4 files changed, 365 insertions(+) create mode 100644 collector/spot-dataset/aws/collector/server/sps/collect_sps.py create mode 100644 collector/spot-dataset/aws/collector/server/sps/requirements.txt create mode 100644 collector/spot-dataset/aws/collector/server/sps/sps_query_api.py create mode 100644 collector/spot-dataset/aws/collector/server/sps/start_query.sh diff --git a/collector/spot-dataset/aws/collector/server/sps/collect_sps.py b/collector/spot-dataset/aws/collector/server/sps/collect_sps.py new file mode 100644 index 00000000..17ac09df --- /dev/null +++ b/collector/spot-dataset/aws/collector/server/sps/collect_sps.py @@ -0,0 +1,232 @@ +# ------ import module ------ +from datetime import datetime, timezone +import boto3.session, botocore +import sys, os, argparse +import pickle, gzip, json +import pandas as pd +from io import StringIO + +# ------ import user module ------ +# memo: change the path +sys.path.append("/home/ubuntu/spotlake/utility") +from slack_msg_sender import send_slack_message +from sps_query_api import query_sps + +def main(): + # ------ Setting Client ------ + session = boto3.session.Session(profile_name="spotlake") + s3 = session.resource("s3") + s3_client = session.client("s3", region_name="us-west-2") + + # ------ Create Index Files ------ + # memo: change the cloud path + CURRENT_PATH = "/home/ubuntu/spotlake/collector/spot-dataset/aws/collector/" + + CREDENTIAL_START_INDEX_FILE_NAME = f"{CURRENT_PATH}/credential_index.txt" + if not os.path.exists(CREDENTIAL_START_INDEX_FILE_NAME): + with open(CREDENTIAL_START_INDEX_FILE_NAME, 'w') as file: + file.write('0\n0') + TARGET_CAPACITY_INDEX_FILE_NAME = f"{CURRENT_PATH}/target_capacity_index.txt" + if not os.path.exists(TARGET_CAPACITY_INDEX_FILE_NAME): + with open(TARGET_CAPACITY_INDEX_FILE_NAME, 'w') as file: + file.write('0\n0') + + # ------ Receive UTC Time Data ------ + parser = argparse.ArgumentParser() + parser.add_argument('--timestamp', dest='timestamp', action='store') + args = parser.parse_args() + timestamp_utc = datetime.strptime(args.timestamp, "%Y-%m-%dT%H:%M") + + print(f"스크립트 실행 시작 시간 (UTC) : {timestamp_utc}") + + # ------ Modify Date Data Format ------ + date = args.timestamp.split("T")[0] + timestamp_utc = timestamp_utc.replace(minute=((timestamp_utc.minute // 10) * 10), second=0) + S3_DIR_NAME = timestamp_utc.strftime("%Y/%m/%d") + S3_OBJECT_PREFIX = timestamp_utc.strftime("%H-%M") + execution_time_start = datetime.now(timezone.utc) + + # ------ Save Value of Credential Start Index ------ + with open(CREDENTIAL_START_INDEX_FILE_NAME, 'r') as f: + init_credential_index, current_credential_index = map(int, f.readlines()) + + # ------ Set Target Capacities ------ + target_capacities = [1, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50] + with open(TARGET_CAPACITY_INDEX_FILE_NAME, 'r') as f: + init_target_capacity_index, target_capacity_index = map(int, f.readlines()) + target_capacity_index = target_capacity_index % len(target_capacities) + target_capacity = target_capacities[target_capacity_index] + + # ------ Load Workload File ------- + BUCKET_NAME = "spotlake" + BUCKET_FILE_PATH = "rawdata/aws/workloads" + + 
start_time = datetime.now(timezone.utc) + workload = None + try: + key = f"{BUCKET_FILE_PATH}/{S3_DIR_NAME}/binpacked_workloads.pkl.gz" + workload = pickle.load(gzip.open(s3.Object(BUCKET_NAME, key).get()["Body"])) + + local_workload_path = f"{CURRENT_PATH}/{date}_binpacked_workloads.pkl.gz" + + # workload파일을 새로 받았다면 다운로드 + if not os.path.exists(local_workload_path): + for filename in os.listdir(f"{CURRENT_PATH}"): + if "_binpacked_workloads.pkl.gz" in filename: + os.remove(f"{CURRENT_PATH}/{filename}") + + s3_client.download_file(BUCKET_NAME, key, local_workload_path) + # workload 파일이 바뀌었으므로 계정 묶음 change + init_credential_index = 1800 if init_credential_index == 0 else 0 + with open(CREDENTIAL_START_INDEX_FILE_NAME, 'w') as f: + f.write(f"{str(init_credential_index)}\n{str(init_credential_index)}") + # workload 파일이 바뀌었으므로 index location save + init_target_capacity_index = target_capacity_index + with open(TARGET_CAPACITY_INDEX_FILE_NAME, 'w') as f: + f.write(f"{str(init_target_capacity_index)}\n{str(init_target_capacity_index)}") + except Exception as e: + message = f"bucket : {BUCKET_NAME}, object : {key} 가 수집되지 않았습니다.\n서버에 있는 로컬 workload파일을 불러옵니다." + send_slack_message(message) + print(message) + is_local = False + for filename in os.listdir(f"{CURRENT_PATH}"): + if "_binpacked_workloads.pkl.gz" in filename: + print(f"로컬 워크로드 파일 {CURRENT_PATH}/{filename} 사용") + with open(f"{CURRENT_PATH}/{filename}", 'rb') as f: + workload = pickle.load(gzip.open(f)) + is_local = True + break + if not is_local: + message = f"로컬파일에 workload파일이 존재하지 않습니다." + send_slack_message(message) + print(message) + raise Exception("does not exist local workloads file") + print(f"계정 시작 인덱스 : {current_credential_index}") + + # ------ Load Credential File ------ + CREDENTIAL_FILE_PATH = "aws/credentials/credential_3699.csv" + credentials = None + try: + csv_content = s3.Object(BUCKET_NAME, CREDENTIAL_FILE_PATH).get()["Body"].read().decode('utf-8') + credentials = pd.read_csv(StringIO(csv_content)) + except Exception as e: + send_slack_message(e) + print(e) + raise e + + end_time = datetime.now(timezone.utc) + print(f"Load credential and workload time : {(end_time - start_time).total_seconds():.4f} ms") + + # ------ Start Query Per Target Capacity ------ + start_time = datetime.now(timezone.utc) + start_credential_index = current_credential_index + + try: + df_list = [] + for scenarios in workload: + while True: + try: + args = (credentials.iloc[current_credential_index], scenarios, target_capacity) + current_credential_index += 1 + df = query_sps(args) + df_list.append(df) + break + except botocore.exceptions.ClientError as e: + if e.response['Error']['Code'] == 'MaxConfigLimitExceeded': + continue + else: + send_slack_message(e) + print(e) + raise e + except Exception as e: + send_slack_message(e) + print(e) + raise e + break + sps_df = pd.concat(df_list, axis=0, ignore_index=True) + except Exception as e: + message = f"error at query_sps\nerror : {e}" + send_slack_message(message) + print(message) + raise e + + # ------ Update config files ------ + next_target_capacity_index = (target_capacity_index + 1) % len(target_capacities) + print(next_target_capacity_index) + if next_target_capacity_index == init_target_capacity_index: + with open(CREDENTIAL_START_INDEX_FILE_NAME, "w") as f: + f.write(f"{str(init_credential_index)}\n{str(init_credential_index)}") + else: + with open(CREDENTIAL_START_INDEX_FILE_NAME, "w") as f: + f.write(f"{str(init_credential_index)}\n{str(current_credential_index)}") + with 
open(TARGET_CAPACITY_INDEX_FILE_NAME, "w") as f: + f.write(f"{str(init_target_capacity_index)}\n{str(next_target_capacity_index)}") + + end_time = datetime.now(timezone.utc) + print(f"Target Capacity {target_capacity} query time is {(end_time - start_time).total_seconds() * 1000 / 60000:.2f} min") + print(f"사용한 credential range : {(start_credential_index, current_credential_index)}") + + start_time = datetime.now(timezone.utc) + # ------ Save Dataframe File ------ + try: + object_name = f"{S3_OBJECT_PREFIX}_sps_{target_capacity}.pkl" + saved_filename = f"{CURRENT_PATH}/" + f"{object_name}" + try: + pickle.dump(sps_df, open(saved_filename, "wb")) + gzip.open(f"{saved_filename}.gz", "wb").writelines(open(f"{saved_filename}", "rb")) + except Exception as e: + send_slack_message(e) + print(e) + # memo: change the saving cloud path + s3_client.upload_fileobj(open(f"{saved_filename}.gz", "rb"), BUCKET_NAME, f"rawdata/aws/sps/{S3_DIR_NAME}/{S3_OBJECT_PREFIX}_sps_{target_capacity}.pkl.gz") + os.remove(f"{saved_filename}") + os.remove(f"{saved_filename}.gz") + except Exception as e: + send_slack_message(e) + print(e) + raise e + end_time = datetime.now(timezone.utc) + print(f"Saving time of DF File is {(end_time - start_time).total_seconds() * 1000 / 60000:.2f} min") + + # ------ Monitoring for total execution time ------ + execution_time_end = datetime.now(timezone.utc) + total_execution_time = (execution_time_end - execution_time_start).total_seconds() + if total_execution_time >= 600000: + message = f"sps 쿼리 시간이 10분을 초과하였습니다 : {total_execution_time} ms" + message += f"\n실행 시작 시간 (UTC) : {timestamp_utc}" + send_slack_message(message) + print(message) + + # ------ Upload Collecting Data Number at Cloud Logs ------ + log_client = session.client('logs', 'us-west-2') + # memo: change the log group name + log_group_name = "SPS-Server-Data-Count" + log_stream_name = "aws" + + try: + message = json.dumps({"MUMBER_ROWS" : sps_df.shape[0]}) + timestamp = int(datetime.now(timezone.utc).timestamp() * 1000) + try: + response = log_client.put_log_events( + logGroupName = log_group_name, + logStreamName = log_stream_name, + logEvents = [ + { + 'timestamp' : timestamp, + 'message' : message + }, + ], + ) + except Exception as e: + print(e) + raise e + except Exception as e: + print(e) + raise e + print(f"수집된 DataFrame 행 수 : {sps_df.shape[0]}") + +if __name__ == "__main__": + start_time = datetime.now(timezone.utc) + main() + end_time = datetime.now(timezone.utc) + print(f"Running time is {(end_time - start_time).total_seconds() * 1000 / 60000:.2f} min") \ No newline at end of file diff --git a/collector/spot-dataset/aws/collector/server/sps/requirements.txt b/collector/spot-dataset/aws/collector/server/sps/requirements.txt new file mode 100644 index 00000000..078fc6a5 --- /dev/null +++ b/collector/spot-dataset/aws/collector/server/sps/requirements.txt @@ -0,0 +1,15 @@ +absl-py +boto3 +botocore +immutabledict +jmespath +numpy +ortools +pandas +protobuf +python-dateutil +pytz +s3transfer +six +tzdata +urllib3 diff --git a/collector/spot-dataset/aws/collector/server/sps/sps_query_api.py b/collector/spot-dataset/aws/collector/server/sps/sps_query_api.py new file mode 100644 index 00000000..01cf4d3c --- /dev/null +++ b/collector/spot-dataset/aws/collector/server/sps/sps_query_api.py @@ -0,0 +1,109 @@ +import boto3 +import botocore +import time +import requests +import pandas as pd +import numpy as np + +IDX_INSTANCE_TYPE = 0 +IDX_REGION_NAMES = 1 +IDX_NUMBER_RESPONSE = 2 + +REGION=None + +# SPS 점수를 계정별로 받아오는 함수입니다. 
+# args는 다음과 같이 구성된 튜플이어야 합니다 +# (credential, scenarios, target_capacity) +# credential은 dataframe의 행 형태로 주어져야 합니다. +# scenarios는 쿼리 시나리오 50개 묶음이어야 합니다. +def query_sps(args): + credential = args[0] + scenarios = args[1] + target_capacity = args[2] + region = get_region() + + session = boto3.session.Session( + aws_access_key_id = credential["AccessKeyId"], + aws_secret_access_key = credential["SecretAccessKey"] + ) + ec2 = session.client('ec2', region_name = region) + + sps_dict = { + "InstanceType" : [], + "Region" : [], + "AZ" : [], + "SPS" : [], + "TargetCapacity" : [], + "T3": [], + "T2": [] + } + + for scenario in scenarios: + instance_type = scenario[IDX_INSTANCE_TYPE] + region_names = scenario[IDX_REGION_NAMES] + + # exponential backoff 전략을 사용합니다. + retries = 0 + max_retries = 10 + while retries <= max_retries: + try: + response = ec2.get_spot_placement_scores( + InstanceTypes = [instance_type], + RegionNames = region_names, + SingleAvailabilityZone = True, + TargetCapacity = target_capacity + ) + scores = response["SpotPlacementScores"] + break + except botocore.exceptions.ClientError as e: + if e.response['Error']['Code'] == "RequestLimitExceeded": + wait_time = 2 ** retries + print(f"RequestLimitExceeded! {wait_time}초 후 재시도합니다.") + time.sleep(wait_time) + retries += 1 + else: + raise e + + for score in scores: + sps_dict["InstanceType"].append(instance_type) + sps_dict["Region"].append(score["Region"]) + sps_dict["AZ"].append(score['AvailabilityZoneId']) + sps_dict["SPS"].append(int(score["Score"])) + sps_dict["TargetCapacity"].append(target_capacity) + if score['AvailabilityZoneId'] == 3: + sps_dict["T3"].append(target_capacity) + else: + sps_dict["T3"].append(0) + if score['AvailabilityZoneId'] == 1: + sps_dict["T2"].append(0) + else: + sps_dict["T2"].append(target_capacity) + + return pd.DataFrame(sps_dict) + +def get_token(): + token_url = "http://169.254.169.254/latest/api/token" + headers = {"X-aws-ec2-metadata-token-ttl-seconds": "5"} + response = requests.put(token_url, headers=headers) + if response.status_code == 200: + return response.text + else: + raise Exception("토큰을 가져오는 데 실패했습니다. 상태 코드: {}".format(response.status_code)) + +def get_region(): + global REGION + if REGION is not None: + return REGION + token = get_token() + if token: + metadata_url = "http://169.254.169.254/latest/dynamic/instance-identity/document" + headers = {"X-aws-ec2-metadata-token": token} + response = requests.get(metadata_url, headers=headers) + if response.status_code == 200: + document = response.json() + REGION = document.get("region") + return REGION + else: + raise Exception("메타데이터를 가져오는 데 실패했습니다. 
상태 코드: {}".format(response.status_code)) + else: + raise Exception("토큰이 없습니다.") \ No newline at end of file diff --git a/collector/spot-dataset/aws/collector/server/sps/start_query.sh b/collector/spot-dataset/aws/collector/server/sps/start_query.sh new file mode 100644 index 00000000..9cd79487 --- /dev/null +++ b/collector/spot-dataset/aws/collector/server/sps/start_query.sh @@ -0,0 +1,9 @@ +#!/bin/bash +export PYTHONPATH=/home/ubuntu/.local/lib/python3.11/site-packages +export AWS_CONFIG_FILE=/home/ubuntu/.aws/config +export AWS_SHARED_CREDENTIALS_FILE=/home/ubuntu/.aws/credentials + +current_date=$(date -u '+%Y-%m-%dT%H:%M') +echo "Cron Job Executed at $current_date" > /home/ubuntu/cron_test.log + +python3 /home/ubuntu/collect_sps.py --timestamp "$current_date" > /home/ubuntu/cron_output.log 2>&1 From 637070582b553b859b885367a227d9cf060a5466 Mon Sep 17 00:00:00 2001 From: TaeYoon Date: Tue, 11 Feb 2025 23:24:42 +0900 Subject: [PATCH 02/38] Feat post processing --- .../post_processing_data/compare_data.py | 118 ++++++++++++ .../spotlake_post_processing.py | 172 ++++++++++++++++++ .../post_processing_data/upload_data.py | 124 +++++++++++++ 3 files changed, 414 insertions(+) create mode 100644 collector/spot-dataset/aws/collector/serverless/post_processing_data/compare_data.py create mode 100644 collector/spot-dataset/aws/collector/serverless/post_processing_data/spotlake_post_processing.py create mode 100644 collector/spot-dataset/aws/collector/serverless/post_processing_data/upload_data.py diff --git a/collector/spot-dataset/aws/collector/serverless/post_processing_data/compare_data.py b/collector/spot-dataset/aws/collector/serverless/post_processing_data/compare_data.py new file mode 100644 index 00000000..ada6ca54 --- /dev/null +++ b/collector/spot-dataset/aws/collector/serverless/post_processing_data/compare_data.py @@ -0,0 +1,118 @@ +# ------ import module ------ +import pandas as pd +import numpy as np + +# ------ import user module ------ +from slack_msg_sender import send_slack_message + +# compare previous collected workload with current collected workload +# return changed workload +def compare(previous_df, current_df, workload_cols, feature_cols): + previous_df.loc[:,'Workload'] = previous_df[workload_cols].apply(lambda row: ':'.join(row.values.astype(str)), axis=1) + previous_df.loc[:,'Feature'] = previous_df[feature_cols].apply(lambda row: ':'.join(row.values.astype(str)), axis=1) + current_df.loc[:,'Workload'] = current_df[workload_cols].apply(lambda row: ':'.join(row.values.astype(str)), axis=1) + current_df.loc[:,'Feature'] = current_df[feature_cols].apply(lambda row: ':'.join(row.values.astype(str)), axis=1) + + current_indices = current_df[['Workload', 'Feature']].sort_values(by='Workload').index + current_values = current_df[['Workload', 'Feature']].sort_values(by='Workload').values + previous_indices = previous_df[['Workload', 'Feature']].sort_values(by='Workload').index + previous_values = previous_df[['Workload', 'Feature']].sort_values(by='Workload').values + + changed_indices = [] + removed_indices = [] + + prev_idx = 0 + curr_idx = 0 + while True: + if (curr_idx == len(current_indices)) and (prev_idx == len(previous_indices)): + break + elif curr_idx == len(current_indices): + prev_workload = previous_values[prev_idx][0] + if prev_workload not in current_values[:,0]: + removed_indices.append(previous_indices[prev_idx]) + prev_idx += 1 + continue + else: + send_slack_message(f"{prev_workload}, {curr_workload} workload error") + print(f"{prev_workload}, 
{curr_workload} workload error") + raise Exception('workload error') + break + elif prev_idx == len(previous_indices): + curr_workload = current_values[curr_idx][0] + curr_feature = current_values[curr_idx][1] + if curr_workload not in previous_values[:,0]: + changed_indices.append(current_indices[curr_idx]) + curr_idx += 1 + continue + else: + send_slack_message(f"{prev_workload}, {curr_workload} workload error") + print(f"{prev_workload}, {curr_workload} workload error") + raise Exception('workload error') + break + + prev_workload = previous_values[prev_idx][0] + prev_feature = previous_values[prev_idx][1] + curr_workload = current_values[curr_idx][0] + curr_feature = current_values[curr_idx][1] + + if prev_workload != curr_workload: + if curr_workload not in previous_values[:,0]: + changed_indices.append(current_indices[curr_idx]) + curr_idx += 1 + elif prev_workload not in current_values[:,0]: + removed_indices.append(previous_indices[prev_idx]) + prev_idx += 1 + continue + else: + send_slack_message(f"{prev_workload}, {curr_workload} workload error") + print(f"{prev_workload}, {curr_workload} workload error") + raise Exception('workload error') + else: + if prev_feature != curr_feature: + changed_indices.append(current_indices[curr_idx]) + curr_idx += 1 + prev_idx += 1 + changed_df = current_df.loc[changed_indices].drop(['Workload', 'Feature'], axis=1) + removed_df = previous_df.loc[removed_indices].drop(['Workload', 'Feature'], axis=1) + + for col in feature_cols: + removed_df[col] = 0 + + # removed_df have one more column, 'Ceased' + removed_df['Ceased'] = True + + return changed_df, removed_df + +# ------ Compare the values of T3 and T2 ------ +def compare_max_instance(merge_df, previous_df, target_capacities, target_capacity): + condition = (previous_df['InstanceType'] == merge_df['InstanceType']) & (previous_df['AZ'] == merge_df['AZ']) + current_df = merge_df + + current_df.loc[condition, 'T3'] = np.maximum( + previous_df.loc[condition, 'T3'], merge_df.loc[condition, 'T3'] + ) + current_df.loc[condition, 'T2'] = np.maximum( + previous_df.loc[condition, 'T2'], merge_df.loc[condition, 'T2'] + ) + + current_df.loc[condition & (merge_df['T3'] == target_capacities[target_capacity]), 'T2'] = target_capacities[target_capacity] + + if target_capacity == 0: + current_df.loc[condition & (merge_df['T3'] == 0), 'T3'] = 0 + current_df.loc[condition & (merge_df['T2'] == 0), 'T2'] = 0 + else: + # Merging collection and previous data + current_df = pd.merge( + current_df, + previous_df[['InstanceType', 'AZ', 'SPS']], + on=['InstanceType', 'AZ'], + how='left', + suffixes=('', '_new') + ) + # Overwrite SPS value of target capacity 1 + current_df['SPS_new'] = current_df['SPS_new'].dropna() + current_df['SPS'] = current_df['SPS_new'].combine_first(current_df['SPS']) + # Delete unnecessary column + current_df = current_df.drop(columns=['SPS_new']) + + return current_df \ No newline at end of file diff --git a/collector/spot-dataset/aws/collector/serverless/post_processing_data/spotlake_post_processing.py b/collector/spot-dataset/aws/collector/serverless/post_processing_data/spotlake_post_processing.py new file mode 100644 index 00000000..c599fab6 --- /dev/null +++ b/collector/spot-dataset/aws/collector/serverless/post_processing_data/spotlake_post_processing.py @@ -0,0 +1,172 @@ +# ------ import module ------ +from datetime import datetime, timezone, timedelta +import boto3 +import pickle, gzip, json +import pandas as pd +import numpy as np +import os + +# ------ import user module ------ +from 
slack_msg_sender import send_slack_message +from upload_data import upload_timestream, update_latest, save_raw, update_query_selector, update_config +from compare_data import compare, compare_max_instance + +def main(): + # ------ Set time data ------ + start_time = datetime.now(timezone.utc) + timestamp = start_time.replace(minute=((start_time.minute // 10) * 10), second=0) - timedelta(minutes=10) + S3_DIR_NAME = timestamp.strftime('%Y/%m/%d') + S3_OBJECT_PREFIX = timestamp.strftime('%H-%M') + time_value = timestamp.strftime("%Y-%m-%d %H:%M:%S") + + # ------ Create Boto3 Session ------ + s3 = boto3.resource("s3") + + BUCKET_NAME = os.environ.get('S3_BUCKET') + BUCKET_FILE_PATH = os.environ.get('PARENT_PATH') + target_capacities = [1, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50] + + # ------ Load Data from PKL File in S3 ------ + config_path = "config.txt" + text = s3.Object(BUCKET_NAME, f"config/{config_path}").get()["Body"].read().decode('utf-8').split("\n") + + target_capacity = int(text[0].strip()) + + keys = [line.format( + BUCKET_FILE_PATH=BUCKET_FILE_PATH, + S3_DIR_NAME=S3_DIR_NAME, + S3_OBJECT_PREFIX=S3_OBJECT_PREFIX, + target_capacity=target_capacities[target_capacity] + ) for line in text] + + try: + sps_df = pickle.load(gzip.open(s3.Object(BUCKET_NAME, keys[1].strip()).get()["Body"])) + spotinfo_df = pickle.load(gzip.open(s3.Object(BUCKET_NAME, keys[2].strip()).get()["Body"])) + ondemand_price_df = pickle.load(gzip.open(s3.Object(BUCKET_NAME, keys[3].strip()).get()["Body"])) + spot_price_df = pickle.load(gzip.open(s3.Object(BUCKET_NAME, keys[4].strip()).get()["Body"])) + except Exception as e: + send_slack_message(e) + print(e) + + # ------ Create a DF by Selecting Only The Columns Required ------ + try: + sps_df = sps_df[['InstanceType', 'Region', 'AZ', 'SPS', 'T3', 'T2']] + spotinfo_df = spotinfo_df[['InstanceType', 'Region', 'IF']] + ondemand_price_df = ondemand_price_df[['InstanceType', 'Region', 'OndemandPrice']] + spot_price_df = spot_price_df[['InstanceType', 'AZ', 'SpotPrice']] + except Exception as e: + send_slack_message(e) + print(e) + + # ------ Formatting Data ------ + spot_price_df['SpotPrice'] = spot_price_df['SpotPrice'].astype('float').round(5) + ondemand_price_df['OndemandPrice'] = ondemand_price_df['OndemandPrice'].astype('float').round(5) + + # ------ Need to Change to Outer Join ------ + merge_df = pd.merge(sps_df, spotinfo_df, how="outer") + merge_df = pd.merge(merge_df, ondemand_price_df, how="outer") + merge_df = pd.merge(merge_df, spot_price_df, how="outer") + + merge_df['Savings'] = 100.0 - (merge_df['SpotPrice'] * 100 / merge_df['OndemandPrice']) + merge_df['Savings'] = merge_df['Savings'].fillna(-1) + merge_df['SPS'] = merge_df['SPS'].fillna(-1) + merge_df['SpotPrice'] = merge_df['SpotPrice'].fillna(-1) + merge_df['OndemandPrice'] = merge_df['OndemandPrice'].fillna(-1) + merge_df['IF'] = merge_df['IF'].fillna(-1) + + merge_df['Savings'] = merge_df['Savings'].astype('int') + merge_df['SPS'] = merge_df['SPS'].astype('int') + merge_df['T3'] = merge_df['T3'].fillna(0).astype('int') + merge_df['T2'] = merge_df['T2'].fillna(0).astype('int') + + merge_df = merge_df.drop(merge_df[(merge_df['AZ'].isna()) | (merge_df['Region'].isna()) | (merge_df['InstanceType'].isna())].index) + + merge_df.reset_index(drop=True, inplace=True) + merge_df['Time'] = time_value + + end_time = datetime.now(timezone.utc) + print(f"Merging time is {(end_time - start_time).total_seconds() * 1000 / 60000:.2f} min") + + # ------ Check The Previous DF File in S3 and Local ------ + previous_df 
= None + start_time = datetime.now(timezone.utc) + filename = 'latest_aws.json' + LATEST_PATH = f'{BUCKET_FILE_PATH}/latest_data/{filename}' + try: + previous_df = pd.DataFrame(json.load(s3.Object(BUCKET_NAME, LATEST_PATH).get()['Body'])) + + # Verify that the data is in the old format + columns_to_check = ["T3", "T2"] + existing_columns = [col for col in columns_to_check if col in previous_df.columns] + + if len(existing_columns) == 0: + raise + else: + previous_df = previous_df.drop(columns=['Id']) + except: + # If system is first time uploading data, make a new one and upload it to TSDB + try: + update_latest(merge_df) + save_raw(merge_df, timestamp) + upload_timestream(merge_df, timestamp) + update_config(config_path, text, target_capacity, target_capacities) + except Exception as e: + send_slack_message(e) + print(e) + end_time = datetime.now(timezone.utc) + print(f"Checking time of previous json file is {(end_time - start_time).total_seconds() * 1000 / 60000:.2f} min") + return print("Can't load the previous df from s3 bucket or First run since changing the collector") + + end_time = datetime.now(timezone.utc) + print(f"Checking time of previous json file is {(end_time - start_time).total_seconds() * 1000 / 60000:.2f} min") + + start_time = datetime.now(timezone.utc) + # ------ Compare T3 and T2 Data ------ + current_df = compare_max_instance(merge_df, previous_df, target_capacities, target_capacity) + + # ------ Upload Merge DF to s3 Bucket ------ + try: + update_latest(current_df) + save_raw(current_df, timestamp) + except Exception as e: + send_slack_message(e) + print(e) + + # ------ Compare All Data ------ + workload_cols = ['InstanceType', 'Region', 'AZ'] + feature_cols = ['SPS', 'T3', 'T2', 'IF', 'SpotPrice', 'OndemandPrice'] + + changed_df, removed_df = compare(previous_df, current_df, workload_cols, feature_cols) # compare previous_df and current_df to extract changed rows) + end_time = datetime.now(timezone.utc) + print(f"Compare time is {(end_time - start_time).total_seconds() * 1000 / 60000:.2f} min") + + start_time = datetime.now(timezone.utc) + # ------ Upload TSDB ------ + try: + upload_timestream(changed_df, timestamp) + upload_timestream(removed_df, timestamp) + except Exception as e: + send_slack_message(e) + print(e) + end_time = datetime.now(timezone.utc) + print(f"Uploading time to TSDB is {(end_time - start_time).total_seconds() * 1000 / 60000:.2f} min") + + start_time = datetime.now(timezone.utc) + # ------ Upload Spotlake Query Selector to S3 ------ + try: + update_query_selector(changed_df) + except Exception as e: + send_slack_message(e) + print(e) + end_time = datetime.now(timezone.utc) + print(f"Uploading time of query selector data is {(end_time - start_time).total_seconds() * 1000 / 60000:.2f} min") + + # ------ Write Target Capacity Value in Text File ------ + update_config(config_path, text, target_capacity, target_capacities) + +def lambda_handler(event, context): + start_time = datetime.now(timezone.utc) + main() + end_time = datetime.now(timezone.utc) + print(f"Running time is {(end_time - start_time).total_seconds() * 1000 / 60000:.2f} min") + return "Process completed successfully" \ No newline at end of file diff --git a/collector/spot-dataset/aws/collector/serverless/post_processing_data/upload_data.py b/collector/spot-dataset/aws/collector/serverless/post_processing_data/upload_data.py new file mode 100644 index 00000000..3dc45451 --- /dev/null +++ b/collector/spot-dataset/aws/collector/serverless/post_processing_data/upload_data.py @@ -0,0 
+1,124 @@ +# ------ import module ------ +import boto3 +import pandas as pd +import os +import json +from botocore.config import Config + +# ------ import user module ------ +from slack_msg_sender import send_slack_message + +BUCKET_NAME = os.environ.get('S3_BUCKET') +BUCKET_FILE_PATH = os.environ.get('PARENT_PATH') + +DATABASE_NAME = os.environ.get('DATABASE') +AWS_TABLE_NAME = "aws" +write_client = boto3.client('timestream-write', config=Config(read_timeout=20, max_pool_connections=5000, retries={'max_attempts':10})) + +# Submit Batch To Timestream +def submit_batch(records, counter, recursive): + if recursive == 10: + return + try: + result = write_client.write_records(DatabaseName=DATABASE_NAME, TableName = AWS_TABLE_NAME, Records=records, CommonAttributes={}) + except write_client.exceptions.RejectedRecordsException as err: + re_records = [] + for rr in err.response["RejectedRecords"]: + send_slack_message(rr['Reason']) + print(rr['Reason']) + re_records.append(records[rr["RecordIndex"]]) + submit_batch(re_records, counter, recursive + 1) + except Exception as err: + send_slack_message(err) + print(err) + exit() + + +# Check Database And Table Are Exist and Upload Data to Timestream +def upload_timestream(data, timestamp): + time_value = str(int(timestamp.timestamp() * 1000)) + + records = [] + counter = 0 + for idx, row in data.iterrows(): + dimensions = [] + for column in data.columns: + if column in ['InstanceType', 'Region', 'AZ', 'OndemandPrice', 'Ceased']: + dimensions.append({'Name':column, 'Value': str(row[column])}) + submit_data = { + 'Dimensions': dimensions, + 'MeasureName': 'aws_values', + 'MeasureValues': [], + 'MeasureValueType': 'MULTI', + 'Time': time_value + } + for column, types in [('SPS', 'BIGINT'), ('T3', 'BIGINT'), ('T2', 'BIGINT'), ('IF', 'DOUBLE'), ('SpotPrice', 'DOUBLE')]: + submit_data['MeasureValues'].append({'Name': column, 'Value': str(row[column]), 'Type' : types}) + + records.append(submit_data) + counter += 1 + if len(records) == 100: + submit_batch(records, counter, 0) + records = [] + + if len(records) != 0: + submit_batch(records, counter, 0) + + +def update_latest(data): + # Upload file to use as previous collection data + filename = 'latest_aws.json' + LATEST_PATH = f'latest_data/{filename}' + + data['Id'] = data.index+1 + result = data.to_json(f"/tmp/{filename}", orient="records") + + s3 = boto3.resource('s3') + s3_client = boto3.client('s3') + + with open(f"/tmp/{filename}", 'rb') as f: + s3_client.upload_fileobj(f, BUCKET_NAME, LATEST_PATH) + object_acl = s3.ObjectAcl(BUCKET_NAME, LATEST_PATH) + response = object_acl.put(ACL='public-read') + + data.drop(['Id'], axis=1, inplace=True) + + +def update_query_selector(changed_df): + filename = 'query-selector-aws.json' + s3_path = f'query-selector/{filename}' + s3 = boto3.resource('s3') + query_selector_aws = pd.DataFrame(json.loads(s3.Object(BUCKET_NAME, s3_path).get()['Body'].read())) + query_selector_aws = pd.concat([query_selector_aws[['InstanceType', 'Region', 'AZ']], changed_df[['InstanceType', 'Region', 'AZ']]], axis=0, ignore_index=True).dropna().drop_duplicates(['InstanceType', 'Region', 'AZ']).reset_index(drop=True) + result = query_selector_aws.to_json(f"/tmp/{filename}", orient="records") + s3 = boto3.client('s3') + with open(f"/tmp/{filename}", 'rb') as f: + s3.upload_fileobj(f, BUCKET_NAME, s3_path) + s3 = boto3.resource('s3') + object_acl = s3.ObjectAcl(BUCKET_NAME, s3_path) + response = object_acl.put(ACL='public-read') + + +def save_raw(data, timestamp): + s3_dir_name = 
timestamp.strftime("%Y/%m/%d") + s3_obj_name = timestamp.strftime("%H-%M-%S") + + rawdata = data[['Time', 'InstanceType', 'Region', 'AZ', 'SPS', 'T3', 'T2', 'IF', 'OndemandPrice', 'SpotPrice', 'Savings']] + SAVE_FILENAME = f"/tmp/{s3_obj_name}.csv.gz" + rawdata.to_csv(SAVE_FILENAME, index=False, compression="gzip") + + s3 = boto3.client('s3') + + with open(SAVE_FILENAME, 'rb') as f: + s3.upload_fileobj(f, BUCKET_NAME, f"{BUCKET_FILE_PATH}/{s3_dir_name}/{s3_obj_name}.csv.gz") + + +def update_config(config_path, text, target_capacity, target_capacities): + s3_client = boto3.client('s3') + with open(f"/tmp/{config_path}", "w") as file: + for i, line in enumerate(text): + if i == 0: + file.write(f"{(target_capacity + 1) % len(target_capacities)}\n") + else: + file.write(f"{line}\n") + s3_client.upload_file(f"/tmp/{config_path}", BUCKET_NAME, f"config/{config_path}") From 39490f823774544a959cf3af9a608ee902bb7caa Mon Sep 17 00:00:00 2001 From: TaeYoon Date: Tue, 11 Feb 2025 23:24:50 +0900 Subject: [PATCH 03/38] Feat workload --- .../serverless/workload/load_metadata.py | 53 ++++++ .../serverless/workload/make_workloads.py | 168 ++++++++++++++++++ 2 files changed, 221 insertions(+) create mode 100644 collector/spot-dataset/aws/collector/serverless/workload/load_metadata.py create mode 100644 collector/spot-dataset/aws/collector/serverless/workload/make_workloads.py diff --git a/collector/spot-dataset/aws/collector/serverless/workload/load_metadata.py b/collector/spot-dataset/aws/collector/serverless/workload/load_metadata.py new file mode 100644 index 00000000..6045ed65 --- /dev/null +++ b/collector/spot-dataset/aws/collector/serverless/workload/load_metadata.py @@ -0,0 +1,53 @@ +import boto3 +from collections import Counter + + +# get all available regions +def get_regions(session: boto3.session.Session, region='us-east-1') -> list: + client = session.client('ec2', region_name=region) + describe_args = { + 'AllRegions': False + } + return [region['RegionName'] for region in client.describe_regions(**describe_args)['Regions']] + + +# get instance-az information by region +def get_region_instances(session: boto3.session.Session, region: str): + client = session.client('ec2', region_name=region) + describe_args = { + 'LocationType': 'availability-zone', + } + region_instances = [] + while True: + response = client.describe_instance_type_offerings(**describe_args) + for obj in response['InstanceTypeOfferings']: + it, _, az = obj.values() + region_instances.append((region, it)) + if 'NextToken' not in response: + break + describe_args['NextToken'] = response['NextToken'] + + return region_instances + + +# calculate number of az by region +# first, get region information using get_regions +# second, get az information by region using get_region_instances +def num_az_by_region(): + session = boto3.session.Session() + + regions = get_regions(session) + + total_counter = Counter() + for idx, region in enumerate(regions): + region_counter = Counter(get_region_instances(session, region)) + total_counter += region_counter + + workloads = dict() + for key, cnt in total_counter.items(): + region, it = key + if it not in workloads: + workloads[it] = [] + workloads[it].append((region, cnt)) + + return workloads diff --git a/collector/spot-dataset/aws/collector/serverless/workload/make_workloads.py b/collector/spot-dataset/aws/collector/serverless/workload/make_workloads.py new file mode 100644 index 00000000..ffd420c3 --- /dev/null +++ b/collector/spot-dataset/aws/collector/serverless/workload/make_workloads.py 
@@ -0,0 +1,168 @@ +# reference +# https://developers.google.com/optimization/bin/bin_packing + +# ------ import module ------ +import boto3 +import botocore +import pickle +import os, gzip +from datetime import datetime, timezone, timedelta +from ortools.linear_solver import pywraplp +import io + +# ------ import user module ------ +from load_metadata import num_az_by_region +from slack_msg_sender import send_slack_message + + +# create object of bin packing input data +def create_data_model(weights, capacity): + data = {} + data['weights'] = weights + data['items'] = list(range(len(weights))) + data['bins'] = data['items'] + data['bin_capacity'] = capacity + return data + + +# run bin packing with algorithm name +def bin_packing(weights, capacity, algorithm): + bin_index_list = [] + data = create_data_model(weights, capacity) + solver = pywraplp.Solver.CreateSolver(algorithm) + + x = {} + for i in data['items']: + for j in data['bins']: + x[(i, j)] = solver.IntVar(0, 1, 'x_%i_%i' % (i, j)) + + y = {} + for j in data['bins']: + y[j] = solver.IntVar(0, 1, 'y[%i]' % j) + + for i in data['items']: + solver.Add(sum(x[i, j] for j in data['bins']) == 1) + + for j in data['bins']: + solver.Add( + sum(x[(i, j)] * data['weights'][i] for i in data['items']) <= y[j] * + data['bin_capacity']) + + solver.Minimize(solver.Sum([y[j] for j in data['bins']])) + status = solver.Solve() + if status == pywraplp.Solver.OPTIMAL: + for j in data['bins']: + if y[j].solution_value() == 1: + bin_items = [] + bin_weight = 0 + for i in data['items']: + if x[i, j].solution_value() > 0: + bin_items.append(i) + bin_weight += data['weights'][i] + if bin_weight > 0: + bin_index_list.append((bin_items, bin_weight)) + return bin_index_list + else: + send_slack_message("The problem does not have an optimal solution.") + print('The problem does not have an optimal solution.') + + +# run bin packing algorithm to instance-region workloads +def workload_bin_packing(query, capacity, algorithm): + weights = [weight for instance, weight in query] + bin_index_list = bin_packing(weights, 10, algorithm) + + binpacked = [] + + for bin_index, bin_weight in bin_index_list: + binpacked.append([(query[x][0], query[x][1]) for x in bin_index]) + + return binpacked + + +def get_binpacked_workload(filedate): + s3_client = boto3.client('s3') + s3_resource = boto3.resource('s3') + + workloads = num_az_by_region() + + start_time = datetime.now(timezone.utc) + s3_resource.Object(os.environ.get('S3_BUCKET'), f"monitoring/{filedate}/workloads.pkl").put(Body=pickle.dumps(workloads)) + end_time = datetime.now(timezone.utc) + print(f"Upload time used for minitoring is {(end_time - start_time).total_seconds() * 1000 / 60000:.2f} min") + + result_binpacked = {} + + for instance, query in workloads.items(): + result_binpacked[instance] = workload_bin_packing(query, 10, 'CBC') + + user_queries_list = [] + user_queries = [] + for instance, queries in result_binpacked.items(): + for query in queries: + new_query = [instance, [], 0] + for tup in query: + new_query[1].append(tup[0]) + new_query[2] += tup[1] + user_queries.append(new_query) + if len(user_queries) == 50: + user_queries_list.append(user_queries) + user_queries = [] + + if len(user_queries) != 0: + user_queries_list.append(user_queries) + user_queries = [] + + start_time = datetime.now(timezone.utc) + try: + buffer = io.BytesIO() + pickle.dump(user_queries_list, buffer) + buffer.seek(0) + + compressed_buffer = io.BytesIO() + with gzip.GzipFile(fileobj=compressed_buffer, mode='wb') as gz: + 
gz.write(buffer.getvalue()) + compressed_buffer.seek(0) + except Exception as e: + send_slack_message(e) + + try: + s3_client.upload_fileobj(compressed_buffer, os.environ.get('S3_BUCKET'), f"{os.environ.get('PARENT_PATH')}/workloads/{filedate}/binpacked_workloads.pkl.gz") + except Exception as e: + send_slack_message(e) + end_time = datetime.now(timezone.utc) + print(f"Upload time used for collecting is {(end_time - start_time).total_seconds() * 1000 / 60000:.2f} min") + + return user_queries_list + +def lambda_handler(event, context): + # ------ Set time data ------ + start_time = datetime.now(timezone.utc) + timestamp = start_time.replace(minute=((start_time.minute // 10) * 10), second=0) + timedelta(days=1) + S3_DIR_NAME = timestamp.strftime('%Y/%m/%d') + + # ------ Collect Spot Price ------ + try: + workload = get_binpacked_workload(S3_DIR_NAME) + except botocore.exceptions.ClientError as e: + send_slack_message(e) + print(e) + end_time = datetime.now(timezone.utc) + print(f"collecting time is {(end_time - start_time).total_seconds() * 1000 / 60000:.2f} min") + + # ------ Save Raw Data in S3 ------ + s3_client = boto3.client('s3') + try: + saving_start_time = datetime.now(timezone.utc) + buffer = io.BytesIO() + pickle.dump(workload, buffer) + buffer.seek(0) + + s3_client.upload_fileobj(buffer, os.environ.get('S3_BUCKET'), f"{os.environ.get('PARENT_PATH')}/localfile/workloads.pkl") + except Exception as e: + send_slack_message(e) + print(e) + end_time = datetime.now(timezone.utc) + print(f"Upload time used for back-up is {(end_time - saving_start_time).total_seconds() * 1000 / 60000:.2f} min") + print(f"Running time is {(end_time - start_time).total_seconds() * 1000 / 60000:.2f} min") + return "Process completed successfully" \ No newline at end of file From 9c530bea4c342e91e02ba0efe2a31ce8155cf2c7 Mon Sep 17 00:00:00 2001 From: TaeYoon Date: Tue, 11 Feb 2025 23:28:44 +0900 Subject: [PATCH 04/38] Fix boto3 session --- collector/spot-dataset/aws/collector/server/sps/collect_sps.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/collector/spot-dataset/aws/collector/server/sps/collect_sps.py b/collector/spot-dataset/aws/collector/server/sps/collect_sps.py index 17ac09df..a28e5588 100644 --- a/collector/spot-dataset/aws/collector/server/sps/collect_sps.py +++ b/collector/spot-dataset/aws/collector/server/sps/collect_sps.py @@ -14,7 +14,7 @@ def main(): # ------ Setting Client ------ - session = boto3.session.Session(profile_name="spotlake") + session = boto3.session.Session() s3 = session.resource("s3") s3_client = session.client("s3", region_name="us-west-2") From 123ad250fe24968d27f8ccad3242e77df145e33e Mon Sep 17 00:00:00 2001 From: TaeYoon Date: Tue, 11 Feb 2025 23:30:34 +0900 Subject: [PATCH 05/38] Fix path --- collector/spot-dataset/aws/collector/server/sps/collect_sps.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/collector/spot-dataset/aws/collector/server/sps/collect_sps.py b/collector/spot-dataset/aws/collector/server/sps/collect_sps.py index a28e5588..70e63d00 100644 --- a/collector/spot-dataset/aws/collector/server/sps/collect_sps.py +++ b/collector/spot-dataset/aws/collector/server/sps/collect_sps.py @@ -20,7 +20,7 @@ def main(): # ------ Create Index Files ------ # memo: change the cloud path - CURRENT_PATH = "/home/ubuntu/spotlake/collector/spot-dataset/aws/collector/" + CURRENT_PATH = "/home/ubuntu/spotlake/collector/spot-dataset/aws/collector/server/sps" CREDENTIAL_START_INDEX_FILE_NAME = f"{CURRENT_PATH}/credential_index.txt" if not 
os.path.exists(CREDENTIAL_START_INDEX_FILE_NAME): From f8d685827a5ee2f36a5c3832b03ed77820904995 Mon Sep 17 00:00:00 2001 From: TaeYoon Date: Wed, 12 Feb 2025 00:32:27 +0900 Subject: [PATCH 06/38] Fix: path issue --- .../spot-dataset/aws/collector/server/sps/collect_sps.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/collector/spot-dataset/aws/collector/server/sps/collect_sps.py b/collector/spot-dataset/aws/collector/server/sps/collect_sps.py index 70e63d00..0e3603ed 100644 --- a/collector/spot-dataset/aws/collector/server/sps/collect_sps.py +++ b/collector/spot-dataset/aws/collector/server/sps/collect_sps.py @@ -104,10 +104,10 @@ def main(): print(f"계정 시작 인덱스 : {current_credential_index}") # ------ Load Credential File ------ + SPS_QUERY_BUCKET_NAME = "sps-query-data" CREDENTIAL_FILE_PATH = "aws/credentials/credential_3699.csv" - credentials = None try: - csv_content = s3.Object(BUCKET_NAME, CREDENTIAL_FILE_PATH).get()["Body"].read().decode('utf-8') + csv_content = s3.Object(SPS_QUERY_BUCKET_NAME, CREDENTIAL_FILE_PATH).get()["Body"].read().decode('utf-8') credentials = pd.read_csv(StringIO(csv_content)) except Exception as e: send_slack_message(e) From a127a6bdbd24563b9b9597c0dd7298ed0ce2569a Mon Sep 17 00:00:00 2001 From: TaeYoon Date: Wed, 12 Feb 2025 00:35:10 +0900 Subject: [PATCH 07/38] Fix: path --- collector/spot-dataset/aws/collector/server/sps/collect_sps.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/collector/spot-dataset/aws/collector/server/sps/collect_sps.py b/collector/spot-dataset/aws/collector/server/sps/collect_sps.py index 0e3603ed..92066c39 100644 --- a/collector/spot-dataset/aws/collector/server/sps/collect_sps.py +++ b/collector/spot-dataset/aws/collector/server/sps/collect_sps.py @@ -105,7 +105,8 @@ def main(): # ------ Load Credential File ------ SPS_QUERY_BUCKET_NAME = "sps-query-data" - CREDENTIAL_FILE_PATH = "aws/credentials/credential_3699.csv" + CREDENTIAL_FILE_PATH = "credentials/credential_3699.csv" + credentials = None try: csv_content = s3.Object(SPS_QUERY_BUCKET_NAME, CREDENTIAL_FILE_PATH).get()["Body"].read().decode('utf-8') credentials = pd.read_csv(StringIO(csv_content)) From d1a1e993c41fd9c603b98a160d01ceb4f94279e4 Mon Sep 17 00:00:00 2001 From: TaeYoon Date: Wed, 12 Feb 2025 00:37:47 +0900 Subject: [PATCH 08/38] Temp comment out requests.post in slack_msg_sender --- utility/slack_msg_sender.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utility/slack_msg_sender.py b/utility/slack_msg_sender.py index 3cebdd6b..f534b94d 100644 --- a/utility/slack_msg_sender.py +++ b/utility/slack_msg_sender.py @@ -18,7 +18,7 @@ def send_slack_message(msg): "text": message } - requests.post(url, json=slack_data) + # requests.post(url, json=slack_data) def get_webhook_url(): From 7b27cb0b014a43b3ccf21d1aae124a71d19e83df Mon Sep 17 00:00:00 2001 From: TaeYoon Date: Wed, 12 Feb 2025 00:41:21 +0900 Subject: [PATCH 09/38] Fix path --- collector/spot-dataset/aws/collector/server/sps/collect_sps.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/collector/spot-dataset/aws/collector/server/sps/collect_sps.py b/collector/spot-dataset/aws/collector/server/sps/collect_sps.py index 92066c39..a4a16fcb 100644 --- a/collector/spot-dataset/aws/collector/server/sps/collect_sps.py +++ b/collector/spot-dataset/aws/collector/server/sps/collect_sps.py @@ -105,7 +105,7 @@ def main(): # ------ Load Credential File ------ SPS_QUERY_BUCKET_NAME = "sps-query-data" - CREDENTIAL_FILE_PATH = 
"credentials/credential_3699.csv" + CREDENTIAL_FILE_PATH = "credential/credential_3699.csv" credentials = None try: csv_content = s3.Object(SPS_QUERY_BUCKET_NAME, CREDENTIAL_FILE_PATH).get()["Body"].read().decode('utf-8') From c18c0373fb4c2caea7dc09ff017f00720270e337 Mon Sep 17 00:00:00 2001 From: TaeYoon Date: Wed, 12 Feb 2025 00:49:13 +0900 Subject: [PATCH 10/38] Change test s3 --- collector/spot-dataset/aws/collector/server/sps/collect_sps.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/collector/spot-dataset/aws/collector/server/sps/collect_sps.py b/collector/spot-dataset/aws/collector/server/sps/collect_sps.py index a4a16fcb..2da87e84 100644 --- a/collector/spot-dataset/aws/collector/server/sps/collect_sps.py +++ b/collector/spot-dataset/aws/collector/server/sps/collect_sps.py @@ -169,6 +169,7 @@ def main(): start_time = datetime.now(timezone.utc) # ------ Save Dataframe File ------ + TEST_BUCKET_NAME = "sps-collector" try: object_name = f"{S3_OBJECT_PREFIX}_sps_{target_capacity}.pkl" saved_filename = f"{CURRENT_PATH}/" + f"{object_name}" @@ -179,7 +180,7 @@ def main(): send_slack_message(e) print(e) # memo: change the saving cloud path - s3_client.upload_fileobj(open(f"{saved_filename}.gz", "rb"), BUCKET_NAME, f"rawdata/aws/sps/{S3_DIR_NAME}/{S3_OBJECT_PREFIX}_sps_{target_capacity}.pkl.gz") + s3_client.upload_fileobj(open(f"{saved_filename}.gz", "rb"), TEST_BUCKET_NAME, f"rawdata/aws/sps/{S3_DIR_NAME}/{S3_OBJECT_PREFIX}_sps_{target_capacity}.pkl.gz") os.remove(f"{saved_filename}") os.remove(f"{saved_filename}.gz") except Exception as e: From 3c655733d723e8dc7dfc2a57eff76183dcb9ee76 Mon Sep 17 00:00:00 2001 From: TaeYoon Date: Wed, 12 Feb 2025 00:55:27 +0900 Subject: [PATCH 11/38] Fix shell script --- .../spot-dataset/aws/collector/server/sps/start_query.sh | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/collector/spot-dataset/aws/collector/server/sps/start_query.sh b/collector/spot-dataset/aws/collector/server/sps/start_query.sh index 9cd79487..b0c559f0 100644 --- a/collector/spot-dataset/aws/collector/server/sps/start_query.sh +++ b/collector/spot-dataset/aws/collector/server/sps/start_query.sh @@ -1,9 +1,5 @@ #!/bin/bash -export PYTHONPATH=/home/ubuntu/.local/lib/python3.11/site-packages -export AWS_CONFIG_FILE=/home/ubuntu/.aws/config -export AWS_SHARED_CREDENTIALS_FILE=/home/ubuntu/.aws/credentials - current_date=$(date -u '+%Y-%m-%dT%H:%M') echo "Cron Job Executed at $current_date" > /home/ubuntu/cron_test.log -python3 /home/ubuntu/collect_sps.py --timestamp "$current_date" > /home/ubuntu/cron_output.log 2>&1 +python3 /home/ubuntu/spotlake/collector/spot-dataset/aws/collector/server/sps/collect_sps.py --timestamp "$current_date" > /home/ubuntu/cron_output.log 2>&1 From be8edd9ad5c9ebba5d00b7b2279cbb5ae604897a Mon Sep 17 00:00:00 2001 From: TaeYoon Date: Wed, 12 Feb 2025 12:25:33 +0900 Subject: [PATCH 12/38] Fix tmp break --- collector/spot-dataset/aws/collector/server/sps/collect_sps.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/collector/spot-dataset/aws/collector/server/sps/collect_sps.py b/collector/spot-dataset/aws/collector/server/sps/collect_sps.py index 2da87e84..0de39627 100644 --- a/collector/spot-dataset/aws/collector/server/sps/collect_sps.py +++ b/collector/spot-dataset/aws/collector/server/sps/collect_sps.py @@ -143,7 +143,7 @@ def main(): send_slack_message(e) print(e) raise e - break + sps_df = pd.concat(df_list, axis=0, ignore_index=True) except Exception as e: message = f"error at 
query_sps\nerror : {e}" From b6639e7ba854e8fe9e26ee9be29f1bd4d3613671 Mon Sep 17 00:00:00 2001 From: TaeYoon Date: Wed, 12 Feb 2025 12:26:14 +0900 Subject: [PATCH 13/38] Fix enable message sending --- utility/slack_msg_sender.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utility/slack_msg_sender.py b/utility/slack_msg_sender.py index f534b94d..3cebdd6b 100644 --- a/utility/slack_msg_sender.py +++ b/utility/slack_msg_sender.py @@ -18,7 +18,7 @@ def send_slack_message(msg): "text": message } - # requests.post(url, json=slack_data) + requests.post(url, json=slack_data) def get_webhook_url(): From f8c75789daa917aa4565e21558692430ded8de27 Mon Sep 17 00:00:00 2001 From: TaeYoon Date: Wed, 12 Feb 2025 12:55:26 +0900 Subject: [PATCH 14/38] Refactor const --- .../aws/collector/server/sps/collect_sps.py | 52 +++++++++---------- 1 file changed, 25 insertions(+), 27 deletions(-) diff --git a/collector/spot-dataset/aws/collector/server/sps/collect_sps.py b/collector/spot-dataset/aws/collector/server/sps/collect_sps.py index 0de39627..7e4b1816 100644 --- a/collector/spot-dataset/aws/collector/server/sps/collect_sps.py +++ b/collector/spot-dataset/aws/collector/server/sps/collect_sps.py @@ -13,20 +13,26 @@ from sps_query_api import query_sps def main(): + # ------ Setting Constants ------ + BUCKET_NAME = "sps-collector" # test + CURRENT_LOCAL_BASE_PATH = "/home/ubuntu/spotlake/collector/spot-dataset/aws/collector/server/sps" + WORKLOAD_BASE_PATH = "rawdata/aws/workloads" + SPS_BASE_PATH = "rawdata/aws/sps" + CREDENTIAL_FILE_PATH = "credential/credential_3699.csv" + LOG_GROUP_NAME = "SPS-Server-Data-Count" + LOG_STREAM_NAME = "aws" + # ------ Setting Client ------ session = boto3.session.Session() s3 = session.resource("s3") s3_client = session.client("s3", region_name="us-west-2") # ------ Create Index Files ------ - # memo: change the cloud path - CURRENT_PATH = "/home/ubuntu/spotlake/collector/spot-dataset/aws/collector/server/sps" - - CREDENTIAL_START_INDEX_FILE_NAME = f"{CURRENT_PATH}/credential_index.txt" + CREDENTIAL_START_INDEX_FILE_NAME = f"{CURRENT_LOCAL_BASE_PATH}/credential_index.txt" if not os.path.exists(CREDENTIAL_START_INDEX_FILE_NAME): with open(CREDENTIAL_START_INDEX_FILE_NAME, 'w') as file: file.write('0\n0') - TARGET_CAPACITY_INDEX_FILE_NAME = f"{CURRENT_PATH}/target_capacity_index.txt" + TARGET_CAPACITY_INDEX_FILE_NAME = f"{CURRENT_LOCAL_BASE_PATH}/target_capacity_index.txt" if not os.path.exists(TARGET_CAPACITY_INDEX_FILE_NAME): with open(TARGET_CAPACITY_INDEX_FILE_NAME, 'w') as file: file.write('0\n0') @@ -58,22 +64,19 @@ def main(): target_capacity = target_capacities[target_capacity_index] # ------ Load Workload File ------- - BUCKET_NAME = "spotlake" - BUCKET_FILE_PATH = "rawdata/aws/workloads" - start_time = datetime.now(timezone.utc) workload = None try: - key = f"{BUCKET_FILE_PATH}/{S3_DIR_NAME}/binpacked_workloads.pkl.gz" + key = f"{WORKLOAD_BASE_PATH}/{S3_DIR_NAME}/binpacked_workloads.pkl.gz" workload = pickle.load(gzip.open(s3.Object(BUCKET_NAME, key).get()["Body"])) - local_workload_path = f"{CURRENT_PATH}/{date}_binpacked_workloads.pkl.gz" + local_workload_path = f"{CURRENT_LOCAL_BASE_PATH}/{date}_binpacked_workloads.pkl.gz" # workload파일을 새로 받았다면 다운로드 if not os.path.exists(local_workload_path): - for filename in os.listdir(f"{CURRENT_PATH}"): + for filename in os.listdir(f"{CURRENT_LOCAL_BASE_PATH}"): if "_binpacked_workloads.pkl.gz" in filename: - os.remove(f"{CURRENT_PATH}/{filename}") + os.remove(f"{CURRENT_LOCAL_BASE_PATH}/{filename}") 
s3_client.download_file(BUCKET_NAME, key, local_workload_path) # workload 파일이 바뀌었으므로 계정 묶음 change @@ -89,10 +92,10 @@ def main(): send_slack_message(message) print(message) is_local = False - for filename in os.listdir(f"{CURRENT_PATH}"): + for filename in os.listdir(f"{CURRENT_LOCAL_BASE_PATH}"): if "_binpacked_workloads.pkl.gz" in filename: - print(f"로컬 워크로드 파일 {CURRENT_PATH}/{filename} 사용") - with open(f"{CURRENT_PATH}/{filename}", 'rb') as f: + print(f"로컬 워크로드 파일 {CURRENT_LOCAL_BASE_PATH}/{filename} 사용") + with open(f"{CURRENT_LOCAL_BASE_PATH}/{filename}", 'rb') as f: workload = pickle.load(gzip.open(f)) is_local = True break @@ -104,11 +107,10 @@ def main(): print(f"계정 시작 인덱스 : {current_credential_index}") # ------ Load Credential File ------ - SPS_QUERY_BUCKET_NAME = "sps-query-data" - CREDENTIAL_FILE_PATH = "credential/credential_3699.csv" + credentials = None try: - csv_content = s3.Object(SPS_QUERY_BUCKET_NAME, CREDENTIAL_FILE_PATH).get()["Body"].read().decode('utf-8') + csv_content = s3.Object(BUCKET_NAME, CREDENTIAL_FILE_PATH).get()["Body"].read().decode('utf-8') credentials = pd.read_csv(StringIO(csv_content)) except Exception as e: send_slack_message(e) @@ -169,10 +171,9 @@ def main(): start_time = datetime.now(timezone.utc) # ------ Save Dataframe File ------ - TEST_BUCKET_NAME = "sps-collector" try: object_name = f"{S3_OBJECT_PREFIX}_sps_{target_capacity}.pkl" - saved_filename = f"{CURRENT_PATH}/" + f"{object_name}" + saved_filename = f"{CURRENT_LOCAL_BASE_PATH}/" + f"{object_name}" try: pickle.dump(sps_df, open(saved_filename, "wb")) gzip.open(f"{saved_filename}.gz", "wb").writelines(open(f"{saved_filename}", "rb")) @@ -180,7 +181,7 @@ def main(): send_slack_message(e) print(e) # memo: change the saving cloud path - s3_client.upload_fileobj(open(f"{saved_filename}.gz", "rb"), TEST_BUCKET_NAME, f"rawdata/aws/sps/{S3_DIR_NAME}/{S3_OBJECT_PREFIX}_sps_{target_capacity}.pkl.gz") + s3_client.upload_fileobj(open(f"{saved_filename}.gz", "rb"), BUCKET_NAME, f"{SPS_BASE_PATH}/{S3_DIR_NAME}/{S3_OBJECT_PREFIX}_sps_{target_capacity}.pkl.gz") os.remove(f"{saved_filename}") os.remove(f"{saved_filename}.gz") except Exception as e: @@ -202,17 +203,14 @@ def main(): # ------ Upload Collecting Data Number at Cloud Logs ------ log_client = session.client('logs', 'us-west-2') # memo: change the log group name - log_group_name = "SPS-Server-Data-Count" - log_stream_name = "aws" - try: message = json.dumps({"MUMBER_ROWS" : sps_df.shape[0]}) timestamp = int(datetime.now(timezone.utc).timestamp() * 1000) try: response = log_client.put_log_events( - logGroupName = log_group_name, - logStreamName = log_stream_name, - logEvents = [ + logGroupName=LOG_GROUP_NAME, + logStreamName=LOG_STREAM_NAME, + logEvents=[ { 'timestamp' : timestamp, 'message' : message From ac193f27f9fb672e5c02022b9e0a6f525baaba97 Mon Sep 17 00:00:00 2001 From: TaeYoon Date: Wed, 12 Feb 2025 13:51:50 +0900 Subject: [PATCH 15/38] Format unnecessary whitespace & Update script permissions --- collector/spot-dataset/aws/collector/server/sps/collect_sps.py | 1 - collector/spot-dataset/aws/collector/server/sps/start_query.sh | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) mode change 100644 => 100755 collector/spot-dataset/aws/collector/server/sps/start_query.sh diff --git a/collector/spot-dataset/aws/collector/server/sps/collect_sps.py b/collector/spot-dataset/aws/collector/server/sps/collect_sps.py index 7e4b1816..a8085e65 100644 --- a/collector/spot-dataset/aws/collector/server/sps/collect_sps.py +++ 
b/collector/spot-dataset/aws/collector/server/sps/collect_sps.py @@ -107,7 +107,6 @@ def main(): print(f"계정 시작 인덱스 : {current_credential_index}") # ------ Load Credential File ------ - credentials = None try: csv_content = s3.Object(BUCKET_NAME, CREDENTIAL_FILE_PATH).get()["Body"].read().decode('utf-8') diff --git a/collector/spot-dataset/aws/collector/server/sps/start_query.sh b/collector/spot-dataset/aws/collector/server/sps/start_query.sh old mode 100644 new mode 100755 index b0c559f0..453ea114 --- a/collector/spot-dataset/aws/collector/server/sps/start_query.sh +++ b/collector/spot-dataset/aws/collector/server/sps/start_query.sh @@ -2,4 +2,4 @@ current_date=$(date -u '+%Y-%m-%dT%H:%M') echo "Cron Job Executed at $current_date" > /home/ubuntu/cron_test.log -python3 /home/ubuntu/spotlake/collector/spot-dataset/aws/collector/server/sps/collect_sps.py --timestamp "$current_date" > /home/ubuntu/cron_output.log 2>&1 +python3 /home/ubuntu/spotlake/collector/spot-dataset/aws/collector/server/sps/collect_sps.py --timestamp "$current_date" > /home/ubuntu/cron_output.log 2>&1 \ No newline at end of file From 2c6c68a1206b6817a652efdf21d88fe15853885d Mon Sep 17 00:00:00 2001 From: TaeYoon Date: Wed, 12 Feb 2025 14:40:09 +0900 Subject: [PATCH 16/38] Refactor time handling and del config file logic --- .../spotlake_post_processing.py | 65 ++++++++++--------- 1 file changed, 34 insertions(+), 31 deletions(-) diff --git a/collector/spot-dataset/aws/collector/serverless/post_processing_data/spotlake_post_processing.py b/collector/spot-dataset/aws/collector/serverless/post_processing_data/spotlake_post_processing.py index c599fab6..9b27bbf5 100644 --- a/collector/spot-dataset/aws/collector/serverless/post_processing_data/spotlake_post_processing.py +++ b/collector/spot-dataset/aws/collector/serverless/post_processing_data/spotlake_post_processing.py @@ -12,38 +12,45 @@ from compare_data import compare, compare_max_instance def main(): - # ------ Set time data ------ start_time = datetime.now(timezone.utc) - timestamp = start_time.replace(minute=((start_time.minute // 10) * 10), second=0) - timedelta(minutes=10) - S3_DIR_NAME = timestamp.strftime('%Y/%m/%d') - S3_OBJECT_PREFIX = timestamp.strftime('%H-%M') - time_value = timestamp.strftime("%Y-%m-%d %H:%M:%S") + # ------ Set Constants ------ + BUCKET_NAME = os.environ.get('S3_BUCKET') + BUCKET_FILE_PATH = os.environ.get('PARENT_PATH') + + TIMESTAMP = start_time.replace(minute=((start_time.minute // 10) * 10), second=0) - timedelta(minutes=10) + S3_DIR_NAME = TIMESTAMP.strftime('%Y/%m/%d') + S3_OBJECT_PREFIX = TIMESTAMP.strftime('%H-%M') + + SPS_FILE_PREFIX = f"{BUCKET_FILE_PATH}/sps/{S3_DIR_NAME}/{S3_OBJECT_PREFIX}_sps_{target_capacity}.pkl.gz" + SPOTIF_FILE_NAME = f"{BUCKET_FILE_PATH}/spot_if/{S3_DIR_NAME}/{S3_OBJECT_PREFIX}_spot_if.pkl.gz" + ONDEMAND_PRICE_FILE_NAME = f"{BUCKET_FILE_PATH}/ondemand_price/{S3_DIR_NAME}/{S3_OBJECT_PREFIX}_ondemand_price.pkl.gz" + SPOTPRICE_FILE_NAME = f"{BUCKET_FILE_PATH}/spot_price/{S3_DIR_NAME}/{S3_OBJECT_PREFIX}_spot_price.pkl.gz" + + # ------ Set time data ------ + time_value = TIMESTAMP.strftime("%Y-%m-%d %H:%M:%S") # ------ Create Boto3 Session ------ s3 = boto3.resource("s3") + s3_client = boto3.client('s3') - BUCKET_NAME = os.environ.get('S3_BUCKET') - BUCKET_FILE_PATH = os.environ.get('PARENT_PATH') target_capacities = [1, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50] - # ------ Load Data from PKL File in S3 ------ - config_path = "config.txt" - text = s3.Object(BUCKET_NAME, 
f"config/{config_path}").get()["Body"].read().decode('utf-8').split("\n") - - target_capacity = int(text[0].strip()) + # ------ Find Sps File in S3 ------ + sps_file_list = s3_client.list_objects_v2(Bucket=BUCKET_NAME, Prefix=SPS_FILE_PREFIX) + sps_files = [] + for obj in sps_file_list['Contents']: + if obj['Key'].startswith(f"{SPS_FILE_PREFIX}/{S3_OBJECT_PREFIX}"): + sps_files.append(obj['Key']) - keys = [line.format( - BUCKET_FILE_PATH=BUCKET_FILE_PATH, - S3_DIR_NAME=S3_DIR_NAME, - S3_OBJECT_PREFIX=S3_OBJECT_PREFIX, - target_capacity=target_capacities[target_capacity] - ) for line in text] + sps_file_name = sps_files[0] + target_capacity = int(sps_file_name.split('/')[-1].split('_')[2].split('.')[0]) + # ------ Load Data from PKL File in S3 ------ try: - sps_df = pickle.load(gzip.open(s3.Object(BUCKET_NAME, keys[1].strip()).get()["Body"])) - spotinfo_df = pickle.load(gzip.open(s3.Object(BUCKET_NAME, keys[2].strip()).get()["Body"])) - ondemand_price_df = pickle.load(gzip.open(s3.Object(BUCKET_NAME, keys[3].strip()).get()["Body"])) - spot_price_df = pickle.load(gzip.open(s3.Object(BUCKET_NAME, keys[4].strip()).get()["Body"])) + sps_df = pickle.load(gzip.open(s3.Object(BUCKET_NAME, sps_file_name.strip()).get()["Body"])) + spotinfo_df = pickle.load(gzip.open(s3.Object(BUCKET_NAME, SPOTIF_FILE_NAME.strip()).get()["Body"])) + ondemand_price_df = pickle.load(gzip.open(s3.Object(BUCKET_NAME, ONDEMAND_PRICE_FILE_NAME.strip()).get()["Body"])) + spot_price_df = pickle.load(gzip.open(s3.Object(BUCKET_NAME, SPOTPRICE_FILE_NAME.strip()).get()["Body"])) except Exception as e: send_slack_message(e) print(e) @@ -107,9 +114,8 @@ def main(): # If system is first time uploading data, make a new one and upload it to TSDB try: update_latest(merge_df) - save_raw(merge_df, timestamp) - upload_timestream(merge_df, timestamp) - update_config(config_path, text, target_capacity, target_capacities) + save_raw(merge_df, TIMESTAMP) + upload_timestream(merge_df, TIMESTAMP) except Exception as e: send_slack_message(e) print(e) @@ -127,7 +133,7 @@ def main(): # ------ Upload Merge DF to s3 Bucket ------ try: update_latest(current_df) - save_raw(current_df, timestamp) + save_raw(current_df, TIMESTAMP) except Exception as e: send_slack_message(e) print(e) @@ -143,8 +149,8 @@ def main(): start_time = datetime.now(timezone.utc) # ------ Upload TSDB ------ try: - upload_timestream(changed_df, timestamp) - upload_timestream(removed_df, timestamp) + upload_timestream(changed_df, TIMESTAMP) + upload_timestream(removed_df, TIMESTAMP) except Exception as e: send_slack_message(e) print(e) @@ -161,9 +167,6 @@ def main(): end_time = datetime.now(timezone.utc) print(f"Uploading time of query selector data is {(end_time - start_time).total_seconds() * 1000 / 60000:.2f} min") - # ------ Write Target Capacity Value in Text File ------ - update_config(config_path, text, target_capacity, target_capacities) - def lambda_handler(event, context): start_time = datetime.now(timezone.utc) main() From bcc8f25f03eff632b04da155715dcc2dccdd3862 Mon Sep 17 00:00:00 2001 From: TaeYoon Date: Wed, 12 Feb 2025 15:57:20 +0900 Subject: [PATCH 17/38] Refactor file saving and uploading logic in collect_sps.py --- .../aws/collector/server/sps/collect_sps.py | 28 +++++++++++-------- 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/collector/spot-dataset/aws/collector/server/sps/collect_sps.py b/collector/spot-dataset/aws/collector/server/sps/collect_sps.py index a8085e65..47cac5e1 100644 --- 
a/collector/spot-dataset/aws/collector/server/sps/collect_sps.py +++ b/collector/spot-dataset/aws/collector/server/sps/collect_sps.py @@ -172,20 +172,24 @@ def main(): # ------ Save Dataframe File ------ try: object_name = f"{S3_OBJECT_PREFIX}_sps_{target_capacity}.pkl" - saved_filename = f"{CURRENT_LOCAL_BASE_PATH}/" + f"{object_name}" - try: - pickle.dump(sps_df, open(saved_filename, "wb")) - gzip.open(f"{saved_filename}.gz", "wb").writelines(open(f"{saved_filename}", "rb")) - except Exception as e: - send_slack_message(e) - print(e) - # memo: change the saving cloud path - s3_client.upload_fileobj(open(f"{saved_filename}.gz", "rb"), BUCKET_NAME, f"{SPS_BASE_PATH}/{S3_DIR_NAME}/{S3_OBJECT_PREFIX}_sps_{target_capacity}.pkl.gz") - os.remove(f"{saved_filename}") - os.remove(f"{saved_filename}.gz") + saved_filename = f"{CURRENT_LOCAL_BASE_PATH}/{object_name}" + gz_filename = f"{saved_filename}.gz" + + with open(saved_filename, "wb") as f: + pickle.dump(sps_df, f) + + with open(saved_filename, "rb") as f_in, gzip.open(gz_filename, "wb") as f_out: + f_out.writelines(f_in) + + with open(gz_filename, "rb") as f: + s3_client.upload_fileobj(f, BUCKET_NAME, f"{SPS_BASE_PATH}/{S3_DIR_NAME}/{S3_OBJECT_PREFIX}_sps_{target_capacity}.pkl.gz") + + os.remove(saved_filename) + os.remove(gz_filename) + except Exception as e: send_slack_message(e) - print(e) + print(f"파일 저장 및 업로드 중 오류 발생: {e}") raise e end_time = datetime.now(timezone.utc) print(f"Saving time of DF File is {(end_time - start_time).total_seconds() * 1000 / 60000:.2f} min") From 7a17c0eb336608d19bbb0898e21cb8cf0511451d Mon Sep 17 00:00:00 2001 From: TaeYoon Date: Wed, 12 Feb 2025 18:20:38 +0900 Subject: [PATCH 18/38] Refactor readability and error handling --- .../spotlake_post_processing.py | 200 ++++++++---------- 1 file changed, 92 insertions(+), 108 deletions(-) diff --git a/collector/spot-dataset/aws/collector/serverless/post_processing_data/spotlake_post_processing.py b/collector/spot-dataset/aws/collector/serverless/post_processing_data/spotlake_post_processing.py index 9b27bbf5..6559c4b7 100644 --- a/collector/spot-dataset/aws/collector/serverless/post_processing_data/spotlake_post_processing.py +++ b/collector/spot-dataset/aws/collector/serverless/post_processing_data/spotlake_post_processing.py @@ -12,7 +12,9 @@ from compare_data import compare, compare_max_instance def main(): + print("Start Lambda Function") start_time = datetime.now(timezone.utc) + # ------ Set Constants ------ BUCKET_NAME = os.environ.get('S3_BUCKET') BUCKET_FILE_PATH = os.environ.get('PARENT_PATH') @@ -21,151 +23,133 @@ def main(): S3_DIR_NAME = TIMESTAMP.strftime('%Y/%m/%d') S3_OBJECT_PREFIX = TIMESTAMP.strftime('%H-%M') - SPS_FILE_PREFIX = f"{BUCKET_FILE_PATH}/sps/{S3_DIR_NAME}/{S3_OBJECT_PREFIX}_sps_{target_capacity}.pkl.gz" + SPS_FILE_PREFIX = f"{BUCKET_FILE_PATH}/sps/{S3_DIR_NAME}" SPOTIF_FILE_NAME = f"{BUCKET_FILE_PATH}/spot_if/{S3_DIR_NAME}/{S3_OBJECT_PREFIX}_spot_if.pkl.gz" - ONDEMAND_PRICE_FILE_NAME = f"{BUCKET_FILE_PATH}/ondemand_price/{S3_DIR_NAME}/{S3_OBJECT_PREFIX}_ondemand_price.pkl.gz" + ONDEMAND_PRICE_FILE_NAME = f"{BUCKET_FILE_PATH}/ondemand_price/{S3_DIR_NAME}/ondemand_price.pkl.gz" SPOTPRICE_FILE_NAME = f"{BUCKET_FILE_PATH}/spot_price/{S3_DIR_NAME}/{S3_OBJECT_PREFIX}_spot_price.pkl.gz" # ------ Set time data ------ time_value = TIMESTAMP.strftime("%Y-%m-%d %H:%M:%S") + + try: + # ------ Create Boto3 Session ------ + s3 = boto3.resource("s3") + s3_client = boto3.client('s3') - # ------ Create Boto3 Session ------ - s3 = boto3.resource("s3") - 
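A possible further simplification of the save/upload step refactored above: pickling straight into a gzip stream avoids the intermediate uncompressed file. Sketch only, reusing the sps_df, s3_client and path names from this hunk:

    with gzip.open(gz_filename, "wb") as f_out:
        pickle.dump(sps_df, f_out)  # serialize and compress in one pass
    with open(gz_filename, "rb") as f_in:
        s3_client.upload_fileobj(f_in, BUCKET_NAME,
                                 f"{SPS_BASE_PATH}/{S3_DIR_NAME}/{object_name}.gz")
    os.remove(gz_filename)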
s3_client = boto3.client('s3') - - target_capacities = [1, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50] + target_capacities = [1, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50] - # ------ Find Sps File in S3 ------ - sps_file_list = s3_client.list_objects_v2(Bucket=BUCKET_NAME, Prefix=SPS_FILE_PREFIX) - sps_files = [] - for obj in sps_file_list['Contents']: - if obj['Key'].startswith(f"{SPS_FILE_PREFIX}/{S3_OBJECT_PREFIX}"): - sps_files.append(obj['Key']) + # ------ Find Sps File in S3 ------ + sps_file_list = s3_client.list_objects_v2(Bucket=BUCKET_NAME, Prefix=SPS_FILE_PREFIX) + sps_files = [] + for obj in sps_file_list['Contents']: + if obj['Key'].startswith(f"{SPS_FILE_PREFIX}/{S3_OBJECT_PREFIX}"): + sps_files.append(obj['Key']) - sps_file_name = sps_files[0] - target_capacity = int(sps_file_name.split('/')[-1].split('_')[2].split('.')[0]) + sps_file_name = sps_files[0] + print(sps_file_name) + target_capacity = target_capacities.index(int(sps_file_name.split('/')[-1].split('_')[2].split('.')[0])) - # ------ Load Data from PKL File in S3 ------ - try: - sps_df = pickle.load(gzip.open(s3.Object(BUCKET_NAME, sps_file_name.strip()).get()["Body"])) + # ------ Load Data from PKL File in S3 ------ + sps_df = pickle.load(gzip.open(s3.Object(BUCKET_NAME, sps_file_name).get()["Body"])) spotinfo_df = pickle.load(gzip.open(s3.Object(BUCKET_NAME, SPOTIF_FILE_NAME.strip()).get()["Body"])) ondemand_price_df = pickle.load(gzip.open(s3.Object(BUCKET_NAME, ONDEMAND_PRICE_FILE_NAME.strip()).get()["Body"])) spot_price_df = pickle.load(gzip.open(s3.Object(BUCKET_NAME, SPOTPRICE_FILE_NAME.strip()).get()["Body"])) - except Exception as e: - send_slack_message(e) - print(e) - - # ------ Create a DF by Selecting Only The Columns Required ------ - try: + + # ------ Create a DF by Selecting Only The Columns Required ------ sps_df = sps_df[['InstanceType', 'Region', 'AZ', 'SPS', 'T3', 'T2']] spotinfo_df = spotinfo_df[['InstanceType', 'Region', 'IF']] ondemand_price_df = ondemand_price_df[['InstanceType', 'Region', 'OndemandPrice']] spot_price_df = spot_price_df[['InstanceType', 'AZ', 'SpotPrice']] - except Exception as e: - send_slack_message(e) - print(e) - - # ------ Formatting Data ------ - spot_price_df['SpotPrice'] = spot_price_df['SpotPrice'].astype('float').round(5) - ondemand_price_df['OndemandPrice'] = ondemand_price_df['OndemandPrice'].astype('float').round(5) - - # ------ Need to Change to Outer Join ------ - merge_df = pd.merge(sps_df, spotinfo_df, how="outer") - merge_df = pd.merge(merge_df, ondemand_price_df, how="outer") - merge_df = pd.merge(merge_df, spot_price_df, how="outer") - - merge_df['Savings'] = 100.0 - (merge_df['SpotPrice'] * 100 / merge_df['OndemandPrice']) - merge_df['Savings'] = merge_df['Savings'].fillna(-1) - merge_df['SPS'] = merge_df['SPS'].fillna(-1) - merge_df['SpotPrice'] = merge_df['SpotPrice'].fillna(-1) - merge_df['OndemandPrice'] = merge_df['OndemandPrice'].fillna(-1) - merge_df['IF'] = merge_df['IF'].fillna(-1) - - merge_df['Savings'] = merge_df['Savings'].astype('int') - merge_df['SPS'] = merge_df['SPS'].astype('int') - merge_df['T3'] = merge_df['T3'].fillna(0).astype('int') - merge_df['T2'] = merge_df['T2'].fillna(0).astype('int') - - merge_df = merge_df.drop(merge_df[(merge_df['AZ'].isna()) | (merge_df['Region'].isna()) | (merge_df['InstanceType'].isna())].index) - - merge_df.reset_index(drop=True, inplace=True) - merge_df['Time'] = time_value - - end_time = datetime.now(timezone.utc) - print(f"Merging time is {(end_time - start_time).total_seconds() * 1000 / 60000:.2f} min") - - # 
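For readers checking the Savings arithmetic kept above: it is the percentage discount of the spot price against the on-demand price, truncated to an integer. A small worked example with hypothetical prices:

    spot_price, ondemand_price = 0.0958, 0.192           # hypothetical USD/hour values
    savings = 100.0 - (spot_price * 100 / ondemand_price)
    print(int(savings))                                   # 50, matching the integer cast in the hunk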
------ Check The Previous DF File in S3 and Local ------ - previous_df = None - start_time = datetime.now(timezone.utc) - filename = 'latest_aws.json' - LATEST_PATH = f'{BUCKET_FILE_PATH}/latest_data/{filename}' - try: - previous_df = pd.DataFrame(json.load(s3.Object(BUCKET_NAME, LATEST_PATH).get()['Body'])) - # Verify that the data is in the old format - columns_to_check = ["T3", "T2"] - existing_columns = [col for col in columns_to_check if col in previous_df.columns] + # ------ Formatting Data ------ + spot_price_df['SpotPrice'] = spot_price_df['SpotPrice'].astype('float').round(5) + ondemand_price_df['OndemandPrice'] = ondemand_price_df['OndemandPrice'].astype('float').round(5) + + # ------ Need to Change to Outer Join ------ + merge_df = pd.merge(sps_df, spotinfo_df, how="outer") + merge_df = pd.merge(merge_df, ondemand_price_df, how="outer") + merge_df = pd.merge(merge_df, spot_price_df, how="outer") + + merge_df['Savings'] = 100.0 - (merge_df['SpotPrice'] * 100 / merge_df['OndemandPrice']) + merge_df['Savings'] = merge_df['Savings'].fillna(-1) + merge_df['SPS'] = merge_df['SPS'].fillna(-1) + merge_df['SpotPrice'] = merge_df['SpotPrice'].fillna(-1) + merge_df['OndemandPrice'] = merge_df['OndemandPrice'].fillna(-1) + merge_df['IF'] = merge_df['IF'].fillna(-1) + + merge_df['Savings'] = merge_df['Savings'].astype('int') + merge_df['SPS'] = merge_df['SPS'].astype('int') + merge_df['T3'] = merge_df['T3'].fillna(0).astype('int') + merge_df['T2'] = merge_df['T2'].fillna(0).astype('int') + + merge_df = merge_df.drop(merge_df[(merge_df['AZ'].isna()) | (merge_df['Region'].isna()) | (merge_df['InstanceType'].isna())].index) - if len(existing_columns) == 0: - raise - else: - previous_df = previous_df.drop(columns=['Id']) - except: - # If system is first time uploading data, make a new one and upload it to TSDB + merge_df.reset_index(drop=True, inplace=True) + merge_df['Time'] = time_value + + end_time = datetime.now(timezone.utc) + print(f"Merging time is {(end_time - start_time).total_seconds() * 1000 / 60000:.2f} min") + + # ------ Check The Previous DF File in S3 and Local ------ + previous_df = None + start_time = datetime.now(timezone.utc) + filename = 'latest_aws.json' + LATEST_PATH = f'latest_data/{filename}' try: + previous_df = pd.DataFrame(json.load(s3.Object(BUCKET_NAME, LATEST_PATH).get()['Body'])) + # Verify that the data is in the old format + columns_to_check = ["T3", "T2"] + existing_columns = [col for col in columns_to_check if col in previous_df.columns] + + if len(existing_columns) == 0: + raise + else: + previous_df = previous_df.drop(columns=['Id']) + except: + # If system is first time uploading data, make a new one and upload it to TSDB update_latest(merge_df) save_raw(merge_df, TIMESTAMP) upload_timestream(merge_df, TIMESTAMP) - except Exception as e: - send_slack_message(e) - print(e) + end_time = datetime.now(timezone.utc) + print(f"Checking time of previous json file is {(end_time - start_time).total_seconds() * 1000 / 60000:.2f} min") + return print("Can't load the previous df from s3 bucket or First run since changing the collector") + end_time = datetime.now(timezone.utc) print(f"Checking time of previous json file is {(end_time - start_time).total_seconds() * 1000 / 60000:.2f} min") - return print("Can't load the previous df from s3 bucket or First run since changing the collector") - - end_time = datetime.now(timezone.utc) - print(f"Checking time of previous json file is {(end_time - start_time).total_seconds() * 1000 / 60000:.2f} min") + + start_time = 
datetime.now(timezone.utc) - start_time = datetime.now(timezone.utc) - # ------ Compare T3 and T2 Data ------ - current_df = compare_max_instance(merge_df, previous_df, target_capacities, target_capacity) + # ------ Compare T3 and T2 Data ------ + current_df = compare_max_instance(merge_df, previous_df, target_capacities, target_capacity) - # ------ Upload Merge DF to s3 Bucket ------ - try: + # ------ Upload Merge DF to s3 Bucket ------ update_latest(current_df) save_raw(current_df, TIMESTAMP) - except Exception as e: - send_slack_message(e) - print(e) - - # ------ Compare All Data ------ - workload_cols = ['InstanceType', 'Region', 'AZ'] - feature_cols = ['SPS', 'T3', 'T2', 'IF', 'SpotPrice', 'OndemandPrice'] + + # ------ Compare All Data ------ + workload_cols = ['InstanceType', 'Region', 'AZ'] + feature_cols = ['SPS', 'T3', 'T2', 'IF', 'SpotPrice', 'OndemandPrice'] - changed_df, removed_df = compare(previous_df, current_df, workload_cols, feature_cols) # compare previous_df and current_df to extract changed rows) - end_time = datetime.now(timezone.utc) - print(f"Compare time is {(end_time - start_time).total_seconds() * 1000 / 60000:.2f} min") + changed_df, removed_df = compare(previous_df, current_df, workload_cols, feature_cols) # compare previous_df and current_df to extract changed rows) + end_time = datetime.now(timezone.utc) + print(f"Compare time is {(end_time - start_time).total_seconds() * 1000 / 60000:.2f} min") - start_time = datetime.now(timezone.utc) - # ------ Upload TSDB ------ - try: + # ------ Upload TSDB ------ + start_time = datetime.now(timezone.utc) upload_timestream(changed_df, TIMESTAMP) upload_timestream(removed_df, TIMESTAMP) - except Exception as e: - send_slack_message(e) - print(e) - end_time = datetime.now(timezone.utc) - print(f"Uploading time to TSDB is {(end_time - start_time).total_seconds() * 1000 / 60000:.2f} min") + end_time = datetime.now(timezone.utc) + print(f"Uploading time to TSDB is {(end_time - start_time).total_seconds() * 1000 / 60000:.2f} min") - start_time = datetime.now(timezone.utc) - # ------ Upload Spotlake Query Selector to S3 ------ - try: + # ------ Upload Spotlake Query Selector to S3 ------ + start_time = datetime.now(timezone.utc) update_query_selector(changed_df) + end_time = datetime.now(timezone.utc) + print(f"Uploading time of query selector data is {(end_time - start_time).total_seconds() * 1000 / 60000:.2f} min") except Exception as e: send_slack_message(e) print(e) - end_time = datetime.now(timezone.utc) - print(f"Uploading time of query selector data is {(end_time - start_time).total_seconds() * 1000 / 60000:.2f} min") + raise def lambda_handler(event, context): start_time = datetime.now(timezone.utc) From ca4f237abc863a042a49f0c304983fd13923914a Mon Sep 17 00:00:00 2001 From: TaeYoon Date: Wed, 12 Feb 2025 18:27:39 +0900 Subject: [PATCH 19/38] Refactor compare_max_instance function to simplify parameters and logic --- .../serverless/post_processing_data/compare_data.py | 6 +++--- .../post_processing_data/spotlake_post_processing.py | 6 ++---- 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/collector/spot-dataset/aws/collector/serverless/post_processing_data/compare_data.py b/collector/spot-dataset/aws/collector/serverless/post_processing_data/compare_data.py index ada6ca54..0d42d1cd 100644 --- a/collector/spot-dataset/aws/collector/serverless/post_processing_data/compare_data.py +++ b/collector/spot-dataset/aws/collector/serverless/post_processing_data/compare_data.py @@ -84,7 +84,7 @@ def 
compare(previous_df, current_df, workload_cols, feature_cols): return changed_df, removed_df # ------ Compare the values of T3 and T2 ------ -def compare_max_instance(merge_df, previous_df, target_capacities, target_capacity): +def compare_max_instance(merge_df, previous_df, target_capacity): condition = (previous_df['InstanceType'] == merge_df['InstanceType']) & (previous_df['AZ'] == merge_df['AZ']) current_df = merge_df @@ -95,9 +95,9 @@ def compare_max_instance(merge_df, previous_df, target_capacities, target_capaci previous_df.loc[condition, 'T2'], merge_df.loc[condition, 'T2'] ) - current_df.loc[condition & (merge_df['T3'] == target_capacities[target_capacity]), 'T2'] = target_capacities[target_capacity] + current_df.loc[condition & (merge_df['T3'] == target_capacity), 'T2'] = target_capacity - if target_capacity == 0: + if target_capacity == 1: current_df.loc[condition & (merge_df['T3'] == 0), 'T3'] = 0 current_df.loc[condition & (merge_df['T2'] == 0), 'T2'] = 0 else: diff --git a/collector/spot-dataset/aws/collector/serverless/post_processing_data/spotlake_post_processing.py b/collector/spot-dataset/aws/collector/serverless/post_processing_data/spotlake_post_processing.py index 6559c4b7..20a2664b 100644 --- a/collector/spot-dataset/aws/collector/serverless/post_processing_data/spotlake_post_processing.py +++ b/collector/spot-dataset/aws/collector/serverless/post_processing_data/spotlake_post_processing.py @@ -36,8 +36,6 @@ def main(): s3 = boto3.resource("s3") s3_client = boto3.client('s3') - target_capacities = [1, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50] - # ------ Find Sps File in S3 ------ sps_file_list = s3_client.list_objects_v2(Bucket=BUCKET_NAME, Prefix=SPS_FILE_PREFIX) sps_files = [] @@ -47,7 +45,7 @@ def main(): sps_file_name = sps_files[0] print(sps_file_name) - target_capacity = target_capacities.index(int(sps_file_name.split('/')[-1].split('_')[2].split('.')[0])) + target_capacity = int(sps_file_name.split('/')[-1].split('_')[2].split('.')[0]) # ------ Load Data from PKL File in S3 ------ sps_df = pickle.load(gzip.open(s3.Object(BUCKET_NAME, sps_file_name).get()["Body"])) @@ -120,7 +118,7 @@ def main(): start_time = datetime.now(timezone.utc) # ------ Compare T3 and T2 Data ------ - current_df = compare_max_instance(merge_df, previous_df, target_capacities, target_capacity) + current_df = compare_max_instance(merge_df, previous_df, target_capacity) # ------ Upload Merge DF to s3 Bucket ------ update_latest(current_df) From 70acb2f9f5a977df93ba13e40809d5928f221082 Mon Sep 17 00:00:00 2001 From: TaeYoon Date: Thu, 13 Feb 2025 00:54:06 +0900 Subject: [PATCH 20/38] Add migration code --- .../aws/collector/server/sps/collect_sps.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/collector/spot-dataset/aws/collector/server/sps/collect_sps.py b/collector/spot-dataset/aws/collector/server/sps/collect_sps.py index 47cac5e1..edb4e16e 100644 --- a/collector/spot-dataset/aws/collector/server/sps/collect_sps.py +++ b/collector/spot-dataset/aws/collector/server/sps/collect_sps.py @@ -14,8 +14,18 @@ def main(): # ------ Setting Constants ------ - BUCKET_NAME = "sps-collector" # test CURRENT_LOCAL_BASE_PATH = "/home/ubuntu/spotlake/collector/spot-dataset/aws/collector/server/sps" + + current_time = datetime.now(timezone.utc) + BUCKET_NAME = None + if current_time >= datetime(2025, 2, 13, tzinfo=timezone.utc): + BUCKET_NAME = "spotlake" + os.remove(f"{CURRENT_LOCAL_BASE_PATH}/credential_index.txt") + 
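For reference, the simplified parsing above now yields the literal capacity value rather than an index into target_capacities, which is why the special case in compare_max_instance becomes target_capacity == 1. A worked example with a hypothetical object key:

    key = "rawdata/aws/sps/2025/02/13/10-40_sps_25.pkl.gz"      # hypothetical key layout
    target_capacity = int(key.split('/')[-1].split('_')[2].split('.')[0])
    assert target_capacity == 25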
os.remove(f"{CURRENT_LOCAL_BASE_PATH}/target_capacity_index.txt") + send_slack_message("신규 SPS Collector 주 저장 경로 전환") + else: + BUCKET_NAME = "sps-collector" # test + WORKLOAD_BASE_PATH = "rawdata/aws/workloads" SPS_BASE_PATH = "rawdata/aws/sps" CREDENTIAL_FILE_PATH = "credential/credential_3699.csv" From f952ab7febf885c7692d4f884c8a80824f91def1 Mon Sep 17 00:00:00 2001 From: TaeYoon Date: Thu, 13 Feb 2025 08:14:36 +0900 Subject: [PATCH 21/38] Fix hot fix --- .../spot-dataset/aws/collector/server/sps/collect_sps.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/collector/spot-dataset/aws/collector/server/sps/collect_sps.py b/collector/spot-dataset/aws/collector/server/sps/collect_sps.py index edb4e16e..e1171fa2 100644 --- a/collector/spot-dataset/aws/collector/server/sps/collect_sps.py +++ b/collector/spot-dataset/aws/collector/server/sps/collect_sps.py @@ -20,9 +20,10 @@ def main(): BUCKET_NAME = None if current_time >= datetime(2025, 2, 13, tzinfo=timezone.utc): BUCKET_NAME = "spotlake" - os.remove(f"{CURRENT_LOCAL_BASE_PATH}/credential_index.txt") - os.remove(f"{CURRENT_LOCAL_BASE_PATH}/target_capacity_index.txt") - send_slack_message("신규 SPS Collector 주 저장 경로 전환") + if current_time.hour == 0 and current_time.minute <= 9: + os.remove(f"{CURRENT_LOCAL_BASE_PATH}/credential_index.txt") + os.remove(f"{CURRENT_LOCAL_BASE_PATH}/target_capacity_index.txt") + send_slack_message("신규 SPS Collector 주 저장 및 업로드 경로 전환") else: BUCKET_NAME = "sps-collector" # test From 0dcc682c97594b77602d9cde0b4ba896ffed13e6 Mon Sep 17 00:00:00 2001 From: TaeYoon Date: Thu, 13 Feb 2025 12:55:05 +0900 Subject: [PATCH 22/38] Fix id --- .../collector/serverless/post_processing_data/upload_data.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/collector/spot-dataset/aws/collector/serverless/post_processing_data/upload_data.py b/collector/spot-dataset/aws/collector/serverless/post_processing_data/upload_data.py index 3dc45451..7c5b3519 100644 --- a/collector/spot-dataset/aws/collector/serverless/post_processing_data/upload_data.py +++ b/collector/spot-dataset/aws/collector/serverless/post_processing_data/upload_data.py @@ -70,7 +70,7 @@ def update_latest(data): filename = 'latest_aws.json' LATEST_PATH = f'latest_data/{filename}' - data['Id'] = data.index+1 + data['id'] = data.index+1 result = data.to_json(f"/tmp/{filename}", orient="records") s3 = boto3.resource('s3') @@ -81,7 +81,7 @@ def update_latest(data): object_acl = s3.ObjectAcl(BUCKET_NAME, LATEST_PATH) response = object_acl.put(ACL='public-read') - data.drop(['Id'], axis=1, inplace=True) + data.drop(['id'], axis=1, inplace=True) def update_query_selector(changed_df): From 1e09e1f968f0441120648d17ef17bbc674a51bfe Mon Sep 17 00:00:00 2001 From: TaeYoon Date: Thu, 13 Feb 2025 12:56:28 +0900 Subject: [PATCH 23/38] Fix AWS table name to use environment variable --- .../collector/serverless/post_processing_data/upload_data.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/collector/spot-dataset/aws/collector/serverless/post_processing_data/upload_data.py b/collector/spot-dataset/aws/collector/serverless/post_processing_data/upload_data.py index 7c5b3519..20e641e2 100644 --- a/collector/spot-dataset/aws/collector/serverless/post_processing_data/upload_data.py +++ b/collector/spot-dataset/aws/collector/serverless/post_processing_data/upload_data.py @@ -12,7 +12,7 @@ BUCKET_FILE_PATH = os.environ.get('PARENT_PATH') DATABASE_NAME = os.environ.get('DATABASE') -AWS_TABLE_NAME = "aws" +AWS_TABLE_NAME = 
os.environ.get('DATABASE_TABLE') write_client = boto3.client('timestream-write', config=Config(read_timeout=20, max_pool_connections=5000, retries={'max_attempts':10})) # Submit Batch To Timestream From 84fcea4415ac89c45bf0c50a6690dfe2da66e1d6 Mon Sep 17 00:00:00 2001 From: TaeYoon Date: Thu, 13 Feb 2025 13:13:13 +0900 Subject: [PATCH 24/38] Add CI/CD --- .github/workflows/aws-lambda-sync.yml | 57 +++++++++++++++++++++++++++ 1 file changed, 57 insertions(+) create mode 100644 .github/workflows/aws-lambda-sync.yml diff --git a/.github/workflows/aws-lambda-sync.yml b/.github/workflows/aws-lambda-sync.yml new file mode 100644 index 00000000..a551e54f --- /dev/null +++ b/.github/workflows/aws-lambda-sync.yml @@ -0,0 +1,57 @@ +name: deploy AWS files to lambda +on: + push: + branches: + - "main" + paths: + - "collector/spot-dataset/aws/serverless/**" + - "utility/slack_msg_sender.py" + workflow_dispatch: + +env: + AWS_ACCESS_KEY_ID: ${{ secrets.SPOTRANK_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.SPOTRANK_SECRET_ACCESS_KEY }} + AWS_DEFAULT_REGION: "us-west-2" + +jobs: + deploy_source: + name: deploy lambda from source + runs-on: ubuntu-latest + steps: + - name: Checkout source code + uses: actions/checkout@v4 + + - name: Zip lambda function code + run: | + set -e + + declare -A LAMBDA_PATHS=( + ["ondemand_price_collector_lambda.zip"]="collector/spot-dataset/aws/serverless/ondemand_price" + ["spotlake_post_processing_lambda.zip"]="collector/spot-dataset/aws/serverless/post_processing_data" + ["spot_if_collector_lambda.zip"]="collector/spot-dataset/aws/serverless/spot_if" + ["spot_price_collector_lambda.zip"]="collector/spot-dataset/aws/serverless/spot_price" + ["binpacked_workloads_generator_lambda.zip"]="collector/spot-dataset/aws/serverless/workload" + ) + + for zip_name in "${!LAMBDA_PATHS[@]}"; do + echo "Creating ZIP: $zip_name" + zip -r "$zip_name" utility/slack_msg_sender.py + (cd "${LAMBDA_PATHS[$zip_name]}" && zip -r ../../../../"$zip_name" .) 
+ done + + - name: Deploy to AWS Lambda + run: | + set -e + + declare -A LAMBDA_FUNCTIONS=( + ["ondemand-price-collector"]="ondemand_price_collector_lambda.zip" + ["spotlake-post-processing"]="spotlake_post_processing_lambda.zip" + ["spot-if-collector"]="spot_if_collector_lambda.zip" + ["spot-price-collector"]="spot_price_collector_lambda.zip" + ["binpacked-workloads-generator"]="binpacked_workloads_generator_lambda.zip" + ) + + for function_name in "${!LAMBDA_FUNCTIONS[@]}"; do + echo "Deploying: $function_name" + aws lambda update-function-code --function-name "$function_name" --zip-file "fileb://${LAMBDA_FUNCTIONS[$function_name]}" + done From e30c7b9e21e8c1e0bae3a5b3ef9f1eb9f420ade6 Mon Sep 17 00:00:00 2001 From: TaeYoon Date: Thu, 13 Feb 2025 13:19:20 +0900 Subject: [PATCH 25/38] Fix timestamp --- .../post_processing_data/spotlake_post_processing.py | 4 ++-- .../collector/serverless/post_processing_data/upload_data.py | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/collector/spot-dataset/aws/collector/serverless/post_processing_data/spotlake_post_processing.py b/collector/spot-dataset/aws/collector/serverless/post_processing_data/spotlake_post_processing.py index 20a2664b..f75bb6cd 100644 --- a/collector/spot-dataset/aws/collector/serverless/post_processing_data/spotlake_post_processing.py +++ b/collector/spot-dataset/aws/collector/serverless/post_processing_data/spotlake_post_processing.py @@ -105,7 +105,7 @@ def main(): previous_df = previous_df.drop(columns=['Id']) except: # If system is first time uploading data, make a new one and upload it to TSDB - update_latest(merge_df) + update_latest(merge_df, TIMESTAMP) save_raw(merge_df, TIMESTAMP) upload_timestream(merge_df, TIMESTAMP) end_time = datetime.now(timezone.utc) @@ -121,7 +121,7 @@ def main(): current_df = compare_max_instance(merge_df, previous_df, target_capacity) # ------ Upload Merge DF to s3 Bucket ------ - update_latest(current_df) + update_latest(current_df, TIMESTAMP) save_raw(current_df, TIMESTAMP) # ------ Compare All Data ------ diff --git a/collector/spot-dataset/aws/collector/serverless/post_processing_data/upload_data.py b/collector/spot-dataset/aws/collector/serverless/post_processing_data/upload_data.py index 20e641e2..aafb77e8 100644 --- a/collector/spot-dataset/aws/collector/serverless/post_processing_data/upload_data.py +++ b/collector/spot-dataset/aws/collector/serverless/post_processing_data/upload_data.py @@ -65,12 +65,13 @@ def upload_timestream(data, timestamp): submit_batch(records, counter, 0) -def update_latest(data): +def update_latest(data, timestamp): # Upload file to use as previous collection data filename = 'latest_aws.json' LATEST_PATH = f'latest_data/{filename}' data['id'] = data.index+1 + data['time'] = timestamp.strftime("%Y-%m-%d %H:%M:%S") result = data.to_json(f"/tmp/{filename}", orient="records") s3 = boto3.resource('s3') From 9ca958945a0b6f24c02b1457c13ca8de192c82b3 Mon Sep 17 00:00:00 2001 From: TaeYoon Date: Thu, 13 Feb 2025 13:20:15 +0900 Subject: [PATCH 26/38] Change AWS Lambda function --- .../{spotlake_post_processing.py => lambda_function.py} | 0 .../serverless/spot_if/{collect_spot_if.py => lambda_function.py} | 0 .../spot_price/{collect_spot_price.py => lambda_function.py} | 0 .../serverless/workload/{make_workloads.py => lambda_function.py} | 0 4 files changed, 0 insertions(+), 0 deletions(-) rename collector/spot-dataset/aws/collector/serverless/post_processing_data/{spotlake_post_processing.py => lambda_function.py} (100%) rename 
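A packaging note on the deploy workflow above: the Python Lambda runtime imports the handler module from the root of the uploaded archive, so the module named in each function's handler setting, plus slack_msg_sender.py, need to end up as top-level entries in each zip. A quick local check, sketched with an assumed archive name:

    import zipfile
    names = zipfile.ZipFile("spotlake_post_processing_lambda.zip").namelist()
    print(names)  # expect top-level entries such as 'lambda_function.py' and 'slack_msg_sender.py'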
collector/spot-dataset/aws/collector/serverless/spot_if/{collect_spot_if.py => lambda_function.py} (100%) rename collector/spot-dataset/aws/collector/serverless/spot_price/{collect_spot_price.py => lambda_function.py} (100%) rename collector/spot-dataset/aws/collector/serverless/workload/{make_workloads.py => lambda_function.py} (100%) diff --git a/collector/spot-dataset/aws/collector/serverless/post_processing_data/spotlake_post_processing.py b/collector/spot-dataset/aws/collector/serverless/post_processing_data/lambda_function.py similarity index 100% rename from collector/spot-dataset/aws/collector/serverless/post_processing_data/spotlake_post_processing.py rename to collector/spot-dataset/aws/collector/serverless/post_processing_data/lambda_function.py diff --git a/collector/spot-dataset/aws/collector/serverless/spot_if/collect_spot_if.py b/collector/spot-dataset/aws/collector/serverless/spot_if/lambda_function.py similarity index 100% rename from collector/spot-dataset/aws/collector/serverless/spot_if/collect_spot_if.py rename to collector/spot-dataset/aws/collector/serverless/spot_if/lambda_function.py diff --git a/collector/spot-dataset/aws/collector/serverless/spot_price/collect_spot_price.py b/collector/spot-dataset/aws/collector/serverless/spot_price/lambda_function.py similarity index 100% rename from collector/spot-dataset/aws/collector/serverless/spot_price/collect_spot_price.py rename to collector/spot-dataset/aws/collector/serverless/spot_price/lambda_function.py diff --git a/collector/spot-dataset/aws/collector/serverless/workload/make_workloads.py b/collector/spot-dataset/aws/collector/serverless/workload/lambda_function.py similarity index 100% rename from collector/spot-dataset/aws/collector/serverless/workload/make_workloads.py rename to collector/spot-dataset/aws/collector/serverless/workload/lambda_function.py From d82e29278177dd04ee98baf145e3f84061a523ab Mon Sep 17 00:00:00 2001 From: TaeYoon Date: Thu, 13 Feb 2025 13:29:24 +0900 Subject: [PATCH 27/38] Refactor folder struct --- .github/workflows/aws-lambda-sync.yml | 12 ++++++------ .../aws/{collector/server => ec2}/sps/collect_sps.py | 2 +- .../{collector/server => ec2}/sps/requirements.txt | 0 .../{collector/server => ec2}/sps/sps_query_api.py | 0 .../aws/{collector/server => ec2}/sps/start_query.sh | 0 .../ondemand_price/collect_ondemand_price.py | 0 .../ondemand_price/load_price.py | 0 .../post_processing_data/compare_data.py | 0 .../post_processing_data/lambda_function.py | 0 .../post_processing_data/upload_data.py | 0 .../serverless => lambda}/spot_if/lambda_function.py | 0 .../spot_price/lambda_function.py | 0 .../serverless => lambda}/spot_price/load_price.py | 0 .../workload/lambda_function.py | 0 .../serverless => lambda}/workload/load_metadata.py | 0 .../{ => legacy}/multi_node_sps_collector/.gitignore | 0 .../{ => legacy}/multi_node_sps_collector/install.sh | 0 .../{ => legacy}/multi_node_sps_collector/main.py | 0 .../multi_node_sps_collector/monitoring/.gitignore | 0 .../monitoring/install_lib.sh | 0 .../monitoring/lambda_function.py | 0 .../monitoring/slack_msg_sender.py | 0 .../multi_node_sps_collector/requirements.txt | 0 .../multi_node_sps_collector/sps_query_api.py | 0 .../multi_node_sps_collector/sps_utils.py | 0 .../multi_node_sps_collector/start_query.sh | 0 .../single_node_sps_collector/aws_collect.py | 0 .../single_node_sps_collector/compare_data.py | 0 .../single_node_sps_collector/join_data.py | 0 .../single_node_sps_collector/load_metadata.py | 0 .../single_node_sps_collector/load_price.py 
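The renames of the entry files to lambda_function.py above line up with how the Python Lambda runtime resolves handlers: the handler string is "<module>.<function>", and the functions here presumably use the default lambda_function.lambda_handler, so each module has to be named lambda_function and expose lambda_handler. Minimal shape of such a module, as a sketch:

    # lambda_function.py
    def lambda_handler(event, context):
        # the real functions delegate to main(); a stub illustrates the required signature
        return {"statusCode": 200}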
| 0 .../load_spot_placement_score.py | 0 .../single_node_sps_collector/load_spotinfo.py | 0 .../single_node_sps_collector/requirements.txt | 0 .../single_node_sps_collector/upload_data.py | 0 .../single_node_sps_collector/workload_binpacking.py | 0 36 files changed, 7 insertions(+), 7 deletions(-) rename collector/spot-dataset/aws/{collector/server => ec2}/sps/collect_sps.py (99%) rename collector/spot-dataset/aws/{collector/server => ec2}/sps/requirements.txt (100%) rename collector/spot-dataset/aws/{collector/server => ec2}/sps/sps_query_api.py (100%) rename collector/spot-dataset/aws/{collector/server => ec2}/sps/start_query.sh (100%) rename collector/spot-dataset/aws/{collector/serverless => lambda}/ondemand_price/collect_ondemand_price.py (100%) rename collector/spot-dataset/aws/{collector/serverless => lambda}/ondemand_price/load_price.py (100%) rename collector/spot-dataset/aws/{collector/serverless => lambda}/post_processing_data/compare_data.py (100%) rename collector/spot-dataset/aws/{collector/serverless => lambda}/post_processing_data/lambda_function.py (100%) rename collector/spot-dataset/aws/{collector/serverless => lambda}/post_processing_data/upload_data.py (100%) rename collector/spot-dataset/aws/{collector/serverless => lambda}/spot_if/lambda_function.py (100%) rename collector/spot-dataset/aws/{collector/serverless => lambda}/spot_price/lambda_function.py (100%) rename collector/spot-dataset/aws/{collector/serverless => lambda}/spot_price/load_price.py (100%) rename collector/spot-dataset/aws/{collector/serverless => lambda}/workload/lambda_function.py (100%) rename collector/spot-dataset/aws/{collector/serverless => lambda}/workload/load_metadata.py (100%) rename collector/spot-dataset/aws/{ => legacy}/multi_node_sps_collector/.gitignore (100%) rename collector/spot-dataset/aws/{ => legacy}/multi_node_sps_collector/install.sh (100%) rename collector/spot-dataset/aws/{ => legacy}/multi_node_sps_collector/main.py (100%) rename collector/spot-dataset/aws/{ => legacy}/multi_node_sps_collector/monitoring/.gitignore (100%) rename collector/spot-dataset/aws/{ => legacy}/multi_node_sps_collector/monitoring/install_lib.sh (100%) rename collector/spot-dataset/aws/{ => legacy}/multi_node_sps_collector/monitoring/lambda_function.py (100%) rename collector/spot-dataset/aws/{ => legacy}/multi_node_sps_collector/monitoring/slack_msg_sender.py (100%) rename collector/spot-dataset/aws/{ => legacy}/multi_node_sps_collector/requirements.txt (100%) rename collector/spot-dataset/aws/{ => legacy}/multi_node_sps_collector/sps_query_api.py (100%) rename collector/spot-dataset/aws/{ => legacy}/multi_node_sps_collector/sps_utils.py (100%) rename collector/spot-dataset/aws/{ => legacy}/multi_node_sps_collector/start_query.sh (100%) rename collector/spot-dataset/aws/{ => legacy}/single_node_sps_collector/aws_collect.py (100%) rename collector/spot-dataset/aws/{ => legacy}/single_node_sps_collector/compare_data.py (100%) rename collector/spot-dataset/aws/{ => legacy}/single_node_sps_collector/join_data.py (100%) rename collector/spot-dataset/aws/{ => legacy}/single_node_sps_collector/load_metadata.py (100%) rename collector/spot-dataset/aws/{ => legacy}/single_node_sps_collector/load_price.py (100%) rename collector/spot-dataset/aws/{ => legacy}/single_node_sps_collector/load_spot_placement_score.py (100%) rename collector/spot-dataset/aws/{ => legacy}/single_node_sps_collector/load_spotinfo.py (100%) rename collector/spot-dataset/aws/{ => legacy}/single_node_sps_collector/requirements.txt (100%) 
rename collector/spot-dataset/aws/{ => legacy}/single_node_sps_collector/upload_data.py (100%) rename collector/spot-dataset/aws/{ => legacy}/single_node_sps_collector/workload_binpacking.py (100%) diff --git a/.github/workflows/aws-lambda-sync.yml b/.github/workflows/aws-lambda-sync.yml index a551e54f..ae388062 100644 --- a/.github/workflows/aws-lambda-sync.yml +++ b/.github/workflows/aws-lambda-sync.yml @@ -4,7 +4,7 @@ on: branches: - "main" paths: - - "collector/spot-dataset/aws/serverless/**" + - "collector/spot-dataset/aws/lambda/**" - "utility/slack_msg_sender.py" workflow_dispatch: @@ -26,11 +26,11 @@ jobs: set -e declare -A LAMBDA_PATHS=( - ["ondemand_price_collector_lambda.zip"]="collector/spot-dataset/aws/serverless/ondemand_price" - ["spotlake_post_processing_lambda.zip"]="collector/spot-dataset/aws/serverless/post_processing_data" - ["spot_if_collector_lambda.zip"]="collector/spot-dataset/aws/serverless/spot_if" - ["spot_price_collector_lambda.zip"]="collector/spot-dataset/aws/serverless/spot_price" - ["binpacked_workloads_generator_lambda.zip"]="collector/spot-dataset/aws/serverless/workload" + ["ondemand_price_collector_lambda.zip"]="collector/spot-dataset/aws/lambda/ondemand_price" + ["spotlake_post_processing_lambda.zip"]="collector/spot-dataset/aws/lambda/post_processing_data" + ["spot_if_collector_lambda.zip"]="collector/spot-dataset/aws/lambda/spot_if" + ["spot_price_collector_lambda.zip"]="collector/spot-dataset/aws/lambda/spot_price" + ["binpacked_workloads_generator_lambda.zip"]="collector/spot-dataset/aws/lambda/workload" ) for zip_name in "${!LAMBDA_PATHS[@]}"; do diff --git a/collector/spot-dataset/aws/collector/server/sps/collect_sps.py b/collector/spot-dataset/aws/ec2/sps/collect_sps.py similarity index 99% rename from collector/spot-dataset/aws/collector/server/sps/collect_sps.py rename to collector/spot-dataset/aws/ec2/sps/collect_sps.py index e1171fa2..171c1a54 100644 --- a/collector/spot-dataset/aws/collector/server/sps/collect_sps.py +++ b/collector/spot-dataset/aws/ec2/sps/collect_sps.py @@ -14,7 +14,7 @@ def main(): # ------ Setting Constants ------ - CURRENT_LOCAL_BASE_PATH = "/home/ubuntu/spotlake/collector/spot-dataset/aws/collector/server/sps" + CURRENT_LOCAL_BASE_PATH = "/home/ubuntu/spotlake/collector/spot-dataset/aws/ec2/sps" current_time = datetime.now(timezone.utc) BUCKET_NAME = None diff --git a/collector/spot-dataset/aws/collector/server/sps/requirements.txt b/collector/spot-dataset/aws/ec2/sps/requirements.txt similarity index 100% rename from collector/spot-dataset/aws/collector/server/sps/requirements.txt rename to collector/spot-dataset/aws/ec2/sps/requirements.txt diff --git a/collector/spot-dataset/aws/collector/server/sps/sps_query_api.py b/collector/spot-dataset/aws/ec2/sps/sps_query_api.py similarity index 100% rename from collector/spot-dataset/aws/collector/server/sps/sps_query_api.py rename to collector/spot-dataset/aws/ec2/sps/sps_query_api.py diff --git a/collector/spot-dataset/aws/collector/server/sps/start_query.sh b/collector/spot-dataset/aws/ec2/sps/start_query.sh similarity index 100% rename from collector/spot-dataset/aws/collector/server/sps/start_query.sh rename to collector/spot-dataset/aws/ec2/sps/start_query.sh diff --git a/collector/spot-dataset/aws/collector/serverless/ondemand_price/collect_ondemand_price.py b/collector/spot-dataset/aws/lambda/ondemand_price/collect_ondemand_price.py similarity index 100% rename from collector/spot-dataset/aws/collector/serverless/ondemand_price/collect_ondemand_price.py rename to 
collector/spot-dataset/aws/lambda/ondemand_price/collect_ondemand_price.py diff --git a/collector/spot-dataset/aws/collector/serverless/ondemand_price/load_price.py b/collector/spot-dataset/aws/lambda/ondemand_price/load_price.py similarity index 100% rename from collector/spot-dataset/aws/collector/serverless/ondemand_price/load_price.py rename to collector/spot-dataset/aws/lambda/ondemand_price/load_price.py diff --git a/collector/spot-dataset/aws/collector/serverless/post_processing_data/compare_data.py b/collector/spot-dataset/aws/lambda/post_processing_data/compare_data.py similarity index 100% rename from collector/spot-dataset/aws/collector/serverless/post_processing_data/compare_data.py rename to collector/spot-dataset/aws/lambda/post_processing_data/compare_data.py diff --git a/collector/spot-dataset/aws/collector/serverless/post_processing_data/lambda_function.py b/collector/spot-dataset/aws/lambda/post_processing_data/lambda_function.py similarity index 100% rename from collector/spot-dataset/aws/collector/serverless/post_processing_data/lambda_function.py rename to collector/spot-dataset/aws/lambda/post_processing_data/lambda_function.py diff --git a/collector/spot-dataset/aws/collector/serverless/post_processing_data/upload_data.py b/collector/spot-dataset/aws/lambda/post_processing_data/upload_data.py similarity index 100% rename from collector/spot-dataset/aws/collector/serverless/post_processing_data/upload_data.py rename to collector/spot-dataset/aws/lambda/post_processing_data/upload_data.py diff --git a/collector/spot-dataset/aws/collector/serverless/spot_if/lambda_function.py b/collector/spot-dataset/aws/lambda/spot_if/lambda_function.py similarity index 100% rename from collector/spot-dataset/aws/collector/serverless/spot_if/lambda_function.py rename to collector/spot-dataset/aws/lambda/spot_if/lambda_function.py diff --git a/collector/spot-dataset/aws/collector/serverless/spot_price/lambda_function.py b/collector/spot-dataset/aws/lambda/spot_price/lambda_function.py similarity index 100% rename from collector/spot-dataset/aws/collector/serverless/spot_price/lambda_function.py rename to collector/spot-dataset/aws/lambda/spot_price/lambda_function.py diff --git a/collector/spot-dataset/aws/collector/serverless/spot_price/load_price.py b/collector/spot-dataset/aws/lambda/spot_price/load_price.py similarity index 100% rename from collector/spot-dataset/aws/collector/serverless/spot_price/load_price.py rename to collector/spot-dataset/aws/lambda/spot_price/load_price.py diff --git a/collector/spot-dataset/aws/collector/serverless/workload/lambda_function.py b/collector/spot-dataset/aws/lambda/workload/lambda_function.py similarity index 100% rename from collector/spot-dataset/aws/collector/serverless/workload/lambda_function.py rename to collector/spot-dataset/aws/lambda/workload/lambda_function.py diff --git a/collector/spot-dataset/aws/collector/serverless/workload/load_metadata.py b/collector/spot-dataset/aws/lambda/workload/load_metadata.py similarity index 100% rename from collector/spot-dataset/aws/collector/serverless/workload/load_metadata.py rename to collector/spot-dataset/aws/lambda/workload/load_metadata.py diff --git a/collector/spot-dataset/aws/multi_node_sps_collector/.gitignore b/collector/spot-dataset/aws/legacy/multi_node_sps_collector/.gitignore similarity index 100% rename from collector/spot-dataset/aws/multi_node_sps_collector/.gitignore rename to collector/spot-dataset/aws/legacy/multi_node_sps_collector/.gitignore diff --git 
a/collector/spot-dataset/aws/multi_node_sps_collector/install.sh b/collector/spot-dataset/aws/legacy/multi_node_sps_collector/install.sh similarity index 100% rename from collector/spot-dataset/aws/multi_node_sps_collector/install.sh rename to collector/spot-dataset/aws/legacy/multi_node_sps_collector/install.sh diff --git a/collector/spot-dataset/aws/multi_node_sps_collector/main.py b/collector/spot-dataset/aws/legacy/multi_node_sps_collector/main.py similarity index 100% rename from collector/spot-dataset/aws/multi_node_sps_collector/main.py rename to collector/spot-dataset/aws/legacy/multi_node_sps_collector/main.py diff --git a/collector/spot-dataset/aws/multi_node_sps_collector/monitoring/.gitignore b/collector/spot-dataset/aws/legacy/multi_node_sps_collector/monitoring/.gitignore similarity index 100% rename from collector/spot-dataset/aws/multi_node_sps_collector/monitoring/.gitignore rename to collector/spot-dataset/aws/legacy/multi_node_sps_collector/monitoring/.gitignore diff --git a/collector/spot-dataset/aws/multi_node_sps_collector/monitoring/install_lib.sh b/collector/spot-dataset/aws/legacy/multi_node_sps_collector/monitoring/install_lib.sh similarity index 100% rename from collector/spot-dataset/aws/multi_node_sps_collector/monitoring/install_lib.sh rename to collector/spot-dataset/aws/legacy/multi_node_sps_collector/monitoring/install_lib.sh diff --git a/collector/spot-dataset/aws/multi_node_sps_collector/monitoring/lambda_function.py b/collector/spot-dataset/aws/legacy/multi_node_sps_collector/monitoring/lambda_function.py similarity index 100% rename from collector/spot-dataset/aws/multi_node_sps_collector/monitoring/lambda_function.py rename to collector/spot-dataset/aws/legacy/multi_node_sps_collector/monitoring/lambda_function.py diff --git a/collector/spot-dataset/aws/multi_node_sps_collector/monitoring/slack_msg_sender.py b/collector/spot-dataset/aws/legacy/multi_node_sps_collector/monitoring/slack_msg_sender.py similarity index 100% rename from collector/spot-dataset/aws/multi_node_sps_collector/monitoring/slack_msg_sender.py rename to collector/spot-dataset/aws/legacy/multi_node_sps_collector/monitoring/slack_msg_sender.py diff --git a/collector/spot-dataset/aws/multi_node_sps_collector/requirements.txt b/collector/spot-dataset/aws/legacy/multi_node_sps_collector/requirements.txt similarity index 100% rename from collector/spot-dataset/aws/multi_node_sps_collector/requirements.txt rename to collector/spot-dataset/aws/legacy/multi_node_sps_collector/requirements.txt diff --git a/collector/spot-dataset/aws/multi_node_sps_collector/sps_query_api.py b/collector/spot-dataset/aws/legacy/multi_node_sps_collector/sps_query_api.py similarity index 100% rename from collector/spot-dataset/aws/multi_node_sps_collector/sps_query_api.py rename to collector/spot-dataset/aws/legacy/multi_node_sps_collector/sps_query_api.py diff --git a/collector/spot-dataset/aws/multi_node_sps_collector/sps_utils.py b/collector/spot-dataset/aws/legacy/multi_node_sps_collector/sps_utils.py similarity index 100% rename from collector/spot-dataset/aws/multi_node_sps_collector/sps_utils.py rename to collector/spot-dataset/aws/legacy/multi_node_sps_collector/sps_utils.py diff --git a/collector/spot-dataset/aws/multi_node_sps_collector/start_query.sh b/collector/spot-dataset/aws/legacy/multi_node_sps_collector/start_query.sh similarity index 100% rename from collector/spot-dataset/aws/multi_node_sps_collector/start_query.sh rename to 
collector/spot-dataset/aws/legacy/multi_node_sps_collector/start_query.sh diff --git a/collector/spot-dataset/aws/single_node_sps_collector/aws_collect.py b/collector/spot-dataset/aws/legacy/single_node_sps_collector/aws_collect.py similarity index 100% rename from collector/spot-dataset/aws/single_node_sps_collector/aws_collect.py rename to collector/spot-dataset/aws/legacy/single_node_sps_collector/aws_collect.py diff --git a/collector/spot-dataset/aws/single_node_sps_collector/compare_data.py b/collector/spot-dataset/aws/legacy/single_node_sps_collector/compare_data.py similarity index 100% rename from collector/spot-dataset/aws/single_node_sps_collector/compare_data.py rename to collector/spot-dataset/aws/legacy/single_node_sps_collector/compare_data.py diff --git a/collector/spot-dataset/aws/single_node_sps_collector/join_data.py b/collector/spot-dataset/aws/legacy/single_node_sps_collector/join_data.py similarity index 100% rename from collector/spot-dataset/aws/single_node_sps_collector/join_data.py rename to collector/spot-dataset/aws/legacy/single_node_sps_collector/join_data.py diff --git a/collector/spot-dataset/aws/single_node_sps_collector/load_metadata.py b/collector/spot-dataset/aws/legacy/single_node_sps_collector/load_metadata.py similarity index 100% rename from collector/spot-dataset/aws/single_node_sps_collector/load_metadata.py rename to collector/spot-dataset/aws/legacy/single_node_sps_collector/load_metadata.py diff --git a/collector/spot-dataset/aws/single_node_sps_collector/load_price.py b/collector/spot-dataset/aws/legacy/single_node_sps_collector/load_price.py similarity index 100% rename from collector/spot-dataset/aws/single_node_sps_collector/load_price.py rename to collector/spot-dataset/aws/legacy/single_node_sps_collector/load_price.py diff --git a/collector/spot-dataset/aws/single_node_sps_collector/load_spot_placement_score.py b/collector/spot-dataset/aws/legacy/single_node_sps_collector/load_spot_placement_score.py similarity index 100% rename from collector/spot-dataset/aws/single_node_sps_collector/load_spot_placement_score.py rename to collector/spot-dataset/aws/legacy/single_node_sps_collector/load_spot_placement_score.py diff --git a/collector/spot-dataset/aws/single_node_sps_collector/load_spotinfo.py b/collector/spot-dataset/aws/legacy/single_node_sps_collector/load_spotinfo.py similarity index 100% rename from collector/spot-dataset/aws/single_node_sps_collector/load_spotinfo.py rename to collector/spot-dataset/aws/legacy/single_node_sps_collector/load_spotinfo.py diff --git a/collector/spot-dataset/aws/single_node_sps_collector/requirements.txt b/collector/spot-dataset/aws/legacy/single_node_sps_collector/requirements.txt similarity index 100% rename from collector/spot-dataset/aws/single_node_sps_collector/requirements.txt rename to collector/spot-dataset/aws/legacy/single_node_sps_collector/requirements.txt diff --git a/collector/spot-dataset/aws/single_node_sps_collector/upload_data.py b/collector/spot-dataset/aws/legacy/single_node_sps_collector/upload_data.py similarity index 100% rename from collector/spot-dataset/aws/single_node_sps_collector/upload_data.py rename to collector/spot-dataset/aws/legacy/single_node_sps_collector/upload_data.py diff --git a/collector/spot-dataset/aws/single_node_sps_collector/workload_binpacking.py b/collector/spot-dataset/aws/legacy/single_node_sps_collector/workload_binpacking.py similarity index 100% rename from collector/spot-dataset/aws/single_node_sps_collector/workload_binpacking.py rename to 
collector/spot-dataset/aws/legacy/single_node_sps_collector/workload_binpacking.py From bfa5389fc1809f5b6ed87981718244b8917eba6f Mon Sep 17 00:00:00 2001 From: TaeYoon Date: Thu, 13 Feb 2025 13:32:19 +0900 Subject: [PATCH 28/38] Fix func name --- .github/workflows/aws-lambda-sync.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/aws-lambda-sync.yml b/.github/workflows/aws-lambda-sync.yml index ae388062..88792ac3 100644 --- a/.github/workflows/aws-lambda-sync.yml +++ b/.github/workflows/aws-lambda-sync.yml @@ -44,11 +44,11 @@ jobs: set -e declare -A LAMBDA_FUNCTIONS=( - ["ondemand-price-collector"]="ondemand_price_collector_lambda.zip" - ["spotlake-post-processing"]="spotlake_post_processing_lambda.zip" - ["spot-if-collector"]="spot_if_collector_lambda.zip" - ["spot-price-collector"]="spot_price_collector_lambda.zip" - ["binpacked-workloads-generator"]="binpacked_workloads_generator_lambda.zip" + ["ondemand_price_collector"]="ondemand_price_collector_lambda.zip" + ["spotlake_post_processing"]="spotlake_post_processing_lambda.zip" + ["spot_if_collector"]="spot_if_collector_lambda.zip" + ["spot_price_collector"]="spot_price_collector_lambda.zip" + ["binpacked_workloads_generator"]="binpacked_workloads_generator_lambda.zip" ) for function_name in "${!LAMBDA_FUNCTIONS[@]}"; do From f495e86683fcf59f2c616071cbdb4634a4e1383d Mon Sep 17 00:00:00 2001 From: TaeYoon Date: Thu, 13 Feb 2025 14:50:07 +0900 Subject: [PATCH 29/38] Fix CI/CD --- .github/workflows/aws-lambda-sync.yml | 4 ++-- .../{collect_ondemand_price.py => lambda_function.py} | 0 2 files changed, 2 insertions(+), 2 deletions(-) rename collector/spot-dataset/aws/lambda/ondemand_price/{collect_ondemand_price.py => lambda_function.py} (100%) diff --git a/.github/workflows/aws-lambda-sync.yml b/.github/workflows/aws-lambda-sync.yml index 88792ac3..c499fc2e 100644 --- a/.github/workflows/aws-lambda-sync.yml +++ b/.github/workflows/aws-lambda-sync.yml @@ -35,8 +35,8 @@ jobs: for zip_name in "${!LAMBDA_PATHS[@]}"; do echo "Creating ZIP: $zip_name" - zip -r "$zip_name" utility/slack_msg_sender.py - (cd "${LAMBDA_PATHS[$zip_name]}" && zip -r ../../../../"$zip_name" .) 
+ zip -j "$zip_name" utility/slack_msg_sender.py + zip -j "$zip_name" "${LAMBDA_PATHS[$zip_name]}"/* done - name: Deploy to AWS Lambda diff --git a/collector/spot-dataset/aws/lambda/ondemand_price/collect_ondemand_price.py b/collector/spot-dataset/aws/lambda/ondemand_price/lambda_function.py similarity index 100% rename from collector/spot-dataset/aws/lambda/ondemand_price/collect_ondemand_price.py rename to collector/spot-dataset/aws/lambda/ondemand_price/lambda_function.py From e34694bacfe88860682835482007ace3cc7da4c1 Mon Sep 17 00:00:00 2001 From: TaeYoon Date: Thu, 13 Feb 2025 19:39:49 +0900 Subject: [PATCH 30/38] Refactor error boundary & delete unused code --- .../ErrorBoundary/ErrorBoundary.jsx | 41 ++++ frontend/src/pages/home/home.js | 221 +++++++----------- 2 files changed, 128 insertions(+), 134 deletions(-) create mode 100644 frontend/src/components/ErrorBoundary/ErrorBoundary.jsx diff --git a/frontend/src/components/ErrorBoundary/ErrorBoundary.jsx b/frontend/src/components/ErrorBoundary/ErrorBoundary.jsx new file mode 100644 index 00000000..25130c1f --- /dev/null +++ b/frontend/src/components/ErrorBoundary/ErrorBoundary.jsx @@ -0,0 +1,41 @@ +import React, { Component } from "react"; +import { Alert, Snackbar } from "@mui/material"; + +class ErrorBoundary extends Component { + constructor(props) { + super(props); + this.state = { hasError: false, errorMessage: "" }; + } + + static getDerivedStateFromError(error) { + return { hasError: true, errorMessage: error.message }; + } + + componentDidCatch(error, errorInfo) { + console.error("ErrorBoundary caught an error:", error, errorInfo); + } + + handleClose = () => { + this.setState({ hasError: false, errorMessage: "" }); + }; + + render() { + if (this.state.hasError) { + return ( + + + {this.state.errorMessage || "Something went wrong."} + + + ); + } + return this.props.children; + } +} + +export default ErrorBoundary; diff --git a/frontend/src/pages/home/home.js b/frontend/src/pages/home/home.js index 45e458c9..3ba4057b 100644 --- a/frontend/src/pages/home/home.js +++ b/frontend/src/pages/home/home.js @@ -1,28 +1,18 @@ -import React, { useEffect, useRef, useState } from "react"; +import React, { useEffect, useState } from "react"; import axios from "axios"; import * as style from "./styles"; import LinearProgress from "@mui/material/LinearProgress"; import Typography from "@mui/material/Typography"; import Box from "@mui/material/Box"; import DataTable from "../../components/DataTable/DataTable"; -import ChartModal from "../../components/Modal/ChartModal"; +import ErrorBoundary from "../../components/ErrorBoundary/ErrorBoundary"; import CustomToolbar from "../../components/DataTable/ToolBar"; import Query from "../../components/QuerySection/Query"; import { Snackbar, Alert } from "@mui/material"; function Home() { - const [w, setWidth] = useState(window.innerWidth * 0.6); - const [chartModal, setChartModal] = useState(false); const [getData, setGetdata] = useState([]); - const [IFGraph, setIFGraph] = useState([]); - const [SPSGraph, setSPSGraph] = useState([]); - const [SPGraph, setSPGraph] = useState([]); - const [alpha, setAlpha] = useState(0.7); - const alphaInput = useRef(); const [selectedData, setSelectedData] = useState([]); - const [graphData, setGraphData] = useState([]); - const [graphLoad, setGraphLoad] = useState(false); - const [visualDate, setVisualDate] = useState(0); const [vendor, setVendor] = useState("AWS"); const [GCPData, setGCPData] = useState([]); const [AZUREData, setAZUREData] = useState([]); @@ -46,10 
+36,6 @@ function Home() { severity: "error", }); - useEffect(() => { - setWidth(window.innerWidth * 0.6); - }, [window.innerWidth]); - useEffect(() => { getLatestData( "AWS", @@ -81,19 +67,6 @@ function Home() { } } }, [vendor]); - useEffect(() => { - //데이터 가져오기 한번 끝날때마다 한곳에 모으기 - if (SPSGraph.length > visualDate) { - for (let i = 0; i < SPGraph.length - selectedData.length; i++) { - Object.assign(IFGraph[i], IFGraph[i + visualDate]); - setIFGraph(IFGraph.slice(0, visualDate)); - Object.assign(SPSGraph[i], SPSGraph[i + visualDate]); - setSPSGraph(SPSGraph.slice(0, visualDate)); - Object.assign(SPGraph[i], SPGraph[i + visualDate]); - setSPGraph(SPGraph.slice(0, visualDate)); - } - } - }, [graphData]); //latest data 가져오기 const getLatestData = async (curVendor, DataUrl, setLatestData) => { @@ -142,17 +115,7 @@ function Home() { severity: "error", }); }); - }; - - const changeAlpha = () => { - const ainput = alphaInput.current.value; - if (ainput <= 0.5 || ainput > 1) { - alert( - "Please enter a value greater than 0.5 and less than or equal to 1.0" - ); - } else { - ainput === "" ? setAlpha(0.7) : setAlpha(ainput); - } + setLatestData(["ss"]); }; const LinearProgressWithLabel = (props) => { @@ -171,103 +134,93 @@ function Home() { }; return ( -
- - - { - setVendor("AWS"); - }} - clicked={vendor === "AWS"} - disabled={progress[vendor].loading} - > - - Amazon Web Services - - { - setVendor("GCP"); - }} - clicked={vendor === "GCP"} - disabled={progress[vendor].loading} - > - - Google Cloud Platform - - { - setVendor("AZURE"); - }} - clicked={vendor === "AZURE"} - disabled={progress[vendor].loading} - > - - Microsoft Azure - - - - {chartModal && ( - - )} - - {vendor && progress[vendor].loading && ( - - - - After the data is loaded, you can change to other vendors. - - - )} - +
+ + + { + setVendor("AWS"); + }} + clicked={vendor === "AWS"} + disabled={progress[vendor].loading} + > + + Amazon Web Services + + { + setVendor("GCP"); + }} + clicked={vendor === "GCP"} + disabled={progress[vendor].loading} + > + + Google Cloud Platform + + { + setVendor("AZURE"); + }} + clicked={vendor === "AZURE"} + disabled={progress[vendor].loading} + > + + Microsoft Azure + + + } + selectedData={selectedData} setSelectedData={setSelectedData} + setGetdata={setGetdata} + setGCPData={setGCPData} + setAZUREData={setAZUREData} /> - - - {/* Snackbar component displays error messages. */} - setSnackbar((prev) => ({ ...prev, open: false }))} - > - {snackbar.message} - -
+ + + {vendor && progress[vendor].loading && ( + + + + After the data is loaded, you can change to other vendors. + + + )} + } + setSelectedData={setSelectedData} + /> + +
+ {/* Snackbar component displays error messages. */} + setSnackbar((prev) => ({ ...prev, open: false }))} + > + {snackbar.message} + +
+ ); } export default Home; From 05e30deb48a3267559e0aeed4b51b53aa46c5d33 Mon Sep 17 00:00:00 2001 From: TaeYoon Date: Thu, 13 Feb 2025 19:41:54 +0900 Subject: [PATCH 31/38] Fix typo --- frontend/public/index.html | 10 +++++----- frontend/src/pages/home/home.js | 1 - 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/frontend/public/index.html b/frontend/public/index.html index 7955f28d..d7996ca4 100644 --- a/frontend/public/index.html +++ b/frontend/public/index.html @@ -2,19 +2,19 @@ - + - + - + - Spot Lake + SpotLake diff --git a/frontend/src/pages/home/home.js b/frontend/src/pages/home/home.js index 3ba4057b..647c9166 100644 --- a/frontend/src/pages/home/home.js +++ b/frontend/src/pages/home/home.js @@ -115,7 +115,6 @@ function Home() { severity: "error", }); }); - setLatestData(["ss"]); }; const LinearProgressWithLabel = (props) => { From acd45c62bd3aaa0f52845175c7a73202595b46a9 Mon Sep 17 00:00:00 2001 From: TaeYoon Date: Thu, 13 Feb 2025 19:44:40 +0900 Subject: [PATCH 32/38] Update build --- frontend/build/asset-manifest.json | 6 +++--- frontend/build/index.html | 2 +- frontend/build/static/js/main.200900db.js | 3 +++ ...4.js.LICENSE.txt => main.200900db.js.LICENSE.txt} | 12 ------------ frontend/build/static/js/main.200900db.js.map | 1 + frontend/build/static/js/main.52bffc24.js | 3 --- frontend/build/static/js/main.52bffc24.js.map | 1 - 7 files changed, 8 insertions(+), 20 deletions(-) create mode 100644 frontend/build/static/js/main.200900db.js rename frontend/build/static/js/{main.52bffc24.js.LICENSE.txt => main.200900db.js.LICENSE.txt} (85%) create mode 100644 frontend/build/static/js/main.200900db.js.map delete mode 100644 frontend/build/static/js/main.52bffc24.js delete mode 100644 frontend/build/static/js/main.52bffc24.js.map diff --git a/frontend/build/asset-manifest.json b/frontend/build/asset-manifest.json index b888b781..35240352 100644 --- a/frontend/build/asset-manifest.json +++ b/frontend/build/asset-manifest.json @@ -1,15 +1,15 @@ { "files": { "main.css": "/static/css/main.95bea462.css", - "main.js": "/static/js/main.52bffc24.js", + "main.js": "/static/js/main.200900db.js", "static/js/488.a88b8761.chunk.js": "/static/js/488.a88b8761.chunk.js", "index.html": "/index.html", "main.95bea462.css.map": "/static/css/main.95bea462.css.map", - "main.52bffc24.js.map": "/static/js/main.52bffc24.js.map", + "main.200900db.js.map": "/static/js/main.200900db.js.map", "488.a88b8761.chunk.js.map": "/static/js/488.a88b8761.chunk.js.map" }, "entrypoints": [ "static/css/main.95bea462.css", - "static/js/main.52bffc24.js" + "static/js/main.200900db.js" ] } \ No newline at end of file diff --git a/frontend/build/index.html b/frontend/build/index.html index 037ab34a..335da2f7 100644 --- a/frontend/build/index.html +++ b/frontend/build/index.html @@ -1 +1 @@ -Spot Lake
\ No newline at end of file +SpotLake
\ No newline at end of file
diff --git a/frontend/build/static/js/main.200900db.js b/frontend/build/static/js/main.200900db.js
new file mode 100644
index 00000000..c0a31fa9
--- /dev/null
+++ b/frontend/build/static/js/main.200900db.js
@@ -0,0 +1,3 @@
+/*! For license information please see main.200900db.js.LICENSE.txt */
[minified webpack bundle body of main.200900db.js omitted: machine-generated build output]