
Commit

fix(Slack): Fixed Slack messages when the scaler can't scale further up or down (#6)
dfradehubs authored Nov 6, 2024
1 parent 3c46e07 commit d235620
Showing 2 changed files with 23 additions and 21 deletions.
12 changes: 6 additions & 6 deletions internal/cmd/run/run.go
@@ -103,8 +103,8 @@ func RunCommand(cmd *cobra.Command, args []string) {
 		// If the up condition is met, add a node to the MIG
 		if upCondition {
 			log.Printf("Up condition %s met: Trying to create a new node!", ctx.Config.Metrics.Prometheus.UpCondition)
-			currentSize, maxSize, err := google.AddNodeToMIG(&ctx)
-			if err != nil {
+			currentSize, maxSize, ok, err := google.AddNodeToMIG(&ctx)
+			if err != nil && !ok {
 				log.Printf("Error adding node to MIG: %v", err)
 				if ctx.Config.Notifications.Slack.WebhookURL != "" {
 					message := fmt.Sprintf("Error adding node to MIG: %v", err)
@@ -117,7 +117,7 @@ func RunCommand(cmd *cobra.Command, args []string) {
 				continue
 			}
 			// Notify via Slack that a node has been added
-			if ctx.Config.Notifications.Slack.WebhookURL != "" {
+			if ctx.Config.Notifications.Slack.WebhookURL != "" && ok {
 				message := fmt.Sprintf("Added new node to MIG %s. Current size is %d nodes and the maximum nodes to create are %d", ctx.Config.Infrastructure.GCP.MIGName, currentSize, maxSize)
 				err = slack.NotifySlack(message, ctx.Config.Notifications.Slack.WebhookURL)
 				if err != nil {
@@ -147,8 +147,8 @@ func RunCommand(cmd *cobra.Command, args []string) {
 		// If the down condition is met, remove a node from the MIG
 		if downCondition {
 			log.Printf("Down condition %s met. Trying to remove one node!", ctx.Config.Metrics.Prometheus.DownCondition)
-			currentSize, minSize, nodeRemoved, err := google.RemoveNodeFromMIG(&ctx)
-			if err != nil {
+			currentSize, minSize, nodeRemoved, ok, err := google.RemoveNodeFromMIG(&ctx)
+			if err != nil && !ok {
 				log.Printf("Error draining node from MIG: %v", err)
 				if ctx.Config.Notifications.Slack.WebhookURL != "" {
 					message := fmt.Sprintf("Error draining node from MIG: %v", err)
@@ -161,7 +161,7 @@ func RunCommand(cmd *cobra.Command, args []string) {
 				continue
 			}
 			// Notify via Slack that a node has been removed
-			if ctx.Config.Notifications.Slack.WebhookURL != "" {
+			if ctx.Config.Notifications.Slack.WebhookURL != "" && ok {
 				message := fmt.Sprintf("Removed node %s from MIG %s. Current size is %d nodes and the minimum nodes to exist are %d", nodeRemoved, ctx.Config.Infrastructure.GCP.MIGName, currentSize, minSize)
 				err = slack.NotifySlack(message, ctx.Config.Notifications.Slack.WebhookURL)
 				if err != nil {
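Both the scale-up and scale-down branches above report through slack.NotifySlack, whose implementation is not part of this diff. A minimal sketch of such a helper, assuming Slack's standard incoming-webhook contract (a JSON body with a "text" field POSTed to the webhook URL), not the repository's actual code:

```go
package slack

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// NotifySlack posts a plain-text message to a Slack incoming webhook.
// The signature matches the calls in run.go above; the body is a guess
// at a typical implementation.
func NotifySlack(message string, webhookURL string) error {
	// Incoming webhooks accept a JSON object with a "text" field.
	payload, err := json.Marshal(map[string]string{"text": message})
	if err != nil {
		return err
	}
	resp, err := http.Post(webhookURL, "application/json", bytes.NewReader(payload))
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("slack webhook returned status %d", resp.StatusCode)
	}
	return nil
}
```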
32 changes: 17 additions & 15 deletions internal/google/mig.go
@@ -20,20 +20,20 @@ import (
 )
 
 // AddNodeToMIG increases the size of the Managed Instance Group (MIG) by 1, if it has not reached the maximum limit.
-func AddNodeToMIG(ctx *v1alpha1.Context) (int32, int32, error) {
+func AddNodeToMIG(ctx *v1alpha1.Context) (int32, int32, bool, error) {
 	ctxConn := context.Background()
 
 	// Create a new Compute client for managing the MIG
 	client, err := createComputeClient(ctxConn, ctx, compute.NewInstanceGroupManagersRESTClient)
 	if err != nil {
-		return 0, 0, fmt.Errorf("failed to create Instance Group Managers client: %v", err)
+		return 0, 0, false, fmt.Errorf("failed to create Instance Group Managers client: %v", err)
 	}
 	defer client.Close()
 
 	// Get the current target size of the MIG
 	targetSize, err := getMIGTargetSize(ctxConn, client, ctx)
 	if err != nil {
-		return 0, 0, fmt.Errorf("failed to get MIG target size: %v", err)
+		return 0, 0, false, fmt.Errorf("failed to get MIG target size: %v", err)
 	}
 	log.Printf("Current size of MIG is %d nodes", targetSize)
 
@@ -42,7 +42,8 @@ func AddNodeToMIG(ctx *v1alpha1.Context) (int32, int32, error) {
 
 	// Check if the MIG has reached its maximum size
 	if targetSize >= maxSize {
-		return 0, 0, fmt.Errorf("MIG has reached its maximum size (%d/%d), no further scaling is possible", targetSize, maxSize)
+		log.Printf("MIG has reached its maximum size (%d/%d), no further scaling is possible", targetSize, maxSize)
+		return 0, 0, true, nil
 	}
 
 	// Create a request to resize the MIG by increasing the target size by 1
@@ -57,29 +58,29 @@ func AddNodeToMIG(ctx *v1alpha1.Context) (int32, int32, error) {
 	if !ctx.Config.Autoscaler.DebugMode {
 		_, err = client.Resize(ctxConn, req)
 		if err != nil {
-			return 0, 0, err
+			return 0, 0, false, err
 		} else {
 			log.Printf("Scaled up MIG successfully %d/%d", targetSize+1, maxSize)
 		}
 	}
-	return targetSize + 1, maxSize, nil
+	return targetSize + 1, maxSize, true, nil
 }

 // RemoveNodeFromMIG decreases the size of the Managed Instance Group (MIG) by 1, if it has not reached the minimum limit.
-func RemoveNodeFromMIG(ctx *v1alpha1.Context) (int32, int32, string, error) {
+func RemoveNodeFromMIG(ctx *v1alpha1.Context) (int32, int32, string, bool, error) {
 	ctxConn := context.Background()
 
 	// Create a new Compute client for managing the MIG
 	client, err := createComputeClient(ctxConn, ctx, compute.NewInstanceGroupManagersRESTClient)
 	if err != nil {
-		return 0, 0, "", fmt.Errorf("failed to create Instance Group Managers client: %v", err)
+		return 0, 0, "", false, fmt.Errorf("failed to create Instance Group Managers client: %v", err)
 	}
 	defer client.Close()
 
 	// Get the current target size of the MIG
 	targetSize, err := getMIGTargetSize(ctxConn, client, ctx)
 	if err != nil {
-		return 0, 0, "", fmt.Errorf("failed to get MIG target size: %v", err)
+		return 0, 0, "", false, fmt.Errorf("failed to get MIG target size: %v", err)
 	}
 	log.Printf("Current size of MIG is %d nodes", targetSize)
 
@@ -88,13 +89,14 @@ func RemoveNodeFromMIG(ctx *v1alpha1.Context) (int32, int32, string, error) {
 
 	// Check if the MIG has reached its minimum size
 	if targetSize <= minSize {
-		return 0, 0, "", fmt.Errorf("MIG has reached its minimum size (%d/%d), no further scaling down is possible", targetSize, minSize)
+		log.Printf("MIG has reached its minimum size (%d/%d), no further scaling down is possible", targetSize, minSize)
+		return 0, 0, "", true, nil
 	}
 
 	// Get a random instance from the MIG to remove
 	instanceToRemove, err := GetInstanceToRemove(ctxConn, client, ctx)
 	if err != nil {
-		return 0, 0, "", fmt.Errorf("error getting instance to remove: %v", err)
+		return 0, 0, "", false, fmt.Errorf("error getting instance to remove: %v", err)
 	}
 
 	// If not in debug mode, drain the node from Elasticsearch before removal
@@ -105,7 +107,7 @@ func RemoveNodeFromMIG(ctx *v1alpha1.Context) (int32, int32, string, error) {
 		log.Printf("Instance to remove: %s. Draining from elasticsearch cluster", instanceToRemove)
 		err = elasticsearch.DrainElasticsearchNode(ctx, instanceToRemove)
 		if err != nil {
-			return 0, 0, "", fmt.Errorf("error draining Elasticsearch node: %v", err)
+			return 0, 0, "", false, fmt.Errorf("error draining Elasticsearch node: %v", err)
 		}
 		log.Printf("Instance drained successfully from elasticsearch cluster")
 	}
@@ -125,7 +127,7 @@ func RemoveNodeFromMIG(ctx *v1alpha1.Context) (int32, int32, string, error) {
 	if !ctx.Config.Autoscaler.DebugMode {
 		_, err = client.DeleteInstances(ctxConn, deleteReq)
 		if err != nil {
-			return 0, 0, "", fmt.Errorf("error deleting instance: %v", err)
+			return 0, 0, "", false, fmt.Errorf("error deleting instance: %v", err)
 		}
 	}
 
@@ -144,12 +146,12 @@ func RemoveNodeFromMIG(ctx *v1alpha1.Context) (int32, int32, string, error) {
 		// Remove the elasticsearch node from cluster settings
 		err = elasticsearch.ClearElasticsearchClusterSettings(ctx, instanceToRemove)
 		if err != nil {
-			return 0, 0, "", fmt.Errorf("error clearing Elasticsearch cluster settings: %v", err)
+			return 0, 0, "", false, fmt.Errorf("error clearing Elasticsearch cluster settings: %v", err)
 		}
 		log.Printf("Cleared up elasticsearch settings for draining node")
 	}
 
-	return targetSize - 1, minSize, instanceToRemove, nil
+	return targetSize - 1, minSize, instanceToRemove, true, nil
 }
 
 // getMIGScalingLimits retrieves the minimum and maximum scaling limits for a Managed Instance Group (MIG).
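DrainElasticsearchNode and ClearElasticsearchClusterSettings are likewise outside this diff. Draining a data node before deleting its VM is commonly done through Elasticsearch's shard-allocation filtering; the sketch below shows that mechanism with plain HTTP, where the helper name, the esURL parameter, and the absence of authentication are assumptions rather than the repository's actual code:

```go
package elasticsearch

import (
	"fmt"
	"net/http"
	"strings"
)

// setAllocationExclusion asks Elasticsearch to move shards off the named
// node via cluster-level shard-allocation filtering. Passing an empty name
// excludes no nodes, which is one way to undo the drain once the VM is gone,
// roughly what ClearElasticsearchClusterSettings needs to do.
func setAllocationExclusion(esURL, nodeName string) error {
	// %q renders the node name as a quoted, JSON-safe string.
	body := fmt.Sprintf(`{"persistent":{"cluster.routing.allocation.exclude._name":%q}}`, nodeName)
	req, err := http.NewRequest(http.MethodPut, esURL+"/_cluster/settings", strings.NewReader(body))
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/json")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("cluster settings update failed: %s", resp.Status)
	}
	return nil
}
```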
