From 98f54a4075bf29a2f97e508323ce77da571a3108 Mon Sep 17 00:00:00 2001
From: Walter Doekes
Date: Tue, 11 May 2021 15:15:09 +0200
Subject: [PATCH] presence: Fix database purge of activewatchers (clustering, no fallback2db)

When clustering sharing_tags were added to presence, they were only
added to the fallback2db "on" case. There are a couple of dimensions
with differing behaviours:

              +------------+------------+
              | fallback2- | fallback2- |
              | -db = on   | -db = off  |
+-clustering:-+------------+------------+
| - no        | OK         | OK         |
| - tagless   | PR-2519    | PR-2519    |
| - active    | OK         | this       |
+-------------+------------+------------+

The non-OK behaviour above refers to the activewatchers table filling
up with stale/expired items.

fallback2db on or off:
```
modparam("presence", "fallback2db", 0) # or 1=on
```
The no-clustering case:
```
handle_subscribe();
```
The tagless case:
```
modparam("presence", "cluster_id", 1)
modparam("clusterer", "my_node_id", 2)
handle_subscribe();
```
The active case:
```
modparam("presence", "cluster_id", 1)
modparam("clusterer", "my_node_id", 2)
modparam("clusterer", "sharing_tag", "node2/1=active")
handle_subscribe("0", "node2");
```

Where PR #2519 fixes the tagless case, this PR fixes the active
fallback2db=off case by writing the sharing_tag to the database, so the
records can be found and cleaned up.

(Sidenote: subscriptions which ended with a timeout or a 481 *would*
get cleaned up. That behaviour is correct in all cases: if a
subscription hits an error before its expiry, it makes sense to purge
it from the DB immediately, and it is not a problem if the periodic
cleanup has already removed those records.)
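To make the insert-side change concrete, here is a minimal,
self-contained C sketch of the new binding logic (with simplified
stand-ins for OpenSIPS' str and db_val_t types; not the module's actual
code): an empty sharing tag is stored as SQL NULL via the nul flag,
anything else is bound as a plain string value.

```
#include <stdio.h>

/* Simplified stand-ins for OpenSIPS' str and db_val_t types. */
typedef struct { char *s; int len; } str;
typedef struct { int nul; str str_val; } db_val_t;

/* Bind the sharing tag column: SQL NULL when the subscription
 * carries no tag, the tag string otherwise. */
static void bind_sharing_tag(db_val_t *val, const str *sh_tag)
{
	if (sh_tag->len == 0) {
		val->nul = 1;           /* store SQL NULL */
	} else {
		val->nul = 0;
		val->str_val = *sh_tag; /* store the tag, e.g. "node2" */
	}
}

int main(void)
{
	db_val_t val;
	str no_tag = { "", 0 };
	str tag = { "node2", 5 };

	bind_sharing_tag(&val, &no_tag);
	printf("tagless: nul=%d\n", val.nul);
	bind_sharing_tag(&val, &tag);
	printf("active:  nul=%d tag=%.*s\n",
	       val.nul, val.str_val.len, val.str_val.s);
	return 0;
}
```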
---
 modules/presence/subscribe.c | 24 ++++++++++++++++++------
 1 file changed, 18 insertions(+), 6 deletions(-)

diff --git a/modules/presence/subscribe.c b/modules/presence/subscribe.c
index d8b91a3edee..c91ef479145 100644
--- a/modules/presence/subscribe.c
+++ b/modules/presence/subscribe.c
@@ -1364,9 +1364,10 @@ void update_db_subs(db_con_t *db,db_func_t *dbf, shtable_t hash_table,
 	db_op_t update_ops[2];
 	subs_t* del_s;
 	int pres_uri_col, to_user_col, to_domain_col, from_user_col, from_domain_col,
-		callid_col, totag_col, fromtag_col, event_col,status_col, event_id_col,
+		callid_col, totag_col, fromtag_col, event_col, status_col, event_id_col,
 		local_cseq_col, remote_cseq_col, expires_col, record_route_col,
-		contact_col, local_contact_col, version_col,socket_info_col,reason_col;
+		contact_col, local_contact_col, version_col, socket_info_col,
+		sharing_tag_col, reason_col;
 	int u_expires_col, u_local_cseq_col, u_remote_cseq_col, u_version_col,
 		u_reason_col, u_status_col, u_contact_col;
 	int i;
@@ -1472,14 +1473,19 @@ void update_db_subs(db_con_t *db,db_func_t *dbf, shtable_t hash_table,
 	query_vals[local_contact_col].nul = 0;
 	n_query_cols++;
 
+	query_cols[version_col= n_query_cols]=&str_version_col;
+	query_vals[version_col].type = DB_INT;
+	query_vals[version_col].nul = 0;
+	n_query_cols++;
+
 	query_cols[socket_info_col= n_query_cols] =&str_socket_info_col;
 	query_vals[socket_info_col].type = DB_STR;
 	query_vals[socket_info_col].nul = 0;
 	n_query_cols++;
 
-	query_cols[version_col= n_query_cols]=&str_version_col;
-	query_vals[version_col].type = DB_INT;
-	query_vals[version_col].nul = 0;
+	query_cols[sharing_tag_col= n_query_cols] =&str_sharing_tag_col;
+	query_vals[sharing_tag_col].type = DB_STR;
+	query_vals[sharing_tag_col].nul = 0;
 	n_query_cols++;
 
 	/* cols and values used for update */
@@ -1639,10 +1645,16 @@ void update_db_subs(db_con_t *db,db_func_t *dbf, shtable_t hash_table,
 			query_vals[socket_info_col].val.str_val.s = 0;
 			query_vals[socket_info_col].val.str_val.len = 0;
 		}
+		if (s->sh_tag.len == 0) {
+			query_vals[sharing_tag_col].nul = 1;
+		} else {
+			query_vals[sharing_tag_col].nul = 0;
+			query_vals[sharing_tag_col].val.str_val = s->sh_tag;
+		}
 
 		CON_SET_CURR_PS(db, &my_ps_insert);
 		if (dbf->insert( db, query_cols, query_vals,
-					n_query_cols) < 0)
+					n_query_cols) < 0) {
 			LM_ERR("unsuccessful sql insert\n");
 		}
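
As a footnote to the patch, a rough, self-contained C model of the
purge decision this enables (hypothetical helper names, not the
module's actual cleanup code): with the sharing_tag now written even
when fallback2db is off, an expired row can be matched against the tags
this node holds active and purged, instead of lingering unmatched.

```
#include <stdio.h>
#include <string.h>
#include <time.h>

/* Hypothetical model of the cleanup decision: purge a row once it is
 * expired AND its sharing_tag is one of the tags this node holds as
 * "active". */
static int may_purge(const char *row_tag, time_t row_expires,
		const char **active_tags, int n_tags, time_t now)
{
	int i;

	if (row_expires >= now)
		return 0;       /* not expired yet */
	if (row_tag == NULL)
		return 0;       /* tagless rows are the subject of PR #2519 */
	for (i = 0; i < n_tags; i++)
		if (strcmp(row_tag, active_tags[i]) == 0)
			return 1;   /* expired and owned by us: purge */
	return 0;               /* another node's sharing tag */
}

int main(void)
{
	const char *mine[] = { "node2" };
	time_t now = time(NULL);

	/* Expired row with our active tag: purged (prints 1). */
	printf("%d\n", may_purge("node2", now - 10, mine, 1, now));
	/* Expired row owned by another node: left alone (prints 0). */
	printf("%d\n", may_purge("node1", now - 10, mine, 1, now));
	/* Not yet expired: left alone (prints 0). */
	printf("%d\n", may_purge("node2", now + 10, mine, 1, now));
	return 0;
}
```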