diff --git a/modules/load_balancer/doc/load_balancer_admin.xml b/modules/load_balancer/doc/load_balancer_admin.xml
index 1ef33b439bf..d35dbed75b2 100644
--- a/modules/load_balancer/doc/load_balancer_admin.xml
+++ b/modules/load_balancer/doc/load_balancer_admin.xml
@@ -539,6 +539,35 @@ modparam("load_balancer", "cluster_sharing_tag", "vip")
+
+ use_cpu_factor (integer)
+
+ This is only relevant for "integrated estimation" mode.
+
+
+ If enabled, the CPU factor collected in the most recent heartbeat
+ will be used to reduce the capacity of each FreeSWITCH instance.
+
+
+ When disabled, no CPU factor will be applied in the calculation.
+
+
+
+
+
+
+ Default value is 0 (disabled)
.
+
+
+ Set use_cpu_factor parameter
+
+...
+modparam("load_balancer", "use_cpu_factor", 1)
+...
+
+
+
+
@@ -592,8 +621,12 @@ modparam("load_balancer", "cluster_sharing_tag", "vip")
performed using the most recent heartbeat data and a
counter of all sessions allocated since the last heartbeat.
Profile counting is unused in the calculation. The reported
- CPU load value is used to reduce session load on systems
+ CPU load value is optionally used to reduce session load on systems
with high CPU utilisation. Mutually exclusive with flag "r".
+
+ This is well suited to high-performance systems where many calls
+ may arrive within the heartbeat period (which should be set to the
+ minimum value of 1s when used with this algorithm).
diff --git a/modules/load_balancer/lb_data.c b/modules/load_balancer/lb_data.c
index 2c614cccca9..60505dd7f86 100644
--- a/modules/load_balancer/lb_data.c
+++ b/modules/load_balancer/lb_data.c
@@ -38,6 +38,7 @@
/* dialog stuff */
extern struct dlg_binds lb_dlg_binds;
+extern int use_cpu_factor;
extern int fetch_freeswitch_stats;
extern int initial_fs_load;
extern struct fs_binds fs_api;
@@ -429,11 +430,17 @@ static int get_dst_load(struct lb_resource **res, unsigned int res_no,
if( dst->rmap[l].max_load )
av = 100 - (100 * lb_dlg_binds.get_profile_size(res[k]->profile, &dst->profile_id) / dst->rmap[l].max_load);
} else if( flags & LB_FLAGS_PERCENT_WITH_CPU ) {
- /* generate score based on the percentage of channels occupied, reduced by CPU idle factor */
if( dst->rmap[l].max_sessions ) {
- av = ( 100 - ( 100 * ( dst->rmap[l].current_sessions + dst->rmap[l].sessions_since_last_heartbeat ) / dst->rmap[l].max_sessions ) ) * dst->rmap[l].cpu_idle;
- LM_DBG("destination %d <%.*s> availability score %d (sessions=%d since_last_hb=%d max_sess=%d cpu_idle=%.2f)", dst->id, dst->uri.len, dst->uri.s, av, dst->rmap[l].current_sessions, dst->rmap[l].sessions_since_last_heartbeat, dst->rmap[l].max_sessions, dst->rmap[l].cpu_idle);
- }
+ if(use_cpu_factor) {
+ /* generate score based on the percentage of channels occupied, reduced by CPU idle factor */
+ av = ( 100 - ( 100 * ( dst->rmap[l].current_sessions + dst->rmap[l].sessions_since_last_heartbeat ) / dst->rmap[l].max_sessions ) ) * dst->rmap[l].cpu_idle;
+ LM_DBG("destination %d <%.*s> availability score %d (sessions=%d since_last_hb=%d max_sess=%d cpu_idle=%.2f)", dst->id, dst->uri.len, dst->uri.s, av, dst->rmap[l].current_sessions, dst->rmap[l].sessions_since_last_heartbeat, dst->rmap[l].max_sessions, dst->rmap[l].cpu_idle);
+ } else {
+ /* generate score based on the percentage of channels occupied */
+ av = 100 - ( 100 * ( dst->rmap[l].current_sessions + dst->rmap[l].sessions_since_last_heartbeat ) / dst->rmap[l].max_sessions );
+ LM_DBG("destination %d <%.*s> availability score %d (sessions=%d since_last_hb=%d max_sess=%d)", dst->id, dst->uri.len, dst->uri.s, av, dst->rmap[l].current_sessions, dst->rmap[l].sessions_since_last_heartbeat, dst->rmap[l].max_sessions);
+ }
+ }
} else {
av = dst->rmap[l].max_load - lb_dlg_binds.get_profile_size(res[k]->profile, &dst->profile_id);
}
diff --git a/modules/load_balancer/load_balancer.c b/modules/load_balancer/load_balancer.c
index ddb94026349..9db380a13d6 100644
--- a/modules/load_balancer/load_balancer.c
+++ b/modules/load_balancer/load_balancer.c
@@ -62,6 +62,8 @@ str lb_probe_from = str_init("sip:prober@localhost");
static int* probing_reply_codes = NULL;
static int probing_codes_no = 0;
+int use_cpu_factor;
+
int fetch_freeswitch_stats;
int initial_fs_load = 1000;
@@ -173,6 +175,7 @@ static const param_export_t mod_params[]={
{ "cluster_id", INT_PARAM, &lb_cluster_id },
{ "cluster_sharing_tag", STR_PARAM, &lb_cluster_shtag },
{ "fetch_freeswitch_stats", INT_PARAM, &fetch_freeswitch_stats },
+ { "use_cpu_factor", INT_PARAM, &use_cpu_factor },
{ "initial_freeswitch_load", INT_PARAM, &initial_fs_load },
{ 0,0,0 }
};
@@ -299,7 +302,7 @@ static void lb_inherit_state(struct lb_data *old_data,struct lb_data *new_data)
strncasecmp(new_dst->uri.s, old_dst->uri.s, old_dst->uri.len)==0) {
LM_DBG("DST %d/<%.*s> found in old set, copying state\n",
new_dst->group, new_dst->uri.len,new_dst->uri.s);
- /* first reset the existing flags (only the flags related
+ /* first reset the existing flags (only the flags related
* to state!!!) */
new_dst->flags &=
~(LB_DST_STAT_DSBL_FLAG|LB_DST_STAT_NOEN_FLAG);
@@ -569,7 +572,7 @@ static int w_lb_start(struct sip_msg *req, int *grp_no,
return -5;
}
flags |= LB_FLAGS_PERCENT_WITH_CPU;
- LM_DBG("using integrated estimation (percentage of max sessions with CPU factor estimation) \n");
+ LM_DBG("using integrated estimation (percentage of max sessions used, tracing real time allocations) \n");
break;
case 'n':
flags |= LB_FLAGS_NEGATIVE;
@@ -807,6 +810,7 @@ static void lb_update_max_loads(unsigned int ticks, void *param)
dst->rmap[ri].resource->profile, &dst->profile_id);
old = dst->rmap[ri].max_load;
+ // if ( flags & LB_FLAGS_PERCENT_WITH_CPU ) { TODO: flags not available here
/*
* In LB_FLAGS_PERCENT_WITH_CPU mode we capture the raw values and use these in each LB calculation. This
* means we do not use profile counting in the load calculation. This is suitable for