diff --git a/modules/load_balancer/doc/load_balancer_admin.xml b/modules/load_balancer/doc/load_balancer_admin.xml
index 6044d53182b..ac7ff55abf3 100644
--- a/modules/load_balancer/doc/load_balancer_admin.xml
+++ b/modules/load_balancer/doc/load_balancer_admin.xml
@@ -539,35 +539,6 @@ modparam("load_balancer", "cluster_sharing_tag", "vip")
-
- use_cpu_factor (integer)
-
- This is only relevant for "integrated estimation" mode.
-
-
- If enabled, the CPU factor collected in the most recent heartbeat
- will be used to reduce the capacity of each FreeSWITCH instance.
-
-
- When disabled, no CPU factor will be applied in the calculation.
-
-
-
-
-
-
- Default value is empty (disabled)
.
-
-
- Set use_cpu_factor parameter
-
-...
-modparam("load_balancer", "use_cpu_factor", 1)
-...
-
-
-
-
@@ -621,12 +592,8 @@ modparam("load_balancer", "use_cpu_factor", 1)
performed using the most recent heartbeat data and a
counter of all sessions allocated since the last heartbeat.
Profile counting is unused in the calculation. The reported
- CPU load value is optionally used to reduce session load on systems
+ CPU load value is used to reduce session load on systems
with high CPU utilisation. Mutually exclusive with flag "r".
-
- This is well suited to high performance systems where many calls
- may arrive within the heartbeat period (which should be set to the
- minimum value 1s when used with this algorithm).
diff --git a/modules/load_balancer/lb_data.c b/modules/load_balancer/lb_data.c
index ed2a9178a0a..2c614cccca9 100644
--- a/modules/load_balancer/lb_data.c
+++ b/modules/load_balancer/lb_data.c
@@ -38,7 +38,6 @@
/* dialog stuff */
extern struct dlg_binds lb_dlg_binds;
-extern int use_cpu_factor;
extern int fetch_freeswitch_stats;
extern int initial_fs_load;
extern struct fs_binds fs_api;
@@ -430,16 +429,10 @@ static int get_dst_load(struct lb_resource **res, unsigned int res_no,
if( dst->rmap[l].max_load )
av = 100 - (100 * lb_dlg_binds.get_profile_size(res[k]->profile, &dst->profile_id) / dst->rmap[l].max_load);
} else if( flags & LB_FLAGS_PERCENT_WITH_CPU ) {
+ /* generate score based on the percentage of channels occupied, reduced by CPU idle factor */
if( dst->rmap[l].max_sessions ) {
- if(use_cpu_factor) {
- /* generate score based on the percentage of channels occupied, reduced by CPU idle factor */
- av = ( 100 - ( 100 * ( dst->rmap[l].current_sessions + dst->rmap[l].sessions_since_last_heartbeat ) / dst->rmap[l].max_sessions ) ) * dst->rmap[l].cpu_idle;
- LM_DBG("destination %d <%.*s> availability score %d (sessions=%d since_last_hb=%d max_sess=%d cpu_idle=%.2f)", dst->id, dst->uri.len, dst->uri.s, av, dst->rmap[l].current_sessions, dst->rmap[l].sessions_since_last_heartbeat, dst->rmap[l].max_sessions, dst->rmap[l].cpu_idle);
- } else {
- /* generate score based on the percentage of channels occupied */
- av = 100 - ( 100 * ( dst->rmap[l].current_sessions + dst->rmap[l].sessions_since_last_heartbeat ) / dst->rmap[l].max_sessions );
- LM_DBG("destination %d <%.*s> availability score %d (sessions=%d since_last_hb=%d max_sess=%d)", dst->id, dst->uri.len, dst->uri.s, av, dst->rmap[l].current_sessions, dst->rmap[l].sessions_since_last_heartbeat, dst->rmap[l].max_sessions);
- }
+ av = ( 100 - ( 100 * ( dst->rmap[l].current_sessions + dst->rmap[l].sessions_since_last_heartbeat ) / dst->rmap[l].max_sessions ) ) * dst->rmap[l].cpu_idle;
+ LM_DBG("destination %d <%.*s> availability score %d (sessions=%d since_last_hb=%d max_sess=%d cpu_idle=%.2f)\n", dst->id, dst->uri.len, dst->uri.s, av, dst->rmap[l].current_sessions, dst->rmap[l].sessions_since_last_heartbeat, dst->rmap[l].max_sessions, dst->rmap[l].cpu_idle);
}
} else {
av = dst->rmap[l].max_load - lb_dlg_binds.get_profile_size(res[k]->profile, &dst->profile_id);
diff --git a/modules/load_balancer/load_balancer.c b/modules/load_balancer/load_balancer.c
index 8b07558d967..2033cd42146 100644
--- a/modules/load_balancer/load_balancer.c
+++ b/modules/load_balancer/load_balancer.c
@@ -62,8 +62,6 @@ str lb_probe_from = str_init("sip:prober@localhost");
static int* probing_reply_codes = NULL;
static int probing_codes_no = 0;
-int use_cpu_factor;
-
int fetch_freeswitch_stats;
int initial_fs_load = 1000;
@@ -175,7 +173,6 @@ static param_export_t mod_params[]={
{ "cluster_id", INT_PARAM, &lb_cluster_id },
{ "cluster_sharing_tag", STR_PARAM, &lb_cluster_shtag },
{ "fetch_freeswitch_stats", INT_PARAM, &fetch_freeswitch_stats },
- { "use_cpu_factor", INT_PARAM, &use_cpu_factor },
{ "initial_freeswitch_load", INT_PARAM, &initial_fs_load },
{ 0,0,0 }
};
@@ -302,7 +299,7 @@ static void lb_inherit_state(struct lb_data *old_data,struct lb_data *new_data)
strncasecmp(new_dst->uri.s, old_dst->uri.s, old_dst->uri.len)==0) {
LM_DBG("DST %d/<%.*s> found in old set, copying state\n",
new_dst->group, new_dst->uri.len,new_dst->uri.s);
- /* first reset the existing flags (only the flags related
+ /* first reset the existing flags (only the flags related
* to state!!!) */
new_dst->flags &=
~(LB_DST_STAT_DSBL_FLAG|LB_DST_STAT_NOEN_FLAG);
@@ -572,7 +569,7 @@ static int w_lb_start(struct sip_msg *req, int *grp_no,
return -5;
}
flags |= LB_FLAGS_PERCENT_WITH_CPU;
- LM_DBG("using integrated estimation (percentage of max sessions used, tracing real time allocations) \n");
+ LM_DBG("using integrated estimation (percentage of max sessions with CPU factor estimation) \n");
break;
case 'n':
flags |= LB_FLAGS_NEGATIVE;
@@ -813,7 +810,6 @@ static void lb_update_max_loads(unsigned int ticks, void *param)
dst->rmap[ri].resource->profile, &dst->profile_id);
old = dst->rmap[ri].max_load;
- // if ( flags & LB_FLAGS_PERCENT_WITH_CPU ) { todo flags not avavilable here
/*
* In LB_FLAGS_PERCENT_WITH_CPU mode we capture the raw values and use these in each LB calculation. This
* means we do not use profile counting in the load calculation. This is suitable for