diff --git a/conf/vanilla/autoload_configs/switch.conf.xml b/conf/vanilla/autoload_configs/switch.conf.xml
index 6c8250f6cb..d508ad1844 100644
--- a/conf/vanilla/autoload_configs/switch.conf.xml
+++ b/conf/vanilla/autoload_configs/switch.conf.xml
@@ -34,6 +34,8 @@
as different hostnames.
-->
+    <!-- Number of recent CPU idle samples averaged when reporting idle CPU (default 30) -->
+    <!-- <param name="cpu-idle-smoothing-depth" value="30"/> -->
diff --git a/src/include/private/switch_core_pvt.h b/src/include/private/switch_core_pvt.h
index e344489917..d7933b4880 100644
--- a/src/include/private/switch_core_pvt.h
+++ b/src/include/private/switch_core_pvt.h
@@ -259,6 +259,7 @@ struct switch_runtime {
uint32_t debug_level;
uint32_t runlevel;
uint32_t tipping_point;
+ uint32_t cpu_idle_smoothing_depth;
uint32_t microseconds_per_tick;
int32_t timer_affinity;
switch_profile_timer_t *profile_timer;
diff --git a/src/switch_core.c b/src/switch_core.c
index 912efc3644..160af2838b 100644
--- a/src/switch_core.c
+++ b/src/switch_core.c
@@ -1976,6 +1976,8 @@ static void switch_load_core_config(const char *file)
switch_core_min_idle_cpu(atof(val));
} else if (!strcasecmp(var, "tipping-point") && !zstr(val)) {
runtime.tipping_point = atoi(val);
+ } else if (!strcasecmp(var, "cpu-idle-smoothing-depth") && !zstr(val)) {
+ runtime.cpu_idle_smoothing_depth = atoi(val);
} else if (!strcasecmp(var, "events-use-dispatch") && !zstr(val)) {
runtime.events_use_dispatch = switch_true(val);
} else if (!strcasecmp(var, "initial-event-threads") && !zstr(val)) {
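The new cpu-idle-smoothing-depth setting is parsed with atoi() straight into an unsigned runtime field, so a non-numeric value becomes 0 (and the built-in default of 30 is used later), while a negative value wraps to a huge unsigned count that then sizes the per-timer ring allocation. A minimal sketch of a clamped parse, assuming a hypothetical helper name and an arbitrary upper bound of 3600 samples:

#include <stdlib.h>
#include <stdint.h>

/* Hypothetical helper: clamp the configured smoothing depth to a sane range.
 * The name and the 1..3600 bounds are illustrative assumptions, not part of the patch. */
static uint32_t parse_smoothing_depth(const char *val, uint32_t def)
{
	long depth = strtol(val, NULL, 10);

	if (depth < 1 || depth > 3600) {
		return def;	/* reject negative, zero, garbage, and absurdly large values */
	}

	return (uint32_t)depth;
}

With such a guard in place, the assignment above could read runtime.cpu_idle_smoothing_depth = parse_smoothing_depth(val, 30); instead of the bare atoi().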
diff --git a/src/switch_profile.c b/src/switch_profile.c
index 0f64a5b5f7..fe61243c4b 100644
--- a/src/switch_profile.c
+++ b/src/switch_profile.c
@@ -32,7 +32,7 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "switch.h"
-#include "switch_profile.h"
+#include "private/switch_core_pvt.h"
#ifdef __linux__
#include <stdio.h>
@@ -51,6 +51,9 @@ struct profile_timer
/* last calculated percentage of idle time */
double last_percentage_of_idle_time;
+ double *percentage_of_idle_time_ring;
+ unsigned int last_idle_time_index;
+ unsigned int cpu_idle_smoothing_depth;
#ifdef __linux__
/* the cpu feature gets disabled on errors */
@@ -207,8 +210,21 @@ SWITCH_DECLARE(switch_bool_t) switch_get_system_idle_time(switch_profile_timer_t
halftime = totaltime / 2UL;
- p->last_percentage_of_idle_time = ((100 * idletime + halftime) / totaltime);
+	/* advance the ring index, wrapping back to the start of the buffer */
+	p->last_idle_time_index += 1;
+	if ( p->last_idle_time_index >= p->cpu_idle_smoothing_depth ) {
+		p->last_idle_time_index = 0;
+	}
+	/* store the newest idle sample in the ring */
+	p->percentage_of_idle_time_ring[p->last_idle_time_index] = ((100 * idletime + halftime) / totaltime);
+
+	/* report the average of the last cpu_idle_smoothing_depth samples */
+	p->last_percentage_of_idle_time = 0;
+	for ( unsigned int x = 0; x < p->cpu_idle_smoothing_depth; x++ ) {
+		p->last_percentage_of_idle_time += p->percentage_of_idle_time_ring[x];
+	}
+	p->last_percentage_of_idle_time /= p->cpu_idle_smoothing_depth;
+
*idle_percentage = p->last_percentage_of_idle_time;
p->last_user_time = user;
p->last_nice_time = nice;
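Conceptually, the Linux branch above replaces the instantaneous idle percentage with a moving average over the last cpu_idle_smoothing_depth readings, held in a fixed-size ring buffer: each new sample overwrites the oldest slot and the reported value is the mean of the whole ring. A standalone sketch of the same technique, using hypothetical names (idle_ring, push_idle_sample) that are not part of the patch:

#include <stdlib.h>

/* Hypothetical standalone ring-buffer average, mirroring the patch's smoothing. */
typedef struct {
	double *samples;        /* ring of the most recent idle percentages */
	unsigned int depth;     /* number of samples to average over */
	unsigned int index;     /* slot that holds the newest sample */
} idle_ring;

static double push_idle_sample(idle_ring *r, double sample)
{
	double sum = 0.0;
	unsigned int x;

	r->index = (r->index + 1) % r->depth;   /* overwrite the oldest slot */
	r->samples[r->index] = sample;

	for (x = 0; x < r->depth; x++) {        /* average the whole ring */
		sum += r->samples[x];
	}

	return sum / r->depth;
}

Recomputing the sum over the whole ring costs O(depth) per reading; at the default depth of 30 that is negligible, which is presumably why the patch does not bother keeping a running sum.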
@@ -246,7 +262,18 @@ SWITCH_DECLARE(switch_bool_t) switch_get_system_idle_time(switch_profile_timer_t
__int64 i64Kernel = i64KernelTime - p->i64LastKernelTime;
__int64 i64Idle = i64IdleTime - p->i64LastIdleTime;
__int64 i64System = i64User + i64Kernel;
- *idle_percentage = 100.0 * i64Idle / i64System;
+
+	/* advance the ring index, wrapping back to the start of the buffer */
+	p->last_idle_time_index += 1;
+	if ( p->last_idle_time_index >= p->cpu_idle_smoothing_depth ) {
+		p->last_idle_time_index = 0;
+	}
+	/* store the newest idle sample in the ring */
+	p->percentage_of_idle_time_ring[p->last_idle_time_index] = 100.0 * i64Idle / i64System;
+
+	/* report the average of the last cpu_idle_smoothing_depth samples */
+	*idle_percentage = 0;
+	for ( unsigned int x = 0; x < p->cpu_idle_smoothing_depth; x++ ) {
+		*idle_percentage += p->percentage_of_idle_time_ring[x];
+	}
+	*idle_percentage /= p->cpu_idle_smoothing_depth;
} else {
*idle_percentage = 100.0;
p->valid_last_times = 1;
@@ -274,7 +301,21 @@ SWITCH_DECLARE(switch_bool_t) switch_get_system_idle_time(switch_profile_timer_t
SWITCH_DECLARE(switch_profile_timer_t *)switch_new_profile_timer(void)
{
- return calloc(1, sizeof(switch_profile_timer_t));
+ switch_profile_timer_t *p = calloc(1, sizeof(switch_profile_timer_t));
+
+	/* use the configured smoothing depth, falling back to 30 samples when unset */
+	if ( runtime.cpu_idle_smoothing_depth > 0 ) {
+		p->cpu_idle_smoothing_depth = runtime.cpu_idle_smoothing_depth;
+	} else {
+		p->cpu_idle_smoothing_depth = 30;
+	}
+
+	p->percentage_of_idle_time_ring = calloc(p->cpu_idle_smoothing_depth, sizeof(double));
+
+	/* start fully idle so the smoothed value does not report false load at startup */
+	for ( unsigned int x = 0; x < p->cpu_idle_smoothing_depth; x++ ) {
+		p->percentage_of_idle_time_ring[x] = 100.0;
+	}
+
+ return p;
}
SWITCH_DECLARE(void) switch_delete_profile_timer(switch_profile_timer_t **p)
@@ -284,6 +325,7 @@ SWITCH_DECLARE(void) switch_delete_profile_timer(switch_profile_timer_t **p)
#ifdef __linux__
close((*p)->procfd);
#endif
+ free((*p)->percentage_of_idle_time_ring);
free(*p);
*p = NULL;
}
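Taken together, the changes leave the profile-timer API unchanged; only the value reported by switch_get_system_idle_time() is now a smoothed average rather than the last raw sample. A sketch of how a caller might exercise the lifecycle, where the polling loop and the one-second interval are illustrative assumptions rather than the core's actual scheduling:

#include <switch.h>

/* Hypothetical caller, sketching the timer lifecycle around the patched code. */
static void sample_idle_for_a_while(void)
{
	switch_profile_timer_t *timer = switch_new_profile_timer();
	double idle = 100.0;
	int i;

	for (i = 0; i < 10; i++) {
		if (switch_get_system_idle_time(timer, &idle)) {
			/* idle is now the ring-buffer average, not the last raw reading */
			switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_DEBUG, "smoothed idle: %f%%\n", idle);
		}
		switch_sleep(1000000);	/* illustrative one-second poll interval */
	}

	switch_delete_profile_timer(&timer);
}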