/** ==========================================================================
 * 2011 by KjellKod.cc. This is PUBLIC DOMAIN to use at your own risk and comes
 * with no warranties. This code is yours to share, use and modify with no
 * strings attached and no restrictions or obligations.
 *
 * For more information see g3log/LICENSE or refer to http://unlicense.org
 * ============================================================================*/

// through CMakeLists.txt #define of GOOGLE_GLOG_PERFORMANCE and G3LOG_PERFORMANCE
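// (Illustrative sketch, not the repo's exact wiring) one typical way to select
// a variant from CMake is e.g. add_definitions(-DG3LOG_PERFORMANCE) or
// add_definitions(-DGOOGLE_GLOG_PERFORMANCE); see CMakeLists.txt for the
// actual option names this project uses.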
#include "performance.h"
#include <algorithm>
#include <chrono>
#include <cmath>
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <iostream>
#include <map>
#include <sstream>
#include <string>
#include <thread>
#include <vector>

#if defined(G3LOG_PERFORMANCE)
const std::string title{"G3LOG"};
#elif defined(GOOGLE_GLOG_PERFORMANCE)
const std::string title{"GOOGLE__GLOG"};
#else
#error G3LOG_PERFORMANCE or GOOGLE_GLOG_PERFORMANCE was not defined
#endif

#if (defined(WIN32) || defined(_WIN32) || defined(__WIN32__))
const std::string g_path{"./"};
#else
const std::string g_path{"/tmp/"};
#endif

using namespace g3_test;

//
// OK: The code below isn't pretty but it works. Lots and lots of log entries
// to keep track of!
//
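// What main() does, in short: parse the thread count, initialize the chosen
// logger (g3log or glog), spawn the worker threads which each run
// measurePeakDuringLogWrites() and record how long every individual log call
// took, then write a summary plus a millisecond histogram of all measurements
// to files under g_path.
//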
int main(int argc, char** argv) {
   size_t number_of_threads{0};
   if (argc == 2) {
      number_of_threads = atoi(argv[1]);
   }
   if (argc != 2 || number_of_threads == 0) {
      std::cerr << "USAGE is: " << argv[0] << " number_threads" << std::endl;
      return 1;
   }
   std::ostringstream thread_count_oss;
   thread_count_oss << number_of_threads;
   const std::string g_prefix_log_name = title + "-performance-" + thread_count_oss.str() + "threads-WORST_LOG";
   const std::string g_measurement_dump = g_path + g_prefix_log_name + "_RESULT.txt";
   const std::string g_measurement_bucket_dump = g_path + g_prefix_log_name + "_RESULT_buckets.txt";
   const uint64_t us_to_ms{1000};
   const uint64_t us_to_s{1000000};

   std::ostringstream oss;
   oss << "\n\n" << title << " performance " << number_of_threads << " threads WORST (PEAK) times\n";
   oss << "Each thread running #: " << g_loop << " * " << g_iterations << " iterations of log entries" << std::endl;  // worst mean case is about 10us per log entry
   const uint64_t xtra_margin{2};
   oss << "*** It can take some time. Please wait: Approximate wait time on MY PC was: " << number_of_threads * (uint64_t)(g_iterations * 10 * xtra_margin / us_to_s) << " seconds" << std::endl;
   writeTextToFile(g_measurement_dump, oss.str(), kAppend);
   oss.str("");  // clear the stream

#if defined(G3LOG_PERFORMANCE)
   auto worker = g3::LogWorker::createLogWorker();
   auto handle = worker->addDefaultLogger(g_prefix_log_name, g_path);
   g3::initializeLogging(worker.get());
#elif defined(GOOGLE_GLOG_PERFORMANCE)
   google::InitGoogleLogging(argv[0]);
#endif
   std::thread* threads = new std::thread[number_of_threads];
   std::vector<uint64_t>* threads_result = new std::vector<uint64_t>[number_of_threads];
   // kiss: just loop, create the threads, store them, then join them.
   // This could probably be done more elegantly with lambdas.
   for (uint64_t idx = 0; idx < number_of_threads; ++idx) {
      threads_result[idx].reserve(g_iterations);
   }
   auto start_time = std::chrono::high_resolution_clock::now();
   for (uint64_t idx = 0; idx < number_of_threads; ++idx) {
      std::ostringstream count;
      count << idx + 1;
      std::string thread_name = title + "_T" + count.str();
      std::cout << "Creating thread: " << thread_name << std::endl;
      threads[idx] = std::thread(measurePeakDuringLogWrites, thread_name, std::ref(threads_result[idx]));
   }
   // wait for the threads to finish
   for (uint64_t idx = 0; idx < number_of_threads; ++idx) {
      threads[idx].join();
   }
   auto application_end_time = std::chrono::high_resolution_clock::now();
   delete[] threads;

#if defined(G3LOG_PERFORMANCE)
   worker.reset();  // will flush anything in the queue to file
#elif defined(GOOGLE_GLOG_PERFORMANCE)
   google::ShutdownGoogleLogging();
#endif
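   // worker_end_time below is sampled after the logger shutdown above, so
   // total_time_us also covers the background thread flushing the queue to
   // disk, whereas application_time_us only covers the worker threads making
   // the log calls.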
   auto worker_end_time = std::chrono::high_resolution_clock::now();
   uint64_t application_time_us = std::chrono::duration_cast<microsecond>(application_end_time - start_time).count();
   uint64_t total_time_us = std::chrono::duration_cast<microsecond>(worker_end_time - start_time).count();
   oss << "\n" << number_of_threads << " * " << g_iterations << " log entries took: [" << total_time_us / us_to_s << " s] to write to disk" << std::endl;
   oss << "[Application(" << number_of_threads << "_threads+overhead time for measurement):\t " << application_time_us / us_to_ms << " ms]" << std::endl;
   oss << "[Background thread to finish:\t\t\t\t " << total_time_us / us_to_ms << " ms]" << std::endl;
   oss << "\nAverage time per log entry:" << std::endl;
   oss << "[Application: " << application_time_us / (number_of_threads * g_iterations) << " us]" << std::endl;
   for (uint64_t idx = 0; idx < number_of_threads; ++idx) {
      std::vector<uint64_t>& t_result = threads_result[idx];
      uint64_t worstUs = (*std::max_element(t_result.begin(), t_result.end()));
      oss << "[Application t" << idx + 1 << " worst took: " << worstUs / uint64_t(1000) << " ms (" << worstUs << " us)]" << std::endl;
   }
   writeTextToFile(g_measurement_dump, oss.str(), kAppend);
   std::cout << "Result can be found at: " << g_measurement_dump << std::endl;
   // now split the result into buckets of 1ms each so that it's obvious how the peaks go
   std::vector<uint64_t> all_measurements;
   all_measurements.reserve(g_iterations * number_of_threads);
   for (uint64_t idx = 0; idx < number_of_threads; ++idx) {
      std::vector<uint64_t>& t_result = threads_result[idx];
      all_measurements.insert(all_measurements.end(), t_result.begin(), t_result.end());
   }
   delete[] threads_result;  // finally get rid of them

   std::sort(all_measurements.begin(), all_measurements.end());
   std::map<uint64_t, uint64_t> value_amounts;
   std::map<uint64_t, uint64_t> value_amounts_for_0ms_bucket;
   for (auto iter = all_measurements.begin(); iter != all_measurements.end(); ++iter) {
      uint64_t value = (*iter) / us_to_ms;  // convert to ms
      ++value_amounts[value];  // a value-initialized uint64_t in the map starts at 0
      if (0 == value) {
         ++value_amounts_for_0ms_bucket[*iter];
      }
   }
   oss.str("");
   oss << "Number of values rounded to milliseconds and put into [millisecond buckets] were dumped to file: " << g_measurement_bucket_dump << std::endl;
   if (1 == value_amounts.size()) {
      oss << "Format: bucket of us inside bucket0 for ms\nFormat: bucket_of_ms, number_of_values_in_bucket\n\n" << std::endl;
      oss << "\n";
   } else {
      oss << "Format: bucket_of_ms, number_of_values_in_bucket\n\n" << std::endl;
   }
   std::cout << oss.str() << std::endl;
   //
   // If all values fall in the 0ms bucket then show microsecond buckets instead
   //
   if (1 == value_amounts.size()) {
      oss << "\n\n***** Microsecond bucket measurement for all measurements that went inside the '0 millisecond bucket' ****\n";
      for (auto us_bucket : value_amounts_for_0ms_bucket) {
         oss << us_bucket.first << "\t" << us_bucket.second << std::endl;
      }
      oss << "\n\n***** Millisecond bucket measurement ****\n";
   }
   for (auto ms_bucket : value_amounts) {
      oss << ms_bucket.first << "\t, " << ms_bucket.second << std::endl;
   }
   writeTextToFile(g_measurement_bucket_dump, oss.str(), kAppend, false);
   return 0;
}