Add scripts to ease performance graph generation

This commit is contained in:
Francesco Montorsi 2019-08-04 01:48:03 +02:00
parent 21a0815f2f
commit 877c32dc3d
3 changed files with 186 additions and 4 deletions

133
perf/generate_csv.sh Executable file
View File

@@ -0,0 +1,133 @@
#!/bin/bash
#
# This script assumes that 2 machines are used to generate performance results.
# First machine is assumed to be the one where this script runs.
# Second machine is the "REMOTE_IP" machine; we assume to have passwordless SSH access.
#
set -u
# configurable values:
# IP of the second machine; can be overridden from the environment (defaults to localhost).
REMOTE_IP=${REMOTE_IP:-127.0.0.1}
# Directory on the remote machine containing the compiled ZMQ perf utilities.
REMOTE_PATH=/home/francesco/work/libzmq-orig/perf #/root/libzmq/perf
# Endpoint used by the local/remote perf utility pair (cleared later for INPROC tests).
TEST_ENDPOINT=tcp://127.0.0.1:1234
# Space-separated list of message sizes (bytes) to sweep over.
MESSAGE_SIZE_LIST="1 16 64 128 512 1024 4096 16384 65536"
OUTPUT_FILE_PREFIX="results.txt"
OUTPUT_FILE_CSV_PREFIX="results.csv"
# utility functions:
# Verify that passwordless SSH access to $REMOTE_IP works and that
# $REMOTE_PATH exists on the remote machine; exits with status 2 otherwise.
# Globals:   REMOTE_IP (read), REMOTE_PATH (read)
# Outputs:   status messages; diagnostics go to stderr
function verify_ssh()
{
    if ! ssh "$REMOTE_IP" "ls /" >/dev/null; then
        echo "Cannot connect via SSH passwordless to the REMOTE_IP $REMOTE_IP. Please fix the problem and retry." >&2
        exit 2
    fi
    if ! ssh "$REMOTE_IP" "ls $REMOTE_PATH" >/dev/null; then
        echo "The folder $REMOTE_PATH is not valid. Please fix the problem and retry." >&2
        exit 2
    fi
    echo "SSH connection to the remote $REMOTE_IP is working fine."
}
# Launch one ZMQ perf utility on the remote machine, in the background.
# Globals:   REMOTE_IP, REMOTE_PATH, TEST_ENDPOINT (read)
# Arguments: $1 - message size in bytes
#            $2 - name of the remote perf utility to run
#            $3 - number of messages to transfer
function run_remote_perf_util()
{
    local MESSAGE_SIZE_BYTES="$1"
    local REMOTE_PERF_UTIL="$2"
    local NUM_MESSAGES="$3"
    echo "Launching on $REMOTE_IP the utility [$REMOTE_PERF_UTIL] for messages ${MESSAGE_SIZE_BYTES}B long"
    # NOTE: the ssh command is backgrounded, so its exit status cannot be
    # checked here ($? after 'cmd &' is always 0 — the original check was
    # dead code). Failures surface when the caller runs 'wait'.
    ssh "$REMOTE_IP" "$REMOTE_PATH/$REMOTE_PERF_UTIL $TEST_ENDPOINT $MESSAGE_SIZE_BYTES $NUM_MESSAGES" &
}
# Run a local perf utility (optionally paired with a remote one) once per
# message size in $MESSAGE_SIZE_LIST, accumulating results in a human-readable
# TXT file and a machine-friendly CSV file for later plotting.
# Globals:   REMOTE_IP, TEST_ENDPOINT, MESSAGE_SIZE_LIST (read)
# Arguments: $1 - local perf utility (must be the one generating the TXT output)
#            $2 - remote perf utility, or "" when no remote side is needed
#            $3 - prefix for the .txt/.csv output files
#            $4 - number of messages per run
#            $5 - header line written at the top of the CSV file
function generate_output_file()
{
    local LOCAL_PERF_UTIL="$1" # must be the utility generating the TXT output
    local REMOTE_PERF_UTIL="$2"
    local OUTPUT_FILE_PREFIX="$3"
    local NUM_MESSAGES="$4"
    local CSV_HEADER_LINE="$5"
    # derived values:
    local OUTPUT_FILE_TXT="${OUTPUT_FILE_PREFIX}.txt" # useful just for human-friendly debugging
    local OUTPUT_FILE_CSV="${OUTPUT_FILE_PREFIX}.csv" # actually used to later produce graphs
    local MESSAGE_SIZE_ARRAY=($MESSAGE_SIZE_LIST)     # intentional word-splitting of the size list
    echo "Killing still-running ZMQ performance utils, if any"
    pkill "$LOCAL_PERF_UTIL" # in case it's running from a previous test
    if [[ -n "$REMOTE_PERF_UTIL" ]]; then
        ssh "$REMOTE_IP" "pkill $REMOTE_PERF_UTIL" # in case it's running from a previous test
    fi
    echo "Resetting output file $OUTPUT_FILE_TXT and $OUTPUT_FILE_CSV"
    > "$OUTPUT_FILE_TXT"
    echo "$CSV_HEADER_LINE" > "$OUTPUT_FILE_CSV"
    local MESSAGE_SIZE
    for MESSAGE_SIZE in "${MESSAGE_SIZE_ARRAY[@]}"; do
        echo "Launching locally the utility [$LOCAL_PERF_UTIL] for messages ${MESSAGE_SIZE}B long"
        # NOTE: $TEST_ENDPOINT is intentionally left unquoted so that an empty
        # endpoint (INPROC tests) produces no argument at all:
        ./"$LOCAL_PERF_UTIL" $TEST_ENDPOINT "$MESSAGE_SIZE" "$NUM_MESSAGES" >"${OUTPUT_FILE_TXT}-${MESSAGE_SIZE}" &
        if [[ -n "$REMOTE_PERF_UTIL" ]]; then
            run_remote_perf_util "$MESSAGE_SIZE" "$REMOTE_PERF_UTIL" "$NUM_MESSAGES"
        fi
        wait # for both the local and the remote utility to complete this run
        # produce the complete human-readable output file:
        cat "${OUTPUT_FILE_TXT}-${MESSAGE_SIZE}" >>"$OUTPUT_FILE_TXT"
        # produce a machine-friendly file for later plotting: extract all the
        # numbers from the perf-util output and join them with commas
        local DATALINE
        DATALINE="$(grep -o '[0-9.]*' "${OUTPUT_FILE_TXT}-${MESSAGE_SIZE}" | tr '\n' ',')"
        echo "${DATALINE%,}" >>"$OUTPUT_FILE_CSV" # strip the trailing comma
        rm -f "${OUTPUT_FILE_TXT}-${MESSAGE_SIZE}"
    done
    echo "All measurements completed and saved into $OUTPUT_FILE_TXT and $OUTPUT_FILE_CSV"
}
# main:
# First make sure the remote machine is reachable before starting any run.
verify_ssh
# Column layout shared by all the throughput CSV files below.
THROUGHPUT_CSV_HEADER_LINE="# message_size,message_count,PPS[msg/s],throughput[Mb/s]"
# PUSH/PULL TCP throughput CSV file:
generate_output_file "local_thr" "remote_thr" \
"pushpull_tcp_thr_results" \
"100000" \
"$THROUGHPUT_CSV_HEADER_LINE"
# PUSH/PULL INPROC throughput CSV file:
# NOTE: in this case there is no remote utility to run and no ENDPOINT to provide:
TEST_ENDPOINT=""
generate_output_file "inproc_thr" "" \
"pushpull_inproc_thr_results" \
"1000000" \
"$THROUGHPUT_CSV_HEADER_LINE"
# PUB/SUB proxy INPROC throughput CSV file:
# NOTE: in this case there is no remote utility to run and no ENDPOINT to provide:
TEST_ENDPOINT=""
generate_output_file "proxy_thr" "" \
"pubsubproxy_inproc_thr_results" \
"1000000" \
"$THROUGHPUT_CSV_HEADER_LINE"
# REQ/REP TCP latency CSV file:
# NOTE: in this case it's the remote_lat utility that prints out the data, so we swap the local/remote arguments to the bash func:
generate_output_file "remote_lat" "local_lat" \
"reqrep_tcp_lat_results" \
"10000" \
"# message_size,message_count,latency[us]"

49
perf/generate_graphs.py Executable file
View File

@@ -0,0 +1,49 @@
#!/usr/bin/python
#
# This script assumes that a CSV file produced by "generate_csv.sh" is provided as input
#
# configurable values:
# CSV files written by generate_csv.sh; one per socket/transport combination.
INPUT_FILE_PUSHPULL_TCP_THROUGHPUT="pushpull_tcp_thr_results.csv"
INPUT_FILE_PUSHPULL_INPROC_THROUGHPUT="pushpull_inproc_thr_results.csv"
INPUT_FILE_PUBSUBPROXY_INPROC_THROUGHPUT="pubsubproxy_inproc_thr_results.csv"
INPUT_FILE_REQREP_TCP_LATENCY="reqrep_tcp_lat_results.csv"
# dependencies
import matplotlib.pyplot as plt
import numpy as np
# functions
def plot_throughput(csv_filename, title):
    """Plot PPS and throughput vs. message size from a throughput CSV file.

    The CSV must have 4 comma-separated columns (as written by
    generate_csv.sh): message_size, message_count, PPS, throughput[Mb/s].
    A PNG named after the CSV file is saved next to it, and the plot is
    also shown interactively.
    """
    message_size_bytes, message_count, pps, mbps = np.loadtxt(csv_filename, delimiter=',', unpack=True)
    # Start from a fresh figure so successive calls do not draw on top of
    # each other.
    plt.figure()
    plt.semilogx(message_size_bytes, pps / 1e6, label='PPS [Mmsg/s]', marker='x')
    plt.semilogx(message_size_bytes, mbps / 1e3, label='Throughput [Mb/s]', marker='o')
    plt.xlabel('Message size [B]')
    plt.title(title)
    plt.legend()
    # savefig() must come BEFORE show(): show() blocks until the window is
    # closed and the figure is destroyed, so saving afterwards would write
    # an empty image.
    plt.savefig(csv_filename.replace('.csv', '.png'))
    plt.show()
def plot_latency(csv_filename, title):
    """Plot latency vs. message size from a latency CSV file.

    The CSV must have 3 comma-separated columns (as written by
    generate_csv.sh): message_size, message_count, latency[us].
    A PNG named after the CSV file is saved next to it, and the plot is
    also shown interactively.
    """
    message_size_bytes, message_count, lat = np.loadtxt(csv_filename, delimiter=',', unpack=True)
    # Start from a fresh figure so successive calls do not draw on top of
    # each other.
    plt.figure()
    plt.semilogx(message_size_bytes, lat, label='Latency [us]', marker='o')
    plt.xlabel('Message size [B]')
    plt.title(title)
    plt.legend()
    # savefig() must come BEFORE show(): show() blocks until the window is
    # closed and the figure is destroyed, so saving afterwards would write
    # an empty image.
    plt.savefig(csv_filename.replace('.csv', '.png'))
    plt.show()
# main
# Produce one graph (PNG + interactive window) per CSV file generated by
# generate_csv.sh.
plot_throughput(INPUT_FILE_PUSHPULL_TCP_THROUGHPUT, 'ZeroMQ PUSH/PULL socket throughput, TCP transport')
plot_throughput(INPUT_FILE_PUSHPULL_INPROC_THROUGHPUT, 'ZeroMQ PUSH/PULL socket throughput, INPROC transport')
plot_throughput(INPUT_FILE_PUBSUBPROXY_INPROC_THROUGHPUT, 'ZeroMQ PUB/SUB PROXY socket throughput, INPROC transport')
plot_latency(INPUT_FILE_REQREP_TCP_LATENCY, 'ZeroMQ REQ/REP socket latency, TCP transport')

View File

@@ -176,7 +176,7 @@ static void publisher_thread_main (void *pvoid)
}
zmq_close (pubsocket);
printf ("publisher thread ended\n");
//printf ("publisher thread ended\n");
}
static void subscriber_thread_main (void *pvoid)
@@ -215,7 +215,7 @@ static void subscriber_thread_main (void *pvoid)
// Cleanup
zmq_close (subsocket);
printf ("subscriber thread ended\n");
//printf ("subscriber thread ended\n");
}
static void proxy_thread_main (void *pvoid)
@@ -284,7 +284,7 @@ static void proxy_thread_main (void *pvoid)
zmq_close (frontend_xsub);
zmq_close (backend_xpub);
zmq_close (control_rep);
printf ("proxy thread ended\n");
//printf ("proxy thread ended\n");
}
void terminate_proxy (const proxy_hwm_cfg_t *cfg)
@@ -314,7 +314,7 @@ void terminate_proxy (const proxy_hwm_cfg_t *cfg)
int main (int argc, char *argv[])
{
if (argc != 3) {
printf ("usage: inproc_thr <message-size> <message-count>\n");
printf ("usage: proxy_thr <message-size> <message-count>\n");
return 1;
}