From 00c4a9af72ad1466416616f8c76174382c090988 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Th=C3=A9ophile=20Bastian?=
Date: Mon, 15 Jul 2019 17:49:10 +0200
Subject: [PATCH] Speedup: add new result analysis tools

---
 benching/tools/common.sh         | 18 +++++++
 benching/tools/errors.sh         | 86 ++++++++++++++++++++++++++++++++
 benching/tools/errors_new.sh     | 86 ++++++++++++++++++++++++++++++++
 benching/tools/gen_perf_stats.py |  2 +-
 benching/tools/statistics.sh     | 38 ++++++++++++++
 5 files changed, 229 insertions(+), 1 deletion(-)
 create mode 100755 benching/tools/common.sh
 create mode 100755 benching/tools/errors.sh
 create mode 100755 benching/tools/errors_new.sh
 create mode 100755 benching/tools/statistics.sh

diff --git a/benching/tools/common.sh b/benching/tools/common.sh
new file mode 100755
index 0000000..59a6b88
--- /dev/null
+++ b/benching/tools/common.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+if [ "$#" -lt 1 ] ; then
+    >&2 echo "Missing argument: directory"
+    exit 1
+fi
+
+BENCH_DIR="$(echo $1 | sed 's@/$@@g')"
+ENV_APPLY="$(readlink -f "$(dirname $0)/../../env/apply")"
+
+if ! [ -f "$ENV_APPLY" ] ; then
+    >&2 echo "Cannot find helper scripts. Abort."
+    exit 1
+fi
+
+function status_report {
+    echo -e "\e[33;1m[$BENCH_DIR]\e[0m $1"
+}
diff --git a/benching/tools/errors.sh b/benching/tools/errors.sh
new file mode 100755
index 0000000..f935c47
--- /dev/null
+++ b/benching/tools/errors.sh
@@ -0,0 +1,86 @@
+#!/bin/bash
+
+source "$(dirname $0)/common.sh"
+
+TMP_FILE=$(mktemp)
+
+function get_perf_output {
+    envtype=$1
+    source $ENV_APPLY "$envtype" "dbg"
+    LD_LIBRARY_PATH="$BENCH_DIR/eh_elfs:$LD_LIBRARY_PATH" \
+        UNW_DEBUG_LEVEL=15 \
+        perf report -i "$BENCH_DIR/perf.data" 2>$TMP_FILE >/dev/null
+    deactivate
+}
+
+function count_successes {
+    cat $TMP_FILE | tail -n 1 | sed 's/^.*, \([0-9]*\) calls.*$/\1/g'
+}
+
+function count_total_calls {
+    cat $TMP_FILE | grep -c "^ >.*step:.* returning"
+}
+
+function count_errors {
+    cat $TMP_FILE | grep -c "^ >.*step:.* returning -"
+}
+
+function count_eh_fallbacks {
+    cat $TMP_FILE | grep -c "step:.* falling back"
+}
+
+function count_vanilla_fallbacks {
+    cat $TMP_FILE | grep -c "step:.* frame-chain"
+}
+
+function count_fallbacks_to_dwarf {
+    cat $TMP_FILE | grep -c "step:.* fallback with"
+}
+
+function count_fallbacks_failed {
+    cat $TMP_FILE | grep -c "step:.* dwarf_step also failed"
+}
+
+function report {
+    flavour="$1"
+
+    status_report "$flavour issues distribution"
+
+    successes=$(count_successes)
+    failures=$(count_errors)
+    total=$(count_total_calls)
+
+    if [ "$flavour" = "eh_elf" ]; then
+        fallbacks=$(count_eh_fallbacks)
+        fallbacks_to_dwarf=$(count_fallbacks_to_dwarf)
+        fallbacks_failed=$(count_fallbacks_failed)
+        fallbacks_to_heuristics="$(( $fallbacks \
+            - $fallbacks_to_dwarf \
+            - $fallbacks_failed))"
+        echo -e "* success:\t\t\t\t$successes"
+        echo -e "* fallback to DWARF:\t\t\t$fallbacks_to_dwarf"
+        echo -e "* fallback to libunwind heuristics:\t$fallbacks_to_heuristics"
+        computed_sum=$(( $successes + $fallbacks - $fallbacks_failed + $failures ))
+    else
+        fallbacks=$(count_vanilla_fallbacks)
+        successes=$(( $successes - $fallbacks ))
+        echo -e "* success:\t\t\t\t$successes"
+        echo -e "* fallback to libunwind heuristics:\t$fallbacks"
+        computed_sum=$(( $successes + $fallbacks + $failures ))
+    fi
+    echo -e "* fail to unwind:\t\t\t$failures"
+    echo -e "* total:\t\t\t\t$total"
+    if [ "$computed_sum" -ne "$total" ] ; then
+        echo "-- WARNING: missing cases (computed sum $computed_sum != $total) --"
+    fi
+}
+
+# eh_elf stats
+get_perf_output "eh_elf"
+report "eh_elf"
+
+# Vanilla stats
+get_perf_output "vanilla"
+report "vanilla"
+
+rm "$TMP_FILE"
diff --git a/benching/tools/errors_new.sh b/benching/tools/errors_new.sh
new file mode 100755
index 0000000..f935c47
--- /dev/null
+++ b/benching/tools/errors_new.sh
@@ -0,0 +1,86 @@
+#!/bin/bash
+
+source "$(dirname $0)/common.sh"
+
+TMP_FILE=$(mktemp)
+
+function get_perf_output {
+    envtype=$1
+    source $ENV_APPLY "$envtype" "dbg"
+    LD_LIBRARY_PATH="$BENCH_DIR/eh_elfs:$LD_LIBRARY_PATH" \
+        UNW_DEBUG_LEVEL=15 \
+        perf report -i "$BENCH_DIR/perf.data" 2>$TMP_FILE >/dev/null
+    deactivate
+}
+
+function count_successes {
+    cat $TMP_FILE | tail -n 1 | sed 's/^.*, \([0-9]*\) calls.*$/\1/g'
+}
+
+function count_total_calls {
+    cat $TMP_FILE | grep -c "^ >.*step:.* returning"
+}
+
+function count_errors {
+    cat $TMP_FILE | grep -c "^ >.*step:.* returning -"
+}
+
+function count_eh_fallbacks {
+    cat $TMP_FILE | grep -c "step:.* falling back"
+}
+
+function count_vanilla_fallbacks {
+    cat $TMP_FILE | grep -c "step:.* frame-chain"
+}
+
+function count_fallbacks_to_dwarf {
+    cat $TMP_FILE | grep -c "step:.* fallback with"
+}
+
+function count_fallbacks_failed {
+    cat $TMP_FILE | grep -c "step:.* dwarf_step also failed"
+}
+
+function report {
+    flavour="$1"
+
+    status_report "$flavour issues distribution"
+
+    successes=$(count_successes)
+    failures=$(count_errors)
+    total=$(count_total_calls)
+
+    if [ "$flavour" = "eh_elf" ]; then
+        fallbacks=$(count_eh_fallbacks)
+        fallbacks_to_dwarf=$(count_fallbacks_to_dwarf)
+        fallbacks_failed=$(count_fallbacks_failed)
+        fallbacks_to_heuristics="$(( $fallbacks \
+            - $fallbacks_to_dwarf \
+            - $fallbacks_failed))"
+        echo -e "* success:\t\t\t\t$successes"
+        echo -e "* fallback to DWARF:\t\t\t$fallbacks_to_dwarf"
+        echo -e "* fallback to libunwind heuristics:\t$fallbacks_to_heuristics"
+        computed_sum=$(( $successes + $fallbacks - $fallbacks_failed + $failures ))
+    else
+        fallbacks=$(count_vanilla_fallbacks)
+        successes=$(( $successes - $fallbacks ))
+        echo -e "* success:\t\t\t\t$successes"
+        echo -e "* fallback to libunwind heuristics:\t$fallbacks"
+        computed_sum=$(( $successes + $fallbacks + $failures ))
+    fi
+    echo -e "* fail to unwind:\t\t\t$failures"
+    echo -e "* total:\t\t\t\t$total"
+    if [ "$computed_sum" -ne "$total" ] ; then
+        echo "-- WARNING: missing cases (computed sum $computed_sum != $total) --"
+    fi
+}
+
+# eh_elf stats
+get_perf_output "eh_elf"
+report "eh_elf"
+
+# Vanilla stats
+get_perf_output "vanilla"
+report "vanilla"
+
+rm "$TMP_FILE"
diff --git a/benching/tools/gen_perf_stats.py b/benching/tools/gen_perf_stats.py
index ef8140e..34f42da 100644
--- a/benching/tools/gen_perf_stats.py
+++ b/benching/tools/gen_perf_stats.py
@@ -73,7 +73,7 @@ def get_ratios(avgs):
             avg_of("vanilla"), avg_of("vanilla-nocache")
         )
     else:
-        return avg_of("vanilla-nocache")
+        return avg_of("vanilla")
 
 
 print(
diff --git a/benching/tools/statistics.sh b/benching/tools/statistics.sh
new file mode 100755
index 0000000..67fbdc0
--- /dev/null
+++ b/benching/tools/statistics.sh
@@ -0,0 +1,38 @@
+#!/bin/bash
+
+source "$(dirname $0)/common.sh"
+
+TEMP_DIR="$(mktemp -d)"
+NB_RUNS=10
+
+function collect_perf_time_data {
+    envtype=$1
+    source $ENV_APPLY "$envtype" "release"
+    LD_LIBRARY_PATH="$BENCH_DIR/eh_elfs:$LD_LIBRARY_PATH" \
+        perf report -i "$BENCH_DIR/perf.data" 2>&1 >/dev/null \
+        | tail -n 1 \
+        | python "$(dirname $0)/to_report_fmt.py" \
+        | sed 's/^.* & .* & \([0-9]*\) & .*$/\1/g'
+    deactivate
+}
+
+function collect_perf_time_data_runs {
+    envtype=$1
+    outfile=$2
+    status_report "Collecting $envtype data over $NB_RUNS runs"
+    rm -f "$outfile"
+    for run in $(seq 1 $NB_RUNS); do
+        collect_perf_time_data "$envtype" >> "$outfile"
+    done
+}
+
+eh_elf_data="$TEMP_DIR/eh_elf_times"
+vanilla_data="$TEMP_DIR/vanilla_times"
+
+collect_perf_time_data_runs "eh_elf" "$eh_elf_data"
+collect_perf_time_data_runs "vanilla" "$vanilla_data"
+
+status_report "benchmark statistics"
+python "$(dirname "$0")/gen_perf_stats.py" "$TEMP_DIR"
+
+rm -rf "$TEMP_DIR"