Compare commits
5 commits
22bfb62bf3
...
b9c6f748ce
Author | SHA1 | Date | |
---|---|---|---|
Théophile Bastian | b9c6f748ce | ||
Théophile Bastian | 4846775529 | ||
Théophile Bastian | 2e449a9822 | ||
Théophile Bastian | 00c4a9af72 | ||
Théophile Bastian | 2561d3ed49 |
8
benching/python3.7/test.py
Normal file
8
benching/python3.7/test.py
Normal file
|
@ -0,0 +1,8 @@
|
||||||
|
def slow_fibo(n):
    """Naive exponential-time Fibonacci (f(0) = f(1) = 1).

    Intentionally unoptimized: this is the CPU-bound workload used by the
    benchmarks, so do not memoize it.
    """
    if n <= 1:
        return 1
    return slow_fibo(n - 2) + slow_fibo(n - 1)


if __name__ == "__main__":
    # Burn CPU long enough for perf to collect meaningful samples.
    slow_fibo(35)
|
18
benching/tools/common.sh
Executable file
18
benching/tools/common.sh
Executable file
|
@ -0,0 +1,18 @@
|
||||||
|
#!/bin/bash
# Shared helpers for the benchmarking scripts.  Meant to be sourced with the
# benchmark directory as "$1"; defines BENCH_DIR, ENV_APPLY and
# status_report().  Exits the caller when arguments are missing.

if [ "$#" -lt 1 ] ; then
    >&2 echo "Missing argument: directory"
    exit 1
fi

# Strip a single trailing slash.  Parameter expansion replaces the previous
# `echo $1 | sed 's@/$@@g'` pipeline, which word-split and glob-expanded the
# unquoted "$1".
BENCH_DIR="${1%/}"
# Resolve the env/apply helper relative to the calling script's location.
ENV_APPLY="$(readlink -f "$(dirname "$0")/../../env/apply")"

if ! [ -f "$ENV_APPLY" ] ; then
    >&2 echo "Cannot find helper scripts. Abort."
    exit 1
fi

# Print "$1" prefixed with the benchmark directory in bold yellow.
function status_report {
    echo -e "\e[33;1m[$BENCH_DIR]\e[0m $1"
}
|
101
benching/tools/errors.sh
Executable file
101
benching/tools/errors.sh
Executable file
|
@ -0,0 +1,101 @@
|
||||||
|
#!/bin/bash
# Break down libunwind's debug output for `perf report` into success /
# fallback / error counts, for both the eh_elf and the vanilla unwinder.
# Usage: errors.sh <bench_dir>
# Env:   EH_ELFS_NAME  name of the eh_elfs directory inside <bench_dir>
#                      (defaults to "eh_elfs")

source "$(dirname "$0")/common.sh"

TMP_FILE=$(mktemp)
if [ -z "$EH_ELFS_NAME" ]; then
    EH_ELFS_NAME="eh_elfs"
fi

# Run `perf report` for the given flavour ("eh_elf"/"vanilla") with libunwind
# debug output at full verbosity, captured into $TMP_FILE.
function get_perf_output {
    envtype=$1
    source "$ENV_APPLY" "$envtype" "dbg"
    LD_LIBRARY_PATH="$BENCH_DIR/$EH_ELFS_NAME:$LD_LIBRARY_PATH" \
        UNW_DEBUG_LEVEL=15 \
        perf report -i "$BENCH_DIR/perf.data" 2>"$TMP_FILE" >/dev/null
    deactivate
}

# Extract the call count from the final "…, N calls" summary line.
function count_successes {
    tail -n 1 "$TMP_FILE" | sed 's/^.*, \([0-9]*\) calls.*$/\1/g'
}

function count_total_calls {
    grep -c "^ >.*step:.* returning" "$TMP_FILE"
}

function count_errors {
    grep -c "^ >.*step:.* returning -" "$TMP_FILE"
}

function count_eh_fallbacks {
    grep -c "step:.* falling back" "$TMP_FILE"
}

function count_vanilla_fallbacks {
    grep -c "step:.* frame-chain" "$TMP_FILE"
}

function count_fallbacks_to_dwarf {
    grep -c "step:.* fallback with" "$TMP_FILE"
}

function count_fallbacks_failed {
    grep -c "step:.* dwarf_step also failed" "$TMP_FILE"
}

# Count sequences where a fallback to DWARF was later followed by a failing
# unw_step.  line_patterns.py prints a "Complete" line per full sequence
# match; the leading ~ marks a negated pattern for that script.
function count_fail_after_fallback_to_dwarf {
    "$(dirname "$0")/line_patterns.py" \
        "fallback with" \
        "step:.* unw_step called" \
        ~"step:.* unw_step called" \
        "step:.* returning -" \
        < "$TMP_FILE" \
        | grep Complete -c
}

# Print the issue distribution for one flavour, cross-checking that the
# individual counters add up to the total number of unwind calls.
function report {
    flavour="$1"

    status_report "$flavour issues distribution"

    successes=$(count_successes)
    failures=$(count_errors)
    total=$(count_total_calls)

    if [ "$flavour" = "eh_elf" ]; then
        fallbacks=$(count_eh_fallbacks)
        fallbacks_to_dwarf=$(count_fallbacks_to_dwarf)
        fallbacks_to_dwarf_failed_after=$(count_fail_after_fallback_to_dwarf)
        fallbacks_failed=$(count_fallbacks_failed)
        fallbacks_to_heuristics="$(( $fallbacks \
            - $fallbacks_to_dwarf \
            - $fallbacks_failed))"
        echo -e "* success:\t\t\t\t$successes"
        echo -e "* fallback to DWARF:\t\t\t$fallbacks_to_dwarf"
        echo -e "* …of which failed at next step:\t$fallbacks_to_dwarf_failed_after"
        echo -e "* fallback to libunwind heuristics:\t$fallbacks_to_heuristics"
        computed_sum=$(( $successes + $fallbacks - $fallbacks_failed + $failures ))
    else
        fallbacks=$(count_vanilla_fallbacks)
        successes=$(( $successes - $fallbacks ))
        echo -e "* success:\t\t\t\t$successes"
        echo -e "* fallback to libunwind heuristics:\t$fallbacks"
        computed_sum=$(( $successes + $fallbacks + $failures ))
    fi
    echo -e "* fail to unwind:\t\t\t$failures"
    echo -e "* total:\t\t\t\t$total"
    if [ "$computed_sum" -ne "$total" ] ; then
        echo "-- WARNING: missing cases (computed sum $computed_sum != $total) --"
    fi
}

# eh_elf stats
get_perf_output "eh_elf"
report "eh_elf"

# Vanilla stats
get_perf_output "vanilla"
report "vanilla"

rm "$TMP_FILE"
|
86
benching/tools/errors_new.sh
Executable file
86
benching/tools/errors_new.sh
Executable file
|
@ -0,0 +1,86 @@
|
||||||
|
#!/bin/bash
# Same analysis as errors.sh, minus the "failed after fallback to DWARF"
# sequence counting.
# Usage: errors_new.sh <bench_dir>
# Env:   EH_ELFS_NAME  name of the eh_elfs directory inside <bench_dir>
#                      (defaults to "eh_elfs", matching errors.sh)

source "$(dirname "$0")/common.sh"

TMP_FILE=$(mktemp)
# Honour the same override as errors.sh; the default keeps the previous
# hard-coded "eh_elfs" behaviour.
if [ -z "$EH_ELFS_NAME" ]; then
    EH_ELFS_NAME="eh_elfs"
fi

# Run `perf report` for the given flavour with libunwind debug output at
# full verbosity, captured into $TMP_FILE.
function get_perf_output {
    envtype=$1
    source "$ENV_APPLY" "$envtype" "dbg"
    LD_LIBRARY_PATH="$BENCH_DIR/$EH_ELFS_NAME:$LD_LIBRARY_PATH" \
        UNW_DEBUG_LEVEL=15 \
        perf report -i "$BENCH_DIR/perf.data" 2>"$TMP_FILE" >/dev/null
    deactivate
}

# Extract the call count from the final "…, N calls" summary line.
function count_successes {
    tail -n 1 "$TMP_FILE" | sed 's/^.*, \([0-9]*\) calls.*$/\1/g'
}

function count_total_calls {
    grep -c "^ >.*step:.* returning" "$TMP_FILE"
}

function count_errors {
    grep -c "^ >.*step:.* returning -" "$TMP_FILE"
}

function count_eh_fallbacks {
    grep -c "step:.* falling back" "$TMP_FILE"
}

function count_vanilla_fallbacks {
    grep -c "step:.* frame-chain" "$TMP_FILE"
}

function count_fallbacks_to_dwarf {
    grep -c "step:.* fallback with" "$TMP_FILE"
}

function count_fallbacks_failed {
    grep -c "step:.* dwarf_step also failed" "$TMP_FILE"
}

# Print the issue distribution for one flavour, cross-checking that the
# individual counters add up to the total number of unwind calls.
function report {
    flavour="$1"

    status_report "$flavour issues distribution"

    successes=$(count_successes)
    failures=$(count_errors)
    total=$(count_total_calls)

    if [ "$flavour" = "eh_elf" ]; then
        fallbacks=$(count_eh_fallbacks)
        fallbacks_to_dwarf=$(count_fallbacks_to_dwarf)
        fallbacks_failed=$(count_fallbacks_failed)
        fallbacks_to_heuristics="$(( $fallbacks \
            - $fallbacks_to_dwarf \
            - $fallbacks_failed))"
        echo -e "* success:\t\t\t\t$successes"
        echo -e "* fallback to DWARF:\t\t\t$fallbacks_to_dwarf"
        echo -e "* fallback to libunwind heuristics:\t$fallbacks_to_heuristics"
        computed_sum=$(( $successes + $fallbacks - $fallbacks_failed + $failures ))
    else
        fallbacks=$(count_vanilla_fallbacks)
        successes=$(( $successes - $fallbacks ))
        echo -e "* success:\t\t\t\t$successes"
        echo -e "* fallback to libunwind heuristics:\t$fallbacks"
        computed_sum=$(( $successes + $fallbacks + $failures ))
    fi
    echo -e "* fail to unwind:\t\t\t$failures"
    echo -e "* total:\t\t\t\t$total"
    if [ "$computed_sum" -ne "$total" ] ; then
        echo "-- WARNING: missing cases (computed sum $computed_sum != $total) --"
    fi
}

# eh_elf stats
get_perf_output "eh_elf"
report "eh_elf"

# Vanilla stats
get_perf_output "vanilla"
report "vanilla"

rm "$TMP_FILE"
|
27
benching/tools/gen_evals.sh
Executable file
27
benching/tools/gen_evals.sh
Executable file
|
@ -0,0 +1,27 @@
|
||||||
|
#!/bin/bash
# Collect NB_ITER per-frame timing samples for each flavour into
# "$OUTPUT/<flavour>_times" (one value per line).
# Usage: EH_ELFS=<eh_elfs_dir> gen_evals.sh <output_dir>

# Validate arguments/environment before using them.
if [ "$#" -lt 1 ] ; then
    >&2 echo "Missing argument: output directory."
    exit 1
fi

if [ -z "$EH_ELFS" ]; then
    >&2 echo "Missing environment: EH_ELFS. Aborting."
    exit 1
fi

OUTPUT="$1"
NB_ITER=10

mkdir -p "$OUTPUT"

for flavour in 'eh_elf' 'vanilla' 'vanilla-nocache'; do
    >&2 echo "$flavour..."
    source "$(dirname "$0")/../../env/apply" "$flavour" release
    for iter in $(seq 1 $NB_ITER); do
        >&2 echo -e "\t$iter..."
        # to_report_fmt.py emits "calls & total & avg & ??"; the sed keeps
        # only the third (average time) column.
        LD_LIBRARY_PATH="$EH_ELFS:$LD_LIBRARY_PATH" \
            perf report 2>&1 >/dev/null | tail -n 1 \
            | python "$(dirname "$0")/to_report_fmt.py" \
            | sed 's/^.* & .* & \([0-9]*\) & .*$/\1/g'
    done > "$OUTPUT/${flavour}_times"
    deactivate
done
|
|
@ -3,42 +3,59 @@
|
||||||
""" Generates performance statistics for the eh_elf vs vanilla libunwind unwinding,
|
""" Generates performance statistics for the eh_elf vs vanilla libunwind unwinding,
|
||||||
based on time series generated beforehand
|
based on time series generated beforehand
|
||||||
|
|
||||||
First run
|
Intended to be run from `statistics.sh`
|
||||||
```bash
|
|
||||||
for i in $(seq 1 100); do
|
|
||||||
perf report 2>&1 >/dev/null | tail -n 1 \
|
|
||||||
| python ../hackbench/to_report_fmt.py \
|
|
||||||
| sed 's/^.* & .* & \([0-9]*\) & .*$/\1/g'
|
|
||||||
done > $SOME_PLACE/$FLAVOUR_times
|
|
||||||
```
|
|
||||||
|
|
||||||
for each flavour (eh_elf, vanilla)
|
|
||||||
|
|
||||||
Then run this script, with `$SOME_PLACE` as argument.
|
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
from collections import namedtuple
|
||||||
import numpy as np
|
import numpy as np
|
||||||
import sys
|
import sys
|
||||||
import os
|
import os
|
||||||
|
|
||||||
|
|
||||||
|
# One parsed line of a *_times file.
Datapoint = namedtuple("Datapoint", ["nb_frames", "total_time", "avg_time"])


def read_series(path):
    """Yield one Datapoint per whitespace-separated line of the file at `path`."""
    with open(path, "r") as handle:
        for raw_line in handle:
            frames, total, avg = (int(field) for field in raw_line.split())
            yield Datapoint(frames, total, avg)
|
||||||
|
|
||||||
|
|
||||||
FLAVOURS = ["eh_elf", "vanilla"]
|
FLAVOURS = ["eh_elf", "vanilla"]
|
||||||
|
WITH_NOCACHE = False
|
||||||
|
|
||||||
|
if "WITH_NOCACHE" in os.environ:
|
||||||
|
WITH_NOCACHE = True
|
||||||
|
FLAVOURS.append("vanilla-nocache")
|
||||||
|
|
||||||
path_format = os.path.join(sys.argv[1], "{}_times")
|
path_format = os.path.join(sys.argv[1], "{}_times")
|
||||||
times = {}
|
datapoints = {}
|
||||||
|
avg_times = {}
|
||||||
|
total_times = {}
|
||||||
|
avgs_total = {}
|
||||||
avgs = {}
|
avgs = {}
|
||||||
std_deviations = {}
|
std_deviations = {}
|
||||||
|
unwound_frames = {}
|
||||||
|
|
||||||
for flv in FLAVOURS:
|
for flv in FLAVOURS:
|
||||||
times[flv] = list(read_series(path_format.format(flv)))
|
datapoints[flv] = list(read_series(path_format.format(flv)))
|
||||||
avgs[flv] = sum(times[flv]) / len(times[flv])
|
avg_times[flv] = list(map(lambda x: x.avg_time, datapoints[flv]))
|
||||||
std_deviations[flv] = np.sqrt(np.var(times[flv]))
|
total_times[flv] = list(map(lambda x: x.total_time, datapoints[flv]))
|
||||||
|
avgs[flv] = sum(avg_times[flv]) / len(avg_times[flv])
|
||||||
|
avgs_total[flv] = sum(total_times[flv]) / len(total_times[flv])
|
||||||
|
std_deviations[flv] = np.sqrt(np.var(avg_times[flv]))
|
||||||
|
|
||||||
|
cur_unwound_frames = list(map(lambda x: x.nb_frames, datapoints[flv]))
|
||||||
|
unwound_frames[flv] = cur_unwound_frames[0]
|
||||||
|
for run_id, unw_frames in enumerate(cur_unwound_frames[1:]):
|
||||||
|
if unw_frames != unwound_frames[flv]:
|
||||||
|
print(
|
||||||
|
"{}, run {}: unwound {} frames, reference unwound {}".format(
|
||||||
|
flv, run_id + 1, unw_frames, unwound_frames[flv]
|
||||||
|
),
|
||||||
|
file=sys.stderr,
|
||||||
|
)
|
||||||
|
|
||||||
avg_ratio = avgs["vanilla"] / avgs["eh_elf"]
|
avg_ratio = avgs["vanilla"] / avgs["eh_elf"]
|
||||||
ratio_uncertainty = (
|
ratio_uncertainty = (
|
||||||
|
@ -51,22 +68,39 @@ ratio_uncertainty = (
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
def format_flv(flv_dict, formatter, alterator=None):
    """Render one "* <flavour>: <value>" line per flavour in FLAVOURS.

    `formatter` is a format string applied to each value; when `alterator`
    is given, each value is passed through it first.
    """
    lines = []
    for flavour in FLAVOURS:
        value = flv_dict[flavour]
        if alterator:
            value = alterator(value)
        lines.append("* {}: {}\n".format(flavour, formatter.format(value)))
    return "".join(lines)
|
||||||
|
|
||||||
|
|
||||||
|
def get_ratios(avgs):
    """Return the slowdown ratio(s) of vanilla relative to eh_elf.

    With WITH_NOCACHE set, returns a string showing both the cached and the
    uncached vanilla ratios; otherwise returns the single cached ratio.
    """

    def avg_of(flavour):
        return avgs[flavour] / avgs["eh_elf"]

    if not WITH_NOCACHE:
        return avg_of("vanilla")
    return "\n\tcached: {}\n\tuncached: {}".format(
        avg_of("vanilla"), avg_of("vanilla-nocache")
    )
|
||||||
|
|
||||||
|
|
||||||
print(
|
print(
|
||||||
"Average time:\n{}\n"
|
"Unwound frames:\n{}\n"
|
||||||
|
"Average whole unwinding time (one run):\n{}\n"
|
||||||
|
"Average time to unwind one frame:\n{}\n"
|
||||||
"Standard deviation:\n{}\n"
|
"Standard deviation:\n{}\n"
|
||||||
"Average ratio: {}\n"
|
"Average ratio: {}\n"
|
||||||
"Ratio uncertainty: {}".format(
|
"Ratio uncertainty: {}".format(
|
||||||
|
format_flv(unwound_frames, "{}"),
|
||||||
|
format_flv(avgs_total, "{} μs", alterator=lambda x: x // 1000),
|
||||||
format_flv(avgs, "{} ns"),
|
format_flv(avgs, "{} ns"),
|
||||||
format_flv(std_deviations, "{}"),
|
format_flv(std_deviations, "{}"),
|
||||||
avg_ratio,
|
get_ratios(avgs),
|
||||||
ratio_uncertainty,
|
ratio_uncertainty,
|
||||||
)
|
)
|
||||||
)
|
)
|
||||||
|
|
69
benching/tools/line_patterns.py
Executable file
69
benching/tools/line_patterns.py
Executable file
|
@ -0,0 +1,69 @@
|
||||||
|
#!/usr/bin/env python3
|
||||||
|
|
||||||
|
import sys
|
||||||
|
import re
|
||||||
|
|
||||||
|
|
||||||
|
class Match:
    """A compiled regex pattern plus a negation flag.

    `matches` ignores `negate`; the flag is interpreted by the consumer
    (the Matcher state machine).
    """

    def __init__(self, re_str, negate=False):
        self.re = re.compile(re_str)
        self.negate = negate

    def matches(self, line):
        """Return True when the pattern occurs anywhere in `line`."""
        return bool(self.re.search(line))
|
||||||
|
|
||||||
|
|
||||||
|
class Matcher:
    """Count occurrences of an ordered sequence of line patterns in a stream.

    `match_objs` is a list of Match objects: positive entries must be seen
    in order, while a negated entry (negate=True) acts as a guard that
    resets the partial match when it fires before the next positive match.
    Each completed sequence increments `matches`.

    NOTE(review): the `print` calls below are not mere debug output —
    errors.sh greps this script's stdout for "Complete" to count full
    matches, so they must not be removed.
    """

    def __init__(self, match_objs):
        self.match_objs = match_objs
        self.match_pos = 0  # index of the next pattern expected to match
        self.matches = 0  # number of completed full-sequence matches

        if not self.match_objs:
            raise Exception("No match expressions provided")
        if self.match_objs[-1].negate:
            raise Exception("The last match object must be a positive expression")

    def feed(self, line):
        """Advance the matching state machine with one input line."""
        # Scan from the current position, skipping over negative guards that
        # do not fire, until the next positive pattern decides the outcome.
        for cur_pos, exp in enumerate(self.match_objs[self.match_pos :]):
            cur_pos = cur_pos + self.match_pos  # rebase slice index to absolute
            if not exp.negate:  # Stops the for here, whether matching or not
                if exp.matches(line):
                    self.match_pos = cur_pos + 1
                    print(
                        "Passing positive {}, advance to {}".format(
                            cur_pos, self.match_pos
                        )
                    )
                    if self.match_pos >= len(self.match_objs):
                        # Whole sequence observed: count it and start over.
                        print("> Complete match, reset.")
                        self.matches += 1
                        self.match_pos = 0
                return
            else:
                if exp.matches(line):
                    # A negative guard fired: abort the partial match.
                    print("Failing negative [{}] {}, reset".format(exp.negate, cur_pos))
                    old_match_pos = self.match_pos
                    self.match_pos = 0
                    if old_match_pos != 0:
                        # The line that broke the partial match may itself
                        # begin a new sequence: re-run it from position 0.
                        print("> Refeed: ", end="")
                        self.feed(line)
                    return
|
||||||
|
|
||||||
|
|
||||||
|
def get_args(args):
    """Turn CLI pattern strings into Match objects.

    A leading '~' marks the pattern as negated and is stripped from it.
    """
    match_objs = []
    for pattern in args:
        if pattern[0] == "~":
            match_objs.append(Match(pattern[1:], True))
        else:
            match_objs.append(Match(pattern, False))
    return match_objs
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
    # Build the state machine from argv patterns, feed every stdin line
    # through it, and print the number of complete sequence matches.
    matcher = Matcher(get_args(sys.argv[1:]))
    for input_line in sys.stdin:
        matcher.feed(input_line)
    print(matcher.matches)
|
43
benching/tools/statistics.sh
Executable file
43
benching/tools/statistics.sh
Executable file
|
@ -0,0 +1,43 @@
|
||||||
|
#!/bin/bash
# Gather NB_RUNS "<nb_frames> <total_time> <avg_time>" samples per flavour
# into a temporary directory, then summarise them with gen_perf_stats.py.
# Usage: statistics.sh <bench_dir>
# Env:   WITH_NOCACHE  when set, also benchmark the vanilla-nocache flavour
#        EH_ELFS_NAME  eh_elfs directory name (defaults to "eh_elfs",
#                      matching errors.sh)

source "$(dirname "$0")/common.sh"

TEMP_DIR="$(mktemp -d)"
NB_RUNS=10
# Same override as errors.sh; the default keeps the previous hard-coded
# "eh_elfs" behaviour.
if [ -z "$EH_ELFS_NAME" ]; then
    EH_ELFS_NAME="eh_elfs"
fi

# One timed run: extract "<nb_frames> <total_time> <avg_time>" from the
# to_report_fmt.py summary of `perf report`.
function collect_perf_time_data {
    envtype=$1
    source "$ENV_APPLY" "$envtype" "release"
    LD_LIBRARY_PATH="$BENCH_DIR/$EH_ELFS_NAME:$LD_LIBRARY_PATH" \
        perf report -i "$BENCH_DIR/perf.data" 2>&1 >/dev/null \
        | tail -n 1 \
        | python "$(dirname "$0")/to_report_fmt.py" \
        | sed 's/^\([0-9]*\) & \([0-9]*\) & \([0-9]*\) & .*$/\1 \2 \3/g'
    deactivate
}

# Append NB_RUNS samples for the given flavour to $2 (recreated from scratch).
function collect_perf_time_data_runs {
    envtype=$1
    outfile=$2
    status_report "Collecting $envtype data over $NB_RUNS runs"
    rm -f "$outfile"
    for run in $(seq 1 $NB_RUNS); do
        collect_perf_time_data "$envtype" >> "$outfile"
    done
}

eh_elf_data="$TEMP_DIR/eh_elf_times"
vanilla_data="$TEMP_DIR/vanilla_times"

collect_perf_time_data_runs "eh_elf" "$eh_elf_data"
collect_perf_time_data_runs "vanilla" "$vanilla_data"

if [ -n "$WITH_NOCACHE" ]; then
    vanilla_nocache_data="$TEMP_DIR/vanilla-nocache_times"
    collect_perf_time_data_runs "vanilla-nocache" "$vanilla_nocache_data"
fi

status_report "benchmark statistics"
python "$(dirname "$0")/gen_perf_stats.py" "$TEMP_DIR"

rm -rf "$TEMP_DIR"
|
21
benching/tools/to_report_fmt.py
Executable file
21
benching/tools/to_report_fmt.py
Executable file
|
@ -0,0 +1,21 @@
|
||||||
|
#!/usr/bin/env python3
"""Convert a libunwind "Total unwind time" summary line (read on stdin) into
the "calls & total_ns & avg_ns & ??" report format used by the benching
scripts.  Exits with status 1 on a badly formatted line.

This script is only ever invoked as `python to_report_fmt.py` by the shell
tools, so wrapping the logic in main() does not change observable behavior.
"""

import re
import sys

# Example input: "Total unwind time: 12 s 345 ns, 678 calls"
_LINE_RE = re.compile(r'Total unwind time: ([0-9]*) s ([0-9]*) ns, ([0-9]*) calls')


def parse_unwind_line(line):
    """Parse one summary line.

    Returns (calls, total_time_ns, avg_time_ns) on success, None when the
    line does not match the expected format.
    """
    match = _LINE_RE.match(line.strip())
    if not match:
        return None

    sec = int(match.group(1))
    ns = int(match.group(2))
    calls = int(match.group(3))

    time = sec * 10**9 + ns
    # Guard against a zero-call report, which previously crashed with
    # ZeroDivisionError.
    avg = time // calls if calls else 0
    return calls, time, avg


def main():
    parsed = parse_unwind_line(input())
    if parsed is None:
        print('Badly formatted line', file=sys.stderr)
        sys.exit(1)

    calls, time, avg = parsed
    print("{} & {} & {} & ??".format(calls, time, avg))


if __name__ == "__main__":
    main()
|
Loading…
Reference in a new issue