// SPDX-License-Identifier: GPL-2.0
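/*
 * Test for hists__match() and hists__link(): fake samples for two evsels
 * are fed into their hists, the two hists are then matched and linked, and
 * the resulting pair/dummy entries are validated against the sample tables
 * below.  The fake machine, threads, maps and symbols come from
 * hists_common.c.
 */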
#include "perf.h"
|
|
#include "tests.h"
|
|
#include "debug.h"
|
|
#include "symbol.h"
|
|
#include "sort.h"
|
|
#include "evsel.h"
|
|
#include "evlist.h"
|
|
#include "machine.h"
|
|
#include "thread.h"
|
|
#include "parse-events.h"
|
|
#include "hists_common.h"
|
|
#include <errno.h>
|
|
#include <linux/kernel.h>
|
|
|
|
struct sample {
	u32 pid;
	u64 ip;
	struct thread *thread;
	struct map *map;
	struct symbol *sym;
};

/* For the numbers, see hists_common.c */
static struct sample fake_common_samples[] = {
	/* perf [kernel] schedule() */
	{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_SCHEDULE, },
	/* perf [perf] main() */
	{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_MAIN, },
	/* perf [perf] cmd_record() */
	{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_CMD_RECORD, },
	/* bash [bash] xmalloc() */
	{ .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_XMALLOC, },
	/* bash [libc] malloc() */
	{ .pid = FAKE_PID_BASH, .ip = FAKE_IP_LIBC_MALLOC, },
};

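/*
 * Per-evsel samples that are not shared between the two evsels.  Note that
 * the second group repeats "bash [libc] malloc" from fake_common_samples,
 * so it collapses into an existing entry (see the "will be merged" note).
 */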
static struct sample fake_samples[][5] = {
	{
		/* perf [perf] run_command() */
		{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_RUN_COMMAND, },
		/* perf [libc] malloc() */
		{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_MALLOC, },
		/* perf [kernel] page_fault() */
		{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
		/* perf [kernel] sys_perf_event_open() */
		{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_KERNEL_SYS_PERF_EVENT_OPEN, },
		/* bash [libc] free() */
		{ .pid = FAKE_PID_BASH, .ip = FAKE_IP_LIBC_FREE, },
	},
	{
		/* perf [libc] free() */
		{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_LIBC_FREE, },
		/* bash [libc] malloc() */
		{ .pid = FAKE_PID_BASH, .ip = FAKE_IP_LIBC_MALLOC, }, /* will be merged */
		/* bash [bash] xfree() */
		{ .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_XFREE, },
		/* bash [libc] realloc() */
		{ .pid = FAKE_PID_BASH, .ip = FAKE_IP_LIBC_REALLOC, },
		/* bash [kernel] page_fault() */
		{ .pid = FAKE_PID_BASH, .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
	},
};

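/*
 * Feed the fake samples into the hists of every evsel in the list: first the
 * common samples, then the per-evsel group selected by the evsel's position
 * (i), recording the resolved thread/map/symbol for later lookup.
 */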
static int add_hist_entries(struct perf_evlist *evlist, struct machine *machine)
{
	struct perf_evsel *evsel;
	struct addr_location al;
	struct hist_entry *he;
	struct perf_sample sample = { .period = 1, .weight = 1, };
	size_t i = 0, k;

	/*
	 * Each evsel will have 10 samples: 5 common and 5 distinct.
	 * However, the second evsel also has a collapsed entry for
	 * "bash [libc] malloc", so only 9 entries will be in its tree.
	 */
	evlist__for_each_entry(evlist, evsel) {
		struct hists *hists = evsel__hists(evsel);

		for (k = 0; k < ARRAY_SIZE(fake_common_samples); k++) {
			sample.cpumode = PERF_RECORD_MISC_USER;
			sample.pid = fake_common_samples[k].pid;
			sample.tid = fake_common_samples[k].pid;
			sample.ip = fake_common_samples[k].ip;

			if (machine__resolve(machine, &al, &sample) < 0)
				goto out;

			he = hists__add_entry(hists, &al, NULL,
					      NULL, NULL, &sample, true);
			if (he == NULL) {
				addr_location__put(&al);
				goto out;
			}

			fake_common_samples[k].thread = al.thread;
			fake_common_samples[k].map = al.map;
			fake_common_samples[k].sym = al.sym;
		}

		for (k = 0; k < ARRAY_SIZE(fake_samples[i]); k++) {
			sample.pid = fake_samples[i][k].pid;
			sample.tid = fake_samples[i][k].pid;
			sample.ip = fake_samples[i][k].ip;
			if (machine__resolve(machine, &al, &sample) < 0)
				goto out;

			he = hists__add_entry(hists, &al, NULL,
					      NULL, NULL, &sample, true);
			if (he == NULL) {
				addr_location__put(&al);
				goto out;
			}

			fake_samples[i][k].thread = al.thread;
			fake_samples[i][k].map = al.map;
			fake_samples[i][k].sym = al.sym;
		}
		i++;
	}

	return 0;

out:
	pr_debug("Not enough memory for adding a hist entry\n");
	return -1;
}

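/* Return 1 if the (thread, map, symbol) triple appears in the sample array. */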
static int find_sample(struct sample *samples, size_t nr_samples,
		       struct thread *t, struct map *m, struct symbol *s)
{
	while (nr_samples--) {
		if (samples->thread == t && samples->map == m &&
		    samples->sym == s)
			return 1;
		samples++;
	}
	return 0;
}

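/*
 * After hists__match(), walk the hist entries and check that exactly the
 * entries originating from fake_common_samples have been paired.
 */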
static int __validate_match(struct hists *hists)
{
	size_t count = 0;
	struct rb_root *root;
	struct rb_node *node;

	/*
	 * Only entries from fake_common_samples should have a pair.
	 */
	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	node = rb_first(root);
	while (node) {
		struct hist_entry *he;

		he = rb_entry(node, struct hist_entry, rb_node_in);

		if (hist_entry__has_pairs(he)) {
			if (find_sample(fake_common_samples,
					ARRAY_SIZE(fake_common_samples),
					he->thread, he->ms.map, he->ms.sym)) {
				count++;
			} else {
				pr_debug("Can't find the matched entry\n");
				return -1;
			}
		}

		node = rb_next(node);
	}

	if (count != ARRAY_SIZE(fake_common_samples)) {
		pr_debug("Invalid count for matched entries: %zd of %zd\n",
			 count, ARRAY_SIZE(fake_common_samples));
		return -1;
	}

	return 0;
}

static int validate_match(struct hists *leader, struct hists *other)
{
	return __validate_match(leader) || __validate_match(other);
}

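/*
 * After hists__link(), check the pair/dummy accounting: idx 0 is the leader
 * hists (which gains dummy entries for the other evsel's unique samples),
 * idx 1 is the other hists (where every entry must have a pair and no
 * dummies may appear).
 */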
static int __validate_link(struct hists *hists, int idx)
{
	size_t count = 0;
	size_t count_pair = 0;
	size_t count_dummy = 0;
	struct rb_root *root;
	struct rb_node *node;

	/*
	 * Leader hists (idx = 0) will have dummy entries from other,
	 * and some entries will have no pair.  However, every entry
	 * in the other hists should have a (dummy) pair.
	 */
	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	node = rb_first(root);
	while (node) {
		struct hist_entry *he;

		he = rb_entry(node, struct hist_entry, rb_node_in);

		if (hist_entry__has_pairs(he)) {
			if (!find_sample(fake_common_samples,
					 ARRAY_SIZE(fake_common_samples),
					 he->thread, he->ms.map, he->ms.sym) &&
			    !find_sample(fake_samples[idx],
					 ARRAY_SIZE(fake_samples[idx]),
					 he->thread, he->ms.map, he->ms.sym)) {
				count_dummy++;
			}
			count_pair++;
		} else if (idx) {
			pr_debug("An entry from the other hists should have a pair\n");
			return -1;
		}

		count++;
		node = rb_next(node);
	}

	/*
	 * Note that we have an entry collapsed in the other (idx = 1) hists.
	 */
	if (idx == 0) {
		if (count_dummy != ARRAY_SIZE(fake_samples[1]) - 1) {
			pr_debug("Invalid count of dummy entries: %zd of %zd\n",
				 count_dummy, ARRAY_SIZE(fake_samples[1]) - 1);
			return -1;
		}
		if (count != count_pair + ARRAY_SIZE(fake_samples[0])) {
			pr_debug("Invalid count of total leader entries: %zd of %zd\n",
				 count, count_pair + ARRAY_SIZE(fake_samples[0]));
			return -1;
		}
	} else {
		if (count != count_pair) {
			pr_debug("Invalid count of total other entries: %zd of %zd\n",
				 count, count_pair);
			return -1;
		}
		if (count_dummy > 0) {
			pr_debug("Other hists should not have dummy entries: %zd\n",
				 count_dummy);
			return -1;
		}
	}

	return 0;
}

static int validate_link(struct hists *leader, struct hists *other)
{
	return __validate_link(leader, 0) || __validate_link(other, 1);
}

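/*
 * The test itself: build an evlist with two software events, set up the fake
 * machine and hist entries, then exercise hists__match() and hists__link()
 * and validate the result.
 */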
int test__hists_link(struct test *test __maybe_unused, int subtest __maybe_unused)
{
	int err = -1;
	struct hists *hists, *first_hists;
	struct machines machines;
	struct machine *machine = NULL;
	struct perf_evsel *evsel, *first;
	struct perf_evlist *evlist = perf_evlist__new();

	if (evlist == NULL)
		return -ENOMEM;

	err = parse_events(evlist, "cpu-clock", NULL);
	if (err)
		goto out;
	err = parse_events(evlist, "task-clock", NULL);
	if (err)
		goto out;

	err = TEST_FAIL;
	/* default sort order (comm,dso,sym) will be used */
	if (setup_sorting(NULL) < 0)
		goto out;

	machines__init(&machines);

	/* setup threads/dso/map/symbols also */
	machine = setup_fake_machine(&machines);
	if (!machine)
		goto out;

	if (verbose > 1)
		machine__fprintf(machine, stderr);

	/* process sample events */
	err = add_hist_entries(evlist, machine);
	if (err < 0)
		goto out;

	evlist__for_each_entry(evlist, evsel) {
		hists = evsel__hists(evsel);
		hists__collapse_resort(hists, NULL);

		if (verbose > 2)
			print_hists_in(hists);
	}

	first = perf_evlist__first(evlist);
	evsel = perf_evlist__last(evlist);

	first_hists = evsel__hists(first);
	hists = evsel__hists(evsel);

	/* match common entries */
	hists__match(first_hists, hists);
	err = validate_match(first_hists, hists);
	if (err)
		goto out;

	/* link common and/or dummy entries */
	hists__link(first_hists, hists);
	err = validate_link(first_hists, hists);
	if (err)
		goto out;

	err = 0;

out:
	/* tear down everything */
	perf_evlist__delete(evlist);
	reset_output_field();
	machines__exit(&machines);

	return err;
}