libbpf: Use .struct_ops.link section to indicate a struct_ops with a link.

Flag that a struct_ops is meant to back a bpf_link by placing it in the
".struct_ops.link" section.  Once flagged, the created struct_ops map
can be used to create a bpf_link, or to update a bpf_link that is
already backed by another struct_ops.

Signed-off-by: Kui-Feng Lee <kuifeng@meta.com>
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/r/20230323032405.3735486-8-kuifeng@meta.com
Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
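
For context, the sketch below shows how a consumer of this change might use the new section; it is not part of the commit. The BPF object places its struct_ops variable in SEC(".struct_ops.link") instead of SEC(".struct_ops"), which makes libbpf create the struct_ops map with BPF_F_LINK, and user space can then attach it as a bpf_link. The bpf_dummy_ops struct, program name, and skeleton fields are hypothetical placeholders, and bpf_link__update_map() is assumed to be the update API added elsewhere in this series.

/* BPF side (separate .bpf.c compilation unit; ops struct and callback
 * are illustrative only).
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char _license[] SEC("license") = "GPL";

SEC("struct_ops/test_1")
int BPF_PROG(test_1, struct bpf_dummy_ops_state *state)
{
	return 0;
}

/* Placing the variable in ".struct_ops.link" rather than ".struct_ops"
 * is what flags the resulting struct_ops map with BPF_F_LINK.
 */
SEC(".struct_ops.link")
struct bpf_dummy_ops dummy_ops = {
	.test_1 = (void *)test_1,
};

/* User-space side (sketch; 'struct dummy_skel' stands in for the
 * generated skeleton of the object above).
 */
#include <errno.h>
#include <bpf/libbpf.h>

static int attach_and_swap(struct dummy_skel *skel)
{
	struct bpf_link *link;
	int err;

	/* Attaching a BPF_F_LINK struct_ops map yields a bpf_link. */
	link = bpf_map__attach_struct_ops(skel->maps.dummy_ops);
	if (!link)
		return -errno;

	/* Replace the struct_ops backing the existing link;
	 * bpf_link__update_map() is assumed from the rest of this series.
	 */
	err = bpf_link__update_map(link, skel->maps.dummy_ops_v2);
	bpf_link__destroy(link);
	return err;
}

Reusing the ELF section name as the flag keeps the BPF-side source declarative: no new libbpf API call is needed to request a link-backed struct_ops.
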
Kui-Feng Lee authored and Martin KaFai Lau committed Mar 23, 2023
1 parent 912dd4b commit 809a69d
Showing 1 changed file with 44 additions and 16 deletions.
60 changes: 44 additions & 16 deletions tools/lib/bpf/libbpf.c
@@ -468,6 +468,7 @@ struct bpf_struct_ops {
#define KCONFIG_SEC ".kconfig"
#define KSYMS_SEC ".ksyms"
#define STRUCT_OPS_SEC ".struct_ops"
#define STRUCT_OPS_LINK_SEC ".struct_ops.link"

enum libbpf_map_type {
LIBBPF_MAP_UNSPEC,
@@ -597,6 +598,7 @@ struct elf_state {
Elf64_Ehdr *ehdr;
Elf_Data *symbols;
Elf_Data *st_ops_data;
Elf_Data *st_ops_link_data;
size_t shstrndx; /* section index for section name strings */
size_t strtabidx;
struct elf_sec_desc *secs;
@@ -606,6 +608,7 @@ struct elf_state {
int text_shndx;
int symbols_shndx;
int st_ops_shndx;
int st_ops_link_shndx;
};

struct usdt_manager;
@@ -1119,7 +1122,8 @@ static int bpf_object__init_kern_struct_ops_maps(struct bpf_object *obj)
return 0;
}

static int bpf_object__init_struct_ops_maps(struct bpf_object *obj)
static int init_struct_ops_maps(struct bpf_object *obj, const char *sec_name,
int shndx, Elf_Data *data, __u32 map_flags)
{
const struct btf_type *type, *datasec;
const struct btf_var_secinfo *vsi;
@@ -1130,15 +1134,15 @@ static int bpf_object__init_struct_ops_maps(struct bpf_object *obj)
struct bpf_map *map;
__u32 i;

if (obj->efile.st_ops_shndx == -1)
if (shndx == -1)
return 0;

btf = obj->btf;
datasec_id = btf__find_by_name_kind(btf, STRUCT_OPS_SEC,
datasec_id = btf__find_by_name_kind(btf, sec_name,
BTF_KIND_DATASEC);
if (datasec_id < 0) {
pr_warn("struct_ops init: DATASEC %s not found\n",
STRUCT_OPS_SEC);
sec_name);
return -EINVAL;
}

@@ -1151,7 +1155,7 @@ static int bpf_object__init_struct_ops_maps(struct bpf_object *obj)
type_id = btf__resolve_type(obj->btf, vsi->type);
if (type_id < 0) {
pr_warn("struct_ops init: Cannot resolve var type_id %u in DATASEC %s\n",
vsi->type, STRUCT_OPS_SEC);
vsi->type, sec_name);
return -EINVAL;
}

@@ -1170,7 +1174,7 @@ static int bpf_object__init_struct_ops_maps(struct bpf_object *obj)
if (IS_ERR(map))
return PTR_ERR(map);

map->sec_idx = obj->efile.st_ops_shndx;
map->sec_idx = shndx;
map->sec_offset = vsi->offset;
map->name = strdup(var_name);
if (!map->name)
@@ -1180,6 +1184,7 @@ static int bpf_object__init_struct_ops_maps(struct bpf_object *obj)
map->def.key_size = sizeof(int);
map->def.value_size = type->size;
map->def.max_entries = 1;
map->def.map_flags = map_flags;

map->st_ops = calloc(1, sizeof(*map->st_ops));
if (!map->st_ops)
@@ -1192,14 +1197,14 @@ static int bpf_object__init_struct_ops_maps(struct bpf_object *obj)
if (!st_ops->data || !st_ops->progs || !st_ops->kern_func_off)
return -ENOMEM;

if (vsi->offset + type->size > obj->efile.st_ops_data->d_size) {
if (vsi->offset + type->size > data->d_size) {
pr_warn("struct_ops init: var %s is beyond the end of DATASEC %s\n",
var_name, STRUCT_OPS_SEC);
var_name, sec_name);
return -EINVAL;
}

memcpy(st_ops->data,
obj->efile.st_ops_data->d_buf + vsi->offset,
data->d_buf + vsi->offset,
type->size);
st_ops->tname = tname;
st_ops->type = type;
@@ -1212,6 +1217,19 @@ static int bpf_object__init_struct_ops_maps(struct bpf_object *obj)
return 0;
}

static int bpf_object_init_struct_ops(struct bpf_object *obj)
{
int err;

err = init_struct_ops_maps(obj, STRUCT_OPS_SEC, obj->efile.st_ops_shndx,
obj->efile.st_ops_data, 0);
err = err ?: init_struct_ops_maps(obj, STRUCT_OPS_LINK_SEC,
obj->efile.st_ops_link_shndx,
obj->efile.st_ops_link_data,
BPF_F_LINK);
return err;
}

static struct bpf_object *bpf_object__new(const char *path,
const void *obj_buf,
size_t obj_buf_sz,
@@ -1248,6 +1266,7 @@ static struct bpf_object *bpf_object__new(const char *path,
obj->efile.obj_buf_sz = obj_buf_sz;
obj->efile.btf_maps_shndx = -1;
obj->efile.st_ops_shndx = -1;
obj->efile.st_ops_link_shndx = -1;
obj->kconfig_map_idx = -1;

obj->kern_version = get_kernel_version();
@@ -1265,6 +1284,7 @@ static void bpf_object__elf_finish(struct bpf_object *obj)
obj->efile.elf = NULL;
obj->efile.symbols = NULL;
obj->efile.st_ops_data = NULL;
obj->efile.st_ops_link_data = NULL;

zfree(&obj->efile.secs);
obj->efile.sec_cnt = 0;
@@ -2619,7 +2639,7 @@ static int bpf_object__init_maps(struct bpf_object *obj,
err = bpf_object__init_user_btf_maps(obj, strict, pin_root_path);
err = err ?: bpf_object__init_global_data_maps(obj);
err = err ?: bpf_object__init_kconfig_map(obj);
err = err ?: bpf_object__init_struct_ops_maps(obj);
err = err ?: bpf_object_init_struct_ops(obj);

return err;
}
@@ -2753,12 +2773,13 @@ static bool libbpf_needs_btf(const struct bpf_object *obj)
{
return obj->efile.btf_maps_shndx >= 0 ||
obj->efile.st_ops_shndx >= 0 ||
obj->efile.st_ops_link_shndx >= 0 ||
obj->nr_extern > 0;
}

static bool kernel_needs_btf(const struct bpf_object *obj)
{
return obj->efile.st_ops_shndx >= 0;
return obj->efile.st_ops_shndx >= 0 || obj->efile.st_ops_link_shndx >= 0;
}

static int bpf_object__init_btf(struct bpf_object *obj,
@@ -3451,6 +3472,9 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
} else if (strcmp(name, STRUCT_OPS_SEC) == 0) {
obj->efile.st_ops_data = data;
obj->efile.st_ops_shndx = idx;
} else if (strcmp(name, STRUCT_OPS_LINK_SEC) == 0) {
obj->efile.st_ops_link_data = data;
obj->efile.st_ops_link_shndx = idx;
} else {
pr_info("elf: skipping unrecognized data section(%d) %s\n",
idx, name);
@@ -3465,6 +3489,7 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
/* Only do relo for section with exec instructions */
if (!section_have_execinstr(obj, targ_sec_idx) &&
strcmp(name, ".rel" STRUCT_OPS_SEC) &&
strcmp(name, ".rel" STRUCT_OPS_LINK_SEC) &&
strcmp(name, ".rel" MAPS_ELF_SEC)) {
pr_info("elf: skipping relo section(%d) %s for section(%d) %s\n",
idx, name, targ_sec_idx,
@@ -6611,7 +6636,7 @@ static int bpf_object__collect_relos(struct bpf_object *obj)
return -LIBBPF_ERRNO__INTERNAL;
}

if (idx == obj->efile.st_ops_shndx)
if (idx == obj->efile.st_ops_shndx || idx == obj->efile.st_ops_link_shndx)
err = bpf_object__collect_st_ops_relos(obj, shdr, data);
else if (idx == obj->efile.btf_maps_shndx)
err = bpf_object__collect_map_relos(obj, shdr, data);
@@ -8853,6 +8878,7 @@ const char *libbpf_bpf_prog_type_str(enum bpf_prog_type t)
}

static struct bpf_map *find_struct_ops_map_by_offset(struct bpf_object *obj,
int sec_idx,
size_t offset)
{
struct bpf_map *map;
@@ -8862,7 +8888,8 @@ static struct bpf_map *find_struct_ops_map_by_offset(struct bpf_object *obj,
map = &obj->maps[i];
if (!bpf_map__is_struct_ops(map))
continue;
if (map->sec_offset <= offset &&
if (map->sec_idx == sec_idx &&
map->sec_offset <= offset &&
offset - map->sec_offset < map->def.value_size)
return map;
}
@@ -8904,7 +8931,7 @@ static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
}

name = elf_sym_str(obj, sym->st_name) ?: "<?>";
map = find_struct_ops_map_by_offset(obj, rel->r_offset);
map = find_struct_ops_map_by_offset(obj, shdr->sh_info, rel->r_offset);
if (!map) {
pr_warn("struct_ops reloc: cannot find map at rel->r_offset %zu\n",
(size_t)rel->r_offset);
@@ -8971,8 +8998,9 @@ static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
}

/* struct_ops BPF prog can be re-used between multiple
* .struct_ops as long as it's the same struct_ops struct
* definition and the same function pointer field
* .struct_ops & .struct_ops.link as long as it's the
* same struct_ops struct definition and the same
* function pointer field
*/
if (prog->attach_btf_id != st_ops->type_id ||
prog->expected_attach_type != member_idx) {
