|
| 1 | +/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com |
| 2 | + * |
| 3 | + * This program is free software; you can redistribute it and/or |
| 4 | + * modify it under the terms of version 2 of the GNU General Public |
| 5 | + * License as published by the Free Software Foundation. |
| 6 | + * |
| 7 | + * This program is distributed in the hope that it will be useful, but |
| 8 | + * WITHOUT ANY WARRANTY; without even the implied warranty of |
| 9 | + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
| 10 | + * General Public License for more details. |
| 11 | + */ |
| 12 | +#include <linux/bpf.h> |
| 13 | +#include <linux/err.h> |
| 14 | +#include <linux/vmalloc.h> |
| 15 | +#include <linux/slab.h> |
| 16 | +#include <linux/mm.h> |
| 17 | + |
/* In-memory layout of an BPF_MAP_TYPE_ARRAY map: the generic map header
 * followed inline by all pre-allocated elements.
 */
struct bpf_array {
	struct bpf_map map;	/* generic map header; container_of() is used to
				 * recover struct bpf_array from a bpf_map *
				 */
	u32 elem_size;		/* value_size rounded up to 8 bytes */
	/* flat storage for map.max_entries elements of elem_size bytes each;
	 * NOTE(review): zero-length array — a C99 flexible array member
	 * (value[]) would be the modern spelling, same layout
	 */
	char value[0] __aligned(8);
};
| 23 | + |
| 24 | +/* Called from syscall */ |
| 25 | +static struct bpf_map *array_map_alloc(union bpf_attr *attr) |
| 26 | +{ |
| 27 | + struct bpf_array *array; |
| 28 | + u32 elem_size; |
| 29 | + |
| 30 | + /* check sanity of attributes */ |
| 31 | + if (attr->max_entries == 0 || attr->key_size != 4 || |
| 32 | + attr->value_size == 0) |
| 33 | + return ERR_PTR(-EINVAL); |
| 34 | + |
| 35 | + elem_size = round_up(attr->value_size, 8); |
| 36 | + |
| 37 | + /* allocate all map elements and zero-initialize them */ |
| 38 | + array = kzalloc(sizeof(*array) + attr->max_entries * elem_size, |
| 39 | + GFP_USER | __GFP_NOWARN); |
| 40 | + if (!array) { |
| 41 | + array = vzalloc(array->map.max_entries * array->elem_size); |
| 42 | + if (!array) |
| 43 | + return ERR_PTR(-ENOMEM); |
| 44 | + } |
| 45 | + |
| 46 | + /* copy mandatory map attributes */ |
| 47 | + array->map.key_size = attr->key_size; |
| 48 | + array->map.value_size = attr->value_size; |
| 49 | + array->map.max_entries = attr->max_entries; |
| 50 | + |
| 51 | + array->elem_size = elem_size; |
| 52 | + |
| 53 | + return &array->map; |
| 54 | + |
| 55 | +} |
| 56 | + |
| 57 | +/* Called from syscall or from eBPF program */ |
| 58 | +static void *array_map_lookup_elem(struct bpf_map *map, void *key) |
| 59 | +{ |
| 60 | + struct bpf_array *array = container_of(map, struct bpf_array, map); |
| 61 | + u32 index = *(u32 *)key; |
| 62 | + |
| 63 | + if (index >= array->map.max_entries) |
| 64 | + return NULL; |
| 65 | + |
| 66 | + return array->value + array->elem_size * index; |
| 67 | +} |
| 68 | + |
| 69 | +/* Called from syscall */ |
| 70 | +static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key) |
| 71 | +{ |
| 72 | + struct bpf_array *array = container_of(map, struct bpf_array, map); |
| 73 | + u32 index = *(u32 *)key; |
| 74 | + u32 *next = (u32 *)next_key; |
| 75 | + |
| 76 | + if (index >= array->map.max_entries) { |
| 77 | + *next = 0; |
| 78 | + return 0; |
| 79 | + } |
| 80 | + |
| 81 | + if (index == array->map.max_entries - 1) |
| 82 | + return -ENOENT; |
| 83 | + |
| 84 | + *next = index + 1; |
| 85 | + return 0; |
| 86 | +} |
| 87 | + |
| 88 | +/* Called from syscall or from eBPF program */ |
| 89 | +static int array_map_update_elem(struct bpf_map *map, void *key, void *value, |
| 90 | + u64 map_flags) |
| 91 | +{ |
| 92 | + struct bpf_array *array = container_of(map, struct bpf_array, map); |
| 93 | + u32 index = *(u32 *)key; |
| 94 | + |
| 95 | + if (map_flags > BPF_EXIST) |
| 96 | + /* unknown flags */ |
| 97 | + return -EINVAL; |
| 98 | + |
| 99 | + if (index >= array->map.max_entries) |
| 100 | + /* all elements were pre-allocated, cannot insert a new one */ |
| 101 | + return -E2BIG; |
| 102 | + |
| 103 | + if (map_flags == BPF_NOEXIST) |
| 104 | + /* all elemenets already exist */ |
| 105 | + return -EEXIST; |
| 106 | + |
| 107 | + memcpy(array->value + array->elem_size * index, value, array->elem_size); |
| 108 | + return 0; |
| 109 | +} |
| 110 | + |
| 111 | +/* Called from syscall or from eBPF program */ |
| 112 | +static int array_map_delete_elem(struct bpf_map *map, void *key) |
| 113 | +{ |
| 114 | + return -EINVAL; |
| 115 | +} |
| 116 | + |
| 117 | +/* Called when map->refcnt goes to zero, either from workqueue or from syscall */ |
| 118 | +static void array_map_free(struct bpf_map *map) |
| 119 | +{ |
| 120 | + struct bpf_array *array = container_of(map, struct bpf_array, map); |
| 121 | + |
| 122 | + /* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0, |
| 123 | + * so the programs (can be more than one that used this map) were |
| 124 | + * disconnected from events. Wait for outstanding programs to complete |
| 125 | + * and free the array |
| 126 | + */ |
| 127 | + synchronize_rcu(); |
| 128 | + |
| 129 | + kvfree(array); |
| 130 | +} |
| 131 | + |
/* Callback table plugging the array map implementation into the generic
 * bpf map infrastructure.
 */
static struct bpf_map_ops array_ops = {
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
};
| 140 | + |
/* Registration record binding BPF_MAP_TYPE_ARRAY to the ops above */
static struct bpf_map_type_list tl = {
	.ops = &array_ops,
	.type = BPF_MAP_TYPE_ARRAY,
};
| 145 | + |
/* Register the array map type during boot.
 * NOTE(review): late_initcall presumably guarantees the bpf map core
 * (which owns the type list) is initialized first — confirm ordering.
 */
static int __init register_array_map(void)
{
	bpf_register_map_type(&tl);
	return 0;
}
late_initcall(register_array_map);
0 commit comments