Lines matching refs: array

22 static void bpf_array_free_percpu(struct bpf_array *array)
26 for (i = 0; i < array->map.max_entries; i++) {
27 free_percpu(array->pptrs[i]);
32 static int bpf_array_alloc_percpu(struct bpf_array *array)
37 for (i = 0; i < array->map.max_entries; i++) {
38 ptr = bpf_map_alloc_percpu(&array->map, array->elem_size, 8,
41 bpf_array_free_percpu(array);
44 array->pptrs[i] = ptr;
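
The two helpers above pair up: the allocation loop fills every slot and, on the first failure, calls the free routine to roll back whatever was already allocated (these matches appear to come from the kernel's kernel/bpf/arraymap.c). A minimal userspace sketch of the same allocate-all-or-roll-back pattern, with plain calloc()/free() standing in for bpf_map_alloc_percpu()/free_percpu(); the array_sim names are invented for the illustration and the sketch assumes pptrs[] was zero-initialized, as the kernel's area allocator guarantees:

    #include <stdlib.h>

    struct array_sim {
        unsigned int max_entries;
        size_t elem_size;
        void **pptrs;            /* one buffer per slot, like array->pptrs[] */
    };

    static void array_sim_free_bufs(struct array_sim *a)
    {
        for (unsigned int i = 0; i < a->max_entries; i++)
            free(a->pptrs[i]);   /* free(NULL) is a no-op, like free_percpu(NULL) */
    }

    static int array_sim_alloc_bufs(struct array_sim *a)
    {
        for (unsigned int i = 0; i < a->max_entries; i++) {
            void *ptr = calloc(1, a->elem_size);

            if (!ptr) {
                array_sim_free_bufs(a);   /* roll back everything allocated so far */
                return -1;                /* the kernel returns -ENOMEM here */
            }
            a->pptrs[i] = ptr;
        }
        return 0;
    }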
87 struct bpf_array *array;
103 /* round up array size to nearest power of 2,
112 array_size = sizeof(*array);
117 * ensure array->value is exactly page-aligned
135 array = data + PAGE_ALIGN(sizeof(struct bpf_array))
138 array = bpf_map_area_alloc(array_size, numa_node);
140 if (!array)
142 array->index_mask = index_mask;
143 array->map.bypass_spec_v1 = bypass_spec_v1;
146 bpf_map_init_from_attr(&array->map, attr);
147 array->elem_size = elem_size;
149 if (percpu && bpf_array_alloc_percpu(array)) {
150 bpf_map_area_free(array);
154 return &array->map;
157 static void *array_map_elem_ptr(struct bpf_array* array, u32 index)
159 return array->value + (u64)array->elem_size * index;
165 struct bpf_array *array = container_of(map, struct bpf_array, map);
168 if (unlikely(index >= array->map.max_entries))
171 return array->value + (u64)array->elem_size * (index & array->index_mask);
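
The lookup above masks the index with array->index_mask after the architectural bounds check; the mask is max_entries rounded up to the next power of two, minus one, and (per the comment matched at line 103) the array is also sized to that rounded-up bound when Spectre v1 mitigations are in force, so even a speculatively executed out-of-bounds index lands inside the allocation. A hedged userspace sketch of the masked lookup, with flat_array and roundup_pow2 as invented names:

    #include <stdint.h>
    #include <stddef.h>

    /* Round v up to the next power of two (v > 0); illustration only. */
    static uint32_t roundup_pow2(uint32_t v)
    {
        v--;
        v |= v >> 1; v |= v >> 2; v |= v >> 4;
        v |= v >> 8; v |= v >> 16;
        return v + 1;
    }

    struct flat_array {
        uint32_t max_entries;
        uint32_t index_mask;     /* roundup_pow2(max_entries) - 1 */
        uint32_t elem_size;
        char value[];            /* sized as (index_mask + 1) * elem_size when mitigating */
    };

    static void *flat_array_lookup(struct flat_array *a, uint32_t index)
    {
        if (index >= a->max_entries)
            return NULL;         /* architectural bounds check */
        /* the mask keeps even mispredicted accesses inside the allocation */
        return a->value + (uint64_t)a->elem_size * (index & a->index_mask);
    }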
177 struct bpf_array *array = container_of(map, struct bpf_array, map);
184 *imm = (unsigned long)array->value;
191 struct bpf_array *array = container_of(map, struct bpf_array, map);
192 u64 base = (unsigned long)array->value;
193 u64 range = array->elem_size;
207 struct bpf_array *array = container_of(map, struct bpf_array, map);
209 u32 elem_size = array->elem_size;
221 *insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
240 struct bpf_array *array = container_of(map, struct bpf_array, map);
243 if (unlikely(index >= array->map.max_entries))
246 return this_cpu_ptr(array->pptrs[index & array->index_mask]);
252 struct bpf_array *array = container_of(map, struct bpf_array, map);
267 *insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_0, array->index_mask);
283 struct bpf_array *array = container_of(map, struct bpf_array, map);
289 if (unlikely(index >= array->map.max_entries))
292 return per_cpu_ptr(array->pptrs[index & array->index_mask], cpu);
297 struct bpf_array *array = container_of(map, struct bpf_array, map);
303 if (unlikely(index >= array->map.max_entries))
310 size = array->elem_size;
312 pptr = array->pptrs[index & array->index_mask];
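
bpf_percpu_array_copy, which the matches at lines 297-312 come from, walks every possible CPU and packs that element's per-CPU copies back to back into the caller's buffer; that is why the read path matched at line 687 sizes its buffer as elem_size * num_possible_cpus(). A rough userspace sketch of the flattening, with an ordinary contiguous allocation standing in for the kernel's per-CPU pointers (pcpu_sim, nr_cpus and cpu_slot() are invented for the sketch):

    #include <string.h>
    #include <stdint.h>
    #include <stddef.h>

    struct pcpu_sim {
        unsigned int nr_cpus;    /* stand-in for num_possible_cpus() */
        uint32_t max_entries;
        uint32_t elem_size;
        void **pptrs;            /* pptrs[index] -> nr_cpus contiguous copies */
    };

    /* stand-in for per_cpu_ptr(pptr, cpu) */
    static void *cpu_slot(struct pcpu_sim *a, uint32_t index, unsigned int cpu)
    {
        return (char *)a->pptrs[index] + (size_t)cpu * a->elem_size;
    }

    /* copy every CPU's value for one index into a flat, caller-sized buffer */
    static int pcpu_sim_copy(struct pcpu_sim *a, uint32_t index, void *value)
    {
        uint32_t off = 0;

        if (index >= a->max_entries)
            return -1;           /* the kernel returns -ENOENT */
        for (unsigned int cpu = 0; cpu < a->nr_cpus; cpu++) {
            memcpy((char *)value + off, cpu_slot(a, index, cpu), a->elem_size);
            off += a->elem_size;
        }
        return 0;
    }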
325 struct bpf_array *array = container_of(map, struct bpf_array, map);
329 if (index >= array->map.max_entries) {
334 if (index == array->map.max_entries - 1)
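
The get_next_key matches at lines 325-334 implement the usual array iteration contract: a missing or out-of-range key restarts the walk at index 0, the last index reports that there is no next key, and anything else advances by one. As a plain function (a sketch, not the kernel code):

    #include <stdint.h>
    #include <errno.h>

    static int array_next_key(uint32_t max_entries, const uint32_t *key,
                              uint32_t *next_key)
    {
        uint32_t index = key ? *key : UINT32_MAX;   /* no key -> restart */

        if (index >= max_entries) {
            *next_key = 0;
            return 0;
        }
        if (index == max_entries - 1)
            return -ENOENT;                         /* already at the last slot */
        *next_key = index + 1;
        return 0;
    }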
345 struct bpf_array *array = container_of(map, struct bpf_array, map);
353 if (unlikely(index >= array->map.max_entries))
365 if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
366 val = this_cpu_ptr(array->pptrs[index & array->index_mask]);
368 bpf_obj_free_fields(array->map.record, val);
370 val = array->value +
371 (u64)array->elem_size * (index & array->index_mask);
376 bpf_obj_free_fields(array->map.record, val);
384 struct bpf_array *array = container_of(map, struct bpf_array, map);
394 if (unlikely(index >= array->map.max_entries))
408 size = array->elem_size;
410 pptr = array->pptrs[index & array->index_mask];
413 bpf_obj_free_fields(array->map.record, per_cpu_ptr(pptr, cpu));
426 static void *array_map_vmalloc_addr(struct bpf_array *array)
428 return (void *)round_down((unsigned long)array, PAGE_SIZE);
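
array_map_vmalloc_addr() is the counterpart of the BPF_F_MMAPABLE placement matched at lines 117-135: the header is shifted so that array->value lands exactly on the first page boundary after it, and the original allocation base is recovered later by rounding the struct pointer down to a page (which works because the header fits in a single page). A userspace sketch of that layout trick, using aligned_alloc() in place of the kernel's mmapable area allocator; sim_array and its helpers are invented names:

    #include <stdlib.h>
    #include <stdint.h>
    #include <stddef.h>

    #define PAGE_SIZE      4096UL
    #define PAGE_ALIGN(x)  (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    struct sim_array {
        uint32_t max_entries;
        uint32_t elem_size;
        char value[];            /* must start on a page boundary */
    };

    /* Shift the header so ->value sits on the page boundary right after it. */
    static struct sim_array *sim_array_place(void *data)
    {
        return (struct sim_array *)((char *)data
                                    + PAGE_ALIGN(sizeof(struct sim_array))
                                    - offsetof(struct sim_array, value));
    }

    /* Counterpart of array_map_vmalloc_addr(): recover the allocation base. */
    static void *sim_array_base(struct sim_array *a)
    {
        return (void *)((uintptr_t)a & ~(PAGE_SIZE - 1));
    }

    static struct sim_array *sim_array_alloc(uint32_t max_entries, uint32_t elem_size)
    {
        size_t size = PAGE_ALIGN(sizeof(struct sim_array))
                    + PAGE_ALIGN((uint64_t)max_entries * elem_size);
        void *data = aligned_alloc(PAGE_SIZE, size);

        if (!data)
            return NULL;
        struct sim_array *a = sim_array_place(data);
        a->max_entries = max_entries;
        a->elem_size = elem_size;
        return a;                /* free later with free(sim_array_base(a)) */
    }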
433 struct bpf_array *array = container_of(map, struct bpf_array, map);
440 for (i = 0; i < array->map.max_entries; i++) {
442 bpf_obj_free_timer(map->record, array_map_elem_ptr(array, i));
444 bpf_obj_free_workqueue(map->record, array_map_elem_ptr(array, i));
452 struct bpf_array *array = container_of(map, struct bpf_array, map);
456 if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
457 for (i = 0; i < array->map.max_entries; i++) {
458 void __percpu *pptr = array->pptrs[i & array->index_mask];
467 for (i = 0; i < array->map.max_entries; i++)
468 bpf_obj_free_fields(map->record, array_map_elem_ptr(array, i));
472 if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
473 bpf_array_free_percpu(array);
475 if (array->map.map_flags & BPF_F_MMAPABLE)
476 bpf_map_area_free(array_map_vmalloc_addr(array));
478 bpf_map_area_free(array);
505 struct bpf_array *array = container_of(map, struct bpf_array, map);
513 pptr = array->pptrs[index & array->index_mask];
548 /* bpf array can only take a u32 key. This check makes sure
559 struct bpf_array *array = container_of(map, struct bpf_array, map);
560 pgoff_t pgoff = PAGE_ALIGN(sizeof(*array)) >> PAGE_SHIFT;
566 PAGE_ALIGN((u64)array->map.max_entries * array->elem_size))
569 return remap_vmalloc_range(vma, array_map_vmalloc_addr(array),
592 struct bpf_array *array;
600 array = container_of(map, struct bpf_array, map);
601 index = info->index & array->index_mask;
603 return array->pptrs[index];
604 return array_map_elem_ptr(array, index);
611 struct bpf_array *array;
619 array = container_of(map, struct bpf_array, map);
620 index = info->index & array->index_mask;
622 return array->pptrs[index];
623 return array_map_elem_ptr(array, index);
631 struct bpf_array *array = container_of(map, struct bpf_array, map);
652 size = array->elem_size;
682 struct bpf_array *array = container_of(map, struct bpf_array, map);
687 buf_size = array->elem_size * num_possible_cpus();
730 struct bpf_array *array;
739 array = container_of(map, struct bpf_array, map);
744 val = this_cpu_ptr(array->pptrs[i]);
746 val = array_map_elem_ptr(array, i);
763 struct bpf_array *array = container_of(map, struct bpf_array, map);
765 u32 elem_size = array->elem_size;
767 u64 usage = sizeof(*array);
844 struct bpf_array *array = container_of(map, struct bpf_array, map);
848 for (i = 0; i < array->map.max_entries; i++)
849 BUG_ON(array->ptrs[i] != NULL);
851 bpf_map_area_free(array);
883 struct bpf_array *array = container_of(map, struct bpf_array, map);
890 if (index >= array->map.max_entries)
899 mutex_lock(&array->aux->poke_mutex);
900 old_ptr = xchg(array->ptrs + index, new_ptr);
902 mutex_unlock(&array->aux->poke_mutex);
904 old_ptr = xchg(array->ptrs + index, new_ptr);
914 struct bpf_array *array = container_of(map, struct bpf_array, map);
918 if (index >= array->map.max_entries)
922 mutex_lock(&array->aux->poke_mutex);
923 old_ptr = xchg(array->ptrs + index, NULL);
925 mutex_unlock(&array->aux->poke_mutex);
927 old_ptr = xchg(array->ptrs + index, NULL);
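
The fd/prog-array matches at lines 883-927 show update and delete as the same operation: the new program pointer (or NULL) is swapped into the slot with xchg(), taking poke_mutex when the map is wired up for tail-call poking, and the old pointer is released after the swap. A hedged userspace sketch of that swap-then-release pattern using C11 atomics; slot_release() is a placeholder for the kernel's put of the old entry, and the JIT poking done under the lock is omitted:

    #include <stdatomic.h>
    #include <pthread.h>

    struct fd_slot_array {
        unsigned int max_entries;
        pthread_mutex_t poke_mutex;   /* stand-in for array->aux->poke_mutex */
        _Atomic(void *) *ptrs;        /* stand-in for array->ptrs[] */
    };

    static void slot_release(void *old)
    {
        (void)old;                    /* placeholder for releasing the old entry */
    }

    static int fd_slot_update(struct fd_slot_array *a, unsigned int index,
                              void *new_ptr, int needs_poke_lock)
    {
        void *old;

        if (index >= a->max_entries)
            return -1;                /* the kernel returns -E2BIG */
        if (needs_poke_lock) {
            pthread_mutex_lock(&a->poke_mutex);
            old = atomic_exchange(&a->ptrs[index], new_ptr);
            pthread_mutex_unlock(&a->poke_mutex);
        } else {
            old = atomic_exchange(&a->ptrs[index], new_ptr);
        }
        if (old)
            slot_release(old);        /* delete is the same call with new_ptr == NULL */
        return 0;
    }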
973 struct bpf_array *array = container_of(map, struct bpf_array, map);
976 for (i = 0; i < array->map.max_entries; i++)
1257 struct bpf_array *array = container_of(map, struct bpf_array, map);
1265 for (i = 0; i < array->map.max_entries; i++) {
1266 ee = READ_ONCE(array->ptrs[i]);
1374 struct bpf_array *array = container_of(map, struct bpf_array, map);
1375 u32 elem_size = array->elem_size;
1385 *insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);