// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2024 Meta, Inc */
#include <linux/bpf.h>
#include <linux/bpf_crypto.h>
#include <linux/bpf_mem_alloc.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/filter.h>
#include <linux/scatterlist.h>
#include <linux/skbuff.h>
#include <crypto/skcipher.h>

struct bpf_crypto_type_list {
	const struct bpf_crypto_type *type;
	struct list_head list;
};

/**
 * struct bpf_crypto_params - BPF crypto initialization parameters structure
 * @type:	The string of crypto operation type.
 * @reserved:	Reserved member, must be zeroed for now; it will be reused
 *		for more options in future.
 * @algo:	The string of algorithm to initialize.
 * @key:	The cipher key used to init crypto algorithm.
 * @key_len:	The length of cipher key.
 * @authsize:	The length of authentication tag used by algorithm.
 */
struct bpf_crypto_params {
	char type[14];
	u8 reserved[2];
	char algo[128];
	u8 key[256];
	u32 key_len;
	u32 authsize;
};
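
/*
 * Illustrative sketch (not part of this file's logic): a BPF program fills
 * this struct and passes it by size to bpf_crypto_ctx_create(), e.g.
 *
 *	struct bpf_crypto_params params = {
 *		.type	 = "skcipher",	// must match a registered type name
 *		.algo	 = "ecb(aes)",	// anything the type's has_algo() accepts
 *		.key_len = 16,
 *	};
 *	__builtin_memcpy(params.key, aes_key, 16);	// aes_key is hypothetical
 *
 * bpf_crypto_ctx_create() rejects the call unless params__sz equals
 * sizeof(struct bpf_crypto_params) and the reserved bytes are zero.
 */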

static LIST_HEAD(bpf_crypto_types);
static DECLARE_RWSEM(bpf_crypto_types_sem);

/**
 * struct bpf_crypto_ctx - refcounted BPF crypto context structure
 * @type:	The pointer to bpf crypto type.
 * @tfm:	The pointer to instance of crypto API struct.
 * @siv_len:	Size of IV and state storage for cipher.
 * @rcu:	The RCU head used to free the crypto context with RCU safety.
 * @usage:	Object reference counter. When the refcount goes to 0, the
 *		memory is freed after an RCU grace period via crypto_free_cb().
 */
struct bpf_crypto_ctx {
	const struct bpf_crypto_type *type;
	void *tfm;
	u32 siv_len;
	struct rcu_head rcu;
	refcount_t usage;
};

int bpf_crypto_register_type(const struct bpf_crypto_type *type)
{
	struct bpf_crypto_type_list *node;
	int err = -EEXIST;

	down_write(&bpf_crypto_types_sem);
	list_for_each_entry(node, &bpf_crypto_types, list) {
		if (!strcmp(node->type->name, type->name))
			goto unlock;
	}

	node = kmalloc(sizeof(*node), GFP_KERNEL);
	err = -ENOMEM;
	if (!node)
		goto unlock;

	node->type = type;
	list_add(&node->list, &bpf_crypto_types);
	err = 0;

unlock:
	up_write(&bpf_crypto_types_sem);

	return err;
}
EXPORT_SYMBOL_GPL(bpf_crypto_register_type);

int bpf_crypto_unregister_type(const struct bpf_crypto_type *type)
{
	struct bpf_crypto_type_list *node;
	int err = -ENOENT;

	down_write(&bpf_crypto_types_sem);
	list_for_each_entry(node, &bpf_crypto_types, list) {
		if (strcmp(node->type->name, type->name))
			continue;

		list_del(&node->list);
		kfree(node);
		err = 0;
		break;
	}
	up_write(&bpf_crypto_types_sem);

	return err;
}
EXPORT_SYMBOL_GPL(bpf_crypto_unregister_type);
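
/*
 * Usage sketch (hedged, hypothetical callback names): a backend module, such
 * as the skcipher backend, provides a struct bpf_crypto_type and registers it
 * from its module init, e.g.
 *
 *	static const struct bpf_crypto_type bpf_crypto_example_type = {
 *		.alloc_tfm	= example_alloc_tfm,
 *		.free_tfm	= example_free_tfm,
 *		.has_algo	= example_has_algo,
 *		.setkey		= example_setkey,
 *		.encrypt	= example_encrypt,
 *		.decrypt	= example_decrypt,
 *		.ivsize		= example_ivsize,
 *		.statesize	= example_statesize,
 *		.get_flags	= example_get_flags,
 *		.owner		= THIS_MODULE,
 *		.name		= "example",
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return bpf_crypto_register_type(&bpf_crypto_example_type);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		WARN_ON_ONCE(bpf_crypto_unregister_type(&bpf_crypto_example_type));
 *	}
 */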

static const struct bpf_crypto_type *bpf_crypto_get_type(const char *name)
{
	const struct bpf_crypto_type *type = ERR_PTR(-ENOENT);
	struct bpf_crypto_type_list *node;

	down_read(&bpf_crypto_types_sem);
	list_for_each_entry(node, &bpf_crypto_types, list) {
		if (strcmp(node->type->name, name))
			continue;

		if (try_module_get(node->type->owner))
			type = node->type;
		break;
	}
	up_read(&bpf_crypto_types_sem);

	return type;
}

__bpf_kfunc_start_defs();

/**
 * bpf_crypto_ctx_create() - Create a mutable BPF crypto context.
 *
 * Allocates a crypto context that can be used, acquired, and released by
 * a BPF program. The crypto context returned by this function must either
 * be embedded in a map as a kptr, or freed with bpf_crypto_ctx_release().
 * As crypto API functions use GFP_KERNEL allocations, this function can
 * only be used in sleepable BPF programs.
 *
 * bpf_crypto_ctx_create() allocates memory for crypto context.
 * It may return NULL if no memory is available.
 * @params:	pointer to struct bpf_crypto_params which contains all the
 *		details needed to initialise crypto context.
 * @params__sz:	size of struct bpf_crypto_params used by the bpf program.
 * @err:	integer to store error code when NULL is returned.
 */
__bpf_kfunc struct bpf_crypto_ctx *
bpf_crypto_ctx_create(const struct bpf_crypto_params *params, u32 params__sz,
		      int *err)
{
	const struct bpf_crypto_type *type;
	struct bpf_crypto_ctx *ctx;

	if (!params || params->reserved[0] || params->reserved[1] ||
	    params__sz != sizeof(struct bpf_crypto_params)) {
		*err = -EINVAL;
		return NULL;
	}

	type = bpf_crypto_get_type(params->type);
	if (IS_ERR(type)) {
		*err = PTR_ERR(type);
		return NULL;
	}

	if (!type->has_algo(params->algo)) {
		*err = -EOPNOTSUPP;
		goto err_module_put;
	}

	if (!!params->authsize ^ !!type->setauthsize) {
		*err = -EOPNOTSUPP;
		goto err_module_put;
	}

	if (!params->key_len || params->key_len > sizeof(params->key)) {
		*err = -EINVAL;
		goto err_module_put;
	}

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		*err = -ENOMEM;
		goto err_module_put;
	}

	ctx->type = type;
	ctx->tfm = type->alloc_tfm(params->algo);
	if (IS_ERR(ctx->tfm)) {
		*err = PTR_ERR(ctx->tfm);
		goto err_free_ctx;
	}

	if (params->authsize) {
		*err = type->setauthsize(ctx->tfm, params->authsize);
		if (*err)
			goto err_free_tfm;
	}

	*err = type->setkey(ctx->tfm, params->key, params->key_len);
	if (*err)
		goto err_free_tfm;

	if (type->get_flags(ctx->tfm) & CRYPTO_TFM_NEED_KEY) {
		*err = -EINVAL;
		goto err_free_tfm;
	}

	ctx->siv_len = type->ivsize(ctx->tfm) + type->statesize(ctx->tfm);

	refcount_set(&ctx->usage, 1);

	return ctx;

err_free_tfm:
	type->free_tfm(ctx->tfm);
err_free_ctx:
	kfree(ctx);
err_module_put:
	module_put(type->owner);

	return NULL;
}
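
/*
 * Usage sketch (illustrative only, BPF program side): a sleepable program
 * creates a context and must then either move it into a map as a kptr or
 * release it. "params" is assumed to be filled as in the example above
 * struct bpf_crypto_params.
 *
 *	SEC("syscall")
 *	int crypto_setup(void *args)
 *	{
 *		struct bpf_crypto_ctx *cctx;
 *		int err = 0;
 *
 *		cctx = bpf_crypto_ctx_create(&params, sizeof(params), &err);
 *		if (!cctx)
 *			return err;
 *
 *		// either hand the reference to a map value with bpf_kptr_xchg()
 *		// (see the sketch after bpf_crypto_ctx_release() below), or
 *		// drop it when no longer needed:
 *		bpf_crypto_ctx_release(cctx);
 *		return 0;
 *	}
 */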

static void crypto_free_cb(struct rcu_head *head)
{
	struct bpf_crypto_ctx *ctx;

	ctx = container_of(head, struct bpf_crypto_ctx, rcu);
	ctx->type->free_tfm(ctx->tfm);
	module_put(ctx->type->owner);
	kfree(ctx);
}

/**
 * bpf_crypto_ctx_acquire() - Acquire a reference to a BPF crypto context.
 * @ctx: The BPF crypto context being acquired. The ctx must be a trusted
 *	 pointer.
 *
 * Acquires a reference to a BPF crypto context. The context returned by this
 * function must either be embedded in a map as a kptr, or freed with
 * bpf_crypto_ctx_release().
 */
__bpf_kfunc struct bpf_crypto_ctx *
bpf_crypto_ctx_acquire(struct bpf_crypto_ctx *ctx)
{
	if (!refcount_inc_not_zero(&ctx->usage))
		return NULL;
	return ctx;
}

/**
 * bpf_crypto_ctx_release() - Release a previously acquired BPF crypto context.
 * @ctx: The crypto context being released.
 *
 * Releases a previously acquired reference to a BPF crypto context. When the
 * final reference of the BPF crypto context has been released, its memory
 * will be released.
 */
__bpf_kfunc void bpf_crypto_ctx_release(struct bpf_crypto_ctx *ctx)
{
	if (refcount_dec_and_test(&ctx->usage))
		call_rcu(&ctx->rcu, crypto_free_cb);
}
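
/*
 * Usage sketch (illustrative only, BPF program side): keep the context in a
 * map value as a kptr and take temporary references from it. All map, type
 * and variable names below are hypothetical.
 *
 *	struct crypto_ctx_value {
 *		struct bpf_crypto_ctx __kptr *ctx;
 *	};
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_ARRAY);
 *		__type(key, u32);
 *		__type(value, struct crypto_ctx_value);
 *		__uint(max_entries, 1);
 *	} crypto_ctx_map SEC(".maps");
 *
 *	// Hand an owned reference (e.g. from bpf_crypto_ctx_create()) over to
 *	// the map value v, releasing whatever was stored there before:
 *	old = bpf_kptr_xchg(&v->ctx, cctx);
 *	if (old)
 *		bpf_crypto_ctx_release(old);
 *
 *	// Later, take a reference from the RCU-protected kptr field;
 *	// bpf_crypto_ctx_acquire() may return NULL:
 *	if (!v->ctx)
 *		return 0;
 *	cctx = bpf_crypto_ctx_acquire(v->ctx);
 *	if (!cctx)
 *		return 0;
 *	// ... use cctx, then drop the reference:
 *	bpf_crypto_ctx_release(cctx);
 */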

static int bpf_crypto_crypt(const struct bpf_crypto_ctx *ctx,
			    const struct bpf_dynptr_kern *src,
			    const struct bpf_dynptr_kern *dst,
			    const struct bpf_dynptr_kern *siv,
			    bool decrypt)
{
	u32 src_len, dst_len, siv_len;
	const u8 *psrc;
	u8 *pdst, *piv;
	int err;

	if (__bpf_dynptr_is_rdonly(dst))
		return -EINVAL;

	siv_len = __bpf_dynptr_size(siv);
	src_len = __bpf_dynptr_size(src);
	dst_len = __bpf_dynptr_size(dst);
	if (!src_len || !dst_len)
		return -EINVAL;

	if (siv_len != ctx->siv_len)
		return -EINVAL;

	psrc = __bpf_dynptr_data(src, src_len);
	if (!psrc)
		return -EINVAL;
	pdst = __bpf_dynptr_data_rw(dst, dst_len);
	if (!pdst)
		return -EINVAL;

	piv = siv_len ? __bpf_dynptr_data_rw(siv, siv_len) : NULL;
	if (siv_len && !piv)
		return -EINVAL;

	err = decrypt ? ctx->type->decrypt(ctx->tfm, psrc, pdst, src_len, piv)
		      : ctx->type->encrypt(ctx->tfm, psrc, pdst, src_len, piv);

	return err;
}

/**
 * bpf_crypto_decrypt() - Decrypt buffer using configured context and IV provided.
 * @ctx:	The crypto context being used. The ctx must be a trusted pointer.
 * @src:	bpf_dynptr to the encrypted data. Must be a trusted pointer.
 * @dst:	bpf_dynptr to the buffer where to store the result. Must be a trusted pointer.
 * @siv:	bpf_dynptr to IV data and state data to be used by decryptor.
 *
 * Decrypts provided buffer using IV data and the crypto context. Crypto context must be configured.
 */
__bpf_kfunc int bpf_crypto_decrypt(struct bpf_crypto_ctx *ctx,
				   const struct bpf_dynptr_kern *src,
				   const struct bpf_dynptr_kern *dst,
				   const struct bpf_dynptr_kern *siv)
{
	return bpf_crypto_crypt(ctx, src, dst, siv, true);
}

/**
 * bpf_crypto_encrypt() - Encrypt buffer using configured context and IV provided.
 * @ctx:	The crypto context being used. The ctx must be a trusted pointer.
 * @src:	bpf_dynptr to the plain data. Must be a trusted pointer.
 * @dst:	bpf_dynptr to the buffer where to store the result. Must be a trusted pointer.
 * @siv:	bpf_dynptr to IV data and state data to be used by encryptor.
 *
 * Encrypts provided buffer using IV data and the crypto context. Crypto context must be configured.
 */
__bpf_kfunc int bpf_crypto_encrypt(struct bpf_crypto_ctx *ctx,
				   const struct bpf_dynptr_kern *src,
				   const struct bpf_dynptr_kern *dst,
				   const struct bpf_dynptr_kern *siv)
{
	return bpf_crypto_crypt(ctx, src, dst, siv, false);
}
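
/*
 * Usage sketch (illustrative only, BPF program side): encrypt skb payload into
 * a local buffer from a TC program. Buffer and variable names are hypothetical,
 * and the payload is assumed to fit into dst. With ecb(aes) the context has
 * siv_len == 0, so the IV/state dynptr is created with zero size.
 *
 *	struct bpf_dynptr psrc, pdst, siv;
 *	u8 dst[64];
 *	int err;
 *
 *	bpf_dynptr_from_skb(skb, 0, &psrc);
 *	bpf_dynptr_from_mem(dst, sizeof(dst), 0, &pdst);
 *	bpf_dynptr_from_mem(dst, 0, 0, &siv);	// zero-sized IV/state dynptr
 *
 *	err = bpf_crypto_encrypt(cctx, &psrc, &pdst, &siv);
 */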

__bpf_kfunc_end_defs();

BTF_KFUNCS_START(crypt_init_kfunc_btf_ids)
BTF_ID_FLAGS(func, bpf_crypto_ctx_create, KF_ACQUIRE | KF_RET_NULL | KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_crypto_ctx_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_crypto_ctx_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
BTF_KFUNCS_END(crypt_init_kfunc_btf_ids)

static const struct btf_kfunc_id_set crypt_init_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &crypt_init_kfunc_btf_ids,
};

BTF_KFUNCS_START(crypt_kfunc_btf_ids)
BTF_ID_FLAGS(func, bpf_crypto_decrypt, KF_RCU)
BTF_ID_FLAGS(func, bpf_crypto_encrypt, KF_RCU)
BTF_KFUNCS_END(crypt_kfunc_btf_ids)

static const struct btf_kfunc_id_set crypt_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &crypt_kfunc_btf_ids,
};

BTF_ID_LIST(bpf_crypto_dtor_ids)
BTF_ID(struct, bpf_crypto_ctx)
BTF_ID(func, bpf_crypto_ctx_release)

static int __init crypto_kfunc_init(void)
{
	int ret;
	const struct btf_id_dtor_kfunc bpf_crypto_dtors[] = {
		{
			.btf_id	      = bpf_crypto_dtor_ids[0],
			.kfunc_btf_id = bpf_crypto_dtor_ids[1]
		},
	};

	ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &crypt_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_ACT, &crypt_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &crypt_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL,
					       &crypt_init_kfunc_set);
	return ret ?: register_btf_id_dtor_kfuncs(bpf_crypto_dtors,
						  ARRAY_SIZE(bpf_crypto_dtors),
						  THIS_MODULE);
}

late_initcall(crypto_kfunc_init);
386