// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */
#include <crypto/akcipher.h>
#include <crypto/curve25519.h>
#include <crypto/dh.h>
#include <crypto/ecc_curve.h>
#include <crypto/ecdh.h>
#include <crypto/rng.h>
#include <crypto/internal/akcipher.h>
#include <crypto/internal/kpp.h>
#include <crypto/internal/rsa.h>
#include <crypto/kpp.h>
#include <crypto/scatterwalk.h>
#include <linux/dma-mapping.h>
#include <linux/fips.h>
#include <linux/module.h>
#include <linux/time.h>
#include "hpre.h"

struct hpre_ctx;

#define HPRE_CRYPTO_ALG_PRI	1000
#define HPRE_ALIGN_SZ		64
#define HPRE_BITS_2_BYTES_SHIFT	3
#define HPRE_RSA_512BITS_KSZ	64
#define HPRE_RSA_1536BITS_KSZ	192
#define HPRE_CRT_PRMS		5
#define HPRE_CRT_Q		2
#define HPRE_CRT_P		3
#define HPRE_CRT_INV		4
#define HPRE_DH_G_FLAG		0x02
#define HPRE_TRY_SEND_TIMES	100
#define HPRE_INVLD_REQ_ID		(-1)

#define HPRE_SQE_ALG_BITS	5
#define HPRE_SQE_DONE_SHIFT	30
#define HPRE_DH_MAX_P_SZ	512

#define HPRE_DFX_SEC_TO_US	1000000
#define HPRE_DFX_US_TO_NS	1000

/* due to NIST P-521 */
#define HPRE_ECC_MAX_KSZ	66

/* size in bytes of the n prime */
#define HPRE_ECC_NIST_P192_N_SIZE	24
#define HPRE_ECC_NIST_P256_N_SIZE	32
#define HPRE_ECC_NIST_P384_N_SIZE	48

/* size in bytes */
#define HPRE_ECC_HW256_KSZ_B	32
#define HPRE_ECC_HW384_KSZ_B	48

/* capability register mask of driver */
#define HPRE_DRV_RSA_MASK_CAP		BIT(0)
#define HPRE_DRV_DH_MASK_CAP		BIT(1)
#define HPRE_DRV_ECDH_MASK_CAP		BIT(2)
#define HPRE_DRV_X25519_MASK_CAP	BIT(5)

static DEFINE_MUTEX(hpre_algs_lock);
static unsigned int hpre_available_devs;

typedef void (*hpre_cb)(struct hpre_ctx *ctx, void *sqe);

struct hpre_rsa_ctx {
	/* low address: e--->n */
	char *pubkey;
	dma_addr_t dma_pubkey;

	/* low address: d--->n */
	char *prikey;
	dma_addr_t dma_prikey;

	/* low address: dq->dp->q->p->qinv */
	char *crt_prikey;
	dma_addr_t dma_crt_prikey;

	struct crypto_akcipher *soft_tfm;
};

struct hpre_dh_ctx {
	/*
	 * If base is g we compute the public key
	 *	ya = g^xa mod p; [RFC2631 sec 2.1.1]
	 * else if base is the counterpart public key we
	 * compute the shared secret
	 *	ZZ = yb^xa mod p; [RFC2631 sec 2.1.1]
	 * low address: d--->n, please refer to the HiSilicon HPRE UM
	 */
	char *xa_p;
	dma_addr_t dma_xa_p;

	char *g; /* m */
	dma_addr_t dma_g;
};

struct hpre_ecdh_ctx {
	/* low address: p->a->k->b */
	unsigned char *p;
	dma_addr_t dma_p;

	/* low address: x->y */
	unsigned char *g;
	dma_addr_t dma_g;
};

struct hpre_curve25519_ctx {
	/* low address: p->a->k */
	unsigned char *p;
	dma_addr_t dma_p;

	/* gx coordinate */
	unsigned char *g;
	dma_addr_t dma_g;
};

struct hpre_ctx {
	struct hisi_qp *qp;
	struct device *dev;
	struct hpre_asym_request **req_list;
	struct hpre *hpre;
	spinlock_t req_lock;
	unsigned int key_sz;
	bool crt_g2_mode;
	struct idr req_idr;
	union {
		struct hpre_rsa_ctx rsa;
		struct hpre_dh_ctx dh;
		struct hpre_ecdh_ctx ecdh;
		struct hpre_curve25519_ctx curve25519;
	};
	/* for ecc algorithms */
	unsigned int curve_id;
};

struct hpre_asym_request {
	char *src;
	char *dst;
	struct hpre_sqe req;
	struct hpre_ctx *ctx;
	union {
		struct akcipher_request *rsa;
		struct kpp_request *dh;
		struct kpp_request *ecdh;
		struct kpp_request *curve25519;
	} areq;
	int err;
	int req_id;
	hpre_cb cb;
	struct timespec64 req_time;
};

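/*
 * Request-context alignment: the HPRE SQE buffers must honour both the
 * generic crypto DMA alignment and HPRE's own 64-byte requirement.
 * hpre_align_sz() ORs the two masks together, so the result is the
 * smallest power of two satisfying both (e.g. a crypto_dma_align() of
 * 128 yields 128; one of 32 yields 64). hpre_align_pd() is the padding
 * added to the request size so PTR_ALIGN() always has room.
 */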
static inline unsigned int hpre_align_sz(void)
{
	return ((crypto_dma_align() - 1) | (HPRE_ALIGN_SZ - 1)) + 1;
}

static inline unsigned int hpre_align_pd(void)
{
	return (hpre_align_sz() - 1) & ~(crypto_tfm_ctx_alignment() - 1);
}

static int hpre_alloc_req_id(struct hpre_ctx *ctx)
{
	unsigned long flags;
	int id;

	spin_lock_irqsave(&ctx->req_lock, flags);
	id = idr_alloc(&ctx->req_idr, NULL, 0, ctx->qp->sq_depth, GFP_ATOMIC);
	spin_unlock_irqrestore(&ctx->req_lock, flags);

	return id;
}

static void hpre_free_req_id(struct hpre_ctx *ctx, int req_id)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->req_lock, flags);
	idr_remove(&ctx->req_idr, req_id);
	spin_unlock_irqrestore(&ctx->req_lock, flags);
}

static int hpre_add_req_to_ctx(struct hpre_asym_request *hpre_req)
{
	struct hpre_ctx *ctx;
	struct hpre_dfx *dfx;
	int id;

	ctx = hpre_req->ctx;
	id = hpre_alloc_req_id(ctx);
	if (unlikely(id < 0))
		return -EINVAL;

	ctx->req_list[id] = hpre_req;
	hpre_req->req_id = id;

	dfx = ctx->hpre->debug.dfx;
	if (atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value))
		ktime_get_ts64(&hpre_req->req_time);

	return id;
}

static void hpre_rm_req_from_ctx(struct hpre_asym_request *hpre_req)
{
	struct hpre_ctx *ctx = hpre_req->ctx;
	int id = hpre_req->req_id;

	if (hpre_req->req_id >= 0) {
		hpre_req->req_id = HPRE_INVLD_REQ_ID;
		ctx->req_list[id] = NULL;
		hpre_free_req_id(ctx, id);
	}
}

static struct hisi_qp *hpre_get_qp_and_start(u8 type)
{
	struct hisi_qp *qp;
	int ret;

	qp = hpre_create_qp(type);
	if (!qp) {
		pr_err("Cannot create hpre qp!\n");
		return ERR_PTR(-ENODEV);
	}

	ret = hisi_qm_start_qp(qp, 0);
	if (ret < 0) {
		hisi_qm_free_qps(&qp, 1);
		pci_err(qp->qm->pdev, "Cannot start qp!\n");
		return ERR_PTR(-EINVAL);
	}

	return qp;
}

static int hpre_get_data_dma_addr(struct hpre_asym_request *hpre_req,
				  struct scatterlist *data, unsigned int len,
				  int is_src, dma_addr_t *tmp)
{
	struct device *dev = hpre_req->ctx->dev;
	enum dma_data_direction dma_dir;

	if (is_src) {
		hpre_req->src = NULL;
		dma_dir = DMA_TO_DEVICE;
	} else {
		hpre_req->dst = NULL;
		dma_dir = DMA_FROM_DEVICE;
	}
	*tmp = dma_map_single(dev, sg_virt(data), len, dma_dir);
	if (unlikely(dma_mapping_error(dev, *tmp))) {
		dev_err(dev, "dma map data err!\n");
		return -ENOMEM;
	}

	return 0;
}

static int hpre_prepare_dma_buf(struct hpre_asym_request *hpre_req,
				struct scatterlist *data, unsigned int len,
				int is_src, dma_addr_t *tmp)
{
	struct hpre_ctx *ctx = hpre_req->ctx;
	struct device *dev = ctx->dev;
	void *ptr;
	int shift;

	shift = ctx->key_sz - len;
	if (unlikely(shift < 0))
		return -EINVAL;

	ptr = dma_alloc_coherent(dev, ctx->key_sz, tmp, GFP_ATOMIC);
	if (unlikely(!ptr))
		return -ENOMEM;

	if (is_src) {
		scatterwalk_map_and_copy(ptr + shift, data, 0, len, 0);
		hpre_req->src = ptr;
	} else {
		hpre_req->dst = ptr;
	}

	return 0;
}

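/*
 * Hardware operands are fixed-size, big-endian, key_sz-byte buffers. A
 * single scatterlist entry of exactly key_sz bytes can be DMA-mapped in
 * place; anything shorter (or multi-entry) is bounced through a zeroed
 * coherent buffer with the data right-aligned, i.e. left-padded with
 * zeros (hpre_prepare_dma_buf() above).
 */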
static int hpre_hw_data_init(struct hpre_asym_request *hpre_req,
			     struct scatterlist *data, unsigned int len,
			     int is_src, int is_dh)
{
	struct hpre_sqe *msg = &hpre_req->req;
	struct hpre_ctx *ctx = hpre_req->ctx;
	dma_addr_t tmp = 0;
	int ret;

	/* DH source data must always be copied and formatted before use */
	if ((sg_is_last(data) && len == ctx->key_sz) &&
	    ((is_dh && !is_src) || !is_dh))
		ret = hpre_get_data_dma_addr(hpre_req, data, len, is_src, &tmp);
	else
		ret = hpre_prepare_dma_buf(hpre_req, data, len, is_src, &tmp);

	if (unlikely(ret))
		return ret;

	if (is_src)
		msg->in = cpu_to_le64(tmp);
	else
		msg->out = cpu_to_le64(tmp);

	return 0;
}

static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,
				 struct hpre_asym_request *req,
				 struct scatterlist *dst,
				 struct scatterlist *src)
{
	struct device *dev = ctx->dev;
	struct hpre_sqe *sqe = &req->req;
	dma_addr_t tmp;

	tmp = le64_to_cpu(sqe->in);
	if (unlikely(dma_mapping_error(dev, tmp)))
		return;

	if (src) {
		if (req->src)
			dma_free_coherent(dev, ctx->key_sz, req->src, tmp);
		else
			dma_unmap_single(dev, tmp, ctx->key_sz, DMA_TO_DEVICE);
	}

	tmp = le64_to_cpu(sqe->out);
	if (unlikely(dma_mapping_error(dev, tmp)))
		return;

	if (req->dst) {
		if (dst)
			scatterwalk_map_and_copy(req->dst, dst, 0,
						 ctx->key_sz, 1);
		dma_free_coherent(dev, ctx->key_sz, req->dst, tmp);
	} else {
		dma_unmap_single(dev, tmp, ctx->key_sz, DMA_FROM_DEVICE);
	}
}

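/*
 * From the masks below, the SQE dw0 layout is: bits [4:0] carry the
 * algorithm type, bits [15:5] the hardware error type, and bits [31:30]
 * the "done" state (3 == task done).
 */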
static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe,
				void **kreq)
{
	struct hpre_asym_request *req;
	unsigned int err, done, alg;
	int id;

#define HPRE_NO_HW_ERR		0
#define HPRE_HW_TASK_DONE	3
#define HPRE_HW_ERR_MASK	GENMASK(10, 0)
#define HPRE_SQE_DONE_MASK	GENMASK(1, 0)
#define HPRE_ALG_TYPE_MASK	GENMASK(4, 0)
	id = (int)le16_to_cpu(sqe->tag);
	req = ctx->req_list[id];
	hpre_rm_req_from_ctx(req);
	*kreq = req;

	err = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_ALG_BITS) &
		HPRE_HW_ERR_MASK;

	done = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_DONE_SHIFT) &
		HPRE_SQE_DONE_MASK;

	if (likely(err == HPRE_NO_HW_ERR && done == HPRE_HW_TASK_DONE))
		return 0;

	alg = le32_to_cpu(sqe->dw0) & HPRE_ALG_TYPE_MASK;
	dev_err_ratelimited(ctx->dev, "alg[0x%x] error: done[0x%x], etype[0x%x]\n",
		alg, done, err);

	return -EINVAL;
}

static int hpre_ctx_set(struct hpre_ctx *ctx, struct hisi_qp *qp, int qlen)
{
	struct hpre *hpre;

	if (!ctx || !qp || qlen < 0)
		return -EINVAL;

	spin_lock_init(&ctx->req_lock);
	ctx->qp = qp;
	ctx->dev = &qp->qm->pdev->dev;

	hpre = container_of(ctx->qp->qm, struct hpre, qm);
	ctx->hpre = hpre;
	ctx->req_list = kcalloc(qlen, sizeof(void *), GFP_KERNEL);
	if (!ctx->req_list)
		return -ENOMEM;
	ctx->key_sz = 0;
	ctx->crt_g2_mode = false;
	idr_init(&ctx->req_idr);

	return 0;
}

static void hpre_ctx_clear(struct hpre_ctx *ctx, bool is_clear_all)
{
	if (is_clear_all) {
		idr_destroy(&ctx->req_idr);
		kfree(ctx->req_list);
		hisi_qm_free_qps(&ctx->qp, 1);
	}

	ctx->crt_g2_mode = false;
	ctx->key_sz = 0;
}

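/*
 * Returns true when more than @overtime_thrhld microseconds elapsed
 * between the request being queued and the reply arriving.
 */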
static bool hpre_is_bd_timeout(struct hpre_asym_request *req,
			       u64 overtime_thrhld)
{
	struct timespec64 reply_time;
	u64 time_use_us;

	ktime_get_ts64(&reply_time);
	time_use_us = (reply_time.tv_sec - req->req_time.tv_sec) *
		HPRE_DFX_SEC_TO_US +
		(reply_time.tv_nsec - req->req_time.tv_nsec) /
		HPRE_DFX_US_TO_NS;

	if (time_use_us <= overtime_thrhld)
		return false;

	return true;
}

static void hpre_dh_cb(struct hpre_ctx *ctx, void *resp)
{
	struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
	struct hpre_asym_request *req;
	struct kpp_request *areq;
	u64 overtime_thrhld;
	int ret;

	ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
	areq = req->areq.dh;
	areq->dst_len = ctx->key_sz;

	overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
	if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
		atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);

	hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src);
	kpp_request_complete(areq, ret);
	atomic64_inc(&dfx[HPRE_RECV_CNT].value);
}

static void hpre_rsa_cb(struct hpre_ctx *ctx, void *resp)
{
	struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
	struct hpre_asym_request *req;
	struct akcipher_request *areq;
	u64 overtime_thrhld;
	int ret;

	ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);

	overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
	if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
		atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);

	areq = req->areq.rsa;
	areq->dst_len = ctx->key_sz;
	hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src);
	akcipher_request_complete(areq, ret);
	atomic64_inc(&dfx[HPRE_RECV_CNT].value);
}

static void hpre_alg_cb(struct hisi_qp *qp, void *resp)
{
	struct hpre_ctx *ctx = qp->qp_ctx;
	struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
	struct hpre_sqe *sqe = resp;
	struct hpre_asym_request *req = ctx->req_list[le16_to_cpu(sqe->tag)];

	if (unlikely(!req)) {
		atomic64_inc(&dfx[HPRE_INVALID_REQ_CNT].value);
		return;
	}

	req->cb(ctx, resp);
}

static void hpre_stop_qp_and_put(struct hisi_qp *qp)
{
	hisi_qm_stop_qp(qp);
	hisi_qm_free_qps(&qp, 1);
}

static int hpre_ctx_init(struct hpre_ctx *ctx, u8 type)
{
	struct hisi_qp *qp;
	int ret;

	qp = hpre_get_qp_and_start(type);
	if (IS_ERR(qp))
		return PTR_ERR(qp);

	qp->qp_ctx = ctx;
	qp->req_cb = hpre_alg_cb;

	ret = hpre_ctx_set(ctx, qp, qp->sq_depth);
	if (ret)
		hpre_stop_qp_and_put(qp);

	return ret;
}

static int hpre_msg_request_set(struct hpre_ctx *ctx, void *req, bool is_rsa)
{
	struct hpre_asym_request *h_req;
	struct hpre_sqe *msg;
	int req_id;
	void *tmp;

	if (is_rsa) {
		struct akcipher_request *akreq = req;

		if (akreq->dst_len < ctx->key_sz) {
			akreq->dst_len = ctx->key_sz;
			return -EOVERFLOW;
		}

		tmp = akcipher_request_ctx(akreq);
		h_req = PTR_ALIGN(tmp, hpre_align_sz());
		h_req->cb = hpre_rsa_cb;
		h_req->areq.rsa = akreq;
		msg = &h_req->req;
		memset(msg, 0, sizeof(*msg));
	} else {
		struct kpp_request *kreq = req;

		if (kreq->dst_len < ctx->key_sz) {
			kreq->dst_len = ctx->key_sz;
			return -EOVERFLOW;
		}

		tmp = kpp_request_ctx(kreq);
		h_req = PTR_ALIGN(tmp, hpre_align_sz());
		h_req->cb = hpre_dh_cb;
		h_req->areq.dh = kreq;
		msg = &h_req->req;
		memset(msg, 0, sizeof(*msg));
		msg->key = cpu_to_le64(ctx->dh.dma_xa_p);
	}

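	/*
	 * Poison in/out so the completion path can tell unmapped operands
	 * apart; task_len1 is the key size in 64-bit words minus one.
	 */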
	msg->in = cpu_to_le64(DMA_MAPPING_ERROR);
	msg->out = cpu_to_le64(DMA_MAPPING_ERROR);
	msg->dw0 |= cpu_to_le32(0x1 << HPRE_SQE_DONE_SHIFT);
	msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
	h_req->ctx = ctx;

	req_id = hpre_add_req_to_ctx(h_req);
	if (req_id < 0)
		return -EBUSY;

	msg->tag = cpu_to_le16((u16)req_id);

	return 0;
}

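/*
 * Enqueue one SQE, busy-retrying up to HPRE_TRY_SEND_TIMES while the
 * queue is full (-EBUSY); the DFX counters record sends, busy retries
 * and final failures.
 */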
static int hpre_send(struct hpre_ctx *ctx, struct hpre_sqe *msg)
{
	struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
	int ctr = 0;
	int ret;

	do {
		atomic64_inc(&dfx[HPRE_SEND_CNT].value);
		ret = hisi_qp_send(ctx->qp, msg);
		if (ret != -EBUSY)
			break;
		atomic64_inc(&dfx[HPRE_SEND_BUSY_CNT].value);
	} while (ctr++ < HPRE_TRY_SEND_TIMES);

	if (likely(!ret))
		return ret;

	if (ret != -EBUSY)
		atomic64_inc(&dfx[HPRE_SEND_FAIL_CNT].value);

	return ret;
}

static int hpre_dh_compute_value(struct kpp_request *req)
{
	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
	void *tmp = kpp_request_ctx(req);
	struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, hpre_align_sz());
	struct hpre_sqe *msg = &hpre_req->req;
	int ret;

	ret = hpre_msg_request_set(ctx, req, false);
	if (unlikely(ret))
		return ret;

	if (req->src) {
		ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 1);
		if (unlikely(ret))
			goto clear_all;
	} else {
		msg->in = cpu_to_le64(ctx->dh.dma_g);
	}

	ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 1);
	if (unlikely(ret))
		goto clear_all;

	if (ctx->crt_g2_mode && !req->src)
		msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH_G2);
	else
		msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH);

	/* success */
	ret = hpre_send(ctx, msg);
	if (likely(!ret))
		return -EINPROGRESS;

clear_all:
	hpre_rm_req_from_ctx(hpre_req);
	hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);

	return ret;
}

static int hpre_is_dh_params_length_valid(unsigned int key_sz)
{
#define _HPRE_DH_GRP1		768
#define _HPRE_DH_GRP2		1024
#define _HPRE_DH_GRP5		1536
#define _HPRE_DH_GRP14		2048
#define _HPRE_DH_GRP15		3072
#define _HPRE_DH_GRP16		4096
	switch (key_sz) {
	case _HPRE_DH_GRP1:
	case _HPRE_DH_GRP2:
	case _HPRE_DH_GRP5:
	case _HPRE_DH_GRP14:
	case _HPRE_DH_GRP15:
	case _HPRE_DH_GRP16:
		return 0;
	default:
		return -EINVAL;
	}
}

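/*
 * The DH key material lives in one coherent buffer of 2 * key_sz bytes:
 * the private exponent xa (right-aligned) in the low half and the prime
 * p in the high half, matching the "low address: d--->n" layout above.
 */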
static int hpre_dh_set_params(struct hpre_ctx *ctx, struct dh *params)
{
	struct device *dev = ctx->dev;
	unsigned int sz;

	if (params->p_size > HPRE_DH_MAX_P_SZ)
		return -EINVAL;

	if (hpre_is_dh_params_length_valid(params->p_size <<
					   HPRE_BITS_2_BYTES_SHIFT))
		return -EINVAL;

	sz = ctx->key_sz = params->p_size;
	ctx->dh.xa_p = dma_alloc_coherent(dev, sz << 1,
					  &ctx->dh.dma_xa_p, GFP_KERNEL);
	if (!ctx->dh.xa_p)
		return -ENOMEM;

	memcpy(ctx->dh.xa_p + sz, params->p, sz);

	/* If g equals 2 don't copy it */
	if (params->g_size == 1 && *(char *)params->g == HPRE_DH_G_FLAG) {
		ctx->crt_g2_mode = true;
		return 0;
	}

	ctx->dh.g = dma_alloc_coherent(dev, sz, &ctx->dh.dma_g, GFP_KERNEL);
	if (!ctx->dh.g) {
		dma_free_coherent(dev, sz << 1, ctx->dh.xa_p,
				  ctx->dh.dma_xa_p);
		ctx->dh.xa_p = NULL;
		return -ENOMEM;
	}

	memcpy(ctx->dh.g + (sz - params->g_size), params->g, params->g_size);

	return 0;
}

static void hpre_dh_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
{
	struct device *dev = ctx->dev;
	unsigned int sz = ctx->key_sz;

	if (is_clear_all)
		hisi_qm_stop_qp(ctx->qp);

	if (ctx->dh.g) {
		dma_free_coherent(dev, sz, ctx->dh.g, ctx->dh.dma_g);
		ctx->dh.g = NULL;
	}

	if (ctx->dh.xa_p) {
		memzero_explicit(ctx->dh.xa_p, sz);
		dma_free_coherent(dev, sz << 1, ctx->dh.xa_p,
				  ctx->dh.dma_xa_p);
		ctx->dh.xa_p = NULL;
	}

	hpre_ctx_clear(ctx, is_clear_all);
}

static int hpre_dh_set_secret(struct crypto_kpp *tfm, const void *buf,
			      unsigned int len)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
	struct dh params;
	int ret;

	if (crypto_dh_decode_key(buf, len, &params) < 0)
		return -EINVAL;

	/* Free old secret if any */
	hpre_dh_clear_ctx(ctx, false);

	ret = hpre_dh_set_params(ctx, &params);
	if (ret < 0)
		goto err_clear_ctx;

	memcpy(ctx->dh.xa_p + (ctx->key_sz - params.key_size), params.key,
	       params.key_size);

	return 0;

err_clear_ctx:
	hpre_dh_clear_ctx(ctx, false);
	return ret;
}

static unsigned int hpre_dh_max_size(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	return ctx->key_sz;
}

static int hpre_dh_init_tfm(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());

	return hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE);
}

static void hpre_dh_exit_tfm(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	hpre_dh_clear_ctx(ctx, true);
}

static void hpre_rsa_drop_leading_zeros(const char **ptr, size_t *len)
{
	while (!**ptr && *len) {
		(*ptr)++;
		(*len)--;
	}
}

static bool hpre_rsa_key_size_is_support(unsigned int len)
{
	unsigned int bits = len << HPRE_BITS_2_BYTES_SHIFT;

#define _RSA_1024BITS_KEY_WDTH		1024
#define _RSA_2048BITS_KEY_WDTH		2048
#define _RSA_3072BITS_KEY_WDTH		3072
#define _RSA_4096BITS_KEY_WDTH		4096

	switch (bits) {
	case _RSA_1024BITS_KEY_WDTH:
	case _RSA_2048BITS_KEY_WDTH:
	case _RSA_3072BITS_KEY_WDTH:
	case _RSA_4096BITS_KEY_WDTH:
		return true;
	default:
		return false;
	}
}

static int hpre_rsa_enc(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
	void *tmp = akcipher_request_ctx(req);
	struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, hpre_align_sz());
	struct hpre_sqe *msg = &hpre_req->req;
	int ret;

	/* For 512 and 1536 bit key sizes, use the soft tfm instead */
	if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
	    ctx->key_sz == HPRE_RSA_1536BITS_KSZ) {
		akcipher_request_set_tfm(req, ctx->rsa.soft_tfm);
		ret = crypto_akcipher_encrypt(req);
		akcipher_request_set_tfm(req, tfm);
		return ret;
	}

	if (unlikely(!ctx->rsa.pubkey))
		return -EINVAL;

	ret = hpre_msg_request_set(ctx, req, true);
	if (unlikely(ret))
		return ret;

	msg->dw0 |= cpu_to_le32(HPRE_ALG_NC_NCRT);
	msg->key = cpu_to_le64(ctx->rsa.dma_pubkey);

	ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0);
	if (unlikely(ret))
		goto clear_all;

	ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0);
	if (unlikely(ret))
		goto clear_all;

	/* success */
	ret = hpre_send(ctx, msg);
	if (likely(!ret))
		return -EINPROGRESS;

clear_all:
	hpre_rm_req_from_ctx(hpre_req);
	hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);

	return ret;
}

static int hpre_rsa_dec(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
	void *tmp = akcipher_request_ctx(req);
	struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, hpre_align_sz());
	struct hpre_sqe *msg = &hpre_req->req;
	int ret;

	/* For 512 and 1536 bit key sizes, use the soft tfm instead */
	if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
	    ctx->key_sz == HPRE_RSA_1536BITS_KSZ) {
		akcipher_request_set_tfm(req, ctx->rsa.soft_tfm);
		ret = crypto_akcipher_decrypt(req);
		akcipher_request_set_tfm(req, tfm);
		return ret;
	}

	if (unlikely(!ctx->rsa.prikey))
		return -EINVAL;

	ret = hpre_msg_request_set(ctx, req, true);
	if (unlikely(ret))
		return ret;

	if (ctx->crt_g2_mode) {
		msg->key = cpu_to_le64(ctx->rsa.dma_crt_prikey);
		msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) |
				       HPRE_ALG_NC_CRT);
	} else {
		msg->key = cpu_to_le64(ctx->rsa.dma_prikey);
		msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) |
				       HPRE_ALG_NC_NCRT);
	}

	ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0);
	if (unlikely(ret))
		goto clear_all;

	ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0);
	if (unlikely(ret))
		goto clear_all;

	/* success */
	ret = hpre_send(ctx, msg);
	if (likely(!ret))
		return -EINPROGRESS;

clear_all:
	hpre_rm_req_from_ctx(hpre_req);
	hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);

	return ret;
}

static int hpre_rsa_set_n(struct hpre_ctx *ctx, const char *value,
			  size_t vlen, bool private)
{
	const char *ptr = value;

	hpre_rsa_drop_leading_zeros(&ptr, &vlen);

	ctx->key_sz = vlen;

	/* if an unsupported key size is provided, fall back to the software tfm */
	if (!hpre_rsa_key_size_is_support(ctx->key_sz))
		return 0;

	ctx->rsa.pubkey = dma_alloc_coherent(ctx->dev, vlen << 1,
					     &ctx->rsa.dma_pubkey,
					     GFP_KERNEL);
	if (!ctx->rsa.pubkey)
		return -ENOMEM;

	if (private) {
		ctx->rsa.prikey = dma_alloc_coherent(ctx->dev, vlen << 1,
						     &ctx->rsa.dma_prikey,
						     GFP_KERNEL);
		if (!ctx->rsa.prikey) {
			dma_free_coherent(ctx->dev, vlen << 1,
					  ctx->rsa.pubkey,
					  ctx->rsa.dma_pubkey);
			ctx->rsa.pubkey = NULL;
			return -ENOMEM;
		}
		memcpy(ctx->rsa.prikey + vlen, ptr, vlen);
	}
	memcpy(ctx->rsa.pubkey + vlen, ptr, vlen);

	/* Using hardware HPRE to do RSA */
	return 1;
}

static int hpre_rsa_set_e(struct hpre_ctx *ctx, const char *value,
			  size_t vlen)
{
	const char *ptr = value;

	hpre_rsa_drop_leading_zeros(&ptr, &vlen);

	if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
		return -EINVAL;

	memcpy(ctx->rsa.pubkey + ctx->key_sz - vlen, ptr, vlen);

	return 0;
}

static int hpre_rsa_set_d(struct hpre_ctx *ctx, const char *value,
			  size_t vlen)
{
	const char *ptr = value;

	hpre_rsa_drop_leading_zeros(&ptr, &vlen);

	if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
		return -EINVAL;

	memcpy(ctx->rsa.prikey + ctx->key_sz - vlen, ptr, vlen);

	return 0;
}

static int hpre_crt_para_get(char *para, size_t para_sz,
			     const char *raw, size_t raw_sz)
{
	const char *ptr = raw;
	size_t len = raw_sz;

	hpre_rsa_drop_leading_zeros(&ptr, &len);
	if (!len || len > para_sz)
		return -EINVAL;

	memcpy(para + para_sz - len, ptr, len);

	return 0;
}

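/*
 * The CRT private key is one coherent buffer of five half-key-size
 * fields, low address first: dq | dp | q | p | qinv, matching the
 * HPRE_CRT_{Q,P,INV} offsets used below.
 */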
static int hpre_rsa_setkey_crt(struct hpre_ctx *ctx, struct rsa_key *rsa_key)
{
	unsigned int hlf_ksz = ctx->key_sz >> 1;
	struct device *dev = ctx->dev;
	u64 offset;
	int ret;

	ctx->rsa.crt_prikey = dma_alloc_coherent(dev, hlf_ksz * HPRE_CRT_PRMS,
					&ctx->rsa.dma_crt_prikey,
					GFP_KERNEL);
	if (!ctx->rsa.crt_prikey)
		return -ENOMEM;

	ret = hpre_crt_para_get(ctx->rsa.crt_prikey, hlf_ksz,
				rsa_key->dq, rsa_key->dq_sz);
	if (ret)
		goto free_key;

	offset = hlf_ksz;
	ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
				rsa_key->dp, rsa_key->dp_sz);
	if (ret)
		goto free_key;

	offset = hlf_ksz * HPRE_CRT_Q;
	ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
				rsa_key->q, rsa_key->q_sz);
	if (ret)
		goto free_key;

	offset = hlf_ksz * HPRE_CRT_P;
	ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
				rsa_key->p, rsa_key->p_sz);
	if (ret)
		goto free_key;

	offset = hlf_ksz * HPRE_CRT_INV;
	ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
				rsa_key->qinv, rsa_key->qinv_sz);
	if (ret)
		goto free_key;

	ctx->crt_g2_mode = true;

	return 0;

free_key:
	offset = hlf_ksz * HPRE_CRT_PRMS;
	memzero_explicit(ctx->rsa.crt_prikey, offset);
	dma_free_coherent(dev, hlf_ksz * HPRE_CRT_PRMS, ctx->rsa.crt_prikey,
			  ctx->rsa.dma_crt_prikey);
	ctx->rsa.crt_prikey = NULL;
	ctx->crt_g2_mode = false;

	return ret;
}

/* If is_clear_all is set, all resources of the QP are released. */
static void hpre_rsa_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
{
	unsigned int half_key_sz = ctx->key_sz >> 1;
	struct device *dev = ctx->dev;

	if (is_clear_all)
		hisi_qm_stop_qp(ctx->qp);

	if (ctx->rsa.pubkey) {
		dma_free_coherent(dev, ctx->key_sz << 1,
				  ctx->rsa.pubkey, ctx->rsa.dma_pubkey);
		ctx->rsa.pubkey = NULL;
	}

	if (ctx->rsa.crt_prikey) {
		memzero_explicit(ctx->rsa.crt_prikey,
				 half_key_sz * HPRE_CRT_PRMS);
		dma_free_coherent(dev, half_key_sz * HPRE_CRT_PRMS,
				  ctx->rsa.crt_prikey, ctx->rsa.dma_crt_prikey);
		ctx->rsa.crt_prikey = NULL;
	}

	if (ctx->rsa.prikey) {
		memzero_explicit(ctx->rsa.prikey, ctx->key_sz);
		dma_free_coherent(dev, ctx->key_sz << 1, ctx->rsa.prikey,
				  ctx->rsa.dma_prikey);
		ctx->rsa.prikey = NULL;
	}

	hpre_ctx_clear(ctx, is_clear_all);
}

/*
 * Judge whether the key is in CRT format:
 * CRT: return true, N-CRT: return false.
 */
static bool hpre_is_crt_key(struct rsa_key *key)
{
	u16 len = key->p_sz + key->q_sz + key->dp_sz + key->dq_sz +
		  key->qinv_sz;

#define LEN_OF_NCRT_PARA	5

	/* An N-CRT key carries fewer than 5 bytes of CRT parameters in total */
	return len > LEN_OF_NCRT_PARA;
}

static int hpre_rsa_setkey(struct hpre_ctx *ctx, const void *key,
			   unsigned int keylen, bool private)
{
	struct rsa_key rsa_key;
	int ret;

	hpre_rsa_clear_ctx(ctx, false);

	if (private)
		ret = rsa_parse_priv_key(&rsa_key, key, keylen);
	else
		ret = rsa_parse_pub_key(&rsa_key, key, keylen);
	if (ret < 0)
		return ret;

	ret = hpre_rsa_set_n(ctx, rsa_key.n, rsa_key.n_sz, private);
	if (ret <= 0)
		return ret;

	if (private) {
		ret = hpre_rsa_set_d(ctx, rsa_key.d, rsa_key.d_sz);
		if (ret < 0)
			goto free;

		if (hpre_is_crt_key(&rsa_key)) {
			ret = hpre_rsa_setkey_crt(ctx, &rsa_key);
			if (ret < 0)
				goto free;
		}
	}

	ret = hpre_rsa_set_e(ctx, rsa_key.e, rsa_key.e_sz);
	if (ret < 0)
		goto free;

	if ((private && !ctx->rsa.prikey) || !ctx->rsa.pubkey) {
		ret = -EINVAL;
		goto free;
	}

	return 0;

free:
	hpre_rsa_clear_ctx(ctx, false);
	return ret;
}

static int hpre_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key,
			      unsigned int keylen)
{
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
	int ret;

	ret = crypto_akcipher_set_pub_key(ctx->rsa.soft_tfm, key, keylen);
	if (ret)
		return ret;

	return hpre_rsa_setkey(ctx, key, keylen, false);
}

static int hpre_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key,
			       unsigned int keylen)
{
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
	int ret;

	ret = crypto_akcipher_set_priv_key(ctx->rsa.soft_tfm, key, keylen);
	if (ret)
		return ret;

	return hpre_rsa_setkey(ctx, key, keylen, true);
}

static unsigned int hpre_rsa_max_size(struct crypto_akcipher *tfm)
{
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);

	/* For 512 and 1536 bit key sizes, use the soft tfm instead */
	if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
	    ctx->key_sz == HPRE_RSA_1536BITS_KSZ)
		return crypto_akcipher_maxsize(ctx->rsa.soft_tfm);

	return ctx->key_sz;
}

static int hpre_rsa_init_tfm(struct crypto_akcipher *tfm)
{
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
	int ret;

	ctx->rsa.soft_tfm = crypto_alloc_akcipher("rsa-generic", 0, 0);
	if (IS_ERR(ctx->rsa.soft_tfm)) {
		pr_err("Cannot alloc akcipher!\n");
		return PTR_ERR(ctx->rsa.soft_tfm);
	}

	akcipher_set_reqsize(tfm, sizeof(struct hpre_asym_request) +
				  hpre_align_pd());

	ret = hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE);
	if (ret)
		crypto_free_akcipher(ctx->rsa.soft_tfm);

	return ret;
}

static void hpre_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);

	hpre_rsa_clear_ctx(ctx, true);
	crypto_free_akcipher(ctx->rsa.soft_tfm);
}

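/* Reverse the byte order in place (little-endian <-> big-endian). */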
static void hpre_key_to_big_end(u8 *data, int len)
{
	int i, j;

	for (i = 0; i < len / 2; i++) {
		j = len - i - 1;
		swap(data[j], data[i]);
	}
}

static void hpre_ecc_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all,
			       bool is_ecdh)
{
	struct device *dev = ctx->dev;
	unsigned int sz = ctx->key_sz;
	unsigned int shift = sz << 1;

	if (is_clear_all)
		hisi_qm_stop_qp(ctx->qp);

	if (is_ecdh && ctx->ecdh.p) {
		/* ecdh: p->a->k->b */
		memzero_explicit(ctx->ecdh.p + shift, sz);
		dma_free_coherent(dev, sz << 3, ctx->ecdh.p, ctx->ecdh.dma_p);
		ctx->ecdh.p = NULL;
	} else if (!is_ecdh && ctx->curve25519.p) {
		/* curve25519: p->a->k */
		memzero_explicit(ctx->curve25519.p + shift, sz);
		dma_free_coherent(dev, sz << 2, ctx->curve25519.p,
				  ctx->curve25519.dma_p);
		ctx->curve25519.p = NULL;
	}

	hpre_ctx_clear(ctx, is_clear_all);
}

/*
 * HPRE supports curve widths of 192/224/256/384/521 bits and rounds them
 * up as follows:
 * bits <= 256 -> 256; 256 < bits <= 384 -> 384; 384 < bits <= 576 -> 576.
 * If the parameter bit width is smaller, the driver fills in the
 * high-order zeros in software, so TASK_LENGTH1 is 0x3/0x5/0x8.
 */
static unsigned int hpre_ecdh_supported_curve(unsigned short id)
{
	switch (id) {
	case ECC_CURVE_NIST_P192:
	case ECC_CURVE_NIST_P256:
		return HPRE_ECC_HW256_KSZ_B;
	case ECC_CURVE_NIST_P384:
		return HPRE_ECC_HW384_KSZ_B;
	default:
		break;
	}

	return 0;
}

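/*
 * Curve parameters are arrays of little-endian u64 digits; copy cur_sz
 * bytes of them into @addr, then flip the whole buffer to the big-endian
 * form the hardware expects.
 */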
static void fill_curve_param(void *addr, u64 *param, unsigned int cur_sz, u8 ndigits)
{
	unsigned int sz = cur_sz - (ndigits - 1) * sizeof(u64);
	u8 i = 0;

	while (i < ndigits - 1) {
		memcpy(addr + sizeof(u64) * i, &param[i], sizeof(u64));
		i++;
	}

	memcpy(addr + sizeof(u64) * i, &param[ndigits - 1], sz);
	hpre_key_to_big_end((u8 *)addr, cur_sz);
}

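/*
 * Layout of the ecdh.p buffer filled here, one right-aligned key_sz slot
 * each, low address first: p | a | k | b | gx | gy (g points at the gx
 * slot, see hpre_ecdh_set_param() below; k is copied in by
 * hpre_ecdh_set_secret()).
 */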
static int hpre_ecdh_fill_curve(struct hpre_ctx *ctx, struct ecdh *params,
				unsigned int cur_sz)
{
	unsigned int shifta = ctx->key_sz << 1;
	unsigned int shiftb = ctx->key_sz << 2;
	void *p = ctx->ecdh.p + ctx->key_sz - cur_sz;
	void *a = ctx->ecdh.p + shifta - cur_sz;
	void *b = ctx->ecdh.p + shiftb - cur_sz;
	void *x = ctx->ecdh.g + ctx->key_sz - cur_sz;
	void *y = ctx->ecdh.g + shifta - cur_sz;
	const struct ecc_curve *curve = ecc_get_curve(ctx->curve_id);
	char *n;

	if (unlikely(!curve))
		return -EINVAL;

	n = kzalloc(ctx->key_sz, GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	fill_curve_param(p, curve->p, cur_sz, curve->g.ndigits);
	fill_curve_param(a, curve->a, cur_sz, curve->g.ndigits);
	fill_curve_param(b, curve->b, cur_sz, curve->g.ndigits);
	fill_curve_param(x, curve->g.x, cur_sz, curve->g.ndigits);
	fill_curve_param(y, curve->g.y, cur_sz, curve->g.ndigits);
	fill_curve_param(n, curve->n, cur_sz, curve->g.ndigits);

	if (params->key_size == cur_sz && memcmp(params->key, n, cur_sz) >= 0) {
		kfree(n);
		return -EINVAL;
	}

	kfree(n);
	return 0;
}

static unsigned int hpre_ecdh_get_curvesz(unsigned short id)
{
	switch (id) {
	case ECC_CURVE_NIST_P192:
		return HPRE_ECC_NIST_P192_N_SIZE;
	case ECC_CURVE_NIST_P256:
		return HPRE_ECC_NIST_P256_N_SIZE;
	case ECC_CURVE_NIST_P384:
		return HPRE_ECC_NIST_P384_N_SIZE;
	default:
		break;
	}

	return 0;
}

static int hpre_ecdh_set_param(struct hpre_ctx *ctx, struct ecdh *params)
{
	struct device *dev = ctx->dev;
	unsigned int sz, shift, curve_sz;
	int ret;

	ctx->key_sz = hpre_ecdh_supported_curve(ctx->curve_id);
	if (!ctx->key_sz)
		return -EINVAL;

	curve_sz = hpre_ecdh_get_curvesz(ctx->curve_id);
	if (!curve_sz || params->key_size > curve_sz)
		return -EINVAL;

	sz = ctx->key_sz;

	if (!ctx->ecdh.p) {
		ctx->ecdh.p = dma_alloc_coherent(dev, sz << 3, &ctx->ecdh.dma_p,
						 GFP_KERNEL);
		if (!ctx->ecdh.p)
			return -ENOMEM;
	}

	shift = sz << 2;
	ctx->ecdh.g = ctx->ecdh.p + shift;
	ctx->ecdh.dma_g = ctx->ecdh.dma_p + shift;

	ret = hpre_ecdh_fill_curve(ctx, params, curve_sz);
	if (ret) {
		dev_err(dev, "failed to fill curve_param, ret = %d!\n", ret);
		dma_free_coherent(dev, sz << 3, ctx->ecdh.p, ctx->ecdh.dma_p);
		ctx->ecdh.p = NULL;
		return ret;
	}

	return 0;
}

static bool hpre_key_is_zero(char *key, unsigned short key_sz)
{
	int i;

	for (i = 0; i < key_sz; i++)
		if (key[i])
			return false;

	return true;
}

static int ecdh_gen_privkey(struct hpre_ctx *ctx, struct ecdh *params)
{
	struct device *dev = ctx->dev;
	int ret;

	ret = crypto_get_default_rng();
	if (ret) {
		dev_err(dev, "failed to get default rng, ret = %d!\n", ret);
		return ret;
	}

	ret = crypto_rng_get_bytes(crypto_default_rng, (u8 *)params->key,
				   params->key_size);
	crypto_put_default_rng();
	if (ret)
		dev_err(dev, "failed to get rng, ret = %d!\n", ret);

	return ret;
}

static int hpre_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
				unsigned int len)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
	unsigned int sz, sz_shift, curve_sz;
	struct device *dev = ctx->dev;
	char key[HPRE_ECC_MAX_KSZ];
	struct ecdh params;
	int ret;

	if (crypto_ecdh_decode_key(buf, len, &params) < 0) {
		dev_err(dev, "failed to decode ecdh key!\n");
		return -EINVAL;
	}

	/* Use stdrng to generate private key */
	if (!params.key || !params.key_size) {
		params.key = key;
		curve_sz = hpre_ecdh_get_curvesz(ctx->curve_id);
		if (!curve_sz) {
			dev_err(dev, "Invalid curve size!\n");
			return -EINVAL;
		}

		params.key_size = curve_sz - 1;
		ret = ecdh_gen_privkey(ctx, &params);
		if (ret)
			return ret;
	}

	if (hpre_key_is_zero(params.key, params.key_size)) {
		dev_err(dev, "Invalid hpre key!\n");
		return -EINVAL;
	}

	hpre_ecc_clear_ctx(ctx, false, true);

	ret = hpre_ecdh_set_param(ctx, &params);
	if (ret < 0) {
		dev_err(dev, "failed to set hpre param, ret = %d!\n", ret);
		return ret;
	}

	sz = ctx->key_sz;
	sz_shift = (sz << 1) + sz - params.key_size;
	memcpy(ctx->ecdh.p + sz_shift, params.key, params.key_size);

	return 0;
}

static void hpre_ecdh_hw_data_clr_all(struct hpre_ctx *ctx,
				      struct hpre_asym_request *req,
				      struct scatterlist *dst,
				      struct scatterlist *src)
{
	struct device *dev = ctx->dev;
	struct hpre_sqe *sqe = &req->req;
	dma_addr_t dma;

	dma = le64_to_cpu(sqe->in);
	if (unlikely(dma_mapping_error(dev, dma)))
		return;

	if (src && req->src)
		dma_free_coherent(dev, ctx->key_sz << 2, req->src, dma);

	dma = le64_to_cpu(sqe->out);
	if (unlikely(dma_mapping_error(dev, dma)))
		return;

	if (req->dst)
		dma_free_coherent(dev, ctx->key_sz << 1, req->dst, dma);
	if (dst)
		dma_unmap_single(dev, dma, ctx->key_sz << 1, DMA_FROM_DEVICE);
}

static void hpre_ecdh_cb(struct hpre_ctx *ctx, void *resp)
{
	unsigned int curve_sz = hpre_ecdh_get_curvesz(ctx->curve_id);
	struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
	struct hpre_asym_request *req = NULL;
	struct kpp_request *areq;
	u64 overtime_thrhld;
	char *p;
	int ret;

	ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
	areq = req->areq.ecdh;
	areq->dst_len = ctx->key_sz << 1;

	overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
	if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
		atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);

	p = sg_virt(areq->dst);
	memmove(p, p + ctx->key_sz - curve_sz, curve_sz);
	memmove(p + curve_sz, p + areq->dst_len - curve_sz, curve_sz);

	hpre_ecdh_hw_data_clr_all(ctx, req, areq->dst, areq->src);
	kpp_request_complete(areq, ret);

	atomic64_inc(&dfx[HPRE_RECV_CNT].value);
}

static int hpre_ecdh_msg_request_set(struct hpre_ctx *ctx,
				     struct kpp_request *req)
{
	struct hpre_asym_request *h_req;
	struct hpre_sqe *msg;
	int req_id;
	void *tmp;

	if (req->dst_len < ctx->key_sz << 1) {
		req->dst_len = ctx->key_sz << 1;
		return -EINVAL;
	}

	tmp = kpp_request_ctx(req);
	h_req = PTR_ALIGN(tmp, hpre_align_sz());
	h_req->cb = hpre_ecdh_cb;
	h_req->areq.ecdh = req;
	msg = &h_req->req;
	memset(msg, 0, sizeof(*msg));
	msg->in = cpu_to_le64(DMA_MAPPING_ERROR);
	msg->out = cpu_to_le64(DMA_MAPPING_ERROR);
	msg->key = cpu_to_le64(ctx->ecdh.dma_p);

	msg->dw0 |= cpu_to_le32(0x1U << HPRE_SQE_DONE_SHIFT);
	msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
	h_req->ctx = ctx;

	req_id = hpre_add_req_to_ctx(h_req);
	if (req_id < 0)
		return -EBUSY;

	msg->tag = cpu_to_le16((u16)req_id);
	return 0;
}

static int hpre_ecdh_src_data_init(struct hpre_asym_request *hpre_req,
				   struct scatterlist *data, unsigned int len)
{
	struct hpre_sqe *msg = &hpre_req->req;
	struct hpre_ctx *ctx = hpre_req->ctx;
	struct device *dev = ctx->dev;
	unsigned int tmpshift;
	dma_addr_t dma = 0;
	void *ptr;
	int shift;

	/* Src_data includes gx and gy. */
	shift = ctx->key_sz - (len >> 1);
	if (unlikely(shift < 0))
		return -EINVAL;

	ptr = dma_alloc_coherent(dev, ctx->key_sz << 2, &dma, GFP_KERNEL);
	if (unlikely(!ptr))
		return -ENOMEM;

	tmpshift = ctx->key_sz << 1;
	scatterwalk_map_and_copy(ptr + tmpshift, data, 0, len, 0);
	memcpy(ptr + shift, ptr + tmpshift, len >> 1);
	memcpy(ptr + ctx->key_sz + shift, ptr + tmpshift + (len >> 1), len >> 1);

	hpre_req->src = ptr;
	msg->in = cpu_to_le64(dma);
	return 0;
}

static int hpre_ecdh_dst_data_init(struct hpre_asym_request *hpre_req,
				   struct scatterlist *data, unsigned int len)
{
	struct hpre_sqe *msg = &hpre_req->req;
	struct hpre_ctx *ctx = hpre_req->ctx;
	struct device *dev = ctx->dev;
	dma_addr_t dma;

	if (unlikely(!data || !sg_is_last(data) || len != ctx->key_sz << 1)) {
		dev_err(dev, "data or data length is illegal!\n");
		return -EINVAL;
	}

	hpre_req->dst = NULL;
	dma = dma_map_single(dev, sg_virt(data), len, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, dma))) {
		dev_err(dev, "dma map data err!\n");
		return -ENOMEM;
	}

	msg->out = cpu_to_le64(dma);
	return 0;
}

static int hpre_ecdh_compute_value(struct kpp_request *req)
{
	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
	struct device *dev = ctx->dev;
	void *tmp = kpp_request_ctx(req);
	struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, hpre_align_sz());
	struct hpre_sqe *msg = &hpre_req->req;
	int ret;

	ret = hpre_ecdh_msg_request_set(ctx, req);
	if (unlikely(ret)) {
		dev_err(dev, "failed to set ecdh request, ret = %d!\n", ret);
		return ret;
	}

	if (req->src) {
		ret = hpre_ecdh_src_data_init(hpre_req, req->src, req->src_len);
		if (unlikely(ret)) {
			dev_err(dev, "failed to init src data, ret = %d!\n", ret);
			goto clear_all;
		}
	} else {
		msg->in = cpu_to_le64(ctx->ecdh.dma_g);
	}

	ret = hpre_ecdh_dst_data_init(hpre_req, req->dst, req->dst_len);
	if (unlikely(ret)) {
		dev_err(dev, "failed to init dst data, ret = %d!\n", ret);
		goto clear_all;
	}

	msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_ECC_MUL);
	ret = hpre_send(ctx, msg);
	if (likely(!ret))
		return -EINPROGRESS;

clear_all:
	hpre_rm_req_from_ctx(hpre_req);
	hpre_ecdh_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
	return ret;
}

static unsigned int hpre_ecdh_max_size(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	/* max size is the pub_key_size, which includes both x and y */
	return ctx->key_sz << 1;
}

static int hpre_ecdh_nist_p192_init_tfm(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	ctx->curve_id = ECC_CURVE_NIST_P192;

	kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());

	return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
}

static int hpre_ecdh_nist_p256_init_tfm(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	ctx->curve_id = ECC_CURVE_NIST_P256;

	kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());

	return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
}

static int hpre_ecdh_nist_p384_init_tfm(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	ctx->curve_id = ECC_CURVE_NIST_P384;

	kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());

	return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
}

static void hpre_ecdh_exit_tfm(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	hpre_ecc_clear_ctx(ctx, true, true);
}

static void hpre_curve25519_fill_curve(struct hpre_ctx *ctx, const void *buf,
				       unsigned int len)
{
	u8 secret[CURVE25519_KEY_SIZE] = { 0 };
	unsigned int sz = ctx->key_sz;
	const struct ecc_curve *curve;
	unsigned int shift = sz << 1;
	void *p;

	/*
	 * The key from 'buf' is in little-endian order; preprocess it as
	 * described in RFC 7748 ("k[0] &= 248, k[31] &= 127, k[31] |= 64"),
	 * then convert it to big-endian. Only then does the result match
	 * the software curve25519 implementation in crypto.
	 */
	memcpy(secret, buf, len);
	curve25519_clamp_secret(secret);
	hpre_key_to_big_end(secret, CURVE25519_KEY_SIZE);

	p = ctx->curve25519.p + sz - len;

	curve = ecc_get_curve25519();

	/* fill curve parameters */
	fill_curve_param(p, curve->p, len, curve->g.ndigits);
	fill_curve_param(p + sz, curve->a, len, curve->g.ndigits);
	memcpy(p + shift, secret, len);
	fill_curve_param(p + shift + sz, curve->g.x, len, curve->g.ndigits);
	memzero_explicit(secret, CURVE25519_KEY_SIZE);
}

static int hpre_curve25519_set_param(struct hpre_ctx *ctx, const void *buf,
				     unsigned int len)
{
	struct device *dev = ctx->dev;
	unsigned int sz = ctx->key_sz;
	unsigned int shift = sz << 1;

	/* p->a->k->gx */
	if (!ctx->curve25519.p) {
		ctx->curve25519.p = dma_alloc_coherent(dev, sz << 2,
						       &ctx->curve25519.dma_p,
						       GFP_KERNEL);
		if (!ctx->curve25519.p)
			return -ENOMEM;
	}

	ctx->curve25519.g = ctx->curve25519.p + shift + sz;
	ctx->curve25519.dma_g = ctx->curve25519.dma_p + shift + sz;

	hpre_curve25519_fill_curve(ctx, buf, len);

	return 0;
}

static int hpre_curve25519_set_secret(struct crypto_kpp *tfm, const void *buf,
				      unsigned int len)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
	struct device *dev = ctx->dev;
	int ret = -EINVAL;

	if (len != CURVE25519_KEY_SIZE ||
	    !crypto_memneq(buf, curve25519_null_point, CURVE25519_KEY_SIZE)) {
		dev_err(dev, "key is null or key len is not 32 bytes!\n");
		return ret;
	}

	/* Free old secret if any */
	hpre_ecc_clear_ctx(ctx, false, false);

	ctx->key_sz = CURVE25519_KEY_SIZE;
	ret = hpre_curve25519_set_param(ctx, buf, CURVE25519_KEY_SIZE);
	if (ret) {
		dev_err(dev, "failed to set curve25519 param, ret = %d!\n", ret);
		hpre_ecc_clear_ctx(ctx, false, false);
		return ret;
	}

	return 0;
}

static void hpre_curve25519_hw_data_clr_all(struct hpre_ctx *ctx,
					    struct hpre_asym_request *req,
					    struct scatterlist *dst,
					    struct scatterlist *src)
{
	struct device *dev = ctx->dev;
	struct hpre_sqe *sqe = &req->req;
	dma_addr_t dma;

	dma = le64_to_cpu(sqe->in);
	if (unlikely(dma_mapping_error(dev, dma)))
		return;

	if (src && req->src)
		dma_free_coherent(dev, ctx->key_sz, req->src, dma);

	dma = le64_to_cpu(sqe->out);
	if (unlikely(dma_mapping_error(dev, dma)))
		return;

	if (req->dst)
		dma_free_coherent(dev, ctx->key_sz, req->dst, dma);
	if (dst)
		dma_unmap_single(dev, dma, ctx->key_sz, DMA_FROM_DEVICE);
}

static void hpre_curve25519_cb(struct hpre_ctx *ctx, void *resp)
{
	struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
	struct hpre_asym_request *req = NULL;
	struct kpp_request *areq;
	u64 overtime_thrhld;
	int ret;

	ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
	areq = req->areq.curve25519;
	areq->dst_len = ctx->key_sz;

	overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
	if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
		atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);

	hpre_key_to_big_end(sg_virt(areq->dst), CURVE25519_KEY_SIZE);

	hpre_curve25519_hw_data_clr_all(ctx, req, areq->dst, areq->src);
	kpp_request_complete(areq, ret);

	atomic64_inc(&dfx[HPRE_RECV_CNT].value);
}

static int hpre_curve25519_msg_request_set(struct hpre_ctx *ctx,
					   struct kpp_request *req)
{
	struct hpre_asym_request *h_req;
	struct hpre_sqe *msg;
	int req_id;
	void *tmp;

	if (unlikely(req->dst_len < ctx->key_sz)) {
		req->dst_len = ctx->key_sz;
		return -EINVAL;
	}

	tmp = kpp_request_ctx(req);
	h_req = PTR_ALIGN(tmp, hpre_align_sz());
	h_req->cb = hpre_curve25519_cb;
	h_req->areq.curve25519 = req;
	msg = &h_req->req;
	memset(msg, 0, sizeof(*msg));
	msg->in = cpu_to_le64(DMA_MAPPING_ERROR);
	msg->out = cpu_to_le64(DMA_MAPPING_ERROR);
	msg->key = cpu_to_le64(ctx->curve25519.dma_p);

	msg->dw0 |= cpu_to_le32(0x1U << HPRE_SQE_DONE_SHIFT);
	msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
	h_req->ctx = ctx;

	req_id = hpre_add_req_to_ctx(h_req);
	if (req_id < 0)
		return -EBUSY;

	msg->tag = cpu_to_le16((u16)req_id);
	return 0;
}

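/*
 * At this point ptr is big-endian with its top bit already cleared, so
 * its value lies in [p, 2^255 - 1] where p = 2^255 - 19. It therefore
 * matches p in every byte except the last, and ptr - p fits entirely in
 * that final byte.
 */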
static void hpre_curve25519_src_modulo_p(u8 *ptr)
{
	int i;

	for (i = 0; i < CURVE25519_KEY_SIZE - 1; i++)
		ptr[i] = 0;

	/* The residue is ptr's last byte minus 0xed (the last byte of p) */
	ptr[i] -= 0xed;
}

static int hpre_curve25519_src_init(struct hpre_asym_request *hpre_req,
				    struct scatterlist *data, unsigned int len)
{
	struct hpre_sqe *msg = &hpre_req->req;
	struct hpre_ctx *ctx = hpre_req->ctx;
	struct device *dev = ctx->dev;
	u8 p[CURVE25519_KEY_SIZE] = { 0 };
	const struct ecc_curve *curve;
	dma_addr_t dma = 0;
	u8 *ptr;

	if (len != CURVE25519_KEY_SIZE) {
		dev_err(dev, "src_data len is not 32 bytes, len = %u!\n", len);
		return -EINVAL;
	}

	ptr = dma_alloc_coherent(dev, ctx->key_sz, &dma, GFP_KERNEL);
	if (unlikely(!ptr))
		return -ENOMEM;

	scatterwalk_map_and_copy(ptr, data, 0, len, 0);

	if (!crypto_memneq(ptr, curve25519_null_point, CURVE25519_KEY_SIZE)) {
		dev_err(dev, "gx is null!\n");
		goto err;
	}

	/*
	 * Src_data (gx) is in little-endian order; the MSB of the final
	 * byte must be masked as described in RFC 7748, and the data then
	 * transformed to big-endian form before hisi_hpre can use it.
	 */
	ptr[31] &= 0x7f;
	hpre_key_to_big_end(ptr, CURVE25519_KEY_SIZE);

	curve = ecc_get_curve25519();

	fill_curve_param(p, curve->p, CURVE25519_KEY_SIZE, curve->g.ndigits);

	/*
	 * When src_data lies in [2^255 - 19, 2^255 - 1] it is not less
	 * than p, so reduce it modulo p before use.
	 */
	if (memcmp(ptr, p, ctx->key_sz) == 0) {
		dev_err(dev, "gx is p!\n");
		goto err;
	} else if (memcmp(ptr, p, ctx->key_sz) > 0) {
		hpre_curve25519_src_modulo_p(ptr);
	}

	hpre_req->src = ptr;
	msg->in = cpu_to_le64(dma);
	return 0;

err:
	dma_free_coherent(dev, ctx->key_sz, ptr, dma);
	return -EINVAL;
}

static int hpre_curve25519_dst_init(struct hpre_asym_request *hpre_req,
				    struct scatterlist *data, unsigned int len)
{
	struct hpre_sqe *msg = &hpre_req->req;
	struct hpre_ctx *ctx = hpre_req->ctx;
	struct device *dev = ctx->dev;
	dma_addr_t dma;

	if (!data || !sg_is_last(data) || len != ctx->key_sz) {
		dev_err(dev, "data or data length is illegal!\n");
		return -EINVAL;
	}

	hpre_req->dst = NULL;
	dma = dma_map_single(dev, sg_virt(data), len, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, dma))) {
		dev_err(dev, "dma map data err!\n");
		return -ENOMEM;
	}

	msg->out = cpu_to_le64(dma);
	return 0;
}

static int hpre_curve25519_compute_value(struct kpp_request *req)
{
	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
	struct device *dev = ctx->dev;
	void *tmp = kpp_request_ctx(req);
	struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, hpre_align_sz());
	struct hpre_sqe *msg = &hpre_req->req;
	int ret;

	ret = hpre_curve25519_msg_request_set(ctx, req);
	if (unlikely(ret)) {
		dev_err(dev, "failed to set curve25519 request, ret = %d!\n", ret);
		return ret;
	}

	if (req->src) {
		ret = hpre_curve25519_src_init(hpre_req, req->src, req->src_len);
		if (unlikely(ret)) {
			dev_err(dev, "failed to init src data, ret = %d!\n",
				ret);
			goto clear_all;
		}
	} else {
		msg->in = cpu_to_le64(ctx->curve25519.dma_g);
	}

	ret = hpre_curve25519_dst_init(hpre_req, req->dst, req->dst_len);
	if (unlikely(ret)) {
		dev_err(dev, "failed to init dst data, ret = %d!\n", ret);
		goto clear_all;
	}

	msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_CURVE25519_MUL);
	ret = hpre_send(ctx, msg);
	if (likely(!ret))
		return -EINPROGRESS;

clear_all:
	hpre_rm_req_from_ctx(hpre_req);
	hpre_curve25519_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
	return ret;
}

static unsigned int hpre_curve25519_max_size(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	return ctx->key_sz;
}

static int hpre_curve25519_init_tfm(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());

	return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
}

static void hpre_curve25519_exit_tfm(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	hpre_ecc_clear_ctx(ctx, true, false);
}

static struct akcipher_alg rsa = {
	.sign = hpre_rsa_dec,
	.verify = hpre_rsa_enc,
	.encrypt = hpre_rsa_enc,
	.decrypt = hpre_rsa_dec,
	.set_pub_key = hpre_rsa_setpubkey,
	.set_priv_key = hpre_rsa_setprivkey,
	.max_size = hpre_rsa_max_size,
	.init = hpre_rsa_init_tfm,
	.exit = hpre_rsa_exit_tfm,
	.base = {
		.cra_ctxsize = sizeof(struct hpre_ctx),
		.cra_priority = HPRE_CRYPTO_ALG_PRI,
		.cra_name = "rsa",
		.cra_driver_name = "hpre-rsa",
		.cra_module = THIS_MODULE,
	},
};

static struct kpp_alg dh = {
	.set_secret = hpre_dh_set_secret,
	.generate_public_key = hpre_dh_compute_value,
	.compute_shared_secret = hpre_dh_compute_value,
	.max_size = hpre_dh_max_size,
	.init = hpre_dh_init_tfm,
	.exit = hpre_dh_exit_tfm,
	.base = {
		.cra_ctxsize = sizeof(struct hpre_ctx),
		.cra_priority = HPRE_CRYPTO_ALG_PRI,
		.cra_name = "dh",
		.cra_driver_name = "hpre-dh",
		.cra_module = THIS_MODULE,
	},
};

static struct kpp_alg ecdh_curves[] = {
	{
		.set_secret = hpre_ecdh_set_secret,
		.generate_public_key = hpre_ecdh_compute_value,
		.compute_shared_secret = hpre_ecdh_compute_value,
		.max_size = hpre_ecdh_max_size,
		.init = hpre_ecdh_nist_p192_init_tfm,
		.exit = hpre_ecdh_exit_tfm,
		.base = {
			.cra_ctxsize = sizeof(struct hpre_ctx),
			.cra_priority = HPRE_CRYPTO_ALG_PRI,
			.cra_name = "ecdh-nist-p192",
			.cra_driver_name = "hpre-ecdh-nist-p192",
			.cra_module = THIS_MODULE,
		},
	}, {
		.set_secret = hpre_ecdh_set_secret,
		.generate_public_key = hpre_ecdh_compute_value,
		.compute_shared_secret = hpre_ecdh_compute_value,
		.max_size = hpre_ecdh_max_size,
		.init = hpre_ecdh_nist_p256_init_tfm,
		.exit = hpre_ecdh_exit_tfm,
		.base = {
			.cra_ctxsize = sizeof(struct hpre_ctx),
			.cra_priority = HPRE_CRYPTO_ALG_PRI,
			.cra_name = "ecdh-nist-p256",
			.cra_driver_name = "hpre-ecdh-nist-p256",
			.cra_module = THIS_MODULE,
		},
	}, {
		.set_secret = hpre_ecdh_set_secret,
		.generate_public_key = hpre_ecdh_compute_value,
		.compute_shared_secret = hpre_ecdh_compute_value,
		.max_size = hpre_ecdh_max_size,
		.init = hpre_ecdh_nist_p384_init_tfm,
		.exit = hpre_ecdh_exit_tfm,
		.base = {
			.cra_ctxsize = sizeof(struct hpre_ctx),
			.cra_priority = HPRE_CRYPTO_ALG_PRI,
			.cra_name = "ecdh-nist-p384",
			.cra_driver_name = "hpre-ecdh-nist-p384",
			.cra_module = THIS_MODULE,
		},
	}
};

static struct kpp_alg curve25519_alg = {
	.set_secret = hpre_curve25519_set_secret,
	.generate_public_key = hpre_curve25519_compute_value,
	.compute_shared_secret = hpre_curve25519_compute_value,
	.max_size = hpre_curve25519_max_size,
	.init = hpre_curve25519_init_tfm,
	.exit = hpre_curve25519_exit_tfm,
	.base = {
		.cra_ctxsize = sizeof(struct hpre_ctx),
		.cra_priority = HPRE_CRYPTO_ALG_PRI,
		.cra_name = "curve25519",
		.cra_driver_name = "hpre-curve25519",
		.cra_module = THIS_MODULE,
	},
};

static int hpre_register_rsa(struct hisi_qm *qm)
{
	int ret;

	if (!hpre_check_alg_support(qm, HPRE_DRV_RSA_MASK_CAP))
		return 0;

	rsa.base.cra_flags = 0;
	ret = crypto_register_akcipher(&rsa);
	if (ret)
		dev_err(&qm->pdev->dev, "failed to register rsa (%d)!\n", ret);

	return ret;
}

static void hpre_unregister_rsa(struct hisi_qm *qm)
{
	if (!hpre_check_alg_support(qm, HPRE_DRV_RSA_MASK_CAP))
		return;

	crypto_unregister_akcipher(&rsa);
}

static int hpre_register_dh(struct hisi_qm *qm)
{
	int ret;

	if (!hpre_check_alg_support(qm, HPRE_DRV_DH_MASK_CAP))
		return 0;

	ret = crypto_register_kpp(&dh);
	if (ret)
		dev_err(&qm->pdev->dev, "failed to register dh (%d)!\n", ret);

	return ret;
}

static void hpre_unregister_dh(struct hisi_qm *qm)
{
	if (!hpre_check_alg_support(qm, HPRE_DRV_DH_MASK_CAP))
		return;

	crypto_unregister_kpp(&dh);
}

static int hpre_register_ecdh(struct hisi_qm *qm)
{
	int ret, i;

	if (!hpre_check_alg_support(qm, HPRE_DRV_ECDH_MASK_CAP))
		return 0;

	for (i = 0; i < ARRAY_SIZE(ecdh_curves); i++) {
		ret = crypto_register_kpp(&ecdh_curves[i]);
		if (ret) {
			dev_err(&qm->pdev->dev, "failed to register %s (%d)!\n",
				ecdh_curves[i].base.cra_name, ret);
			goto unreg_kpp;
		}
	}

	return 0;

unreg_kpp:
	for (--i; i >= 0; --i)
		crypto_unregister_kpp(&ecdh_curves[i]);

	return ret;
}

static void hpre_unregister_ecdh(struct hisi_qm *qm)
{
	int i;

	if (!hpre_check_alg_support(qm, HPRE_DRV_ECDH_MASK_CAP))
		return;

	for (i = ARRAY_SIZE(ecdh_curves) - 1; i >= 0; --i)
		crypto_unregister_kpp(&ecdh_curves[i]);
}

static int hpre_register_x25519(struct hisi_qm *qm)
{
	int ret;

	if (!hpre_check_alg_support(qm, HPRE_DRV_X25519_MASK_CAP))
		return 0;

	ret = crypto_register_kpp(&curve25519_alg);
	if (ret)
		dev_err(&qm->pdev->dev, "failed to register x25519 (%d)!\n", ret);

	return ret;
}

static void hpre_unregister_x25519(struct hisi_qm *qm)
{
	if (!hpre_check_alg_support(qm, HPRE_DRV_X25519_MASK_CAP))
		return;

	crypto_unregister_kpp(&curve25519_alg);
}

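/*
 * Algorithms are registered once, when the first HPRE device comes up,
 * and unregistered only when the last one goes away;
 * hpre_available_devs is the reference count, protected by
 * hpre_algs_lock.
 */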
int hpre_algs_register(struct hisi_qm *qm)
{
	int ret = 0;

	mutex_lock(&hpre_algs_lock);
	if (hpre_available_devs) {
		hpre_available_devs++;
		goto unlock;
	}

	ret = hpre_register_rsa(qm);
	if (ret)
		goto unlock;

	ret = hpre_register_dh(qm);
	if (ret)
		goto unreg_rsa;

	ret = hpre_register_ecdh(qm);
	if (ret)
		goto unreg_dh;

	ret = hpre_register_x25519(qm);
	if (ret)
		goto unreg_ecdh;

	hpre_available_devs++;
	mutex_unlock(&hpre_algs_lock);

	return ret;

unreg_ecdh:
	hpre_unregister_ecdh(qm);
unreg_dh:
	hpre_unregister_dh(qm);
unreg_rsa:
	hpre_unregister_rsa(qm);
unlock:
	mutex_unlock(&hpre_algs_lock);
	return ret;
}

void hpre_algs_unregister(struct hisi_qm *qm)
{
	mutex_lock(&hpre_algs_lock);
	if (--hpre_available_devs)
		goto unlock;

	hpre_unregister_x25519(qm);
	hpre_unregister_ecdh(qm);
	hpre_unregister_dh(qm);
	hpre_unregister_rsa(qm);

unlock:
	mutex_unlock(&hpre_algs_lock);
}