// SPDX-License-Identifier: GPL-2.0
/*
 * Mediated virtual PCI display host device driver
 *
 * See mdpy-defs.h for device specs
 *
 *   (c) Gerd Hoffmann <kraxel@redhat.com>
 *
 * based on mtty driver which is:
 *   Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
 *	 Author: Neo Jia <cjia@nvidia.com>
 *		 Kirti Wankhede <kwankhede@nvidia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
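/*
 * Typical usage (sysfs paths may differ between kernel versions):
 *   UUID=$(uuidgen)
 *   echo $UUID > /sys/devices/virtual/mdpy/mdpy/mdev_supported_types/mdpy-vga/create
 * then hand the mediated device to QEMU, e.g.
 *   -device vfio-pci,sysfsdev=/sys/bus/mdev/devices/$UUID
 */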
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/cdev.h>
#include <linux/vfio.h>
#include <linux/iommu.h>
#include <linux/sysfs.h>
#include <linux/mdev.h>
#include <linux/pci.h>
#include <drm/drm_fourcc.h>
#include "mdpy-defs.h"

#define MDPY_NAME		"mdpy"
#define MDPY_CLASS_NAME		"mdpy"

#define MDPY_CONFIG_SPACE_SIZE	0xff
#define MDPY_MEMORY_BAR_OFFSET	PAGE_SIZE
#define MDPY_DISPLAY_REGION	16

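/*
 * Helpers to patch values into the virtual config space.  These are plain
 * stores, so the sample effectively assumes a little-endian host.
 */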
#define STORE_LE16(addr, val)	(*(u16 *)addr = val)
#define STORE_LE32(addr, val)	(*(u32 *)addr = val)

MODULE_LICENSE("GPL v2");

#define MDPY_TYPE_1 "vga"
#define MDPY_TYPE_2 "xga"
#define MDPY_TYPE_3 "hd"

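/* The fixed display modes this sample offers, exposed as mdev types. */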
static struct mdpy_type {
	struct mdev_type type;
	u32 format;
	u32 bytepp;
	u32 width;
	u32 height;
} mdpy_types[] = {
	{
		.type.sysfs_name	= MDPY_TYPE_1,
		.type.pretty_name	= MDPY_CLASS_NAME "-" MDPY_TYPE_1,
		.format = DRM_FORMAT_XRGB8888,
		.bytepp = 4,
		.width	= 640,
		.height = 480,
	}, {
		.type.sysfs_name	= MDPY_TYPE_2,
		.type.pretty_name	= MDPY_CLASS_NAME "-" MDPY_TYPE_2,
		.format = DRM_FORMAT_XRGB8888,
		.bytepp = 4,
		.width	= 1024,
		.height = 768,
	}, {
		.type.sysfs_name	= MDPY_TYPE_3,
		.type.pretty_name	= MDPY_CLASS_NAME "-" MDPY_TYPE_3,
		.format = DRM_FORMAT_XRGB8888,
		.bytepp = 4,
		.width	= 1920,
		.height = 1080,
	},
};

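/* Type list handed to mdev_register_parent() in mdpy_dev_init(). */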
static struct mdev_type *mdpy_mdev_types[] = {
	&mdpy_types[0].type,
	&mdpy_types[1].type,
	&mdpy_types[2].type,
};

static dev_t		mdpy_devt;
static const struct class mdpy_class = {
	.name = MDPY_CLASS_NAME,
};
static struct cdev	mdpy_cdev;
static struct device	mdpy_dev;
static struct mdev_parent mdpy_parent;
static const struct vfio_device_ops mdpy_dev_ops;

/* State of each mdev device */
struct mdev_state {
	struct vfio_device vdev;
	u8 *vconfig;
	u32 bar_mask;
	struct mutex ops_lock;
	struct mdev_device *mdev;
	struct vfio_device_info dev_info;

	const struct mdpy_type *type;
	u32 memsize;
	void *memblk;
};

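/*
 * Build the virtual PCI config space: vendor/device IDs, a 32-bit
 * prefetchable memory BAR0 for the framebuffer, and a vendor capability
 * carrying the format/width/height registers (see mdpy-defs.h).
 */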
static void mdpy_create_config_space(struct mdev_state *mdev_state)
{
	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_VENDOR_ID],
		   MDPY_PCI_VENDOR_ID);
	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_DEVICE_ID],
		   MDPY_PCI_DEVICE_ID);
	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_SUBSYSTEM_VENDOR_ID],
		   MDPY_PCI_SUBVENDOR_ID);
	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_SUBSYSTEM_ID],
		   MDPY_PCI_SUBDEVICE_ID);

	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_COMMAND],
		   PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_STATUS],
		   PCI_STATUS_CAP_LIST);
	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_CLASS_DEVICE],
		   PCI_CLASS_DISPLAY_OTHER);
	mdev_state->vconfig[PCI_CLASS_REVISION] = 0x01;

	STORE_LE32((u32 *) &mdev_state->vconfig[PCI_BASE_ADDRESS_0],
		   PCI_BASE_ADDRESS_SPACE_MEMORY |
		   PCI_BASE_ADDRESS_MEM_TYPE_32	 |
		   PCI_BASE_ADDRESS_MEM_PREFETCH);
	mdev_state->bar_mask = ~(mdev_state->memsize) + 1;

	/* vendor specific capability for the config registers */
	mdev_state->vconfig[PCI_CAPABILITY_LIST]       = MDPY_VENDORCAP_OFFSET;
	mdev_state->vconfig[MDPY_VENDORCAP_OFFSET + 0] = 0x09; /* vendor cap */
	mdev_state->vconfig[MDPY_VENDORCAP_OFFSET + 1] = 0x00; /* next ptr */
	mdev_state->vconfig[MDPY_VENDORCAP_OFFSET + 2] = MDPY_VENDORCAP_SIZE;
	STORE_LE32((u32 *) &mdev_state->vconfig[MDPY_FORMAT_OFFSET],
		   mdev_state->type->format);
	STORE_LE32((u32 *) &mdev_state->vconfig[MDPY_WIDTH_OFFSET],
		   mdev_state->type->width);
	STORE_LE32((u32 *) &mdev_state->vconfig[MDPY_HEIGHT_OFFSET],
		   mdev_state->type->height);
}

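/*
 * Emulate BAR0 programming, including the standard sizing handshake
 * where the guest writes all ones and reads back the size mask.
 */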
static void handle_pci_cfg_write(struct mdev_state *mdev_state, u16 offset,
				 char *buf, u32 count)
{
	struct device *dev = mdev_dev(mdev_state->mdev);
	u32 cfg_addr;

	switch (offset) {
	case PCI_BASE_ADDRESS_0:
		cfg_addr = *(u32 *)buf;

		if (cfg_addr == 0xffffffff) {
			cfg_addr = (cfg_addr & mdev_state->bar_mask);
		} else {
			cfg_addr &= PCI_BASE_ADDRESS_MEM_MASK;
			if (cfg_addr)
				dev_info(dev, "BAR0 @ 0x%x\n", cfg_addr);
		}

		cfg_addr |= (mdev_state->vconfig[offset] &
			     ~PCI_BASE_ADDRESS_MEM_MASK);
		STORE_LE32(&mdev_state->vconfig[offset], cfg_addr);
		break;
	}
}

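/* Route a device access to either config space or the framebuffer BAR. */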
static ssize_t mdev_access(struct mdev_state *mdev_state, char *buf,
			   size_t count, loff_t pos, bool is_write)
{
	int ret = 0;

	mutex_lock(&mdev_state->ops_lock);

	if (pos < MDPY_CONFIG_SPACE_SIZE) {
		if (is_write)
			handle_pci_cfg_write(mdev_state, pos, buf, count);
		else
			memcpy(buf, (mdev_state->vconfig + pos), count);

	} else if ((pos >= MDPY_MEMORY_BAR_OFFSET) &&
		   (pos + count <=
		    MDPY_MEMORY_BAR_OFFSET + mdev_state->memsize)) {
		pos -= MDPY_MEMORY_BAR_OFFSET;
		if (is_write)
			memcpy(mdev_state->memblk + pos, buf, count);
		else
			memcpy(buf, mdev_state->memblk + pos, count);

	} else {
		dev_info(mdev_state->vdev.dev,
			 "%s: %s @0x%llx (unhandled)\n", __func__,
			 is_write ? "WR" : "RD", pos);
		ret = -1;
		goto accessfailed;
	}

	ret = count;

accessfailed:
	mutex_unlock(&mdev_state->ops_lock);

	return ret;
}

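/* Fill the framebuffer with a gray gradient; also backs VFIO_DEVICE_RESET. */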
static int mdpy_reset(struct mdev_state *mdev_state)
{
	u32 stride, i;

	/* initialize with gray gradient */
	stride = mdev_state->type->width * mdev_state->type->bytepp;
	for (i = 0; i < mdev_state->type->height; i++)
		memset(mdev_state->memblk + i * stride,
		       i * 255 / mdev_state->type->height,
		       stride);
	return 0;
}

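/*
 * Allocate the virtual config space and the framebuffer.  The framebuffer
 * size is rounded up to a power of two, as required for a PCI BAR.
 */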
static int mdpy_init_dev(struct vfio_device *vdev)
{
	struct mdev_state *mdev_state =
		container_of(vdev, struct mdev_state, vdev);
	struct mdev_device *mdev = to_mdev_device(vdev->dev);
	const struct mdpy_type *type =
		container_of(mdev->type, struct mdpy_type, type);
	u32 fbsize;
	int ret = -ENOMEM;

	mdev_state->vconfig = kzalloc(MDPY_CONFIG_SPACE_SIZE, GFP_KERNEL);
	if (!mdev_state->vconfig)
		return ret;

	fbsize = roundup_pow_of_two(type->width * type->height * type->bytepp);

	mdev_state->memblk = vmalloc_user(fbsize);
	if (!mdev_state->memblk)
		goto out_vconfig;

	mutex_init(&mdev_state->ops_lock);
	mdev_state->mdev = mdev;
	mdev_state->type = type;
	mdev_state->memsize = fbsize;
	mdpy_create_config_space(mdev_state);
	mdpy_reset(mdev_state);

	dev_info(vdev->dev, "%s: %s (%dx%d)\n", __func__, type->type.pretty_name,
		 type->width, type->height);
	return 0;

out_vconfig:
	kfree(mdev_state->vconfig);
	return ret;
}

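/* Allocate per-device state and register the vfio device with the core. */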
static int mdpy_probe(struct mdev_device *mdev)
{
	struct mdev_state *mdev_state;
	int ret;

	mdev_state = vfio_alloc_device(mdev_state, vdev, &mdev->dev,
				       &mdpy_dev_ops);
	if (IS_ERR(mdev_state))
		return PTR_ERR(mdev_state);

	ret = vfio_register_emulated_iommu_dev(&mdev_state->vdev);
	if (ret)
		goto err_put_vdev;
	dev_set_drvdata(&mdev->dev, mdev_state);
	return 0;

err_put_vdev:
	vfio_put_device(&mdev_state->vdev);
	return ret;
}

static void mdpy_release_dev(struct vfio_device *vdev)
{
	struct mdev_state *mdev_state =
		container_of(vdev, struct mdev_state, vdev);

	vfree(mdev_state->memblk);
	kfree(mdev_state->vconfig);
}

static void mdpy_remove(struct mdev_device *mdev)
{
	struct mdev_state *mdev_state = dev_get_drvdata(&mdev->dev);

	dev_info(&mdev->dev, "%s\n", __func__);

	vfio_unregister_group_dev(&mdev_state->vdev);
	vfio_put_device(&mdev_state->vdev);
}

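/*
 * read()/write() on the device fd: split the transfer into naturally
 * aligned 4/2/1 byte chunks and forward each chunk to mdev_access().
 */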
static ssize_t mdpy_read(struct vfio_device *vdev, char __user *buf,
			 size_t count, loff_t *ppos)
{
	struct mdev_state *mdev_state =
		container_of(vdev, struct mdev_state, vdev);
	unsigned int done = 0;
	int ret;

	while (count) {
		size_t filled;

		if (count >= 4 && !(*ppos % 4)) {
			u32 val;

			ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
					  *ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 4;
		} else if (count >= 2 && !(*ppos % 2)) {
			u16 val;

			ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
					  *ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 2;
		} else {
			u8 val;

			ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
					  *ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 1;
		}

		count -= filled;
		done += filled;
		*ppos += filled;
		buf += filled;
	}

	return done;

read_err:
	return -EFAULT;
}

static ssize_t mdpy_write(struct vfio_device *vdev, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	struct mdev_state *mdev_state =
		container_of(vdev, struct mdev_state, vdev);
	unsigned int done = 0;
	int ret;

	while (count) {
		size_t filled;

		if (count >= 4 && !(*ppos % 4)) {
			u32 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
					  *ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 4;
		} else if (count >= 2 && !(*ppos % 2)) {
			u16 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
					  *ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 2;
		} else {
			u8 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
					  *ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 1;
		}
		count -= filled;
		done += filled;
		*ppos += filled;
		buf += filled;
	}

	return done;
write_err:
	return -EFAULT;
}

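/*
 * Let userspace mmap the framebuffer.  Only shared mappings of the memory
 * BAR region, no larger than the framebuffer itself, are accepted.
 */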
static int mdpy_mmap(struct vfio_device *vdev, struct vm_area_struct *vma)
{
	struct mdev_state *mdev_state =
		container_of(vdev, struct mdev_state, vdev);

	if (vma->vm_pgoff != MDPY_MEMORY_BAR_OFFSET >> PAGE_SHIFT)
		return -EINVAL;
	if (vma->vm_end < vma->vm_start)
		return -EINVAL;
	if (vma->vm_end - vma->vm_start > mdev_state->memsize)
		return -EINVAL;
	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;

	return remap_vmalloc_range(vma, mdev_state->memblk, 0);
}

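/*
 * Region info: config space plus the framebuffer, which is reachable both
 * as BAR0 and as the extra MDPY_DISPLAY_REGION used by the gfx plane API.
 */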
static int mdpy_get_region_info(struct mdev_state *mdev_state,
				struct vfio_region_info *region_info,
				u16 *cap_type_id, void **cap_type)
{
	if (region_info->index >= VFIO_PCI_NUM_REGIONS &&
	    region_info->index != MDPY_DISPLAY_REGION)
		return -EINVAL;

	switch (region_info->index) {
	case VFIO_PCI_CONFIG_REGION_INDEX:
		region_info->offset = 0;
		region_info->size   = MDPY_CONFIG_SPACE_SIZE;
		region_info->flags  = (VFIO_REGION_INFO_FLAG_READ |
				       VFIO_REGION_INFO_FLAG_WRITE);
		break;
	case VFIO_PCI_BAR0_REGION_INDEX:
	case MDPY_DISPLAY_REGION:
		region_info->offset = MDPY_MEMORY_BAR_OFFSET;
		region_info->size   = mdev_state->memsize;
		region_info->flags  = (VFIO_REGION_INFO_FLAG_READ  |
				       VFIO_REGION_INFO_FLAG_WRITE |
				       VFIO_REGION_INFO_FLAG_MMAP);
		break;
	default:
		region_info->size   = 0;
		region_info->offset = 0;
		region_info->flags  = 0;
	}

	return 0;
}

static int mdpy_get_irq_info(struct vfio_irq_info *irq_info)
{
	irq_info->count = 0;
	return 0;
}

static int mdpy_get_device_info(struct vfio_device_info *dev_info)
{
	dev_info->flags = VFIO_DEVICE_FLAGS_PCI;
	dev_info->num_regions = VFIO_PCI_NUM_REGIONS;
	dev_info->num_irqs = VFIO_PCI_NUM_IRQS;
	return 0;
}

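/*
 * VFIO_DEVICE_QUERY_GFX_PLANE: answer probes for region-type plane support
 * and report format, size and stride of the single static plane.
 */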
static int mdpy_query_gfx_plane(struct mdev_state *mdev_state,
				struct vfio_device_gfx_plane_info *plane)
{
	if (plane->flags & VFIO_GFX_PLANE_TYPE_PROBE) {
		if (plane->flags == (VFIO_GFX_PLANE_TYPE_PROBE |
				     VFIO_GFX_PLANE_TYPE_REGION))
			return 0;
		return -EINVAL;
	}

	if (plane->flags != VFIO_GFX_PLANE_TYPE_REGION)
		return -EINVAL;

	plane->drm_format     = mdev_state->type->format;
	plane->width	      = mdev_state->type->width;
	plane->height	      = mdev_state->type->height;
	plane->stride	      = (mdev_state->type->width *
				 mdev_state->type->bytepp);
	plane->size	      = mdev_state->memsize;
	plane->region_index   = MDPY_DISPLAY_REGION;

	/* unused */
	plane->drm_format_mod = 0;
	plane->x_pos	      = 0;
	plane->y_pos	      = 0;
	plane->x_hot	      = 0;
	plane->y_hot	      = 0;

	return 0;
}

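/* VFIO device ioctls: device/region/irq info, gfx plane query, reset. */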
static long mdpy_ioctl(struct vfio_device *vdev, unsigned int cmd,
		       unsigned long arg)
{
	int ret = 0;
	unsigned long minsz;
	struct mdev_state *mdev_state =
		container_of(vdev, struct mdev_state, vdev);

	switch (cmd) {
	case VFIO_DEVICE_GET_INFO:
	{
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		ret = mdpy_get_device_info(&info);
		if (ret)
			return ret;

		memcpy(&mdev_state->dev_info, &info, sizeof(info));

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}
	case VFIO_DEVICE_GET_REGION_INFO:
	{
		struct vfio_region_info info;
		u16 cap_type_id = 0;
		void *cap_type = NULL;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		ret = mdpy_get_region_info(mdev_state, &info, &cap_type_id,
					   &cap_type);
		if (ret)
			return ret;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}

	case VFIO_DEVICE_GET_IRQ_INFO:
	{
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if ((info.argsz < minsz) ||
		    (info.index >= mdev_state->dev_info.num_irqs))
			return -EINVAL;

		ret = mdpy_get_irq_info(&info);
		if (ret)
			return ret;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}

	case VFIO_DEVICE_QUERY_GFX_PLANE:
	{
		struct vfio_device_gfx_plane_info plane = {};

		minsz = offsetofend(struct vfio_device_gfx_plane_info,
				    region_index);

		if (copy_from_user(&plane, (void __user *)arg, minsz))
			return -EFAULT;

		if (plane.argsz < minsz)
			return -EINVAL;

		ret = mdpy_query_gfx_plane(mdev_state, &plane);
		if (ret)
			return ret;

		if (copy_to_user((void __user *)arg, &plane, minsz))
			return -EFAULT;

		return 0;
	}

	case VFIO_DEVICE_SET_IRQS:
		return -EINVAL;

	case VFIO_DEVICE_RESET:
		return mdpy_reset(mdev_state);
	}
	return -ENOTTY;
}

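/* "vendor/resolution" sysfs attribute on each mdev device. */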
static ssize_t
resolution_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct mdev_state *mdev_state = dev_get_drvdata(dev);

	return sprintf(buf, "%dx%d\n",
		       mdev_state->type->width,
		       mdev_state->type->height);
}
static DEVICE_ATTR_RO(resolution);

static struct attribute *mdev_dev_attrs[] = {
	&dev_attr_resolution.attr,
	NULL,
};

static const struct attribute_group mdev_dev_group = {
	.name  = "vendor",
	.attrs = mdev_dev_attrs,
};

static const struct attribute_group *mdev_dev_groups[] = {
	&mdev_dev_group,
	NULL,
};

static ssize_t mdpy_show_description(struct mdev_type *mtype, char *buf)
{
	struct mdpy_type *type = container_of(mtype, struct mdpy_type, type);

	return sprintf(buf, "virtual display, %dx%d framebuffer\n",
		       type->width, type->height);
}

static const struct vfio_device_ops mdpy_dev_ops = {
	.init = mdpy_init_dev,
	.release = mdpy_release_dev,
	.read = mdpy_read,
	.write = mdpy_write,
	.ioctl = mdpy_ioctl,
	.mmap = mdpy_mmap,
	.bind_iommufd	= vfio_iommufd_emulated_bind,
	.unbind_iommufd	= vfio_iommufd_emulated_unbind,
	.attach_ioas	= vfio_iommufd_emulated_attach_ioas,
	.detach_ioas	= vfio_iommufd_emulated_detach_ioas,
};

static struct mdev_driver mdpy_driver = {
	.device_api = VFIO_DEVICE_API_PCI_STRING,
	.max_instances = 4,
	.driver = {
		.name = "mdpy",
		.owner = THIS_MODULE,
		.mod_name = KBUILD_MODNAME,
		.dev_groups = mdev_dev_groups,
	},
	.probe = mdpy_probe,
	.remove	= mdpy_remove,
	.show_description = mdpy_show_description,
};

static const struct file_operations vd_fops = {
	.owner		= THIS_MODULE,
};

static void mdpy_device_release(struct device *dev)
{
	/* nothing */
}

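/*
 * Module init: set up the char dev region, register the mdev driver and
 * the "mdpy" class/device, then register that device as mdev parent so
 * the types above appear under mdev_supported_types in sysfs.
 */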
static int __init mdpy_dev_init(void)
{
	int ret = 0;

	ret = alloc_chrdev_region(&mdpy_devt, 0, MINORMASK + 1, MDPY_NAME);
	if (ret < 0) {
		pr_err("Error: failed to register mdpy_dev, err: %d\n", ret);
		return ret;
	}
	cdev_init(&mdpy_cdev, &vd_fops);
	cdev_add(&mdpy_cdev, mdpy_devt, MINORMASK + 1);
	pr_info("%s: major %d\n", __func__, MAJOR(mdpy_devt));

	ret = mdev_register_driver(&mdpy_driver);
	if (ret)
		goto err_cdev;

	ret = class_register(&mdpy_class);
	if (ret)
		goto err_driver;
	mdpy_dev.class = &mdpy_class;
	mdpy_dev.release = mdpy_device_release;
	dev_set_name(&mdpy_dev, "%s", MDPY_NAME);

	ret = device_register(&mdpy_dev);
	if (ret)
		goto err_put;

	ret = mdev_register_parent(&mdpy_parent, &mdpy_dev, &mdpy_driver,
				   mdpy_mdev_types,
				   ARRAY_SIZE(mdpy_mdev_types));
	if (ret)
		goto err_device;

	return 0;

err_device:
	device_del(&mdpy_dev);
err_put:
	put_device(&mdpy_dev);
	class_unregister(&mdpy_class);
err_driver:
	mdev_unregister_driver(&mdpy_driver);
err_cdev:
	cdev_del(&mdpy_cdev);
	unregister_chrdev_region(mdpy_devt, MINORMASK + 1);
	return ret;
}

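/* Module exit: tear everything down in reverse order of registration. */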
static void __exit mdpy_dev_exit(void)
{
	mdpy_dev.bus = NULL;
	mdev_unregister_parent(&mdpy_parent);

	device_unregister(&mdpy_dev);
	mdev_unregister_driver(&mdpy_driver);
	cdev_del(&mdpy_cdev);
	unregister_chrdev_region(mdpy_devt, MINORMASK + 1);
	class_unregister(&mdpy_class);
}

module_param_named(count, mdpy_driver.max_instances, int, 0444);
MODULE_PARM_DESC(count, "number of " MDPY_NAME " devices");

module_init(mdpy_dev_init)
module_exit(mdpy_dev_exit)