/*
 * RTC subsystem, dev interface
 *
 * Copyright (C) 2005 Tower Technologies
 * Author: Alessandro Zummo <a.zummo@towertech.it>
 *
 * based on arch/arm/common/rtctime.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/rtc.h>
#include "rtc-core.h"

static dev_t rtc_devt;

#define RTC_DEV_MAX 16 /* 16 RTCs should be enough for everyone... */

static int rtc_dev_open(struct inode *inode, struct file *file)
{
	int err;
	struct rtc_device *rtc = container_of(inode->i_cdev,
					struct rtc_device, char_dev);
	const struct rtc_class_ops *ops = rtc->ops;

	/* We keep the lock as long as the device is in use
	 * and return immediately if busy
	 */
	if (!(mutex_trylock(&rtc->char_lock)))
		return -EBUSY;

	file->private_data = rtc;

	err = ops->open ? ops->open(rtc->dev.parent) : 0;
	if (err == 0) {
		spin_lock_irq(&rtc->irq_lock);
		rtc->irq_data = 0;
		spin_unlock_irq(&rtc->irq_lock);

		return 0;
	}

	/* something has gone wrong, release the lock */
	mutex_unlock(&rtc->char_lock);
	return err;
}

#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
/*
 * Routine to poll RTC seconds field for change as often as possible,
 * after first RTC_UIE use timer to reduce polling
 */
static void rtc_uie_task(struct work_struct *work)
{
	struct rtc_device *rtc =
		container_of(work, struct rtc_device, uie_task);
	struct rtc_time tm;
	int num = 0;
	int err;

	err = rtc_read_time(rtc, &tm);

	local_irq_disable();
	spin_lock(&rtc->irq_lock);
	if (rtc->stop_uie_polling || err) {
		rtc->uie_task_active = 0;
	} else if (rtc->oldsecs != tm.tm_sec) {
		num = (tm.tm_sec + 60 - rtc->oldsecs) % 60;
		rtc->oldsecs = tm.tm_sec;
		rtc->uie_timer.expires = jiffies + HZ - (HZ/10);
		rtc->uie_timer_active = 1;
		rtc->uie_task_active = 0;
		add_timer(&rtc->uie_timer);
	} else if (schedule_work(&rtc->uie_task) == 0) {
		rtc->uie_task_active = 0;
	}
	spin_unlock(&rtc->irq_lock);
	if (num)
		rtc_update_irq(rtc, num, RTC_UF | RTC_IRQF);
	local_irq_enable();
}
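
/*
 * Timer callback used by the emulation above: once the timer armed in
 * rtc_uie_task() fires, hand the seconds polling back to the shared
 * workqueue.
 */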
static void rtc_uie_timer(unsigned long data)
{
	struct rtc_device *rtc = (struct rtc_device *)data;
	unsigned long flags;

	spin_lock_irqsave(&rtc->irq_lock, flags);
	rtc->uie_timer_active = 0;
	rtc->uie_task_active = 1;
	if (schedule_work(&rtc->uie_task) == 0)
		rtc->uie_task_active = 0;
	spin_unlock_irqrestore(&rtc->irq_lock, flags);
}

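/*
 * Stop the update-interrupt emulation: flag the poll loop to stop, then
 * wait for any pending timer or work item to finish, dropping irq_lock
 * around the calls that may sleep.
 */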
static void clear_uie(struct rtc_device *rtc)
{
	spin_lock_irq(&rtc->irq_lock);
	if (rtc->irq_active) {
		rtc->stop_uie_polling = 1;
		if (rtc->uie_timer_active) {
			spin_unlock_irq(&rtc->irq_lock);
			del_timer_sync(&rtc->uie_timer);
			spin_lock_irq(&rtc->irq_lock);
			rtc->uie_timer_active = 0;
		}
		if (rtc->uie_task_active) {
			spin_unlock_irq(&rtc->irq_lock);
			flush_scheduled_work();
			spin_lock_irq(&rtc->irq_lock);
		}
		rtc->irq_active = 0;
	}
	spin_unlock_irq(&rtc->irq_lock);
}

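/*
 * Start the update-interrupt emulation by scheduling the polling work;
 * does nothing if emulation is already active.
 */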
static int set_uie(struct rtc_device *rtc)
{
	struct rtc_time tm;
	int err;

	err = rtc_read_time(rtc, &tm);
	if (err)
		return err;
	spin_lock_irq(&rtc->irq_lock);
	if (!rtc->irq_active) {
		rtc->irq_active = 1;
		rtc->stop_uie_polling = 0;
		rtc->oldsecs = tm.tm_sec;
		rtc->uie_task_active = 1;
		if (schedule_work(&rtc->uie_task) == 0)
			rtc->uie_task_active = 0;
	}
	rtc->irq_data = 0;
	spin_unlock_irq(&rtc->irq_lock);
	return 0;
}
#endif /* CONFIG_RTC_INTF_DEV_UIE_EMUL */

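/*
 * read() hands userspace the accumulated event word: the low byte
 * carries the event flags and the remaining bytes the event count,
 * matching the legacy /dev/rtc format.  Unless O_NONBLOCK is set,
 * the caller sleeps until an event arrives.
 */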
static ssize_t
rtc_dev_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct rtc_device *rtc = to_rtc_device(file->private_data);

	DECLARE_WAITQUEUE(wait, current);
	unsigned long data;
	ssize_t ret;

	if (count != sizeof(unsigned int) && count < sizeof(unsigned long))
		return -EINVAL;

	add_wait_queue(&rtc->irq_queue, &wait);
	do {
		__set_current_state(TASK_INTERRUPTIBLE);

		spin_lock_irq(&rtc->irq_lock);
		data = rtc->irq_data;
		rtc->irq_data = 0;
		spin_unlock_irq(&rtc->irq_lock);

		if (data != 0) {
			ret = 0;
			break;
		}
		if (file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		schedule();
	} while (1);
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&rtc->irq_queue, &wait);

	if (ret == 0) {
		/* Check for any data updates */
		if (rtc->ops->read_callback)
			data = rtc->ops->read_callback(rtc->dev.parent,
						       data);

		if (sizeof(int) != sizeof(long) &&
		    count == sizeof(unsigned int))
			ret = put_user(data, (unsigned int __user *)buf) ?:
				sizeof(unsigned int);
		else
			ret = put_user(data, (unsigned long __user *)buf) ?:
				sizeof(unsigned long);
	}
	return ret;
}

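/* poll() reports the device readable as soon as an event is pending. */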
static unsigned int rtc_dev_poll(struct file *file, poll_table *wait)
{
	struct rtc_device *rtc = to_rtc_device(file->private_data);
	unsigned long data;

	poll_wait(file, &rtc->irq_queue, wait);

	data = rtc->irq_data;

	return (data != 0) ? (POLLIN | POLLRDNORM) : 0;
}

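/*
 * ioctl() dispatch: enforce capability checks first, give the driver's
 * own ioctl handler the first chance, and fall back to the generic
 * emulation below for anything it returns -ENOIOCTLCMD for.
 */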
static int rtc_dev_ioctl(struct inode *inode, struct file *file,
		unsigned int cmd, unsigned long arg)
{
	int err = 0;
	struct rtc_device *rtc = file->private_data;
	const struct rtc_class_ops *ops = rtc->ops;
	struct rtc_time tm;
	struct rtc_wkalrm alarm;
	void __user *uarg = (void __user *) arg;

	/* check that the calling task has appropriate permissions
	 * for certain ioctls. doing this check here is useful
	 * to avoid duplicate code in each driver.
	 */
	switch (cmd) {
	case RTC_EPOCH_SET:
	case RTC_SET_TIME:
		if (!capable(CAP_SYS_TIME))
			return -EACCES;
		break;

	case RTC_IRQP_SET:
		if (arg > rtc->max_user_freq && !capable(CAP_SYS_RESOURCE))
			return -EACCES;
		break;

	case RTC_PIE_ON:
		if (!capable(CAP_SYS_RESOURCE))
			return -EACCES;
		break;
	}

	/* avoid conflicting IRQ users */
	if (cmd == RTC_PIE_ON || cmd == RTC_PIE_OFF || cmd == RTC_IRQP_SET) {
		spin_lock_irq(&rtc->irq_task_lock);
		if (rtc->irq_task)
			err = -EBUSY;
		spin_unlock_irq(&rtc->irq_task_lock);

		if (err < 0)
			return err;
	}

	/* try the driver's ioctl interface */
	if (ops->ioctl) {
		err = ops->ioctl(rtc->dev.parent, cmd, arg);
		if (err != -ENOIOCTLCMD)
			return err;
	}

	/* if the driver does not provide the ioctl interface
	 * or if that particular ioctl was not implemented
	 * (-ENOIOCTLCMD), we will try to emulate here.
	 */

	switch (cmd) {
	case RTC_ALM_READ:
		err = rtc_read_alarm(rtc, &alarm);
		if (err < 0)
			return err;

		if (copy_to_user(uarg, &alarm.time, sizeof(tm)))
			return -EFAULT;
		break;

	case RTC_ALM_SET:
		if (copy_from_user(&alarm.time, uarg, sizeof(tm)))
			return -EFAULT;

		alarm.enabled = 0;
		alarm.pending = 0;
		alarm.time.tm_wday = -1;
		alarm.time.tm_yday = -1;
		alarm.time.tm_isdst = -1;

		/* RTC_ALM_SET alarms may be up to 24 hours in the future.
		 * Rather than expecting every RTC to implement "don't care"
		 * for day/month/year fields, just force the alarm to have
		 * the right values for those fields.
		 *
		 * RTC_WKALM_SET should be used instead.  Not only does it
		 * eliminate the need for a separate RTC_AIE_ON call, it
		 * doesn't have the "alarm 23:59:59 in the future" race.
		 *
		 * NOTE:  some legacy code may have used invalid fields as
		 * wildcards, exposing hardware "periodic alarm" capabilities.
		 * Not supported here.
		 */
		{
			unsigned long now, then;

			err = rtc_read_time(rtc, &tm);
			if (err < 0)
				return err;
			rtc_tm_to_time(&tm, &now);

			alarm.time.tm_mday = tm.tm_mday;
			alarm.time.tm_mon = tm.tm_mon;
			alarm.time.tm_year = tm.tm_year;
			err = rtc_valid_tm(&alarm.time);
			if (err < 0)
				return err;
			rtc_tm_to_time(&alarm.time, &then);

			/* alarm may need to wrap into tomorrow */
			if (then < now) {
				rtc_time_to_tm(now + 24 * 60 * 60, &tm);
				alarm.time.tm_mday = tm.tm_mday;
				alarm.time.tm_mon = tm.tm_mon;
				alarm.time.tm_year = tm.tm_year;
			}
		}

		err = rtc_set_alarm(rtc, &alarm);
		break;

	case RTC_RD_TIME:
		err = rtc_read_time(rtc, &tm);
		if (err < 0)
			return err;

		if (copy_to_user(uarg, &tm, sizeof(tm)))
			return -EFAULT;
		break;

	case RTC_SET_TIME:
		if (copy_from_user(&tm, uarg, sizeof(tm)))
			return -EFAULT;

		err = rtc_set_time(rtc, &tm);
		break;

	case RTC_IRQP_READ:
		if (ops->irq_set_freq)
			err = put_user(rtc->irq_freq,
					(unsigned long __user *)uarg);
		else
			err = -ENOTTY;
		break;

	case RTC_IRQP_SET:
		if (ops->irq_set_freq)
			err = rtc_irq_set_freq(rtc, rtc->irq_task, arg);
		else
			err = -ENOTTY;
		break;

	case RTC_WKALM_SET:
		if (copy_from_user(&alarm, uarg, sizeof(alarm)))
			return -EFAULT;

		err = rtc_set_alarm(rtc, &alarm);
		break;

	case RTC_WKALM_RD:
		err = rtc_read_alarm(rtc, &alarm);
		if (err < 0)
			return err;

		if (copy_to_user(uarg, &alarm, sizeof(alarm)))
			return -EFAULT;
		break;

#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
	case RTC_UIE_OFF:
		clear_uie(rtc);
		return 0;

	case RTC_UIE_ON:
		return set_uie(rtc);
#endif
	default:
		err = -ENOTTY;
		break;
	}

	return err;
}

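/*
 * release() undoes open(): stop any UIE emulation, let the driver clean
 * up, and drop char_lock so the device can be opened again.
 */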
static int rtc_dev_release(struct inode *inode, struct file *file)
{
	struct rtc_device *rtc = to_rtc_device(file->private_data);

#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
	clear_uie(rtc);
#endif
	if (rtc->ops->release)
		rtc->ops->release(rtc->dev.parent);

	mutex_unlock(&rtc->char_lock);
	return 0;
}

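/* Hook up asynchronous notification (SIGIO) for this file. */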
static int rtc_dev_fasync(int fd, struct file *file, int on)
{
	struct rtc_device *rtc = to_rtc_device(file->private_data);
	return fasync_helper(fd, file, on, &rtc->async_queue);
}

static const struct file_operations rtc_dev_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= rtc_dev_read,
	.poll		= rtc_dev_poll,
	.ioctl		= rtc_dev_ioctl,
	.open		= rtc_dev_open,
	.release	= rtc_dev_release,
	.fasync		= rtc_dev_fasync,
};

/* insertion/removal hooks */

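/*
 * Set up the character-device side of an rtc_device before it is
 * registered: assign a dev_t, initialize the locks, wait queue and
 * (when enabled) the UIE emulation machinery, and prepare the cdev.
 */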
void rtc_dev_prepare(struct rtc_device *rtc)
{
	if (!rtc_devt)
		return;

	if (rtc->id >= RTC_DEV_MAX) {
		pr_debug("%s: too many RTC devices\n", rtc->name);
		return;
	}

	rtc->dev.devt = MKDEV(MAJOR(rtc_devt), rtc->id);

	mutex_init(&rtc->char_lock);
	spin_lock_init(&rtc->irq_lock);
	init_waitqueue_head(&rtc->irq_queue);
#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
	INIT_WORK(&rtc->uie_task, rtc_uie_task);
	setup_timer(&rtc->uie_timer, rtc_uie_timer, (unsigned long)rtc);
#endif

	cdev_init(&rtc->char_dev, &rtc_dev_fops);
	rtc->char_dev.owner = rtc->owner;
}

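/* Register the cdev so the /dev node can actually be opened. */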
void rtc_dev_add_device(struct rtc_device *rtc)
{
	if (cdev_add(&rtc->char_dev, rtc->dev.devt, 1))
		printk(KERN_WARNING "%s: failed to add char device %d:%d\n",
			rtc->name, MAJOR(rtc_devt), rtc->id);
	else
		pr_debug("%s: dev (%d:%d)\n", rtc->name,
			MAJOR(rtc_devt), rtc->id);
}

void rtc_dev_del_device(struct rtc_device *rtc)
{
	if (rtc->dev.devt)
		cdev_del(&rtc->char_dev);
}

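/* Reserve a char device region for up to RTC_DEV_MAX devices at boot. */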
void __init rtc_dev_init(void)
{
	int err;

	err = alloc_chrdev_region(&rtc_devt, 0, RTC_DEV_MAX, "rtc");
	if (err < 0)
		printk(KERN_ERR "%s: failed to allocate char dev region\n",
			__FILE__);
}

void __exit rtc_dev_exit(void)
{
	if (rtc_devt)
		unregister_chrdev_region(rtc_devt, RTC_DEV_MAX);
}