1/*
2 * Copyright 2002/03, Thomas Kurschel. All rights reserved.
3 * Distributed under the terms of the MIT License.
4 */
5
6/*
7	Part of Open IDE bus manager
8
9	Handling of passive waiting and synchronized procedure calls.
10	The latter are calls that get delayed until the bus is idle.
11*/
12
13
14#include "ide_internal.h"
15#include "ide_sim.h"
16
17#include <string.h>
18
19
20//#define TRACE_SYNC
21#ifdef TRACE_SYNC
22#	define TRACE(x) { dprintf("%s(): ", __FUNCTION__); dprintf x ; }
23#else
24#	define TRACE(x) ;
25#endif
26
27
/**	DPC handler for IRQs.
 *	Scheduled out of interrupt context by ide_irq_handler() after the bus
 *	state was moved to "accessing"; continues the active request via the
 *	protocol-specific handler (ATA PIO/DMA or ATAPI packet), or — if no
 *	request was active — treats the IRQ as a possible service request.
 */

void
ide_dpc(void *arg)
{
	ide_bus_info *bus = (ide_bus_info *)arg;
	ide_qrequest *qrequest;
	ide_device_info *device;

	TRACE(("\n"));

	// IRQ handler doesn't tell us whether this bus was in async_wait or
	// in idle state, so we just check whether there is an active request,
	// which means that we were async_waiting
	if (bus->active_qrequest != NULL) {
		TRACE(("continue command\n"));

		// the request completed (or progressed) before its deadline,
		// so the IRQ timeout must not fire anymore
		cancel_timer(&bus->timer.te);

		qrequest = bus->active_qrequest;
		device = qrequest->device;

		// not perfect but simple: we simply know who is waiting why
		if (device->is_atapi)
			packet_dpc(qrequest);
		else {
			if (qrequest->uses_dma)
				ata_dpc_DMA(qrequest);
			else
				ata_dpc_PIO(qrequest);
		}
	} else {
		// no request active, so this must be a service request or
		// a spurious IRQ; access_finished will take care of testing
		// for service requests
		TRACE(("irq in idle mode - possible service request\n"));

		device = get_current_device(bus);
		if (device == NULL) {
			// got an interrupt from a non-existing device
			// either this is a spurious interrupt or there *is* a device
			// but we haven't detected it - we better ignore it silently
			access_finished(bus, bus->first_device);
		} else {
			// access_finished always checks the other device first, but as
			// we do have a service request, we negate the negation
			access_finished(bus, device->other_device);
		}

		// let XPT resend commands that got blocked
		scsi->cont_send_bus(bus->scsi_cookie);
	}

	return;
}
89
90
/**	handler for IDE IRQs.
 *	Runs in interrupt context. \a status is the device's status register
 *	value, already read by the caller. Depending on the current bus state,
 *	either schedules the DPC (async wait / service request), wakes a
 *	synchronous waiter, or declares the IRQ spurious.
 *	Returns B_INVOKE_SCHEDULER or B_UNHANDLED_INTERRUPT.
 */

status_t
ide_irq_handler(ide_bus_info *bus, uint8 status)
{
	ide_device_info *device;

	// we need to lock bus to have a solid bus state
	// (side effect: we lock out the timeout handler and get
	//  delayed if the IRQ happens at the same time as a command is
	//  issued; in the latter case, we have no official way to determine
	//  whether the command was issued before or afterwards; if it was
	//  afterwards, the device must not be busy; if it was before,
	//  the device is either busy because of the sent command, or it's
	//  not busy as the command has already been finished, i.e. there
	//  was a second IRQ which we've overlooked as we didn't acknowledge
	//  the first IRQ)
	IDE_LOCK(bus);

	device = bus->active_device;

	if (device == NULL) {
		IDE_UNLOCK(bus);

		TRACE(("IRQ though there is no active device\n"));
		return B_UNHANDLED_INTERRUPT;
	}

	if ((status & ide_status_bsy) != 0) {
		// the IRQ seems to be fired before the last command was sent,
		// i.e. it's not the one that signals finishing of command
		IDE_UNLOCK(bus);

		TRACE(("IRQ though device is busy\n"));
		return B_UNHANDLED_INTERRUPT;
	}

	switch (bus->state) {
		case ide_state_async_waiting:
			TRACE(("state: async waiting\n"));

			// claim the bus before dropping the lock so nobody else
			// (timeout handler, new requests) can grab it meanwhile
			bus->state = ide_state_accessing;

			IDE_UNLOCK(bus);

			scsi->schedule_dpc(bus->scsi_cookie, bus->irq_dpc, ide_dpc, bus);
			return B_INVOKE_SCHEDULER;

		case ide_state_idle:
			TRACE(("state: idle, num_running_reqs %d\n", bus->num_running_reqs));

			// this must be a service request;
			// if no request is pending, the IRQ was fired wrongly
			if (bus->num_running_reqs == 0) {
				IDE_UNLOCK(bus);
				return B_UNHANDLED_INTERRUPT;
			}

			bus->state = ide_state_accessing;

			IDE_UNLOCK(bus);

			scsi->schedule_dpc(bus->scsi_cookie, bus->irq_dpc, ide_dpc, bus);
			return B_INVOKE_SCHEDULER;

		case ide_state_sync_waiting:
			TRACE(("state: sync waiting\n"));

			bus->state = ide_state_accessing;
			// tell wait_for_sync()'s caller that this was a real IRQ,
			// not an expired timeout
			bus->sync_wait_timeout = false;

			IDE_UNLOCK(bus);

			release_sem_etc(bus->sync_wait_sem, 1, B_DO_NOT_RESCHEDULE);
			return B_INVOKE_SCHEDULER;

		case ide_state_accessing:
			TRACE(("state: spurious IRQ - there is a command being executed\n"));

			IDE_UNLOCK(bus);
			return B_UNHANDLED_INTERRUPT;

		default:
			dprintf("BUG: unknown state (%d)\n", bus->state);

			IDE_UNLOCK(bus);

			return B_UNHANDLED_INTERRUPT;
	}
}
181
182
/**	cancel IRQ timeout
 *	it doesn't matter whether there really was a timeout running;
 *	on return, bus state is set to _accessing_
 */

void
cancel_irq_timeout(ide_bus_info *bus)
{
	// take the lock so we don't race with the timeout/IRQ handlers
	// while changing the state; cancel_timer is called after unlocking
	IDE_LOCK(bus);
	bus->state = ide_state_accessing;
	IDE_UNLOCK(bus);

	cancel_timer(&bus->timer.te);
}
197
198
199/** start waiting for IRQ with bus lock hold
200 *	new_state must be either sync_wait or async_wait
201 */
202
203void
204start_waiting(ide_bus_info *bus, uint32 timeout, int new_state)
205{
206	int res;
207
208	TRACE(("timeout = %u\n", (uint)timeout));
209
210	bus->state = new_state;
211
212	res = add_timer(&bus->timer.te, ide_timeout,
213		(bigtime_t)timeout * 1000000, B_ONE_SHOT_RELATIVE_TIMER);
214
215	if (res != B_OK)
216		panic("Error setting timeout (%s)", strerror(res));
217
218	IDE_UNLOCK(bus);
219}
220
221
/**	start waiting for IRQ with bus lock not held;
 *	convenience wrapper - acquires the bus lock and delegates to
 *	start_waiting(), which releases the lock again before returning
 */

void
start_waiting_nolock(ide_bus_info *bus, uint32 timeout, int new_state)
{
	IDE_LOCK(bus);
	start_waiting(bus, timeout, new_state);
}
230
231
/**	wait for sync IRQ;
 *	blocks on the bus' sync_wait semaphore, which is released either by
 *	ide_irq_handler() (IRQ arrived) or ide_timeout() (in which case
 *	bus->sync_wait_timeout was set); afterwards the (possibly already
 *	fired) timeout timer is canceled
 */

void
wait_for_sync(ide_bus_info *bus)
{
	acquire_sem(bus->sync_wait_sem);
	cancel_timer(&bus->timer.te);
}
240
241
/**	timeout dpc handler;
 *	scheduled by ide_timeout() when a request in async_wait state timed
 *	out - resets the device and finishes the request with
 *	SCSI_CMD_TIMEOUT so the upper layer can retry.
 *	NOTE(review): assumes bus->active_qrequest is non-NULL; that should be
 *	guaranteed by the async_waiting state, but confirm.
 */

static void
ide_timeout_dpc(void *arg)
{
	ide_bus_info *bus = (ide_bus_info *)arg;
	ide_qrequest *qrequest;
	ide_device_info *device;

	qrequest = bus->active_qrequest;
	device = qrequest->device;

	dprintf("ide: ide_timeout_dpc() bus %p, device %p\n", bus, device);

	// this also resets overlapped commands
	reset_device(device, qrequest);

	device->subsys_status = SCSI_CMD_TIMEOUT;

	if (qrequest->uses_dma) {
		// too many DMA timeouts hint at broken DMA support,
		// so fall back to PIO permanently for this device
		if (++device->DMA_failures >= MAX_DMA_FAILURES) {
			dprintf("Disabling DMA because of too many errors\n");

			device->DMA_enabled = false;
		}
	}

	// let upper layer do the retry
	finish_checksense(qrequest);
}
272
273
/**	timeout handler, called by system timer;
 *	runs in interrupt context, mirroring the state machine in
 *	ide_irq_handler(): an async waiter gets the timeout DPC scheduled, a
 *	sync waiter is woken with sync_wait_timeout set, and anything else
 *	means the timeout fired too late (or there is a bug)
 */

status_t
ide_timeout(timer *arg)
{
	ide_bus_info *bus = ((ide_bus_timer_info *)arg)->bus;

	TRACE(("ide_timeout(): %p\n", bus));

	dprintf("ide: ide_timeout() bus %p\n", bus);

	// we need to lock bus to have a solid bus state
	// (side effect: we lock out the IRQ handler)
	IDE_LOCK(bus);

	switch (bus->state) {
		case ide_state_async_waiting:
			TRACE(("async waiting\n"));

			// claim the bus before dropping the lock, like the IRQ
			// handler does
			bus->state = ide_state_accessing;

			IDE_UNLOCK(bus);

			scsi->schedule_dpc(bus->scsi_cookie, bus->irq_dpc, ide_timeout_dpc, bus);
			return B_INVOKE_SCHEDULER;

		case ide_state_sync_waiting:
			TRACE(("sync waiting\n"));

			bus->state = ide_state_accessing;
			// tell wait_for_sync()'s caller that this was a timeout,
			// not a real IRQ
			bus->sync_wait_timeout = true;

			IDE_UNLOCK(bus);

			release_sem_etc(bus->sync_wait_sem, 1, B_DO_NOT_RESCHEDULE);
			return B_INVOKE_SCHEDULER;

		case ide_state_accessing:
			TRACE(("came too late - IRQ occured already\n"));

			IDE_UNLOCK(bus);
			return B_DO_NOT_RESCHEDULE;

		default:
			// this case also happens if a timeout fires too late;
			// unless there is a bug, the timeout should always be canceled
			// before declaring bus as being idle
			dprintf("BUG: unknown state (%d)\n", (int)bus->state);

			IDE_UNLOCK(bus);
			return B_DO_NOT_RESCHEDULE;
	}
}
327
328
329void
330init_synced_pc(ide_synced_pc *pc, ide_synced_pc_func func)
331{
332	pc->func = func;
333	pc->registered = false;
334}
335
336
337void
338uninit_synced_pc(ide_synced_pc *pc)
339{
340	if (pc->registered)
341		panic("Tried to clean up pending synced PC\n");
342}
343
344
345/**	schedule a synced pc
346 *	a synced pc gets executed as soon as the bus becomes idle
347 */
348
349status_t
350schedule_synced_pc(ide_bus_info *bus, ide_synced_pc *pc, void *arg)
351{
352	//TRACE(());
353
354	IDE_LOCK(bus);
355
356	if (pc->registered) {
357		// spc cannot be registered twice
358		TRACE(("already registered\n"));
359		return B_ERROR;
360	} else if( bus->state != ide_state_idle ) {
361		// bus isn't idle - spc must be added to pending list
362		TRACE(("adding to pending list\n"));
363
364		pc->next = bus->synced_pc_list;
365		bus->synced_pc_list = pc;
366		pc->arg = arg;
367		pc->registered = true;
368
369		IDE_UNLOCK(bus);
370		return B_OK;
371	}
372
373	// we have luck - bus is idle, so grab it before
374	// releasing the lock
375
376	TRACE(("exec immediately\n"));
377
378	bus->state = ide_state_accessing;
379	IDE_UNLOCK(bus);
380
381	TRACE(("go\n"));
382	pc->func(bus, arg);
383
384	TRACE(("finished\n"));
385	access_finished(bus, bus->first_device);
386
387	// meanwhile, we may have rejected SCSI commands;
388	// usually, the XPT resends them once a command
389	// has finished, but in this case XPT doesn't know
390	// about our "private" command, so we have to tell about
391	// idle bus manually
392	TRACE(("tell SCSI bus manager about idle bus\n"));
393	scsi->cont_send_bus(bus->scsi_cookie);
394	return B_OK;
395}
396
397
398/** execute list of synced pcs */
399
400static void
401exec_synced_pcs(ide_bus_info *bus, ide_synced_pc *pc_list)
402{
403	ide_synced_pc *pc;
404
405	// noone removes items from pc_list, so we don't need lock
406	// to access entries
407	for (pc = pc_list; pc; pc = pc->next) {
408		pc->func(bus, pc->arg);
409	}
410
411	// need lock now as items can be added to pc_list again as soon
412	// as <registered> is reset
413	IDE_LOCK(bus);
414
415	for (pc = pc_list; pc; pc = pc->next) {
416		pc->registered = false;
417	}
418
419	IDE_UNLOCK(bus);
420}
421
422
/**	finish bus access;
 *	check if any device wants to service pending commands + execute synced_pc;
 *	called with bus state "accessing" - on return the bus is either idle
 *	or handed over to a service request.
 *	NOTE(review): try_service() is entered with the bus lock held and is
 *	expected to take ownership of (and release) it - confirm in its
 *	implementation.
 */

void
access_finished(ide_bus_info *bus, ide_device_info *device)
{
	TRACE(("bus = %p, device = %p\n", bus, device));

	while (true) {
		ide_synced_pc *synced_pc_list;

		IDE_LOCK(bus);

		// normally, there is always an device; only exception is a
		// bus without devices, not sure whether this can really happen though
		if (device) {
			if (try_service(device))
				return;
		}

		// noone wants it, so execute pending synced_pc
		if (bus->synced_pc_list == NULL) {
			bus->state = ide_state_idle;
			IDE_UNLOCK(bus);
			return;
		}

		// detach the pending list before unlocking so newly scheduled
		// PCs go onto a fresh list
		synced_pc_list = bus->synced_pc_list;
		bus->synced_pc_list = NULL;

		IDE_UNLOCK(bus);

		exec_synced_pcs(bus, synced_pc_list);

		// executed synced_pc may have generated other sync_pc,
		// thus the loop
	}
}
462