/*-
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/isci/isci.h>

#include <sys/conf.h>
#include <sys/malloc.h>

#include <cam/cam_periph.h>
#include <cam/cam_xpt_periph.h>

#include <dev/isci/scil/sci_memory_descriptor_list.h>
#include <dev/isci/scil/sci_memory_descriptor_list_decorator.h>

#include <dev/isci/scil/scif_controller.h>
#include <dev/isci/scil/scif_library.h>
#include <dev/isci/scil/scif_io_request.h>
#include <dev/isci/scil/scif_task_request.h>
#include <dev/isci/scil/scif_remote_device.h>
#include <dev/isci/scil/scif_domain.h>
#include <dev/isci/scil/scif_user_callback.h>
#include <dev/isci/scil/scic_sgpio.h>

#include <dev/led/led.h>

void isci_action(struct cam_sim *sim, union ccb *ccb);
void isci_poll(struct cam_sim *sim);

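/* Alias for the first private pointer slot in a CCB header, used by this
 *  driver to stash a pointer to the owning SIM.
 */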
#define ccb_sim_ptr sim_priv.entries[0].ptr

/**
 * @brief This user callback will inform the user that the controller has
 *        had a serious unexpected error.  The user should note the error,
 *        disable interrupts, and wait for current ongoing processing to
 *        complete.  Subsequently, the user should reset the controller.
 *
 * @param[in]  controller This parameter specifies the controller that had
 *                        an error.
 *
 * @return none
 */
void scif_cb_controller_error(SCI_CONTROLLER_HANDLE_T controller,
    SCI_CONTROLLER_ERROR error)
{

	isci_log_message(0, "ISCI", "scif_cb_controller_error: 0x%x\n",
	    error);
}

/**
 * @brief This user callback will inform the user that the controller has
 *        finished the start process.
 *
 * @param[in]  controller This parameter specifies the controller that was
 *             started.
 * @param[in]  completion_status This parameter specifies the results of
 *             the start operation.  SCI_SUCCESS indicates successful
 *             completion.
 *
 * @return none
 */
void scif_cb_controller_start_complete(SCI_CONTROLLER_HANDLE_T controller,
    SCI_STATUS completion_status)
{
	uint32_t index;
	struct ISCI_CONTROLLER *isci_controller = (struct ISCI_CONTROLLER *)
	    sci_object_get_association(controller);

	isci_controller->is_started = TRUE;

	/* Set bits for all domains.  We will clear them one-by-one once
	 *  each domain completes discovery, or if scif_domain_discover
	 *  returns an error.  Once all bits are clear, we will register
	 *  the controller with CAM.
	 */
	isci_controller->initial_discovery_mask = (1 << SCI_MAX_DOMAINS) - 1;

	for (index = 0; index < SCI_MAX_DOMAINS; index++) {
		SCI_STATUS status;
		SCI_DOMAIN_HANDLE_T domain =
		    isci_controller->domain[index].sci_object;

		status = scif_domain_discover(
			domain,
			scif_domain_get_suggested_discover_timeout(domain),
			DEVICE_TIMEOUT
		);

		if (status != SCI_SUCCESS)
		{
			isci_controller_domain_discovery_complete(
			    isci_controller, &isci_controller->domain[index]);
		}
	}
}

/**
 * @brief This user callback will inform the user that the controller has
 *        finished the stop process.  Note that after the user calls
 *        scif_controller_stop() and before this stop complete callback is
 *        received, the user should not expect any callbacks from the
 *        framework, such as scif_cb_domain_change_notification().
 *
 * @param[in]  controller This parameter specifies the controller that was
 *             stopped.
 * @param[in]  completion_status This parameter specifies the results of
 *             the stop operation.  SCI_SUCCESS indicates successful
 *             completion.
 *
 * @return none
 */
void scif_cb_controller_stop_complete(SCI_CONTROLLER_HANDLE_T controller,
    SCI_STATUS completion_status)
{
	struct ISCI_CONTROLLER *isci_controller = (struct ISCI_CONTROLLER *)
	    sci_object_get_association(controller);

	isci_controller->is_started = FALSE;
}

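/* bus_dma load callback for single-segment mappings: record the segment's
 *  bus address so SCIL can reference the buffer by physical address.
 */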
static void
isci_single_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
	SCI_PHYSICAL_ADDRESS *phys_addr = arg;

	*phys_addr = seg[0].ds_addr;
}

/**
 * @brief This method will be invoked to allocate memory dynamically.
 *
 * @param[in]  controller This parameter represents the controller
 *             object for which to allocate memory.
 * @param[out] mde This parameter represents the memory descriptor to
 *             be filled in by the user that will reference the newly
 *             allocated memory.
 *
 * @return none
 */
void scif_cb_controller_allocate_memory(SCI_CONTROLLER_HANDLE_T controller,
    SCI_PHYSICAL_MEMORY_DESCRIPTOR_T *mde)
{
	struct ISCI_CONTROLLER *isci_controller = (struct ISCI_CONTROLLER *)
	    sci_object_get_association(controller);

	/*
	 * Note this routine is only used for buffers needed to translate
	 * SCSI UNMAP commands to ATA DSM commands for SATA disks.
	 *
	 * We first try to pull a buffer from the controller's pool, and only
	 * call contigmalloc if one isn't there.
	 */
	if (!sci_pool_empty(isci_controller->unmap_buffer_pool)) {
		sci_pool_get(isci_controller->unmap_buffer_pool,
		    mde->virtual_address);
	} else
		mde->virtual_address = contigmalloc(PAGE_SIZE,
		    M_ISCI, M_NOWAIT, 0, BUS_SPACE_MAXADDR,
		    mde->constant_memory_alignment, 0);

	if (mde->virtual_address != NULL)
		bus_dmamap_load(isci_controller->buffer_dma_tag,
		    NULL, mde->virtual_address, PAGE_SIZE,
		    isci_single_map, &mde->physical_address,
		    BUS_DMA_NOWAIT);
}

/**
 * @brief This method will be invoked to free memory dynamically.
 *
 * @param[in]  controller This parameter represents the controller
 *             object for which to free memory.
 * @param[in]  mde This parameter represents the memory descriptor that
 *             references the memory to be freed.
 *
 * @return none
 */
void scif_cb_controller_free_memory(SCI_CONTROLLER_HANDLE_T controller,
    SCI_PHYSICAL_MEMORY_DESCRIPTOR_T *mde)
{
	struct ISCI_CONTROLLER *isci_controller = (struct ISCI_CONTROLLER *)
	    sci_object_get_association(controller);

	/*
	 * Put the buffer back into the controller's buffer pool, rather
	 * than invoking contigfree.  This reduces the chance that we will
	 * be unable to get buffers when the system is under memory pressure.
	 */
	sci_pool_put(isci_controller->unmap_buffer_pool,
	    mde->virtual_address);
}

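/**
 * @brief This routine constructs an ISCI_CONTROLLER object.  It allocates
 *        and constructs the SCIF controller, links the two objects via
 *        sci_object_set_association(), and initializes the controller's
 *        lock, domains, timer pool, and unmap buffer pool.
 *
 * @param[in]  controller The driver controller object to construct.
 * @param[in]  isci The softc for the isci(4) device.
 *
 * @return none
 */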
void isci_controller_construct(struct ISCI_CONTROLLER *controller,
    struct isci_softc *isci)
{
	SCI_CONTROLLER_HANDLE_T scif_controller_handle;

	scif_library_allocate_controller(isci->sci_library_handle,
	    &scif_controller_handle);

	scif_controller_construct(isci->sci_library_handle,
	    scif_controller_handle, NULL);

	controller->isci = isci;
	controller->scif_controller_handle = scif_controller_handle;

	/* This allows us to later use
	 *  sci_object_get_association(scif_controller_handle)
	 * inside of a callback routine to get our struct ISCI_CONTROLLER object
	 */
	sci_object_set_association(scif_controller_handle, (void *)controller);

	controller->is_started = FALSE;
	controller->is_frozen = FALSE;
	controller->release_queued_ccbs = FALSE;
	controller->sim = NULL;
	controller->initial_discovery_mask = 0;

	sci_fast_list_init(&controller->pending_device_reset_list);

	mtx_init(&controller->lock, "isci", NULL, MTX_DEF);

	uint32_t domain_index;

	for (domain_index = 0; domain_index < SCI_MAX_DOMAINS; domain_index++) {
		isci_domain_construct(&controller->domain[domain_index],
		    domain_index, controller);
	}

	controller->timer_memory = malloc(
	    sizeof(struct ISCI_TIMER) * SCI_MAX_TIMERS, M_ISCI,
	    M_NOWAIT | M_ZERO);

	sci_pool_initialize(controller->timer_pool);

	struct ISCI_TIMER *timer = (struct ISCI_TIMER *)
	    controller->timer_memory;

	for (int i = 0; i < SCI_MAX_TIMERS; i++) {
		sci_pool_put(controller->timer_pool, timer++);
	}

	sci_pool_initialize(controller->unmap_buffer_pool);
}

static void isci_led_fault_func(void *priv, int onoff)
{
	struct ISCI_PHY *phy = priv;

	/* map onoff to the fault LED */
	phy->led_fault = onoff;
	scic_sgpio_update_led_state(phy->handle, 1 << phy->index,
		phy->led_fault, phy->led_locate, 0);
}

static void isci_led_locate_func(void *priv, int onoff)
{
	struct ISCI_PHY *phy = priv;

	/* map onoff to the locate LED */
	phy->led_locate = onoff;
	scic_sgpio_update_led_state(phy->handle, 1 << phy->index,
		phy->led_fault, phy->led_locate, 0);
}

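/**
 * @brief This routine applies OEM and user parameters (including any
 *        hw.isci.* tunables), sizes the controller queue, registers the
 *        controller with CAM with the simq frozen, creates the fault and
 *        locate LEDs for each phy via led(4), and then initializes the
 *        SCIF controller.
 *
 * @param[in]  controller The controller to initialize.
 *
 * @return The status returned by scif_controller_initialize().
 */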
SCI_STATUS isci_controller_initialize(struct ISCI_CONTROLLER *controller)
{
	SCIC_USER_PARAMETERS_T scic_user_parameters;
	SCI_CONTROLLER_HANDLE_T scic_controller_handle;
	char led_name[64];
	unsigned long tunable;
	int i;

	scic_controller_handle =
	    scif_controller_get_scic_handle(controller->scif_controller_handle);

	if (controller->isci->oem_parameters_found == TRUE)
	{
		scic_oem_parameters_set(
		    scic_controller_handle,
		    &controller->oem_parameters,
		    (uint8_t)(controller->oem_parameters_version));
	}

	scic_user_parameters_get(scic_controller_handle, &scic_user_parameters);

	if (TUNABLE_ULONG_FETCH("hw.isci.no_outbound_task_timeout", &tunable))
		scic_user_parameters.sds1.no_outbound_task_timeout =
		    (uint8_t)tunable;

	if (TUNABLE_ULONG_FETCH("hw.isci.ssp_max_occupancy_timeout", &tunable))
		scic_user_parameters.sds1.ssp_max_occupancy_timeout =
		    (uint16_t)tunable;

	if (TUNABLE_ULONG_FETCH("hw.isci.stp_max_occupancy_timeout", &tunable))
		scic_user_parameters.sds1.stp_max_occupancy_timeout =
		    (uint16_t)tunable;

	if (TUNABLE_ULONG_FETCH("hw.isci.ssp_inactivity_timeout", &tunable))
		scic_user_parameters.sds1.ssp_inactivity_timeout =
		    (uint16_t)tunable;

	if (TUNABLE_ULONG_FETCH("hw.isci.stp_inactivity_timeout", &tunable))
		scic_user_parameters.sds1.stp_inactivity_timeout =
		    (uint16_t)tunable;

	if (TUNABLE_ULONG_FETCH("hw.isci.max_speed_generation", &tunable))
		for (i = 0; i < SCI_MAX_PHYS; i++)
			scic_user_parameters.sds1.phys[i].max_speed_generation =
			    (uint8_t)tunable;

	scic_user_parameters_set(scic_controller_handle, &scic_user_parameters);

	/* Scheduler bug in SCU requires SCIL to reserve some task contexts
	 *  as a workaround - one per domain.
	 */
	controller->queue_depth = SCI_MAX_IO_REQUESTS - SCI_MAX_DOMAINS;

	if (TUNABLE_INT_FETCH("hw.isci.controller_queue_depth",
	    &controller->queue_depth)) {
		controller->queue_depth = max(1, min(controller->queue_depth,
		    SCI_MAX_IO_REQUESTS - SCI_MAX_DOMAINS));
	}

	/* Reserve one request so that we can ensure we have one available TC
	 *  to do internal device resets.
	 */
	controller->sim_queue_depth = controller->queue_depth - 1;

	/* Although we save one TC to do internal device resets, it is possible
	 *  we could end up using several TCs for simultaneous device resets
	 *  while at the same time having CAM fill our controller queue.  To
	 *  simulate this condition, and how our driver handles it, we can set
	 *  this io_shortage parameter, which will tell CAM that we have a
	 *  larger queue depth than we really do.
	 */
	uint32_t io_shortage = 0;
	TUNABLE_INT_FETCH("hw.isci.io_shortage", &io_shortage);
	controller->sim_queue_depth += io_shortage;

	/* Attach to CAM using xpt_bus_register now, then immediately freeze
	 *  the simq.  It will get released later when initial domain discovery
	 *  is complete.
	 */
	controller->has_been_scanned = FALSE;
	mtx_lock(&controller->lock);
	isci_controller_attach_to_cam(controller);
	xpt_freeze_simq(controller->sim, 1);
	mtx_unlock(&controller->lock);

	for (i = 0; i < SCI_MAX_PHYS; i++) {
		controller->phys[i].handle = scic_controller_handle;
		controller->phys[i].index = i;

		/* fault */
		controller->phys[i].led_fault = 0;
		sprintf(led_name, "isci.bus%d.port%d.fault", controller->index, i);
		controller->phys[i].cdev_fault = led_create(isci_led_fault_func,
		    &controller->phys[i], led_name);

		/* locate */
		controller->phys[i].led_locate = 0;
		sprintf(led_name, "isci.bus%d.port%d.locate", controller->index, i);
		controller->phys[i].cdev_locate = led_create(isci_led_locate_func,
		    &controller->phys[i], led_name);
	}

	return (scif_controller_initialize(controller->scif_controller_handle));
}

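/**
 * @brief This routine allocates the DMA-able memory described by the
 *        controller's memory descriptor list, creates the I/O request
 *        DMA tag and request pool, and carves the remote device memory
 *        into per-device objects.
 *
 * @param[in]  controller The controller for which to allocate memory.
 *
 * @return 0 on success, or the error returned by
 *         isci_allocate_dma_buffer() on failure.
 */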
int isci_controller_allocate_memory(struct ISCI_CONTROLLER *controller)
{
	int error;
	device_t device = controller->isci->device;
	uint32_t max_segment_size = isci_io_request_get_max_io_size();
	uint32_t status = 0;
	struct ISCI_MEMORY *uncached_controller_memory =
	    &controller->uncached_controller_memory;
	struct ISCI_MEMORY *cached_controller_memory =
	    &controller->cached_controller_memory;
	struct ISCI_MEMORY *request_memory =
	    &controller->request_memory;
	POINTER_UINT virtual_address;
	bus_addr_t physical_address;

	controller->mdl = sci_controller_get_memory_descriptor_list_handle(
	    controller->scif_controller_handle);

	uncached_controller_memory->size = sci_mdl_decorator_get_memory_size(
	    controller->mdl, SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS);

	error = isci_allocate_dma_buffer(device, uncached_controller_memory);

	if (error != 0)
	    return (error);

	sci_mdl_decorator_assign_memory(controller->mdl,
	    SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS,
	    uncached_controller_memory->virtual_address,
	    uncached_controller_memory->physical_address);

	cached_controller_memory->size = sci_mdl_decorator_get_memory_size(
	    controller->mdl,
	    SCI_MDE_ATTRIBUTE_CACHEABLE | SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS
	);

	error = isci_allocate_dma_buffer(device, cached_controller_memory);

	if (error != 0)
	    return (error);

	sci_mdl_decorator_assign_memory(controller->mdl,
	    SCI_MDE_ATTRIBUTE_CACHEABLE | SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS,
	    cached_controller_memory->virtual_address,
	    cached_controller_memory->physical_address);

	request_memory->size =
	    controller->queue_depth * isci_io_request_get_object_size();

	error = isci_allocate_dma_buffer(device, request_memory);

	if (error != 0)
	    return (error);

	/* For STP PIO testing, we want to ensure we can force multiple SGLs
	 *  since this has been a problem area in SCIL.  This tunable parameter
	 *  will allow us to force DMA segments to a smaller size, ensuring
	 *  that even if a physically contiguous buffer is attached to this
	 *  I/O, the DMA subsystem will pass us multiple segments in our DMA
	 *  load callback.
	 */
	TUNABLE_INT_FETCH("hw.isci.max_segment_size", &max_segment_size);
	/* Create a DMA tag for our I/O requests.  Then we can create DMA maps
	 *  from this tag and store them in each of our ISCI_IO_REQUEST
	 *  objects.  This will enable better performance than creating the
	 *  DMA maps every time we get an I/O.
	 */
	status = bus_dma_tag_create(bus_get_dma_tag(device), 0x1, 0x0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    isci_io_request_get_max_io_size(),
	    SCI_MAX_SCATTER_GATHER_ELEMENTS, max_segment_size, 0, NULL, NULL,
	    &controller->buffer_dma_tag);

	sci_pool_initialize(controller->request_pool);

	virtual_address = request_memory->virtual_address;
	physical_address = request_memory->physical_address;

	for (int i = 0; i < controller->queue_depth; i++) {
		struct ISCI_REQUEST *request =
		    (struct ISCI_REQUEST *)virtual_address;

		isci_request_construct(request,
		    controller->scif_controller_handle,
		    controller->buffer_dma_tag, physical_address);

		sci_pool_put(controller->request_pool, request);

		virtual_address += isci_request_get_object_size();
		physical_address += isci_request_get_object_size();
	}

	uint32_t remote_device_size = sizeof(struct ISCI_REMOTE_DEVICE) +
	    scif_remote_device_get_object_size();

	controller->remote_device_memory = (uint8_t *) malloc(
	    remote_device_size * SCI_MAX_REMOTE_DEVICES, M_ISCI,
	    M_NOWAIT | M_ZERO);

	sci_pool_initialize(controller->remote_device_pool);

	uint8_t *remote_device_memory_ptr = controller->remote_device_memory;

	for (int i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
		struct ISCI_REMOTE_DEVICE *remote_device =
		    (struct ISCI_REMOTE_DEVICE *)remote_device_memory_ptr;

		controller->remote_device[i] = NULL;
		remote_device->index = i;
		remote_device->is_resetting = FALSE;
		remote_device->frozen_lun_mask = 0;
		sci_fast_list_element_init(remote_device,
		    &remote_device->pending_device_reset_element);
		TAILQ_INIT(&remote_device->queued_ccbs);
		remote_device->release_queued_ccb = FALSE;
		remote_device->queued_ccb_in_progress = NULL;

		/*
		 * For the first SCI_MAX_DOMAINS device objects, do not put
		 *  them in the pool, rather assign them to each domain.  This
		 *  ensures that any device attached directly to port "i" will
		 *  always get CAM target id "i".
		 */
		if (i < SCI_MAX_DOMAINS)
			controller->domain[i].da_remote_device = remote_device;
		else
			sci_pool_put(controller->remote_device_pool,
			    remote_device);
		remote_device_memory_ptr += remote_device_size;
	}

	return (0);
}

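/**
 * @brief This routine starts the SCIF controller using the suggested start
 *        timeout and enables interrupts on the underlying SCIC controller.
 *        The handle is passed as a void pointer, which allows this routine
 *        to be used as a callback (such as the boot-time config intrhook
 *        that is disestablished once discovery completes).
 *
 * @param[in]  controller_handle The struct ISCI_CONTROLLER to start.
 *
 * @return none
 */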
void isci_controller_start(void *controller_handle)
{
	struct ISCI_CONTROLLER *controller =
	    (struct ISCI_CONTROLLER *)controller_handle;
	SCI_CONTROLLER_HANDLE_T scif_controller_handle =
	    controller->scif_controller_handle;

	scif_controller_start(scif_controller_handle,
	    scif_controller_get_suggested_start_timeout(scif_controller_handle));

	scic_controller_enable_interrupts(
	    scif_controller_get_scic_handle(controller->scif_controller_handle));
}

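/**
 * @brief This routine is called as each domain completes discovery.  Once
 *        all domains on the controller have finished their initial
 *        discovery, it releases the simq so CAM can scan the bus, starts
 *        the next controller if there is one, and otherwise disestablishes
 *        the boot-time config hook.
 *
 * @param[in]  isci_controller The controller owning the domain.
 * @param[in]  isci_domain The domain that completed discovery.
 *
 * @return none
 */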
void isci_controller_domain_discovery_complete(
    struct ISCI_CONTROLLER *isci_controller, struct ISCI_DOMAIN *isci_domain)
{
	if (!isci_controller->has_been_scanned)
	{
		/* Controller has not been scanned yet.  We'll clear
		 *  the discovery bit for this domain, then check if all bits
		 *  are now clear.  That would indicate that all domains are
		 *  done with discovery and we can then proceed with initial
		 *  scan.
		 */

		isci_controller->initial_discovery_mask &=
		    ~(1 << isci_domain->index);

		if (isci_controller->initial_discovery_mask == 0) {
			struct isci_softc *driver = isci_controller->isci;
			uint8_t next_index = isci_controller->index + 1;

			isci_controller->has_been_scanned = TRUE;

			/* Unfreeze simq to allow initial scan to proceed. */
			xpt_release_simq(isci_controller->sim, TRUE);

#if __FreeBSD_version < 800000
			/* When driver is loaded after boot, we need to
			 *  explicitly rescan here for versions <8.0, because
			 *  CAM only automatically scans new buses at boot
			 *  time.
			 */
			union ccb *ccb = xpt_alloc_ccb_nowait();

			xpt_create_path(&ccb->ccb_h.path, NULL,
			    cam_sim_path(isci_controller->sim),
			    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);

			xpt_rescan(ccb);
#endif

			if (next_index < driver->controller_count) {
				/*  There are more controllers that need to
				 *   start.  So start the next one.
				 */
				isci_controller_start(
				    &driver->controllers[next_index]);
			}
			else
			{
				/* All controllers have been started and completed discovery.
				 *  Disestablish the config hook, which will signal to the
				 *  kernel during boot that it is safe to try to find and
				 *  mount the root partition.
				 */
				config_intrhook_disestablish(
				    &driver->config_hook);
			}
		}
	}
}

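/**
 * @brief This routine allocates a CAM device queue and SIM sized to the
 *        controller's queue depth and registers the SIM with the XPT
 *        layer.  It is called with the controller lock held.
 *
 * @param[in]  controller The controller to register with CAM.
 *
 * @return 0 on success, -1 if any CAM allocation or registration fails.
 */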
int isci_controller_attach_to_cam(struct ISCI_CONTROLLER *controller)
{
	struct isci_softc *isci = controller->isci;
	device_t parent = device_get_parent(isci->device);
	int unit = device_get_unit(isci->device);
	struct cam_devq *isci_devq = cam_simq_alloc(controller->sim_queue_depth);

	if (isci_devq == NULL) {
		isci_log_message(0, "ISCI", "isci_devq is NULL\n");
		return (-1);
	}

	controller->sim = cam_sim_alloc(isci_action, isci_poll, "isci",
	    controller, unit, &controller->lock, controller->sim_queue_depth,
	    controller->sim_queue_depth, isci_devq);

	if (controller->sim == NULL) {
		isci_log_message(0, "ISCI", "cam_sim_alloc... fails\n");
		cam_simq_free(isci_devq);
		return (-1);
	}

	if (xpt_bus_register(controller->sim, parent, controller->index)
	    != CAM_SUCCESS) {
		isci_log_message(0, "ISCI", "xpt_bus_register...fails \n");
		cam_sim_free(controller->sim, TRUE);
		mtx_unlock(&controller->lock);
		return (-1);
	}

	if (xpt_create_path(&controller->path, NULL,
	    cam_sim_path(controller->sim), CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		isci_log_message(0, "ISCI", "xpt_create_path....fails\n");
		xpt_bus_deregister(cam_sim_path(controller->sim));
		cam_sim_free(controller->sim, TRUE);
		mtx_unlock(&controller->lock);
		return (-1);
	}

	return (0);
}

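/* CAM polling entry point - services completions without interrupts. */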
void isci_poll(struct cam_sim *sim)
{
	struct ISCI_CONTROLLER *controller =
	    (struct ISCI_CONTROLLER *)cam_sim_softc(sim);

	isci_interrupt_poll_handler(controller);
}

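/* CAM action entry point - dispatches CCBs submitted to this SIM. */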
661
662void isci_action(struct cam_sim *sim, union ccb *ccb)
663{
664	struct ISCI_CONTROLLER *controller =
665	    (struct ISCI_CONTROLLER *)cam_sim_softc(sim);
666
667	switch ( ccb->ccb_h.func_code ) {
668	case XPT_PATH_INQ:
669		{
670			struct ccb_pathinq *cpi = &ccb->cpi;
671			int bus = cam_sim_bus(sim);
672			ccb->ccb_h.ccb_sim_ptr = sim;
673			cpi->version_num = 1;
674			cpi->hba_inquiry = PI_TAG_ABLE;
675			cpi->target_sprt = 0;
676			cpi->hba_misc = PIM_NOBUSRESET | PIM_SEQSCAN |
677			    PIM_UNMAPPED;
678			cpi->hba_eng_cnt = 0;
679			cpi->max_target = SCI_MAX_REMOTE_DEVICES - 1;
680			cpi->max_lun = ISCI_MAX_LUN;
681#if __FreeBSD_version >= 800102
682			cpi->maxio = isci_io_request_get_max_io_size();
683#endif
684			cpi->unit_number = cam_sim_unit(sim);
685			cpi->bus_id = bus;
686			cpi->initiator_id = SCI_MAX_REMOTE_DEVICES;
687			cpi->base_transfer_speed = 300000;
688			strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
689			strncpy(cpi->hba_vid, "Intel Corp.", HBA_IDLEN);
690			strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
691			cpi->transport = XPORT_SAS;
692			cpi->transport_version = 0;
693			cpi->protocol = PROTO_SCSI;
694			cpi->protocol_version = SCSI_REV_SPC2;
695			cpi->ccb_h.status = CAM_REQ_CMP;
696			xpt_done(ccb);
697		}
698		break;
699	case XPT_GET_TRAN_SETTINGS:
700		{
701			struct ccb_trans_settings *general_settings = &ccb->cts;
702			struct ccb_trans_settings_sas *sas_settings =
703			    &general_settings->xport_specific.sas;
704			struct ccb_trans_settings_scsi *scsi_settings =
705			    &general_settings->proto_specific.scsi;
706			struct ISCI_REMOTE_DEVICE *remote_device;
707
708			remote_device = controller->remote_device[ccb->ccb_h.target_id];
709
710			if (remote_device == NULL) {
711				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
712				ccb->ccb_h.status &= ~CAM_STATUS_MASK;
713				ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
714				xpt_done(ccb);
715				break;
716			}
717
718			general_settings->protocol = PROTO_SCSI;
719			general_settings->transport = XPORT_SAS;
720			general_settings->protocol_version = SCSI_REV_SPC2;
721			general_settings->transport_version = 0;
722			scsi_settings->valid = CTS_SCSI_VALID_TQ;
723			scsi_settings->flags = CTS_SCSI_FLAGS_TAG_ENB;
724			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
725			ccb->ccb_h.status |= CAM_REQ_CMP;
726
727			sas_settings->bitrate =
728			    isci_remote_device_get_bitrate(remote_device);
729
730			if (sas_settings->bitrate != 0)
731				sas_settings->valid = CTS_SAS_VALID_SPEED;
732
733			xpt_done(ccb);
734		}
735		break;
736	case XPT_SCSI_IO:
737		isci_io_request_execute_scsi_io(ccb, controller);
738		break;
739#if __FreeBSD_version >= 900026
740	case XPT_SMP_IO:
741		isci_io_request_execute_smp_io(ccb, controller);
742		break;
743#endif
744	case XPT_SET_TRAN_SETTINGS:
745		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
746		ccb->ccb_h.status |= CAM_REQ_CMP;
747		xpt_done(ccb);
748		break;
749	case XPT_CALC_GEOMETRY:
750		cam_calc_geometry(&ccb->ccg, /*extended*/1);
751		xpt_done(ccb);
752		break;
753	case XPT_RESET_DEV:
754		{
755			struct ISCI_REMOTE_DEVICE *remote_device =
756			    controller->remote_device[ccb->ccb_h.target_id];
757
758			if (remote_device != NULL)
759				isci_remote_device_reset(remote_device, ccb);
760			else {
761				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
762				ccb->ccb_h.status &= ~CAM_STATUS_MASK;
763				ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
764				xpt_done(ccb);
765			}
766		}
767		break;
768	case XPT_RESET_BUS:
769		ccb->ccb_h.status = CAM_REQ_CMP;
770		xpt_done(ccb);
771		break;
772	default:
773		isci_log_message(0, "ISCI", "Unhandled func_code 0x%x\n",
774		    ccb->ccb_h.func_code);
775		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
776		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
777		ccb->ccb_h.status |= CAM_REQ_INVALID;
778		xpt_done(ccb);
779		break;
780	}
781}
782
/*
 * Unfortunately, SCIL doesn't cleanly handle retry conditions.
 *  CAM_REQUEUE_REQ works only when no one is using the pass(4) interface.  So
 *  when SCIL denotes an I/O needs to be retried (typically because of mixing
 *  tagged/non-tagged ATA commands, or running out of NCQ slots), we queue
 *  these I/Os internally.  Once SCIL completes an I/O to this device, or we
 *  get a ready notification, we will retry the first I/O on the queue.
 *  Unfortunately, SCIL also doesn't cleanly handle starting the new I/O
 *  within the context of the completion handler, so we need to retry these
 *  I/Os after the completion handler is done executing.
 */
void
isci_controller_release_queued_ccbs(struct ISCI_CONTROLLER *controller)
{
	struct ISCI_REMOTE_DEVICE *dev;
	struct ccb_hdr *ccb_h;
	int dev_idx;

	KASSERT(mtx_owned(&controller->lock), ("controller lock not owned"));

	controller->release_queued_ccbs = FALSE;
	for (dev_idx = 0;
	     dev_idx < SCI_MAX_REMOTE_DEVICES;
	     dev_idx++) {

		dev = controller->remote_device[dev_idx];
		if (dev != NULL &&
		    dev->release_queued_ccb == TRUE &&
		    dev->queued_ccb_in_progress == NULL) {
			dev->release_queued_ccb = FALSE;
			ccb_h = TAILQ_FIRST(&dev->queued_ccbs);

			if (ccb_h == NULL)
				continue;

			isci_log_message(1, "ISCI", "release %p %x\n", ccb_h,
			    ((union ccb *)ccb_h)->csio.cdb_io.cdb_bytes[0]);

			dev->queued_ccb_in_progress = (union ccb *)ccb_h;
			isci_io_request_execute_scsi_io(
			    (union ccb *)ccb_h, controller);
		}
	}
}