kernel - Change ccb state with lock held
[dragonfly.git] / sys / dev / disk / sili / sili.c
1/*
2 * (MPSAFE)
3 *
4 * Copyright (c) 2009 The DragonFly Project. All rights reserved.
5 *
6 * This code is derived from software contributed to The DragonFly Project
7 * by Matthew Dillon <dillon@backplane.com>
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 *
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in
17 * the documentation and/or other materials provided with the
18 * distribution.
19 * 3. Neither the name of The DragonFly Project nor the names of its
20 * contributors may be used to endorse or promote products derived
21 * from this software without specific, prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
26 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
27 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
28 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
29 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
30 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
31 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
32 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
33 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 *
37 * Copyright (c) 2006 David Gwynne <dlg@openbsd.org>
38 *
39 * Permission to use, copy, modify, and distribute this software for any
40 * purpose with or without fee is hereby granted, provided that the above
41 * copyright notice and this permission notice appear in all copies.
42 *
43 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
44 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
45 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
46 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
47 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
48 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
49 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
50 *
51 *
52 *
53 * $OpenBSD: sili.c,v 1.147 2009/02/16 21:19:07 miod Exp $
54 */
55
56#include "sili.h"
57
58void sili_port_interrupt_enable(struct sili_port *ap);
59void sili_port_interrupt_redisable(struct sili_port *ap);
60void sili_port_interrupt_reenable(struct sili_port *ap);
61
62int sili_load_prb(struct sili_ccb *);
63void sili_unload_prb(struct sili_ccb *);
64static void sili_load_prb_callback(void *info, bus_dma_segment_t *segs,
65 int nsegs, int error);
66void sili_start(struct sili_ccb *);
67static void sili_port_reinit(struct sili_port *ap);
68int sili_port_softreset(struct sili_port *ap);
69int sili_port_hardreset(struct sili_port *ap);
70void sili_port_hardstop(struct sili_port *ap);
71void sili_port_listen(struct sili_port *ap);
72
73static void sili_ata_cmd_timeout_unserialized(void *);
74static int sili_core_timeout(struct sili_ccb *ccb, int really_error);
75void sili_check_active_timeouts(struct sili_port *ap);
76
77void sili_issue_pending_commands(struct sili_port *ap, struct sili_ccb *ccb);
78
79void sili_port_read_ncq_error(struct sili_port *, int);
80
81struct sili_dmamem *sili_dmamem_alloc(struct sili_softc *, bus_dma_tag_t tag);
82void sili_dmamem_free(struct sili_softc *, struct sili_dmamem *);
83static void sili_dmamem_saveseg(void *info, bus_dma_segment_t *segs, int nsegs, int error);
84
85static void sili_dummy_done(struct ata_xfer *xa);
86static void sili_empty_done(struct sili_ccb *ccb);
87static void sili_ata_cmd_done(struct sili_ccb *ccb);
88
89/*
90 * Initialize the global SILI hardware. This code does not set up any of
91 * its ports.
92 */
93int
94sili_init(struct sili_softc *sc)
95{
96 DPRINTF(SILI_D_VERBOSE, " GHC 0x%b",
97 sili_read(sc, SILI_REG_GHC), SILI_FMT_GHC);
98
99 /*
100 * Reset the entire chip. This also resets all ports.
101 *
102 * The spec doesn't say anything about how long we have to
103 * wait, so wait 10ms.
104 */
105 sili_write(sc, SILI_REG_GCTL, SILI_REG_GCTL_GRESET);
106 sili_os_sleep(10);
107 sili_write(sc, SILI_REG_GCTL, 0);
108 sili_os_sleep(10);
109
110 return (0);
111}
112
113/*
114 * Allocate and initialize an SILI port.
115 */
116int
117sili_port_alloc(struct sili_softc *sc, u_int port)
118{
119 struct sili_port *ap;
120 struct ata_port *at;
121 struct sili_prb *prb;
122 struct sili_ccb *ccb;
123 int rc = ENOMEM;
124 int error;
125 int i;
126
127 ap = kmalloc(sizeof(*ap), M_DEVBUF, M_WAITOK | M_ZERO);
128 ap->ap_err_scratch = kmalloc(512, M_DEVBUF, M_WAITOK | M_ZERO);
129
130 ksnprintf(ap->ap_name, sizeof(ap->ap_name), "%s%d.%d",
131 device_get_name(sc->sc_dev),
132 device_get_unit(sc->sc_dev),
133 port);
134 sc->sc_ports[port] = ap;
135
136 /*
137	 * Allocate enough so we never have to reallocate; it makes
138	 * things easier.
139 *
140 * ap_pmcount will be reduced by the scan if we encounter the
141 * port multiplier port prior to target 15.
142 */
143 if (ap->ap_ata == NULL) {
144 ap->ap_ata = kmalloc(sizeof(*ap->ap_ata) * SILI_MAX_PMPORTS,
145 M_DEVBUF, M_INTWAIT | M_ZERO);
146 for (i = 0; i < SILI_MAX_PMPORTS; ++i) {
147 at = &ap->ap_ata[i];
148 at->at_sili_port = ap;
149 at->at_target = i;
150 at->at_probe = ATA_PROBE_NEED_INIT;
151 at->at_features |= ATA_PORT_F_RESCAN;
152 ksnprintf(at->at_name, sizeof(at->at_name),
153 "%s.%d", ap->ap_name, i);
154 }
155 }
156 if (bus_space_subregion(sc->sc_piot, sc->sc_pioh,
157 SILI_PORT_REGION(port), SILI_PORT_SIZE,
158 &ap->ap_ioh) != 0) {
159 device_printf(sc->sc_dev,
160 "unable to create register window for port %d\n",
161 port);
162 goto freeport;
163 }
164
165 ap->ap_sc = sc;
166 ap->ap_num = port;
167 ap->ap_probe = ATA_PROBE_NEED_INIT;
168 TAILQ_INIT(&ap->ap_ccb_free);
169 TAILQ_INIT(&ap->ap_ccb_pending);
170 lockinit(&ap->ap_ccb_lock, "silipo", 0, 0);
171
172 /* Disable port interrupts */
173 sili_pwrite(ap, SILI_PREG_INT_DISABLE, SILI_PREG_INT_MASK);
174
175 /*
176 * Reset the port. This is similar to a Device Reset but far
177 * more invasive. We use Device Reset in our hardreset function.
178 * This function also does the same OOB initialization sequence
179 * that Device Reset does.
180 *
181 * NOTE: SILI_PREG_STATUS_READY will not be asserted unless and until
182 * a device is connected to the port, so we can't use it to
183 * verify that the port exists.
184 */
185 sili_pwrite(ap, SILI_PREG_CTL_SET, SILI_PREG_CTL_RESET);
186 if (sili_pread(ap, SILI_PREG_STATUS) & SILI_PREG_STATUS_READY) {
187 device_printf(sc->sc_dev,
188 "Port %d will not go into reset\n", port);
189 goto freeport;
190 }
191 sili_os_sleep(10);
192 sili_pwrite(ap, SILI_PREG_CTL_CLR, SILI_PREG_CTL_RESET);
193
194 /*
195 * Allocate the SGE Table
196 */
197 ap->ap_dmamem_prbs = sili_dmamem_alloc(sc, sc->sc_tag_prbs);
198 if (ap->ap_dmamem_prbs == NULL) {
199 kprintf("%s: NOSGET\n", PORTNAME(ap));
200 goto freeport;
201 }
202
203 /*
204 * Set up the SGE table base address
205 */
206 ap->ap_prbs = (struct sili_prb *)SILI_DMA_KVA(ap->ap_dmamem_prbs);
207
208 /*
209 * Allocate a CCB for each command slot
210 */
211 ap->ap_ccbs = kmalloc(sizeof(struct sili_ccb) * sc->sc_ncmds, M_DEVBUF,
212 M_WAITOK | M_ZERO);
213 if (ap->ap_ccbs == NULL) {
214 device_printf(sc->sc_dev,
215 "unable to allocate command list for port %d\n",
216 port);
217 goto freeport;
218 }
219
220 /*
221 * Most structures are in the port BAR. Assign convenient
222 * pointers in the CCBs
223 */
224 for (i = 0; i < sc->sc_ncmds; i++) {
225 ccb = &ap->ap_ccbs[i];
226
227 error = bus_dmamap_create(sc->sc_tag_data, BUS_DMA_ALLOCNOW,
228 &ccb->ccb_dmamap);
229 if (error) {
230 device_printf(sc->sc_dev,
231 "unable to create dmamap for port %d "
232 "ccb %d\n", port, i);
233 goto freeport;
234 }
235
236 /*
237 * WARNING!!! Access to the rfis is only allowed under very
238 * carefully controlled circumstances because it
239 * is located in the LRAM and reading from the
240 * LRAM has hardware issues which can blow the
241 * port up. I kid you not (from Linux, and
242 * verified by testing here).
243 */
244 callout_init(&ccb->ccb_timeout);
245 ccb->ccb_slot = i;
246 ccb->ccb_port = ap;
247 ccb->ccb_prb = &ap->ap_prbs[i];
248 ccb->ccb_prb_paddr = SILI_DMA_DVA(ap->ap_dmamem_prbs) +
249 sizeof(*ccb->ccb_prb) * i;
250 ccb->ccb_xa.fis = &ccb->ccb_prb->prb_h2d;
251 prb = bus_space_kva(ap->ap_sc->sc_iot, ap->ap_ioh,
252 SILI_PREG_LRAM_SLOT(i));
253 ccb->ccb_prb_lram = prb;
254 /*
255 * Point our rfis to host-memory instead of the LRAM PRB.
256 * It will be copied back if ATA_F_AUTOSENSE is set. The
257 * LRAM PRB is buggy.
258 */
259 /*ccb->ccb_xa.rfis = &prb->prb_d2h;*/
260 ccb->ccb_xa.rfis = (void *)ccb->ccb_xa.fis;
261
262 ccb->ccb_xa.packetcmd = prb_packet(ccb->ccb_prb);
263 ccb->ccb_xa.tag = i;
264
265 ccb->ccb_xa.state = ATA_S_COMPLETE;
266
267 /*
268 * Reserve CCB[1] as the error CCB. It doesn't matter
269 * which one we use for the Sili controllers.
270 */
271 if (i == 1)
272 ap->ap_err_ccb = ccb;
273 else
274 sili_put_ccb(ccb);
275 }
276 /*
277 * Do not call sili_port_init() here, the helper thread will
278 * call it for the parallel probe
279 */
280 sili_os_start_port(ap);
281 return(0);
282freeport:
283 sili_port_free(sc, port);
284 return (rc);
285}
286
287/*
288 * This is called once by the low level attach (from the helper thread)
289 * to get the port state machine rolling, and typically only called again
290 * on a hot-plug insertion event.
291 *
292 * This is called for PM attachments and hot-plug insertion events, and
293 * typically not called again until after an unplug/replug sequence.
294 *
295 * Returns 0 if a device is successfully detected.
296 */
297int
298sili_port_init(struct sili_port *ap)
299{
300 /*
301 * Do a very hard reset of the port
302 */
303 sili_pwrite(ap, SILI_PREG_CTL_SET, SILI_PREG_CTL_RESET);
304 sili_os_sleep(10);
305 sili_pwrite(ap, SILI_PREG_CTL_CLR, SILI_PREG_CTL_RESET);
306
307 /*
308 * Register initialization
309 */
310 sili_pwrite(ap, SILI_PREG_FIFO_CTL,
311 SILI_PREG_FIFO_CTL_ENCODE(1024, 1024));
312 sili_pwrite(ap, SILI_PREG_CTL_CLR, SILI_PREG_CTL_32BITDMA |
313 SILI_PREG_CTL_PMA);
314 sili_pwrite(ap, SILI_PREG_CTL_SET, SILI_PREG_CTL_NOAUTOCC);
315 if (ap->ap_sc->sc_flags & SILI_F_SSNTF)
316 sili_pwrite(ap, SILI_PREG_SNTF, -1);
317 ap->ap_probe = ATA_PROBE_NEED_HARD_RESET;
318 ap->ap_pmcount = 0;
319 sili_port_interrupt_enable(ap);
320 return (0);
321}
322
323/*
324 * Handle an errored port. This routine is called when the only
325 * commands left on the queue are expired, meaning we can safely
326 * go through a port init to clear its state.
327 *
328 * We complete the expired CCBs and then restart the queue.
329 */
330static
331void
332sili_port_reinit(struct sili_port *ap)
333{
334 struct sili_ccb *ccb;
335 struct ata_port *at;
336 int slot;
337 int target;
338 u_int32_t data;
339
340 if (bootverbose || 1) {
341 kprintf("%s: reiniting port after error reent=%d "
342 "expired=%08x\n",
343 PORTNAME(ap),
344 (ap->ap_flags & AP_F_REINIT_ACTIVE),
345 ap->ap_expired);
346 }
347
348 /*
349 * Clear port resume, clear bits 16:13 in the port device status
350 * register. This is from the data sheet.
351 *
352 * Data sheet does not specify a delay but it seems prudent.
353 */
354 sili_pwrite(ap, SILI_PREG_CTL_CLR, SILI_PREG_CTL_RESUME);
355 sili_os_sleep(10);
356 for (target = 0; target < SILI_MAX_PMPORTS; ++target) {
357 data = sili_pread(ap, SILI_PREG_PM_STATUS(target));
358 data &= ~(SILI_PREG_PM_STATUS_SERVICE |
359 SILI_PREG_PM_STATUS_LEGACY |
360 SILI_PREG_PM_STATUS_NATIVE |
361 SILI_PREG_PM_STATUS_VBSY);
362 sili_pwrite(ap, SILI_PREG_PM_STATUS(target), data);
363 sili_pwrite(ap, SILI_PREG_PM_QACTIVE(target), 0);
364 }
365
366 /*
367 * Issue a Port Initialize and wait for it to clear. This flushes
368 * commands but does not reset the port. Then wait for port ready.
369 */
370 sili_pwrite(ap, SILI_PREG_CTL_SET, SILI_PREG_CTL_INIT);
371 if (sili_pwait_clr_to(ap, 5000, SILI_PREG_STATUS, SILI_PREG_CTL_INIT)) {
372 kprintf("%s: Unable to reinit, port failed\n",
373 PORTNAME(ap));
374 }
375 if (sili_pwait_set(ap, SILI_PREG_STATUS, SILI_PREG_STATUS_READY)) {
376 kprintf("%s: Unable to reinit, port will not come ready\n",
377 PORTNAME(ap));
378 }
379
380 /*
381 * If reentrant, stop here. Otherwise the state for the original
382	 * sili_port_reinit() will get ripped out from under it.
383 */
384 if (ap->ap_flags & AP_F_REINIT_ACTIVE)
385 return;
386 ap->ap_flags |= AP_F_REINIT_ACTIVE;
387
388 /*
389 * Read the LOG ERROR page for targets that returned a specific
390 * D2H FIS with ERR set.
391 *
392 * Don't bother if we are already using the error CCB.
393 */
394 if ((ap->ap_flags & AP_F_ERR_CCB_RESERVED) == 0) {
395 for (target = 0; target < SILI_MAX_PMPORTS; ++target) {
396 at = &ap->ap_ata[target];
397 if (at->at_features & ATA_PORT_F_READLOG) {
398 at->at_features &= ~ATA_PORT_F_READLOG;
399 sili_port_read_ncq_error(ap, target);
400 }
401 }
402 }
403
404 /*
405 * Finally clean out the expired commands, we've probed the error
406 * status (or hopefully probed the error status). Well, ok,
407 * we probably didn't XXX.
408 */
409 while (ap->ap_expired) {
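		/*
		 * ffs() returns the 1-based index of the lowest set bit
		 * (or 0 if no bits are set), so this picks off the lowest
		 * expired slot each iteration.
		 */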
410 slot = ffs(ap->ap_expired) - 1;
411 ap->ap_expired &= ~(1 << slot);
412 KKASSERT(ap->ap_active & (1 << slot));
413 ap->ap_active &= ~(1 << slot);
414 --ap->ap_active_cnt;
415 ccb = &ap->ap_ccbs[slot];
416 ccb->ccb_xa.state = ATA_S_TIMEOUT;
417 ccb->ccb_done(ccb);
418 ccb->ccb_xa.complete(&ccb->ccb_xa);
419 }
420 ap->ap_flags &= ~AP_F_REINIT_ACTIVE;
421
422 /*
423 * Wow. All done. We can get the port moving again.
424 */
425 if (ap->ap_probe == ATA_PROBE_FAILED) {
426 kprintf("%s: reinit failed, port is dead\n", PORTNAME(ap));
427 while ((ccb = TAILQ_FIRST(&ap->ap_ccb_pending)) != NULL) {
428 TAILQ_REMOVE(&ap->ap_ccb_pending, ccb, ccb_entry);
429 ccb->ccb_xa.flags &= ~ATA_F_TIMEOUT_DESIRED;
430 ccb->ccb_xa.state = ATA_S_TIMEOUT;
431 ccb->ccb_done(ccb);
432 ccb->ccb_xa.complete(&ccb->ccb_xa);
433 }
434 } else {
435 sili_issue_pending_commands(ap, NULL);
436 }
437}
438
439/*
440 * Enable or re-enable interrupts on a port.
441 *
442 * This routine is called from the port initialization code or from the
443 * helper thread as the real interrupt may be forced to turn off certain
444 * interrupt sources.
445 */
446void
447sili_port_interrupt_enable(struct sili_port *ap)
448{
449 u_int32_t data;
450
451 data = SILI_PREG_INT_CCOMPLETE | SILI_PREG_INT_CERROR |
452 SILI_PREG_INT_PHYRDYCHG | SILI_PREG_INT_DEVEXCHG |
453 SILI_PREG_INT_DECODE | SILI_PREG_INT_CRC |
454 SILI_PREG_INT_HANDSHK | SILI_PREG_INT_PMCHANGE;
455 if (ap->ap_sc->sc_flags & SILI_F_SSNTF)
456 data |= SILI_PREG_INT_SDB;
457 sili_pwrite(ap, SILI_PREG_INT_ENABLE, data);
458}
459
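/*
 * Mask (redisable) or unmask (reenable) this port's bit in the global
 * interrupt control register.  The per-port bits covered by
 * SILI_REG_GINT_PORTMASK act as interrupt enables; the interrupt handler
 * masks a port when it cannot process its events inline and signals the
 * helper thread, which re-enables the port once the work is done.
 */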
460void
461sili_port_interrupt_redisable(struct sili_port *ap)
462{
463 u_int32_t data;
464
465 data = sili_read(ap->ap_sc, SILI_REG_GCTL);
466 data &= SILI_REG_GINT_PORTMASK;
467 data &= ~(1 << ap->ap_num);
468 sili_write(ap->ap_sc, SILI_REG_GCTL, data);
469}
470
471void
472sili_port_interrupt_reenable(struct sili_port *ap)
473{
474 u_int32_t data;
475
476 data = sili_read(ap->ap_sc, SILI_REG_GCTL);
477 data &= SILI_REG_GINT_PORTMASK;
478 data |= (1 << ap->ap_num);
479 sili_write(ap->ap_sc, SILI_REG_GCTL, data);
480}
481
482/*
483 * Run the port / target state machine from a main context.
484 *
485 * The state machine for the port is always run.
486 *
487 * If atx is non-NULL run the state machine for a particular target.
488 * If atx is NULL run the state machine for all targets.
489 */
490void
491sili_port_state_machine(struct sili_port *ap, int initial)
492{
493 struct ata_port *at;
494 u_int32_t data;
495 int target;
496 int didsleep;
497 int loop;
498
499 /*
500 * State machine for port. Note that CAM is not yet associated
501 * during the initial parallel probe and the port's probe state
502 * will not get past ATA_PROBE_NEED_IDENT.
503 */
504 {
505 if (initial == 0 && ap->ap_probe <= ATA_PROBE_NEED_HARD_RESET) {
506 kprintf("%s: Waiting 7 seconds on insertion\n",
507 PORTNAME(ap));
508 sili_os_sleep(7000);
509 initial = 1;
510 }
511 if (ap->ap_probe == ATA_PROBE_NEED_INIT)
512 sili_port_init(ap);
513 if (ap->ap_probe == ATA_PROBE_NEED_HARD_RESET)
514 sili_port_reset(ap, NULL, 1);
515 if (ap->ap_probe == ATA_PROBE_NEED_SOFT_RESET)
516 sili_port_reset(ap, NULL, 0);
517 if (ap->ap_probe == ATA_PROBE_NEED_IDENT)
518 sili_cam_probe(ap, NULL);
519 }
520 if (ap->ap_type != ATA_PORT_T_PM) {
521 if (ap->ap_probe == ATA_PROBE_FAILED) {
522 sili_cam_changed(ap, NULL, 0);
523 } else if (ap->ap_probe >= ATA_PROBE_NEED_IDENT) {
524 sili_cam_changed(ap, NULL, 1);
525 }
526 return;
527 }
528
529 /*
530 * Port Multiplier state machine.
531 *
532 * Get a mask of changed targets and combine with any runnable
533 * states already present.
534 */
535 for (loop = 0; ;++loop) {
536 if (sili_pm_read(ap, 15, SATA_PMREG_EINFO, &data)) {
537 kprintf("%s: PM unable to read hot-plug bitmap\n",
538 PORTNAME(ap));
539 break;
540 }
541
542 /*
543 * Do at least one loop, then stop if no more state changes
544	 * have occurred.  The PM might not generate a new
545 * notification until we clear the entire bitmap.
546 */
547 if (loop && data == 0)
548 break;
549
550 /*
551 * New devices showing up in the bitmap require some spin-up
552 * time before we start probing them. Reset didsleep. The
553 * first new device we detect will sleep before probing.
554 *
555	 * This only applies to devices whose change bit is set in
556 * the data, and does not apply to the initial boot-time
557 * probe.
558 */
559 didsleep = 0;
560
561 for (target = 0; target < ap->ap_pmcount; ++target) {
562 at = &ap->ap_ata[target];
563
564 /*
565 * Check the target state for targets behind the PM
566 * which have changed state. This will adjust
567 * at_probe and set ATA_PORT_F_RESCAN
568 *
569 * We want to wait at least 10 seconds before probing
570 * a newly inserted device. If the check status
571 * indicates a device is present and in need of a
572 * hard reset, we make sure we have slept before
573 * continuing.
574 *
575 * We also need to wait at least 1 second for the
576 * PHY state to change after insertion, if we
577 * haven't already waited the 10 seconds.
578 *
579 * NOTE: When pm_check_good finds a good port it
580 * typically starts us in probe state
581 * NEED_HARD_RESET rather than INIT.
582 */
583 if (data & (1 << target)) {
584 if (initial == 0 && didsleep == 0)
585 sili_os_sleep(1000);
586 sili_pm_check_good(ap, target);
587 if (initial == 0 && didsleep == 0 &&
588 at->at_probe <= ATA_PROBE_NEED_HARD_RESET
589 ) {
590 didsleep = 1;
591 kprintf("%s: Waiting 10 seconds on insertion\n", PORTNAME(ap));
592 sili_os_sleep(10000);
593 }
594 }
595
596 /*
597 * Report hot-plug events before the probe state
598 * really gets hot. Only actual events are reported
599 * here to reduce spew.
600 */
601 if (data & (1 << target)) {
602 kprintf("%s: HOTPLUG (PM) - ", ATANAME(ap, at));
603 switch(at->at_probe) {
604 case ATA_PROBE_NEED_INIT:
605 case ATA_PROBE_NEED_HARD_RESET:
606 kprintf("Device inserted\n");
607 break;
608 case ATA_PROBE_FAILED:
609 kprintf("Device removed\n");
610 break;
611 default:
612 kprintf("Device probe in progress\n");
613 break;
614 }
615 }
616
617 /*
618 * Run through the state machine as necessary if
619 * the port is not marked failed.
620 *
621 * The state machine may stop at NEED_IDENT if
622 * CAM is not yet attached.
623 *
624 * Acquire exclusive access to the port while we
625 * are doing this. This prevents command-completion
626 * from queueing commands for non-polled targets
627	 * in between our probe steps.  We need to do this
628 * because the reset probes can generate severe PHY
629 * and protocol errors and soft-brick the port.
630 */
631 if (at->at_probe != ATA_PROBE_FAILED &&
632 at->at_probe != ATA_PROBE_GOOD) {
633 if (at->at_probe == ATA_PROBE_NEED_INIT)
634 sili_pm_port_init(ap, at);
635 if (at->at_probe == ATA_PROBE_NEED_HARD_RESET)
636 sili_port_reset(ap, at, 1);
637 if (at->at_probe == ATA_PROBE_NEED_SOFT_RESET)
638 sili_port_reset(ap, at, 0);
639 if (at->at_probe == ATA_PROBE_NEED_IDENT)
640 sili_cam_probe(ap, at);
641 }
642
643 /*
644 * Add or remove from CAM
645 */
646 if (at->at_features & ATA_PORT_F_RESCAN) {
647 at->at_features &= ~ATA_PORT_F_RESCAN;
648 if (at->at_probe == ATA_PROBE_FAILED) {
649 sili_cam_changed(ap, at, 0);
650 } else if (at->at_probe >= ATA_PROBE_NEED_IDENT) {
651 sili_cam_changed(ap, at, 1);
652 }
653 }
654 data &= ~(1 << target);
655 }
656 if (data) {
657 kprintf("%s: WARNING (PM): extra bits set in "
658 "EINFO: %08x\n", PORTNAME(ap), data);
659 while (target < SILI_MAX_PMPORTS) {
660 sili_pm_check_good(ap, target);
661 ++target;
662 }
663 }
664 }
665}
666
667/*
668 * De-initialize and detach a port.
669 */
670void
671sili_port_free(struct sili_softc *sc, u_int port)
672{
673 struct sili_port *ap = sc->sc_ports[port];
674 struct sili_ccb *ccb;
675
676 /*
677 * Ensure port is disabled and its interrupts are all flushed.
678 */
679 if (ap->ap_sc) {
680 sili_os_stop_port(ap);
681 sili_pwrite(ap, SILI_PREG_INT_DISABLE, SILI_PREG_INT_MASK);
682 sili_pwrite(ap, SILI_PREG_CTL_SET, SILI_PREG_CTL_RESET);
683 sili_write(ap->ap_sc, SILI_REG_GCTL,
684 sili_read(ap->ap_sc, SILI_REG_GCTL) &
685 ~SILI_REG_GINT_PORTST(ap->ap_num));
686 }
687
688 if (ap->ap_ccbs) {
689 while ((ccb = sili_get_ccb(ap)) != NULL) {
690 if (ccb->ccb_dmamap) {
691 bus_dmamap_destroy(sc->sc_tag_data,
692 ccb->ccb_dmamap);
693 ccb->ccb_dmamap = NULL;
694 }
695 }
696 if ((ccb = ap->ap_err_ccb) != NULL) {
697 if (ccb->ccb_dmamap) {
698 bus_dmamap_destroy(sc->sc_tag_data,
699 ccb->ccb_dmamap);
700 ccb->ccb_dmamap = NULL;
701 }
702 ap->ap_err_ccb = NULL;
703 }
704 kfree(ap->ap_ccbs, M_DEVBUF);
705 ap->ap_ccbs = NULL;
706 }
707
708 if (ap->ap_dmamem_prbs) {
709 sili_dmamem_free(sc, ap->ap_dmamem_prbs);
710 ap->ap_dmamem_prbs = NULL;
711 }
712 if (ap->ap_ata) {
713 kfree(ap->ap_ata, M_DEVBUF);
714 ap->ap_ata = NULL;
715 }
716 if (ap->ap_err_scratch) {
717 kfree(ap->ap_err_scratch, M_DEVBUF);
718 ap->ap_err_scratch = NULL;
719 }
720
721	/* bus_space(9) says we don't free the subregion's handle */
722
723 kfree(ap, M_DEVBUF);
724 sc->sc_ports[port] = NULL;
725}
726
727/*
728 * Reset a port.
729 *
730 * If hard is 0 perform a softreset of the port.
731 * If hard is 1 perform a hard reset of the port.
732 * If hard is 2 perform a hard reset of the port and cycle the phy.
733 *
734 * If at is non-NULL an indirect port via a port-multiplier is being
735 * reset, otherwise a direct port is being reset.
736 *
737 * NOTE: Indirect ports can only be soft-reset.
738 */
739int
740sili_port_reset(struct sili_port *ap, struct ata_port *at, int hard)
741{
742 int rc;
743
744 if (hard) {
745 if (at)
746 rc = sili_pm_hardreset(ap, at->at_target, hard);
747 else
748 rc = sili_port_hardreset(ap);
749 } else {
750 if (at)
751 rc = sili_pm_softreset(ap, at->at_target);
752 else
753 rc = sili_port_softreset(ap);
754 }
755 return(rc);
756}
757
758/*
759 * SILI soft reset, Section 10.4.1
760 *
761 * (at) will be NULL when soft-resetting a directly-attached device, and
762 * non-NULL when soft-resetting a device through a port multiplier.
763 *
764 * This function keeps port communications intact and attempts to generate
765 * a reset to the connected device using device commands.
766 */
767int
768sili_port_softreset(struct sili_port *ap)
769{
770 struct sili_ccb *ccb = NULL;
771 struct sili_prb *prb;
772 int error;
773 u_int32_t sig;
774
775 error = EIO;
776
777 if (bootverbose)
778 kprintf("%s: START SOFTRESET\n", PORTNAME(ap));
779
780 crit_enter();
781 ap->ap_state = AP_S_NORMAL;
782
783 /*
784 * Prep the special soft-reset SII command.
785 */
786 ccb = sili_get_err_ccb(ap);
787 ccb->ccb_done = sili_empty_done;
788 ccb->ccb_xa.flags = ATA_F_POLL | ATA_F_AUTOSENSE | ATA_F_EXCLUSIVE;
789 ccb->ccb_xa.complete = sili_dummy_done;
790 ccb->ccb_xa.at = NULL;
791
792 prb = ccb->ccb_prb;
793 bzero(&prb->prb_h2d, sizeof(prb->prb_h2d));
794 prb->prb_h2d.flags = 0;
795 prb->prb_control = SILI_PRB_CTRL_SOFTRESET;
796 prb->prb_override = 0;
797 prb->prb_xfer_count = 0;
798
799 ccb->ccb_xa.state = ATA_S_PENDING;
800
801 /*
802 * NOTE: Must use sili_quick_timeout() because we hold the err_ccb
803 */
804 if (sili_poll(ccb, 8000, sili_quick_timeout) != ATA_S_COMPLETE) {
805 kprintf("%s: First FIS failed\n", PORTNAME(ap));
806 goto err;
807 }
808
809 sig = (prb->prb_d2h.lba_high << 24) |
810 (prb->prb_d2h.lba_mid << 16) |
811 (prb->prb_d2h.lba_low << 8) |
812 (prb->prb_d2h.sector_count);
813 if (bootverbose)
814 kprintf("%s: SOFTRESET SIGNATURE %08x\n", PORTNAME(ap), sig);
815
816 /*
817 * If the softreset is trying to clear a BSY condition after a
818 * normal portreset we assign the port type.
819 *
820 * If the softreset is being run first as part of the ccb error
821 * processing code then report if the device signature changed
822 * unexpectedly.
823 */
824 if (ap->ap_type == ATA_PORT_T_NONE) {
825 ap->ap_type = sili_port_signature(ap, NULL, sig);
826 } else {
827 if (sili_port_signature(ap, NULL, sig) != ap->ap_type) {
828 kprintf("%s: device signature unexpectedly "
829 "changed\n", PORTNAME(ap));
830 error = EBUSY; /* XXX */
831 }
832 }
833 error = 0;
834err:
835 if (ccb != NULL) {
836 sili_put_err_ccb(ccb);
837 }
838
839 /*
840 * If we failed to softreset make the port quiescent, otherwise
841 * make sure the port's start/stop state matches what it was on
842 * entry.
843 *
844 * Don't kill the port if the softreset is on a port multiplier
845 * target, that would kill all the targets!
846 */
847 if (bootverbose) {
848 kprintf("%s: END SOFTRESET %d prob=%d state=%d\n",
849 PORTNAME(ap), error, ap->ap_probe, ap->ap_state);
850 }
851 if (error) {
852 sili_port_hardstop(ap);
853 /* ap_probe set to failed */
854 } else {
855 ap->ap_probe = ATA_PROBE_NEED_IDENT;
856 ap->ap_pmcount = 1;
857 }
858 crit_exit();
859
860 sili_pwrite(ap, SILI_PREG_SERR, -1);
861 if (bootverbose)
862 kprintf("%s: END SOFTRESET\n", PORTNAME(ap));
863
864 return (error);
865}
866
867/*
868 * This function does a hard reset of the port. Note that the device
869 * connected to the port could still end-up hung. Phy detection is
870 * used to short-cut longer operations.
871 */
872int
873sili_port_hardreset(struct sili_port *ap)
874{
875 u_int32_t data;
876 int error;
877 int loop;
878
879 if (bootverbose)
880 kprintf("%s: START HARDRESET\n", PORTNAME(ap));
881
882 ap->ap_state = AP_S_NORMAL;
883
884 /*
885 * Set SCTL up for any speed restrictions before issuing the
886 * device reset. This may also take us out of an INIT state
887 * (if we were previously in a continuous reset state from
888 * sili_port_listen()).
889 */
890 data = SILI_PREG_SCTL_SPM_NONE |
891 SILI_PREG_SCTL_IPM_NONE |
892 SILI_PREG_SCTL_SPD_NONE |
893 SILI_PREG_SCTL_DET_NONE;
894 if (SiliForceGen1 & (1 << ap->ap_num)) {
895 data &= ~SILI_PREG_SCTL_SPD_NONE;
896 data |= SILI_PREG_SCTL_SPD_GEN1;
897 }
898 sili_pwrite(ap, SILI_PREG_SCTL, data);
899
900 /*
901 * The transition from a continuous COMRESET state from
902 * sili_port_listen() back to device detect can take a
903 * few seconds. It's quite non-deterministic. Most of
904 * the time it takes far less. Use a polling loop to
905 * wait.
906 */
907 loop = 4000;
908 while (loop > 0) {
909 data = sili_pread(ap, SILI_PREG_SSTS);
910 if (data & SILI_PREG_SSTS_DET)
911 break;
912 loop -= sili_os_softsleep();
913 }
914 sili_os_sleep(100);
915
916 /*
917 * Issue Device Reset, give the phy a little time to settle down.
918 *
919 * NOTE: Unlike Port Reset, the port ready signal will not
920 * go active unless a device is established to be on
921 * the port.
922 */
923 sili_pwrite(ap, SILI_PREG_CTL_CLR, SILI_PREG_CTL_PMA);
924 sili_pwrite(ap, SILI_PREG_CTL_CLR, SILI_PREG_CTL_RESUME);
925 sili_pwrite(ap, SILI_PREG_CTL_SET, SILI_PREG_CTL_DEVRESET);
926 if (sili_pwait_clr(ap, SILI_PREG_CTL_SET, SILI_PREG_CTL_DEVRESET)) {
927 kprintf("%s: hardreset failed to clear\n", PORTNAME(ap));
928 }
929 sili_os_sleep(20);
930
931 /*
932 * Try to determine if there is a device on the port.
933 *
934 * Give the device 3/10 second to at least be detected.
935 */
936 loop = 300;
937 while (loop > 0) {
938 data = sili_pread(ap, SILI_PREG_SSTS);
939 if (data & SILI_PREG_SSTS_DET)
940 break;
941 loop -= sili_os_softsleep();
942 }
943 if (loop <= 0) {
944 if (bootverbose) {
945 kprintf("%s: Port appears to be unplugged\n",
946 PORTNAME(ap));
947 }
948 error = ENODEV;
949 goto done;
950 }
951
952 /*
953 * There is something on the port. Give the device 3 seconds
954 * to detect.
955 */
956 if (sili_pwait_eq(ap, 3000, SILI_PREG_SSTS,
957 SILI_PREG_SSTS_DET, SILI_PREG_SSTS_DET_DEV)) {
958 if (bootverbose) {
959 kprintf("%s: Device may be powered down\n",
960 PORTNAME(ap));
961 }
962 error = ENODEV;
963 goto pmdetect;
964 }
965
966 /*
967 * We got something that definitely looks like a device. Give
968 * the device time to send us its first D2H FIS.
969 *
970 * This effectively waits for BSY to clear.
971 */
972 if (sili_pwait_set_to(ap, 3000, SILI_PREG_STATUS,
973 SILI_PREG_STATUS_READY)) {
974 error = EBUSY;
975 } else {
976 error = 0;
977 }
978
979pmdetect:
980 /*
981 * Do the PM port probe regardless of how things turned out above.
982 *
983 * If the PM port probe fails it will return the original error
984 * from above.
985 */
986 if (ap->ap_sc->sc_flags & SILI_F_SPM) {
987 error = sili_pm_port_probe(ap, error);
988 }
989
990done:
991 /*
992 * Finish up
993 */
994 switch(error) {
995 case 0:
996 if (ap->ap_type == ATA_PORT_T_PM)
997 ap->ap_probe = ATA_PROBE_GOOD;
998 else
999 ap->ap_probe = ATA_PROBE_NEED_SOFT_RESET;
1000 break;
1001 case ENODEV:
1002 /*
1003 * No device detected.
1004 */
1005 data = sili_pread(ap, SILI_PREG_SSTS);
1006
1007 switch(data & SATA_PM_SSTS_DET) {
1008 case SILI_PREG_SSTS_DET_DEV_NE:
1009 kprintf("%s: Device not communicating\n",
1010 PORTNAME(ap));
1011 break;
1012 case SILI_PREG_SSTS_DET_OFFLINE:
1013 kprintf("%s: PHY offline\n",
1014 PORTNAME(ap));
1015 break;
1016 default:
1017 kprintf("%s: No device detected\n",
1018 PORTNAME(ap));
1019 break;
1020 }
1021 sili_port_hardstop(ap);
1022 break;
1023 default:
1024 /*
1025 * (EBUSY)
1026 */
1027 kprintf("%s: Device on port is bricked\n",
1028 PORTNAME(ap));
1029 sili_port_hardstop(ap);
1030 break;
1031 }
1032 sili_pwrite(ap, SILI_PREG_SERR, -1);
1033
1034 if (bootverbose)
1035 kprintf("%s: END HARDRESET %d\n", PORTNAME(ap), error);
1036 return (error);
1037}
1038
1039/*
1040 * Hard-stop on hot-swap device removal. See 10.10.1
1041 *
1042 * Place the port in a mode that will allow it to detect hot-swap insertions.
1043 * This is a bit imprecise because just setting up SCTL to DET_INIT doesn't
1044 * seem to do the job.
1045 */
1046void
1047sili_port_hardstop(struct sili_port *ap)
1048{
1049 struct sili_ccb *ccb;
1050 struct ata_port *at;
1051 int i;
1052 int slot;
1053
1054 ap->ap_state = AP_S_FATAL_ERROR;
1055 ap->ap_probe = ATA_PROBE_FAILED;
1056 ap->ap_type = ATA_PORT_T_NONE;
1057
1058 /*
1059 * Clean up AT sub-ports on SATA port.
1060 */
1061 for (i = 0; ap->ap_ata && i < SILI_MAX_PMPORTS; ++i) {
1062 at = &ap->ap_ata[i];
1063 at->at_type = ATA_PORT_T_NONE;
1064 at->at_probe = ATA_PROBE_FAILED;
1065 at->at_features &= ~ATA_PORT_F_READLOG;
1066 }
1067
1068 /*
1069 * Kill the port. Don't bother waiting for it to transition
1070 * back up.
1071 */
1072 sili_pwrite(ap, SILI_PREG_CTL_SET, SILI_PREG_CTL_RESET);
1073 if (sili_pread(ap, SILI_PREG_STATUS) & SILI_PREG_STATUS_READY) {
1074 kprintf("%s: Port will not go into reset\n",
1075 PORTNAME(ap));
1076 }
1077 sili_os_sleep(10);
1078 sili_pwrite(ap, SILI_PREG_CTL_CLR, SILI_PREG_CTL_RESET);
1079
1080 /*
1081 * Turn off port-multiplier control bit
1082 */
1083 sili_pwrite(ap, SILI_PREG_CTL_CLR, SILI_PREG_CTL_PMA);
1084
1085 /*
1086 * Clean up the command list.
1087 */
1088 while (ap->ap_active) {
1089 slot = ffs(ap->ap_active) - 1;
1090 ap->ap_active &= ~(1 << slot);
1091 ap->ap_expired &= ~(1 << slot);
1092 --ap->ap_active_cnt;
1093 ccb = &ap->ap_ccbs[slot];
1094 if (ccb->ccb_xa.flags & ATA_F_TIMEOUT_RUNNING) {
1095 callout_stop(&ccb->ccb_timeout);
1096 ccb->ccb_xa.flags &= ~ATA_F_TIMEOUT_RUNNING;
1097 }
1098 ccb->ccb_xa.flags &= ~(ATA_F_TIMEOUT_DESIRED |
1099 ATA_F_TIMEOUT_EXPIRED);
1100 ccb->ccb_xa.state = ATA_S_TIMEOUT;
1101 ccb->ccb_done(ccb);
1102 ccb->ccb_xa.complete(&ccb->ccb_xa);
1103 }
1104 while ((ccb = TAILQ_FIRST(&ap->ap_ccb_pending)) != NULL) {
1105 TAILQ_REMOVE(&ap->ap_ccb_pending, ccb, ccb_entry);
1106 ccb->ccb_xa.state = ATA_S_TIMEOUT;
1107 ccb->ccb_xa.flags &= ~ATA_F_TIMEOUT_DESIRED;
1108 ccb->ccb_done(ccb);
1109 ccb->ccb_xa.complete(&ccb->ccb_xa);
1110 }
1111 KKASSERT(ap->ap_active_cnt == 0);
1112
1113 /*
1114 * Put the port into a listen mode, we want to get insertion/removal
1115 * events.
1116 */
1117 sili_port_listen(ap);
1118}
1119
1120/*
1121 * Place port into a listen mode for hotplug events only. The port has
1122 * already been reset and the command processor may not be ready due
1123 * to the lack of a device.
1124 */
1125void
1126sili_port_listen(struct sili_port *ap)
1127{
1128 u_int32_t data;
1129
1130#if 1
1131 data = SILI_PREG_SCTL_SPM_NONE |
1132 SILI_PREG_SCTL_IPM_NONE |
1133 SILI_PREG_SCTL_SPD_NONE |
1134 SILI_PREG_SCTL_DET_INIT;
1135 if (SiliForceGen1 & (1 << ap->ap_num)) {
1136 data &= ~SILI_PREG_SCTL_SPD_NONE;
1137 data |= SILI_PREG_SCTL_SPD_GEN1;
1138 }
1139#endif
1140 sili_os_sleep(20);
1141 sili_pwrite(ap, SILI_PREG_SERR, -1);
1142 sili_pwrite(ap, SILI_PREG_INT_ENABLE, SILI_PREG_INT_PHYRDYCHG |
1143 SILI_PREG_INT_DEVEXCHG);
1144}
1145
1146/*
1147 * Figure out what type of device is connected to the port: ATAPI,
1148 * port multiplier, or DISK.
1149 */
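/*
 * For reference: the signature assembled by sili_port_softreset() from
 * the D2H FIS ((lba_high << 24) | (lba_mid << 16) | (lba_low << 8) |
 * sector_count) is normally 0x00000101 for an ATA disk, 0xEB140101 for
 * an ATAPI device and 0x96690101 for a port multiplier, which is why
 * only the top 16 bits are compared below.
 */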
1150int
1151sili_port_signature(struct sili_port *ap, struct ata_port *at, u_int32_t sig)
1152{
1153 if (bootverbose)
1154 kprintf("%s: sig %08x\n", ATANAME(ap, at), sig);
1155 if ((sig & 0xffff0000) == (SATA_SIGNATURE_ATAPI & 0xffff0000)) {
1156 return(ATA_PORT_T_ATAPI);
1157 } else if ((sig & 0xffff0000) ==
1158 (SATA_SIGNATURE_PORT_MULTIPLIER & 0xffff0000)) {
1159 return(ATA_PORT_T_PM);
1160 } else {
1161 return(ATA_PORT_T_DISK);
1162 }
1163}
1164
1165/*
1166 * Load the DMA descriptor table for a CCB's buffer.
1167 *
1168 * NOTE: ATA_F_PIO is auto-selected by the sili part.
1169 */
1170int
1171sili_load_prb(struct sili_ccb *ccb)
1172{
1173 struct sili_port *ap = ccb->ccb_port;
1174 struct sili_softc *sc = ap->ap_sc;
1175 struct ata_xfer *xa = &ccb->ccb_xa;
1176 struct sili_prb *prb = ccb->ccb_prb;
1177 struct sili_sge *sge;
1178 bus_dmamap_t dmap = ccb->ccb_dmamap;
1179 int error;
1180
1181 /*
1182 * Set up the PRB. The PRB contains 2 SGE's (1 if it is an ATAPI
1183 * command). The SGE must be set up to link to the rest of our
1184	 * SGE array, in blocks of four SGEs (a SGE table), starting at prb_sge[0].
1185 */
1186 prb->prb_xfer_count = 0;
1187 prb->prb_control = 0;
1188 prb->prb_override = 0;
1189 sge = (ccb->ccb_xa.flags & ATA_F_PACKET) ?
1190 &prb->prb_sge_packet : &prb->prb_sge_normal;
1191 if (xa->datalen == 0) {
1192 sge->sge_flags = SILI_SGE_FLAGS_TRM | SILI_SGE_FLAGS_DRD;
1193 sge->sge_count = 0;
1194 return (0);
1195 }
1196
1197 if (ccb->ccb_xa.flags & ATA_F_READ)
1198 prb->prb_control |= SILI_PRB_CTRL_READ;
1199 if (ccb->ccb_xa.flags & ATA_F_WRITE)
1200 prb->prb_control |= SILI_PRB_CTRL_WRITE;
1201 sge->sge_flags = SILI_SGE_FLAGS_LNK;
1202 sge->sge_count = 0;
1203 sge->sge_paddr = ccb->ccb_prb_paddr +
1204 offsetof(struct sili_prb, prb_sge[0]);
1205
1206 /*
1207 * Load our sge array.
1208 */
1209 error = bus_dmamap_load(sc->sc_tag_data, dmap,
1210 xa->data, xa->datalen,
1211 sili_load_prb_callback,
1212 ccb,
1213 ((xa->flags & ATA_F_NOWAIT) ?
1214 BUS_DMA_NOWAIT : BUS_DMA_WAITOK));
1215 if (error != 0) {
1216 kprintf("%s: error %d loading dmamap\n", PORTNAME(ap), error);
1217 return (1);
1218 }
1219
1220 bus_dmamap_sync(sc->sc_tag_data, dmap,
1221 (xa->flags & ATA_F_READ) ?
1222 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1223
1224 return (0);
1225}
1226
1227/*
1228 * Callback from BUSDMA system to load the segment list.
1229 *
1230 * The scatter/gather table is loaded by the sili chip in blocks of
1231 * four SGE's. If a continuance is required the last entry in each
1232 * block must point to the next block.
1233 */
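/*
 * Rough sketch of the resulting layout for a 6-segment transfer (every
 * fourth entry is consumed as a link to the next block of four and the
 * final entry is flagged TRM):
 *
 *	prb_sge[0]	data segment 0
 *	prb_sge[1]	data segment 1
 *	prb_sge[2]	data segment 2
 *	prb_sge[3]	LNK -> prb_sge[4]
 *	prb_sge[4]	data segment 3
 *	prb_sge[5]	data segment 4
 *	prb_sge[6]	data segment 5 (TRM)
 */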
1234static
1235void
1236sili_load_prb_callback(void *info, bus_dma_segment_t *segs, int nsegs,
1237 int error)
1238{
1239 struct sili_ccb *ccb = info;
1240 struct sili_sge *sge;
1241 int sgi;
1242
1243 KKASSERT(nsegs <= SILI_MAX_SGET);
1244
1245 sgi = 0;
1246 sge = &ccb->ccb_prb->prb_sge[0];
1247 while (nsegs) {
1248 if ((sgi & 3) == 3) {
1249 sge->sge_paddr = htole64(ccb->ccb_prb_paddr +
1250 offsetof(struct sili_prb,
1251 prb_sge[sgi + 1]));
1252 sge->sge_count = 0;
1253 sge->sge_flags = SILI_SGE_FLAGS_LNK;
1254 } else {
1255 sge->sge_paddr = htole64(segs->ds_addr);
1256 sge->sge_count = htole32(segs->ds_len);
1257 sge->sge_flags = 0;
1258 --nsegs;
1259 ++segs;
1260 }
1261 ++sge;
1262 ++sgi;
1263 }
1264 --sge;
1265 sge->sge_flags |= SILI_SGE_FLAGS_TRM;
1266}
1267
1268void
1269sili_unload_prb(struct sili_ccb *ccb)
1270{
1271 struct sili_port *ap = ccb->ccb_port;
1272 struct sili_softc *sc = ap->ap_sc;
1273 struct ata_xfer *xa = &ccb->ccb_xa;
1274 bus_dmamap_t dmap = ccb->ccb_dmamap;
1275
1276 if (xa->datalen != 0) {
1277 bus_dmamap_sync(sc->sc_tag_data, dmap,
1278 (xa->flags & ATA_F_READ) ?
1279 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1280
1281 bus_dmamap_unload(sc->sc_tag_data, dmap);
1282
1283 if (ccb->ccb_xa.flags & ATA_F_NCQ)
1284 xa->resid = 0;
1285 else
1286 xa->resid = xa->datalen -
1287 le32toh(ccb->ccb_prb->prb_xfer_count);
1288 }
1289}
1290
1291/*
1292 * Start a command and poll for completion.
1293 *
1294 * timeout is in ms and only counts once the command gets on-chip.
1295 *
1296 * Returns ATA_S_* state, compare against ATA_S_COMPLETE to determine
1297 * that no error occurred.
1298 *
1299 * NOTE: If the caller specifies a NULL timeout function the caller is
1300 * responsible for clearing hardware state on failure, but we will
1301 * deal with removing the ccb from any pending queue.
1302 *
1303 * NOTE: NCQ should never be used with this function.
1304 *
1305 * NOTE: If the port is in a failed state and stopped we do not try
1306 * to activate the ccb.
1307 */
1308int
1309sili_poll(struct sili_ccb *ccb, int timeout,
1310 void (*timeout_fn)(struct sili_ccb *))
1311{
1312 struct sili_port *ap = ccb->ccb_port;
1313
1314 if (ccb->ccb_port->ap_state == AP_S_FATAL_ERROR) {
1315 ccb->ccb_xa.state = ATA_S_ERROR;
1316 return(ccb->ccb_xa.state);
1317 }
1318
1319 KKASSERT((ap->ap_expired & (1 << ccb->ccb_slot)) == 0);
1320 sili_start(ccb);
1321
1322 do {
1323 sili_port_intr(ap, 1);
1324 switch(ccb->ccb_xa.state) {
1325 case ATA_S_ONCHIP:
1326 timeout -= sili_os_softsleep();
1327 break;
1328 case ATA_S_PENDING:
1329 /*
1330 * The packet can get stuck on the pending queue
1331 * if the port refuses to come ready. XXX
1332 */
1333#if 0
1334 if (xxx AP_F_EXCLUSIVE_ACCESS)
1335 timeout -= sili_os_softsleep();
1336 else
1337#endif
1338 sili_os_softsleep();
1339 sili_check_active_timeouts(ap);
1340 break;
1341 default:
1342 return (ccb->ccb_xa.state);
1343 }
1344 } while (timeout > 0);
1345
1346 /*
1347 * Don't spew if this is a probe during hard reset
1348 */
1349 if (ap->ap_probe != ATA_PROBE_NEED_HARD_RESET) {
1350 kprintf("%s: Poll timeout slot %d\n",
1351 ATANAME(ap, ccb->ccb_xa.at),
1352 ccb->ccb_slot);
1353 }
1354
1355 timeout_fn(ccb);
1356
1357 return(ccb->ccb_xa.state);
1358}
1359
1360/*
1361 * When polling we have to check if the currently active CCB(s)
1362 * have timed out as the callout will be deadlocked while we
1363 * hold the port lock.
1364 */
1365void
1366sili_check_active_timeouts(struct sili_port *ap)
1367{
1368 struct sili_ccb *ccb;
1369 u_int32_t mask;
1370 int tag;
1371
1372 mask = ap->ap_active;
1373 while (mask) {
1374 tag = ffs(mask) - 1;
1375 mask &= ~(1 << tag);
1376 ccb = &ap->ap_ccbs[tag];
1377 if (ccb->ccb_xa.flags & ATA_F_TIMEOUT_EXPIRED) {
1378 sili_core_timeout(ccb, 0);
1379 }
1380 }
1381}
1382
1383static
1384__inline
1385void
1386sili_start_timeout(struct sili_ccb *ccb)
1387{
1388 if (ccb->ccb_xa.flags & ATA_F_TIMEOUT_DESIRED) {
1389 ccb->ccb_xa.flags |= ATA_F_TIMEOUT_RUNNING;
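		/*
		 * xa.timeout is in milliseconds; convert to callout ticks,
		 * rounding up so a short timeout never truncates to zero.
		 * e.g. a 5ms timeout with hz=100 gives
		 * (5 * 100 + 999) / 1000 = 1 tick rather than 0.
		 */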
1390 callout_reset(&ccb->ccb_timeout,
1391 (ccb->ccb_xa.timeout * hz + 999) / 1000,
1392 sili_ata_cmd_timeout_unserialized, ccb);
1393 }
1394}
1395
1396void
1397sili_start(struct sili_ccb *ccb)
1398{
1399 struct sili_port *ap = ccb->ccb_port;
1400#if 0
1401 struct sili_softc *sc = ap->ap_sc;
1402#endif
1403
1404 KKASSERT(ccb->ccb_xa.state == ATA_S_PENDING);
1405
1406 /*
1407 * Sync our SGE table and PRB
1408 */
1409 bus_dmamap_sync(ap->ap_dmamem_prbs->adm_tag,
1410 ap->ap_dmamem_prbs->adm_map,
1411 BUS_DMASYNC_PREWRITE);
1412
1413 /*
1414 * XXX dmamap for PRB XXX BUS_DMASYNC_PREWRITE
1415 */
1416
1417 /*
1418 * Controller will update shared memory!
1419 * XXX bus_dmamap_sync ... BUS_DMASYNC_PREREAD ...
1420 */
1421 /* Prepare RFIS area for write by controller */
1422
1423 /*
1424	 * There's no point trying to optimize this; it only shaves a few
1425 * nanoseconds so just queue the command and call our generic issue.
1426 */
1427 sili_issue_pending_commands(ap, ccb);
1428}
1429
1430/*
1431 * Wait for all commands to complete processing. We hold the lock so no
1432 * new commands will be queued.
1433 */
1434void
1435sili_exclusive_access(struct sili_port *ap)
1436{
1437 while (ap->ap_active) {
1438 sili_port_intr(ap, 1);
1439 sili_os_softsleep();
1440 }
1441}
1442
1443/*
1444 * If ccb is not NULL enqueue and/or issue it.
1445 *
1446 * If ccb is NULL issue whatever we can from the queue. However, nothing
1447 * new is issued if the exclusive access flag is set or expired ccb's are
1448 * present.
1449 *
1450 * If existing commands are still active (ap_active) we can only
1451 * issue matching new commands.
1452 */
1453void
1454sili_issue_pending_commands(struct sili_port *ap, struct sili_ccb *ccb)
1455{
1456 /*
1457 * Enqueue the ccb.
1458 *
1459 * If just running the queue and in exclusive access mode we
1460 * just return. Also in this case if there are any expired ccb's
1461 * we want to clear the queue so the port can be safely stopped.
1462 *
1463 * XXX sili chip - expiration needs to be per-target if PM supports
1464 * FBSS?
1465 */
1466 if (ccb) {
1467 TAILQ_INSERT_TAIL(&ap->ap_ccb_pending, ccb, ccb_entry);
1468 } else if (ap->ap_expired) {
1469 return;
1470 }
1471
1472 /*
1473 * Pull the next ccb off the queue and run it if possible.
1474 * If the port is not ready to accept commands enable the
1475 * ready interrupt instead of starting a new command.
1476 *
1477 * XXX limit ncqdepth for attached devices behind PM
1478 */
1479 while ((ccb = TAILQ_FIRST(&ap->ap_ccb_pending)) != NULL) {
1480 /*
1481 * Port may be wedged.
1482 */
1483 if ((sili_pread(ap, SILI_PREG_STATUS) &
1484 SILI_PREG_STATUS_READY) == 0) {
1485 kprintf("%s: slot %d NOT READY\n",
1486 ATANAME(ap, ccb->ccb_xa.at), ccb->ccb_slot);
1487 sili_pwrite(ap, SILI_PREG_INT_ENABLE,
1488 SILI_PREG_INT_READY);
1489 break;
1490 }
1491
1492 /*
1493 * Handle exclusivity requirements. ATA_F_EXCLUSIVE is used
1494 * when we may have to access the rfis which is stored in
1495 * the LRAM PRB. Unfortunately reading the LRAM PRB is
1496 * highly problematic, so requests (like PM requests) which
1497 * need to access the rfis use exclusive mode and then
1498 * access the copy made by the port interrupt code back in
1499 * host memory.
1500 */
1501 if (ap->ap_active & ~ap->ap_expired) {
1502 /*
1503			 * There may be multiple ccb's already running.
1504			 * If any are running and ap_run_flags sets
1505 * one of these flags then we know only one is
1506 * running.
1507 *
1508 * XXX Current AUTOSENSE code forces exclusivity
1509 * to simplify the code.
1510 */
1511 if (ap->ap_run_flags &
1512 (ATA_F_EXCLUSIVE | ATA_F_AUTOSENSE)) {
1513 break;
1514 }
1515
1516 /*
1517 * If the ccb we want to run is exclusive and ccb's
1518 * are still active on the port, we can't queue it
1519 * yet.
1520 *
1521 * XXX Current AUTOSENSE code forces exclusivity
1522 * to simplify the code.
1523 */
1524 if (ccb->ccb_xa.flags &
1525 (ATA_F_EXCLUSIVE | ATA_F_AUTOSENSE)) {
1526 break;
1527 }
1528 }
1529
1530 TAILQ_REMOVE(&ap->ap_ccb_pending, ccb, ccb_entry);
1531 ccb->ccb_xa.state = ATA_S_ONCHIP;
1532 ap->ap_active |= 1 << ccb->ccb_slot;
1533 ap->ap_active_cnt++;
1534 ap->ap_run_flags = ccb->ccb_xa.flags;
1535
1536 /*
1537 * We can't use the CMD_FIFO method because it requires us
1538 * building the PRB in the LRAM, and the LRAM is buggy. So
1539 * we use host memory for the PRB.
1540 */
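		/*
		 * Writing the 64-bit physical address of the host-memory
		 * PRB into the slot's command activation register (low
		 * dword first, high dword at offset +4) is what actually
		 * issues the command to the chip.
		 */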
1541 sili_pwrite(ap, SILI_PREG_CMDACT(ccb->ccb_slot),
1542 (u_int32_t)ccb->ccb_prb_paddr);
1543 sili_pwrite(ap, SILI_PREG_CMDACT(ccb->ccb_slot) + 4,
1544 (u_int32_t)(ccb->ccb_prb_paddr >> 32));
1545 /* sili_pwrite(ap, SILI_PREG_CMD_FIFO, ccb->ccb_slot); */
1546 sili_start_timeout(ccb);
1547 }
1548}
1549
1550void
1551sili_intr(void *arg)
1552{
1553 struct sili_softc *sc = arg;
1554 struct sili_port *ap;
1555 u_int32_t gint;
1556 int port;
1557
1558 /*
1559 * Check if the master enable is up, and whether any interrupts are
1560 * pending.
1561 *
1562 * Clear the ints we got.
1563 */
1564 if ((sc->sc_flags & SILI_F_INT_GOOD) == 0)
1565 return;
1566 gint = sili_read(sc, SILI_REG_GINT);
1567 if (gint == 0 || gint == 0xffffffff)
1568 return;
1569 sili_write(sc, SILI_REG_GINT, gint);
1570
1571 /*
1572 * Process interrupts for each port in a non-blocking fashion.
1573 */
1574 while (gint & SILI_REG_GINT_PORTMASK) {
1575 port = ffs(gint) - 1;
1576 ap = sc->sc_ports[port];
1577 if (ap) {
1578 if (sili_os_lock_port_nb(ap) == 0) {
1579 sili_port_intr(ap, 0);
1580 sili_os_unlock_port(ap);
1581 } else {
1582 sili_port_interrupt_redisable(ap);
1583 sili_os_signal_port_thread(ap, AP_SIGF_PORTINT);
1584 }
1585 }
1586 gint &= ~(1 << port);
1587 }
1588}
1589
1590/*
1591 * Core called from helper thread.
1592 */
1593void
1594sili_port_thread_core(struct sili_port *ap, int mask)
1595{
1596 /*
1597 * Process any expired timedouts.
1598 */
1599 sili_os_lock_port(ap);
1600 if (mask & AP_SIGF_TIMEOUT) {
1601 sili_check_active_timeouts(ap);
1602 }
1603
1604 /*
1605 * Process port interrupts which require a higher level of
1606 * intervention.
1607 */
1608 if (mask & AP_SIGF_PORTINT) {
1609 sili_port_intr(ap, 1);
1610 sili_port_interrupt_reenable(ap);
1611 sili_os_unlock_port(ap);
1612 } else {
1613 sili_os_unlock_port(ap);
1614 }
1615}
1616
1617/*
1618 * Core per-port interrupt handler.
1619 *
1620 * If blockable is 0 we cannot call sili_os_sleep() at all and we can only
1621 * deal with normal command completions which do not require blocking.
1622 */
1623void
1624sili_port_intr(struct sili_port *ap, int blockable)
1625{
1626 struct sili_softc *sc = ap->ap_sc;
1627 u_int32_t is;
1628 int slot;
1629 struct sili_ccb *ccb = NULL;
1630 struct ata_port *ccb_at = NULL;
1631 u_int32_t active;
1632 u_int32_t finished;
1633 const u_int32_t blockable_mask = SILI_PREG_IST_PHYRDYCHG |
1634 SILI_PREG_IST_DEVEXCHG |
1635 SILI_PREG_IST_CERROR |
1636 SILI_PREG_IST_DECODE |
1637 SILI_PREG_IST_CRC |
1638 SILI_PREG_IST_HANDSHK;
1639 const u_int32_t fatal_mask = SILI_PREG_IST_PHYRDYCHG |
1640 SILI_PREG_IST_DEVEXCHG |
1641 SILI_PREG_IST_DECODE |
1642 SILI_PREG_IST_CRC |
1643 SILI_PREG_IST_HANDSHK;
1644
1645 enum { NEED_NOTHING, NEED_HOTPLUG_INSERT,
1646 NEED_HOTPLUG_REMOVE } need = NEED_NOTHING;
1647
1648 /*
1649 * NOTE: CCOMPLETE was automatically cleared when we read INT_STATUS.
1650 */
1651 is = sili_pread(ap, SILI_PREG_INT_STATUS);
1652 is &= SILI_PREG_IST_MASK;
1653 if (is & SILI_PREG_IST_CCOMPLETE)
1654 sili_pwrite(ap, SILI_PREG_INT_STATUS, SILI_PREG_IST_CCOMPLETE);
1655
1656 /*
1657 * If we can't block then we can't handle these here. Disable
1658 * the interrupts in question so we don't live-lock, the helper
1659 * thread will re-enable them.
1660 *
1661 * If the port is in a completely failed state we do not want
1662 * to drop through to failed-command-processing if blockable is 0,
1663 * just let the thread deal with it all.
1664 *
1665 * Otherwise we fall through and still handle DHRS and any commands
1666 * which completed normally. Even if we are errored we haven't
1667 * stopped the port yet so CI/SACT are still good.
1668 */
1669 if (blockable == 0) {
1670 if (ap->ap_state == AP_S_FATAL_ERROR) {
1671 sili_port_interrupt_redisable(ap);
1672 sili_os_signal_port_thread(ap, AP_SIGF_PORTINT);
1673 /*is &= ~blockable_mask;*/
1674 return;
1675 }
1676 if (is & blockable_mask) {
1677 sili_port_interrupt_redisable(ap);
1678 sili_os_signal_port_thread(ap, AP_SIGF_PORTINT);
1679 /*is &= ~blockable_mask;*/
1680 return;
1681 }
1682 }
1683
1684 if (is & SILI_PREG_IST_CERROR) {
1685 /*
1686 * Command failed (blockable).
1687 *
1688 * This stops command processing. We can extract the PM
1689 * target from the PMP field in SILI_PREG_CONTEXT. The
1690 * tag is not necessarily valid so don't use that.
1691 *
1692 * We must then expire all CCB's for that target and resume
1693 * processing if any other targets have active commands.
1694 * Particular error codes can be recovered by reading the LOG
1695 * page.
1696 *
1697 * The expire handling code will do the rest, which is
1698 * basically to reset the port once the only active
1699 * commands remaining are all expired.
1700 */
1701 u_int32_t error;
1702 int target;
1703 int resume = 1;
1704
1705 target = (sili_pread(ap, SILI_PREG_CONTEXT) >>
1706 SILI_PREG_CONTEXT_PMPORT_SHIFT) &
1707 SILI_PREG_CONTEXT_PMPORT_MASK;
1708 sili_pwrite(ap, SILI_PREG_INT_STATUS, SILI_PREG_IST_CERROR);
1709 active = ap->ap_active & ~ap->ap_expired;
1710 error = sili_pread(ap, SILI_PREG_CERROR);
1711 kprintf("%s.%d target error %d active=%08x hactive=%08x "
1712 "SERR=%b\n",
1713 PORTNAME(ap), target, error,
1714 active, sili_pread(ap, SILI_PREG_SLOTST),
1715 sili_pread(ap, SILI_PREG_SERR), SILI_PFMT_SERR);
1716
1717 while (active) {
1718 slot = ffs(active) - 1;
1719 ccb = &ap->ap_ccbs[slot];
1720 if ((ccb_at = ccb->ccb_xa.at) == NULL)
1721 ccb_at = &ap->ap_ata[0];
1722 if (target == ccb_at->at_target) {
1723 if ((ccb->ccb_xa.flags & ATA_F_NCQ) &&
1724 (error == SILI_PREG_CERROR_DEVICE ||
1725 error == SILI_PREG_CERROR_SDBERROR)) {
1726 ccb_at->at_features |= ATA_PORT_F_READLOG;
1727 }
1728 if (sili_core_timeout(ccb, 1) == 0)
1729 resume = 0;
1730 }
1731 active &= ~(1 << slot);
1732 }
1733
1734 /*
1735 * Resume will be 0 if the timeout reinited and restarted
1736 * the port. Otherwise we resume the port to allow other
1737 * commands to complete.
1738 */
1739 if (resume)
1740 sili_pwrite(ap, SILI_PREG_CTL_SET, SILI_PREG_CTL_RESUME);
1741 }
1742
1743 /*
1744 * Device notification to us (non-blocking)
1745 *
1746	 * This is interrupt status SILI_PREG_IST_SDB
1747 *
1748 * NOTE! On some parts notification bits can get set without
1749 * generating an interrupt. It is unclear whether this is
1750 * a bug in the PM (sending a DTOH device setbits with 'N' set
1751 * and 'I' not set), or a bug in the host controller.
1752 *
1753 * It only seems to occur under load.
1754 */
1755 if (sc->sc_flags & SILI_F_SSNTF) {
1756 u_int32_t data;
1757 const char *xstr;
1758
1759 data = sili_pread(ap, SILI_PREG_SNTF);
1760 if (is & SILI_PREG_IST_SDB) {
1761 sili_pwrite(ap, SILI_PREG_INT_STATUS,
1762 SILI_PREG_IST_SDB);
1763 is &= ~SILI_PREG_IST_SDB;
1764 xstr = " (no SDBS!)";
1765 } else {
1766 xstr = "";
1767 }
1768 if (data) {
1769 kprintf("%s: NOTIFY %08x%s\n",
1770 PORTNAME(ap), data, xstr);
1771 sili_pwrite(ap, SILI_PREG_SNTF, data);
1772 sili_cam_changed(ap, NULL, -1);
1773 }
1774 }
1775
1776 /*
1777 * Port change (hot-plug) (blockable).
1778 *
1779 * A PCS interrupt will occur on hot-plug once communication is
1780 * established.
1781 *
1782 * A PRCS interrupt will occur on hot-unplug (and possibly also
1783 * on hot-plug).
1784 *
1785 * XXX We can then check the CPS (Cold Presence State) bit, if
1786 * supported, to determine if a device is plugged in or not and do
1787 * the right thing.
1788 *
1789 * WARNING: A PCS interrupt is cleared by clearing DIAG_X, and
1790 * can also occur if an unsolicited COMINIT is received.
1791 * If this occurs command processing is automatically
1792 * stopped (CR goes inactive) and the port must be stopped
1793 * and restarted.
1794 */
1795 if (is & (SILI_PREG_IST_PHYRDYCHG | SILI_PREG_IST_DEVEXCHG)) {
1796 /* XXX */
1797 sili_pwrite(ap, SILI_PREG_SERR,
1798 (SILI_PREG_SERR_DIAG_N | SILI_PREG_SERR_DIAG_X));
1799 sili_pwrite(ap, SILI_PREG_INT_STATUS,
1800 is & (SILI_PREG_IST_PHYRDYCHG | SILI_PREG_IST_DEVEXCHG));
1801
1802 is &= ~(SILI_PREG_IST_PHYRDYCHG | SILI_PREG_IST_DEVEXCHG);
1803 kprintf("%s: Port change\n", PORTNAME(ap));
1804
1805 switch (sili_pread(ap, SILI_PREG_SSTS) & SILI_PREG_SSTS_DET) {
1806 case SILI_PREG_SSTS_DET_DEV:
1807 if (ap->ap_type == ATA_PORT_T_NONE &&
1808 ap->ap_probe == ATA_PROBE_FAILED) {
1809 need = NEED_HOTPLUG_INSERT;
1810 goto fatal;
1811 }
1812 break;
1813 default:
1814 kprintf("%s: Device lost\n", PORTNAME(ap));
1815 if (ap->ap_type != ATA_PORT_T_NONE) {
1816 need = NEED_HOTPLUG_REMOVE;
1817 goto fatal;
1818 }
1819 break;
1820 }
1821 }
1822
1823 /*
1824 * Check for remaining errors - they are fatal. (blockable)
1825 */
1826 if (is & fatal_mask) {
1827 u_int32_t serr;
1828
1829 sili_pwrite(ap, SILI_PREG_INT_STATUS, is & fatal_mask);
1830
1831 serr = sili_pread(ap, SILI_PREG_SERR);
1832 kprintf("%s: Unrecoverable errors (IS: %b, SERR: %b), "
1833 "disabling port.\n",
1834 PORTNAME(ap),
1835 is, SILI_PFMT_INT_STATUS,
1836 serr, SILI_PFMT_SERR
1837 );
1838 is &= ~fatal_mask;
1839 /* XXX try recovery first */
1840 goto fatal;
1841 }
1842
1843 /*
1844 * Fail all outstanding commands if we know the port won't recover.
1845 *
1846 * We may have a ccb_at if the failed command is known and was
1847 * being sent to a device over a port multiplier (PM). In this
1848 * case if the port itself has not completely failed we fail just
1849 * the commands related to that target.
1850 */
1851 if (ap->ap_state == AP_S_FATAL_ERROR &&
1852 (ap->ap_active & ~ap->ap_expired)) {
1853 kprintf("%s: Fatal port error, expiring %08x\n",
1854 PORTNAME(ap), ap->ap_active & ~ap->ap_expired);
1855fatal:
1856 ap->ap_state = AP_S_FATAL_ERROR;
1857
1858 /*
1859 * Error all the active slots. If running across a PM
1860 * try to error out just the slots related to the target.
1861 */
1862 active = ap->ap_active & ~ap->ap_expired;
1863
1864 while (active) {
1865 slot = ffs(active) - 1;
1866 active &= ~(1 << slot);
1867 ccb = &ap->ap_ccbs[slot];
1868 sili_core_timeout(ccb, 1);
1869 }
1870 }
1871
1872 /*
1873 * CCB completion (non blocking).
1874 *
1875 * CCB completion is detected by noticing the slot bit in
1876 * the port slot status register has cleared while the bit
1877 * is still set in our ap_active variable.
1878 *
1879 * When completing expired events we must remember to reinit
1880 * the port once everything is clear.
1881 *
1882 * Due to a single-level recursion when reading the log page,
1883 * it is possible for the slot to already have been cleared
1884 * for some expired tags, do not include expired tags in
1885 * the list.
1886 */
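	/*
	 * e.g. ap_active = 0x0013 with SLOTST = 0x0010 means slots 0 and 1
	 * just completed while slot 4 is still running.
	 */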
1887 active = ap->ap_active & ~sili_pread(ap, SILI_PREG_SLOTST);
1888 active &= ~ap->ap_expired;
1889
1890 finished = active;
1891 while (active) {
1892 slot = ffs(active) - 1;
1893 ccb = &ap->ap_ccbs[slot];
1894
1895 DPRINTF(SILI_D_INTR, "%s: slot %d is complete%s\n",
1896 PORTNAME(ap), slot, ccb->ccb_xa.state == ATA_S_ERROR ?
1897 " (error)" : "");
1898
1899 active &= ~(1 << slot);
1900
1901 /*
1902 * XXX sync POSTREAD for return data?
1903 */
1904 ap->ap_active &= ~(1 << ccb->ccb_slot);
1905 --ap->ap_active_cnt;
1906
1907 /*
1908 * Complete the ccb. If the ccb was marked expired it
1909 * may or may not have been cleared from the port,
1910 * make sure we mark it as having timed out.
1911 *
1912 * In a normal completion if AUTOSENSE is set we copy
1913 * the PRB LRAM rfis back to the rfis in host-memory.
1914 *
1915 * XXX Currently AUTOSENSE also forces exclusivity so we
1916 * can safely work around a hardware bug when reading
1917 * the LRAM.
1918 */
1919 if (ap->ap_expired & (1 << ccb->ccb_slot)) {
1920 ap->ap_expired &= ~(1 << ccb->ccb_slot);
1921 ccb->ccb_xa.state = ATA_S_TIMEOUT;
1922 ccb->ccb_done(ccb);
1923 ccb->ccb_xa.complete(&ccb->ccb_xa);
1924 } else {
1925 if (ccb->ccb_xa.state == ATA_S_ONCHIP) {
1926 ccb->ccb_xa.state = ATA_S_COMPLETE;
1927 if (ccb->ccb_xa.flags & ATA_F_AUTOSENSE) {
1928 memcpy(ccb->ccb_xa.rfis,
1929 &ccb->ccb_prb_lram->prb_d2h,
1930 sizeof(ccb->ccb_prb_lram->prb_d2h));
1931 if (ccb->ccb_xa.state == ATA_S_TIMEOUT)
1932 ccb->ccb_xa.state = ATA_S_ERROR;
1933 }
1934 }
1935 ccb->ccb_done(ccb);
1936 }
1937 }
1938 if (is & SILI_PREG_IST_READY) {
1939 is &= ~SILI_PREG_IST_READY;
1940 sili_pwrite(ap, SILI_PREG_INT_DISABLE, SILI_PREG_INT_READY);
1941 sili_pwrite(ap, SILI_PREG_INT_STATUS, SILI_PREG_IST_READY);
1942 }
1943
1944 /*
1945 * If we had expired commands and were waiting for
1946 * remaining commands to complete, and they have now
1947 * completed, we can reinit the port.
1948 *
1949 * This will also clean out the expired commands.
1950 * The timeout code also calls sili_port_reinit() if
1951 * the only commands remaining after a timeout are all
1952 * now expired commands.
1953 *
1954	 * Otherwise just issue any remaining pending commands.
1955 */
1956 if (ap->ap_expired && ap->ap_active == ap->ap_expired) {
1957 if (finished)
1958 sili_port_reinit(ap);
1959 } else {
1960 sili_issue_pending_commands(ap, NULL);
1961 }
1962
1963 /*
1964	 * Cleanup.  'need' will not be set if we are non-blocking.
1965 */
1966 switch(need) {
1967 case NEED_HOTPLUG_INSERT:
1968 /*
1969		 * A hot-plug insertion event has occurred and all
1970 * outstanding commands have already been revoked.
1971 *
1972 * Don't recurse if this occurs while we are
1973 * resetting the port.
1974 *
1975 * Place the port in a continuous COMRESET state
1976 * until the INIT code gets to it.
1977 */
1978 kprintf("%s: HOTPLUG - Device inserted\n",
1979 PORTNAME(ap));
1980 ap->ap_probe = ATA_PROBE_NEED_INIT;
1981 sili_cam_changed(ap, NULL, -1);
1982 break;
1983 case NEED_HOTPLUG_REMOVE:
1984 /*
1985		 * A hot-plug removal event has occurred and all
1986 * outstanding commands have already been revoked.
1987 *
1988 * Don't recurse if this occurs while we are
1989 * resetting the port.
1990 */
1991 kprintf("%s: HOTPLUG - Device removed\n",
1992 PORTNAME(ap));
1993 sili_port_hardstop(ap);
1994 /* ap_probe set to failed */
1995 sili_cam_changed(ap, NULL, -1);
1996 break;
1997 default:
1998 break;
1999 }
2000}
2001
2002struct sili_ccb *
2003sili_get_ccb(struct sili_port *ap)
2004{
2005 struct sili_ccb *ccb;
2006
2007 lockmgr(&ap->ap_ccb_lock, LK_EXCLUSIVE);
2008 ccb = TAILQ_FIRST(&ap->ap_ccb_free);
2009 if (ccb != NULL) {
2010 KKASSERT(ccb->ccb_xa.state == ATA_S_PUT);
2011 TAILQ_REMOVE(&ap->ap_ccb_free, ccb, ccb_entry);
2012 ccb->ccb_xa.state = ATA_S_SETUP;
2013 ccb->ccb_xa.at = NULL;
2014 }
2015 lockmgr(&ap->ap_ccb_lock, LK_RELEASE);
2016
2017 return (ccb);
2018}
2019
2020void
2021sili_put_ccb(struct sili_ccb *ccb)
2022{
2023 struct sili_port *ap = ccb->ccb_port;
2024
2025 lockmgr(&ap->ap_ccb_lock, LK_EXCLUSIVE);
2026 ccb->ccb_xa.state = ATA_S_PUT;
2027 TAILQ_INSERT_TAIL(&ap->ap_ccb_free, ccb, ccb_entry);
2028 lockmgr(&ap->ap_ccb_lock, LK_RELEASE);
2029}
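
/*
 * Example (disabled sketch, not compiled): the expected pairing of
 * sili_get_ccb()/sili_put_ccb().  A CCB comes off the per-port free
 * list in ATA_S_SETUP state and must be returned when the caller is
 * done with it.  Higher level code normally uses sili_ata_get_xfer()
 * and sili_ata_put_xfer() (below) rather than calling these directly;
 * "ap" is assumed to be the caller's struct sili_port.
 */
#if 0
	struct sili_ccb *ccb;

	ccb = sili_get_ccb(ap);		/* NULL if the free list is empty */
	if (ccb != NULL) {
		/* ... fill in ccb->ccb_xa / ccb->ccb_prb and issue it ... */
		sili_put_ccb(ccb);	/* back on the free list, ATA_S_PUT */
	}
#endif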
2030
2031struct sili_ccb *
2032sili_get_err_ccb(struct sili_port *ap)
2033{
2034 struct sili_ccb *err_ccb;
2035
2036 KKASSERT((ap->ap_flags & AP_F_ERR_CCB_RESERVED) == 0);
2037 ap->ap_flags |= AP_F_ERR_CCB_RESERVED;
2038
2039 /*
2040	 * Grab the CCB reserved for error recovery.  This should never fail,
2041	 * as one is set aside for the port when it is initialized.
2042 */
2043 err_ccb = ap->ap_err_ccb;
2044 KKASSERT(err_ccb != NULL);
2045 err_ccb->ccb_xa.flags = 0;
2046 err_ccb->ccb_done = sili_empty_done;
2047
2048 return err_ccb;
2049}
2050
2051void
2052sili_put_err_ccb(struct sili_ccb *ccb)
2053{
2054 struct sili_port *ap = ccb->ccb_port;
2055
2056 KKASSERT((ap->ap_flags & AP_F_ERR_CCB_RESERVED) != 0);
2057
2058 KKASSERT(ccb == ap->ap_err_ccb);
2059
2060 ap->ap_flags &= ~AP_F_ERR_CCB_RESERVED;
2061}
2062
2063/*
2064 * Read log page to get NCQ error.
2065 *
2066 * This function returns nothing; a failed read is only reported via kprintf.
2067 */
2068void
2069sili_port_read_ncq_error(struct sili_port *ap, int target)
2070{
2071 struct sili_ccb *ccb;
2072 struct ata_fis_h2d *fis;
2073 int status;
2074
2075 DPRINTF(SILI_D_VERBOSE, "%s: read log page\n", PORTNAME(ap));
2076
2077 /* Prep error CCB for READ LOG EXT, page 10h, 1 sector. */
2078 ccb = sili_get_err_ccb(ap);
2079 ccb->ccb_done = sili_empty_done;
2080 ccb->ccb_xa.flags = ATA_F_NOWAIT | ATA_F_READ | ATA_F_POLL;
2081 ccb->ccb_xa.data = ap->ap_err_scratch;
2082 ccb->ccb_xa.datalen = 512;
2083 ccb->ccb_xa.complete = sili_dummy_done;
2084 ccb->ccb_xa.at = &ap->ap_ata[target];
2085 fis = &ccb->ccb_prb->prb_h2d;
2086 bzero(fis, sizeof(*fis));
2087
2088 fis->type = ATA_FIS_TYPE_H2D;
2089 fis->flags = ATA_H2D_FLAGS_CMD | target;
2090 fis->command = ATA_C_READ_LOG_EXT;
2091 fis->lba_low = 0x10; /* queued error log page (10h) */
2092 fis->sector_count = 1; /* number of sectors (1) */
2093 fis->sector_count_exp = 0;
2094 fis->lba_mid = 0; /* starting offset */
2095 fis->lba_mid_exp = 0;
2096 fis->device = 0;
2097
2098 /*
2099 * NOTE: Must use sili_quick_timeout() because we hold the err_ccb
2100 */
2101 if (sili_load_prb(ccb) != 0) {
2102 status = ATA_S_ERROR;
2103 } else {
2104 ccb->ccb_xa.state = ATA_S_PENDING;
2105 status = sili_poll(ccb, 1000, sili_quick_timeout);
2106 }
2107
2108 /*
2109 * Just spew if it fails, there isn't much we can do at this point.
2110 */
2111 if (status != ATA_S_COMPLETE) {
2112 kprintf("%s: log page read failed, slot %d was still active.\n",
2113 ATANAME(ap, ccb->ccb_xa.at), ccb->ccb_slot);
2114 }
2115
2116 /* Done with the error CCB now. */
2117 sili_unload_prb(ccb);
2118 sili_put_err_ccb(ccb);
2119
2120 /* Extract failed register set and tags from the scratch space. */
2121 if (status == ATA_S_COMPLETE) {
2122 struct ata_log_page_10h *log;
2123 int err_slot;
2124
2125 log = (struct ata_log_page_10h *)ap->ap_err_scratch;
2126 if (log->err_regs.type & ATA_LOG_10H_TYPE_NOTQUEUED) {
2127 /*
2128 * Not queued bit was set - wasn't an NCQ error?
2129 *
2130 * XXX This bit seems to be set a lot even for NCQ
2131 * errors?
2132 */
2133 } else {
2134 /*
2135 * Copy back the log record as a D2H register FIS.
2136 */
2137 err_slot = log->err_regs.type &
2138 ATA_LOG_10H_TYPE_TAG_MASK;
2139 ccb = &ap->ap_ccbs[err_slot];
2140 if (ap->ap_expired & (1 << ccb->ccb_slot)) {
2141 kprintf("%s: read NCQ error page slot=%d\n",
2142 ATANAME(ap, ccb->ccb_xa.at), err_slot
2143 );
2144 memcpy(&ccb->ccb_prb->prb_d2h, &log->err_regs,
2145 sizeof(struct ata_fis_d2h));
2146 ccb->ccb_prb->prb_d2h.type = ATA_FIS_TYPE_D2H;
2147 ccb->ccb_prb->prb_d2h.flags = 0;
2148 if (ccb->ccb_xa.state == ATA_S_TIMEOUT)
2149 ccb->ccb_xa.state = ATA_S_ERROR;
2150 } else {
2151 kprintf("%s: read NCQ error page slot=%d, "
2152 "slot does not match any cmds\n",
2153 ATANAME(ccb->ccb_port, ccb->ccb_xa.at),
2154 err_slot
2155 );
2156 }
2157 }
2158 }
2159}
2160
2161/*
2162 * Allocate memory for various structures DMA'd by hardware.  The maximum
2163 * number of segments for these tags is 1 so the DMA memory will have a
2164 * single physical base address.
2165 */
2166struct sili_dmamem *
2167sili_dmamem_alloc(struct sili_softc *sc, bus_dma_tag_t tag)
2168{
2169 struct sili_dmamem *adm;
2170 int error;
2171
2172 adm = kmalloc(sizeof(*adm), M_DEVBUF, M_INTWAIT | M_ZERO);
2173
2174 error = bus_dmamem_alloc(tag, (void **)&adm->adm_kva,
2175 BUS_DMA_ZERO, &adm->adm_map);
2176 if (error == 0) {
2177 adm->adm_tag = tag;
2178 error = bus_dmamap_load(tag, adm->adm_map,
2179 adm->adm_kva,
2180 bus_dma_tag_getmaxsize(tag),
2181 sili_dmamem_saveseg, &adm->adm_busaddr,
2182 0);
2183 }
2184 if (error) {
2185 if (adm->adm_map) {
2186 bus_dmamap_destroy(tag, adm->adm_map);
2187 adm->adm_map = NULL;
2188 adm->adm_tag = NULL;
2189 adm->adm_kva = NULL;
2190 }
2191 kfree(adm, M_DEVBUF);
2192 adm = NULL;
2193 }
2194 return (adm);
2195}
2196
2197static
2198void
2199sili_dmamem_saveseg(void *info, bus_dma_segment_t *segs, int nsegs, int error)
2200{
2201 KKASSERT(error == 0);
2202 KKASSERT(nsegs == 1);
2203 *(bus_addr_t *)info = segs->ds_addr;
2204}
2205
2206
2207void
2208sili_dmamem_free(struct sili_softc *sc, struct sili_dmamem *adm)
2209{
2210 if (adm->adm_map) {
2211 bus_dmamap_unload(adm->adm_tag, adm->adm_map);
2212 bus_dmamap_destroy(adm->adm_tag, adm->adm_map);
2213 adm->adm_map = NULL;
2214 adm->adm_tag = NULL;
2215 adm->adm_kva = NULL;
2216 }
2217 kfree(adm, M_DEVBUF);
2218}
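
/*
 * Example (disabled sketch, not compiled): allocating and releasing a
 * DMA area with the helpers above.  Because these tags allow only one
 * segment, adm_kva and adm_busaddr describe a single contiguous region.
 * "some_tag" stands in for whichever bus_dma tag the caller created;
 * the driver's real tags are set up at attach time.
 */
#if 0
	struct sili_dmamem *adm;

	adm = sili_dmamem_alloc(sc, some_tag);
	if (adm != NULL) {
		/*
		 * adm->adm_kva is the mapped kernel address and
		 * adm->adm_busaddr the bus address programmed into the
		 * controller.
		 */
		sili_dmamem_free(sc, adm);
	}
#endif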
2219
2220u_int32_t
2221sili_read(struct sili_softc *sc, bus_size_t r)
2222{
2223 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
2224 BUS_SPACE_BARRIER_READ);
2225 return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, r));
2226}
2227
2228void
2229sili_write(struct sili_softc *sc, bus_size_t r, u_int32_t v)
2230{
2231 bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
2232 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
2233 BUS_SPACE_BARRIER_WRITE);
2234}
2235
2236u_int32_t
2237sili_pread(struct sili_port *ap, bus_size_t r)
2238{
2239 bus_space_barrier(ap->ap_sc->sc_iot, ap->ap_ioh, r, 4,
2240 BUS_SPACE_BARRIER_READ);
2241 return (bus_space_read_4(ap->ap_sc->sc_iot, ap->ap_ioh, r));
2242}
2243
2244void
2245sili_pwrite(struct sili_port *ap, bus_size_t r, u_int32_t v)
2246{
2247 bus_space_write_4(ap->ap_sc->sc_iot, ap->ap_ioh, r, v);
2248 bus_space_barrier(ap->ap_sc->sc_iot, ap->ap_ioh, r, 4,
2249 BUS_SPACE_BARRIER_WRITE);
2250}
2251
2252/*
2253 * Wait up to (timeout) milliseconds for the masked port register to
2254 * match the target.
2255 *
2256 * Spins hard for roughly the first 100uS, then polls using soft sleeps.
2257 */
2258int
2259sili_pwait_eq(struct sili_port *ap, int timeout,
2260 bus_size_t r, u_int32_t mask, u_int32_t target)
2261{
2262 int t;
2263
2264 /*
2265 * Loop hard up to 100uS
2266 */
2267 for (t = 0; t < 100; ++t) {
2268 if ((sili_pread(ap, r) & mask) == target)
2269 return (0);
2270 sili_os_hardsleep(1); /* us */
2271 }
2272
2273 do {
2274 timeout -= sili_os_softsleep();
2275 if ((sili_pread(ap, r) & mask) == target)
2276 return (0);
2277 } while (timeout > 0);
2278 return (1);
2279}
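
/*
 * Example (disabled sketch, not compiled): using sili_pwait_eq() to
 * wait up to one second for a command slot to clear in the port slot
 * status register.  Only names already used in this file appear here;
 * the surrounding error handling is elided.
 */
#if 0
	if (sili_pwait_eq(ap, 1000, SILI_PREG_SLOTST,
			  (1 << ccb->ccb_slot), 0)) {
		kprintf("%s: slot %d did not clear\n",
			PORTNAME(ap), ccb->ccb_slot);
	}
#endif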
2280
2281int
2282sili_wait_ne(struct sili_softc *sc, bus_size_t r, u_int32_t mask,
2283 u_int32_t target)
2284{
2285 int t;
2286
2287 /*
2288 * Loop hard up to 100uS
2289 */
2290 for (t = 0; t < 100; ++t) {
2291 if ((sili_read(sc, r) & mask) != target)
2292 return (0);
2293 sili_os_hardsleep(1); /* us */
2294 }
2295
2296 /*
2297	 * And up to one second the slow way
2298 */
2299 t = 1000;
2300 do {
2301 t -= sili_os_softsleep();
2302 if ((sili_read(sc, r) & mask) != target)
2303 return (0);
2304 } while (t > 0);
2305
2306 return (1);
2307}
2308
2309
2310/*
2311 * Acquire an ata transfer.
2312 *
2313 * Pass a NULL at for direct-attached transfers, and a non-NULL at for
2314 * targets that go through the port multiplier.
2315 */
2316struct ata_xfer *
2317sili_ata_get_xfer(struct sili_port *ap, struct ata_port *at)
2318{
2319 struct sili_ccb *ccb;
2320
2321 ccb = sili_get_ccb(ap);
2322 if (ccb == NULL) {
2323 DPRINTF(SILI_D_XFER, "%s: sili_ata_get_xfer: NULL ccb\n",
2324 PORTNAME(ap));
2325 return (NULL);
2326 }
2327
2328 DPRINTF(SILI_D_XFER, "%s: sili_ata_get_xfer got slot %d\n",
2329 PORTNAME(ap), ccb->ccb_slot);
2330
2331 bzero(ccb->ccb_xa.fis, sizeof(*ccb->ccb_xa.fis));
2332 ccb->ccb_xa.at = at;
2333 ccb->ccb_xa.fis->type = ATA_FIS_TYPE_H2D;
2334
2335 return (&ccb->ccb_xa);
2336}
2337
2338void
2339sili_ata_put_xfer(struct ata_xfer *xa)
2340{
2341 struct sili_ccb *ccb = (struct sili_ccb *)xa;
2342
2343 DPRINTF(SILI_D_XFER, "sili_ata_put_xfer slot %d\n", ccb->ccb_slot);
2344
2345 sili_put_ccb(ccb);
2346}
2347
2348int
2349sili_ata_cmd(struct ata_xfer *xa)
2350{
2351 struct sili_ccb *ccb = (struct sili_ccb *)xa;
2352
2353 KKASSERT(xa->state == ATA_S_SETUP);
2354
2355 if (ccb->ccb_port->ap_state == AP_S_FATAL_ERROR)
2356 goto failcmd;
2357#if 0
2358 kprintf("%s: started std command %b ccb %d ccb_at %p %d\n",
2359 ATANAME(ccb->ccb_port, ccb->ccb_xa.at),
2360 sili_pread(ccb->ccb_port, SILI_PREG_CMD), SILI_PFMT_CMD,
2361 ccb->ccb_slot,
2362 ccb->ccb_xa.at,
2363 ccb->ccb_xa.at ? ccb->ccb_xa.at->at_target : -1);
2364#endif
2365
2366 ccb->ccb_done = sili_ata_cmd_done;
2367
2368 if (sili_load_prb(ccb) != 0)
2369 goto failcmd;
2370
2371 xa->state = ATA_S_PENDING;
2372
2373 if (xa->flags & ATA_F_POLL)
2374 return (sili_poll(ccb, xa->timeout, sili_ata_cmd_timeout));
2375
2376 crit_enter();
2377 KKASSERT((xa->flags & ATA_F_TIMEOUT_EXPIRED) == 0);
2378 xa->flags |= ATA_F_TIMEOUT_DESIRED;
2379 sili_start(ccb);
2380 crit_exit();
2381 return (xa->state);
2382
2383failcmd:
2384 crit_enter();
2385 xa->state = ATA_S_ERROR;
2386 xa->complete(xa);
2387 crit_exit();
2388 return (ATA_S_ERROR);
2389}
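
/*
 * Example (disabled sketch, not compiled): issuing a polled command
 * through the ata_xfer interface, mirroring the READ LOG EXT setup in
 * sili_port_read_ncq_error() above.  All field, flag, and helper names
 * are taken from this file; the port's error scratch buffer is reused
 * here purely for illustration and most error handling is elided.
 */
#if 0
	struct ata_xfer *xa;

	xa = sili_ata_get_xfer(ap, NULL);	/* NULL at = direct-attached */
	if (xa != NULL) {
		xa->fis->command = ATA_C_READ_LOG_EXT;
		xa->fis->flags = ATA_H2D_FLAGS_CMD;
		xa->fis->lba_low = 0x10;	/* queued error log page */
		xa->fis->sector_count = 1;
		xa->data = ap->ap_err_scratch;
		xa->datalen = 512;
		xa->flags = ATA_F_READ | ATA_F_POLL;
		xa->timeout = 1000;		/* milliseconds */
		xa->complete = sili_dummy_done;
		if (sili_ata_cmd(xa) != ATA_S_COMPLETE)
			kprintf("%s: polled command failed\n", PORTNAME(ap));
		sili_ata_put_xfer(xa);
	}
#endif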
2390
2391static void
2392sili_ata_cmd_done(struct sili_ccb *ccb)
2393{
2394 struct ata_xfer *xa = &ccb->ccb_xa;
2395
2396 /*
2397 * NOTE: callout does not lock port and may race us modifying
2398	 * the flags, so make sure it's stopped.
2399 */
2400 if (xa->flags & ATA_F_TIMEOUT_RUNNING) {
2401 callout_stop(&ccb->ccb_timeout);
2402 xa->flags &= ~ATA_F_TIMEOUT_RUNNING;
2403 }
2404 xa->flags &= ~(ATA_F_TIMEOUT_DESIRED | ATA_F_TIMEOUT_EXPIRED);
2405
2406 KKASSERT(xa->state != ATA_S_ONCHIP);
2407 sili_unload_prb(ccb);
2408
2409 if (xa->state != ATA_S_TIMEOUT)
2410 xa->complete(xa);
2411}
2412
2413/*
2414 * Timeout from callout, MPSAFE - nothing can mess with the CCB's flags
2415 * while the callout is running.
2416 *
2417 * We can't safely get the port lock here or delay, as that could block
2418 * the callout thread.
2419 */
2420static void
2421sili_ata_cmd_timeout_unserialized(void *arg)
2422{
2423 struct sili_ccb *ccb = arg;
2424 struct sili_port *ap = ccb->ccb_port;
2425
2426 ccb->ccb_xa.flags &= ~ATA_F_TIMEOUT_RUNNING;
2427 ccb->ccb_xa.flags |= ATA_F_TIMEOUT_EXPIRED;
2428 sili_os_signal_port_thread(ap, AP_SIGF_TIMEOUT);
2429}
2430
2431void
2432sili_ata_cmd_timeout(struct sili_ccb *ccb)
2433{
2434 sili_core_timeout(ccb, 0);
2435}
2436
2437/*
2438 * Timeout code, typically called when the port command processor is running.
2439 *
2440 * Returns 0 if all timeout processing completed, non-zero if it is still
2441 * in progress.
2442 */
2443static
2444int
2445sili_core_timeout(struct sili_ccb *ccb, int really_error)
2446{
2447 struct ata_xfer *xa = &ccb->ccb_xa;
2448 struct sili_port *ap = ccb->ccb_port;
2449 struct ata_port *at;
2450
2451 at = ccb->ccb_xa.at;
2452
2453 kprintf("%s: CMD %s state=%d slot=%d\n"
2454 "\t active=%08x\n"
2455 "\texpired=%08x\n"
2456 "\thactive=%08x\n",
2457 ATANAME(ap, at),
2458 (really_error ? "ERROR" : "TIMEOUT"),
2459 ccb->ccb_xa.state, ccb->ccb_slot,
2460 ap->ap_active,
2461 ap->ap_expired,
2462 sili_pread(ap, SILI_PREG_SLOTST)
2463 );
2464
2465 /*
2466 * NOTE: Timeout will not be running if the command was polled.
2467 * If we got here at least one of these flags should be set.
2468 *
2469 * However, it might be running if we are called from the
2470 * interrupt error handling code.
2471 */
2472 KKASSERT(xa->flags & (ATA_F_POLL | ATA_F_TIMEOUT_DESIRED |
2473 ATA_F_TIMEOUT_RUNNING));
2474 if (xa->flags & ATA_F_TIMEOUT_RUNNING) {
2475 callout_stop(&ccb->ccb_timeout);
2476 xa->flags &= ~ATA_F_TIMEOUT_RUNNING;
2477 }
2478 xa->flags &= ~ATA_F_TIMEOUT_EXPIRED;
2479
2480 if (ccb->ccb_xa.state == ATA_S_PENDING) {
2481 TAILQ_REMOVE(&ap->ap_ccb_pending, ccb, ccb_entry);
2482 ccb->ccb_xa.state = ATA_S_TIMEOUT;
2483 ccb->ccb_done(ccb);
2484 xa->complete(xa);
2485 sili_issue_pending_commands(ap, NULL);
2486 return(1);
2487 }
2488 if (ccb->ccb_xa.state != ATA_S_ONCHIP) {
2489 kprintf("%s: Unexpected state during timeout: %d\n",
2490 ATANAME(ap, at), ccb->ccb_xa.state);
2491 return(1);
2492 }
2493
2494 /*
2495 * We can't process timeouts while other commands are running.
2496 */
2497 ap->ap_expired |= 1 << ccb->ccb_slot;
2498
2499 if (ap->ap_active != ap->ap_expired) {
2500		kprintf("%s: Deferred timeout until it's safe, slot %d\n",
2501 ATANAME(ap, at), ccb->ccb_slot);
2502 return(1);
2503 }
2504
2505 /*
2506 * We have to issue a Port reinit. We don't read an error log
2507 * page for timeouts. Reiniting the port will clear all pending
2508 * commands.
2509 */
2510 sili_port_reinit(ap);
2511 return(0);
2512}
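
/*
 * Worked example (hypothetical values): if slots 0 and 3 are active
 * (ap_active = 0x0009) and slot 3 times out first, ap_expired becomes
 * 0x0008 and the reinit is deferred.  Once slot 0 completes or also
 * times out, ap_active == ap_expired and either the interrupt
 * completion path or this function calls sili_port_reinit() to clean
 * out the expired commands.
 */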
2513
2514/*
2515 * Used by the softreset, pm_port_probe, and read_ncq_error only, in very
2516 * specialized, controlled circumstances.
2517 */
2518void
2519sili_quick_timeout(struct sili_ccb *ccb)
2520{
2521 struct sili_port *ap = ccb->ccb_port;
2522
2523 switch (ccb->ccb_xa.state) {
2524 case ATA_S_PENDING:
2525 TAILQ_REMOVE(&ap->ap_ccb_pending, ccb, ccb_entry);
2526 ccb->ccb_xa.state = ATA_S_TIMEOUT;
2527 break;
2528 case ATA_S_ONCHIP:
2529 KKASSERT((ap->ap_active & ~ap->ap_expired) ==
2530 (1 << ccb->ccb_slot));
2531 ccb->ccb_xa.state = ATA_S_TIMEOUT;
2532 ap->ap_active &= ~(1 << ccb->ccb_slot);
2533 KKASSERT(ap->ap_active_cnt > 0);
2534 --ap->ap_active_cnt;
2535 sili_port_reinit(ap);
2536 break;
2537 default:
2538 panic("%s: sili_quick_timeout: ccb in bad state %d",
2539 ATANAME(ap, ccb->ccb_xa.at), ccb->ccb_xa.state);
2540 }
2541}
2542
2543static void
2544sili_dummy_done(struct ata_xfer *xa)
2545{
2546}
2547
2548static void
2549sili_empty_done(struct sili_ccb *ccb)
2550{
2551}