2 * Copyright (c) 2009 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * Copyright (c) 2006 David Gwynne <dlg@openbsd.org>
37 * Permission to use, copy, modify, and distribute this software for any
38 * purpose with or without fee is hereby granted, provided that the above
39 * copyright notice and this permission notice appear in all copies.
41 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
42 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
43 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
44 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
45 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
46 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
47 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
51 * $OpenBSD: sili.c,v 1.147 2009/02/16 21:19:07 miod Exp $
/*
 * Internal function prototypes for this driver.
 */
56 void sili_port_interrupt_enable(struct sili_port *ap);
57 void sili_port_interrupt_redisable(struct sili_port *ap);
58 void sili_port_interrupt_reenable(struct sili_port *ap);
60 int sili_load_prb(struct sili_ccb *);
61 void sili_unload_prb(struct sili_ccb *);
62 static void sili_load_prb_callback(void *info, bus_dma_segment_t *segs,
63 int nsegs, int error);
64 void sili_start(struct sili_ccb *);
65 int sili_port_softreset(struct sili_port *ap);
66 int sili_port_hardreset(struct sili_port *ap);
67 void sili_port_hardstop(struct sili_port *ap);
68 void sili_port_listen(struct sili_port *ap);
70 static void sili_ata_cmd_timeout_unserialized(void *);
71 static int sili_core_timeout(struct sili_ccb *ccb, int really_error);
72 void sili_check_active_timeouts(struct sili_port *ap);
75 void sili_beg_exclusive_access(struct sili_port *ap, struct ata_port *at);
76 void sili_end_exclusive_access(struct sili_port *ap, struct ata_port *at);
78 void sili_issue_pending_commands(struct sili_port *ap, struct sili_ccb *ccb);
80 void sili_port_read_ncq_error(struct sili_port *, int);
82 struct sili_dmamem *sili_dmamem_alloc(struct sili_softc *, bus_dma_tag_t tag);
83 void sili_dmamem_free(struct sili_softc *, struct sili_dmamem *);
84 static void sili_dmamem_saveseg(void *info, bus_dma_segment_t *segs, int nsegs, int error);
86 static void sili_dummy_done(struct ata_xfer *xa);
87 static void sili_empty_done(struct sili_ccb *ccb);
88 static void sili_ata_cmd_done(struct sili_ccb *ccb);
91 * Initialize the global SILI hardware. This code does not set up any of
95 sili_init(struct sili_softc *sc)
97 DPRINTF(SILI_D_VERBOSE, " GHC 0x%b",
98 sili_read(sc, SILI_REG_GHC), SILI_FMT_GHC);
101 * Reset the entire chip. This also resets all ports.
103 * The spec doesn't say anything about how long we have to
104 * wait, so wait 10ms.
/* Assert global reset ... */
106 sili_write(sc, SILI_REG_GCTL, SILI_REG_GCTL_GRESET);
/* ... then release it, bringing the chip (and all ports) out of reset */
108 sili_write(sc, SILI_REG_GCTL, 0);
115 * Allocate and initialize an SILI port.
/*
 * Allocates the per-port softc (ap), error scratch buffer, per-target
 * ata_port array, the PRB DMA memory, and one CCB per command slot.
 * On any failure it falls through to sili_port_free() for cleanup.
 */
118 sili_port_alloc(struct sili_softc *sc, u_int port)
120 struct sili_port *ap;
122 struct sili_prb *prb;
123 struct sili_ccb *ccb;
128 ap = kmalloc(sizeof(*ap), M_DEVBUF, M_WAITOK | M_ZERO);
/* 512-byte scratch buffer used during error/NCQ-log processing */
129 ap->ap_err_scratch = kmalloc(512, M_DEVBUF, M_WAITOK | M_ZERO);
131 ksnprintf(ap->ap_name, sizeof(ap->ap_name), "%s%d.%d",
132 device_get_name(sc->sc_dev),
133 device_get_unit(sc->sc_dev),
135 sc->sc_ports[port] = ap;
138 * Allocate enough so we never have to reallocate, it makes
141 * ap_pmcount will be reduced by the scan if we encounter the
142 * port multiplier port prior to target 15.
144 if (ap->ap_ata == NULL) {
145 ap->ap_ata = kmalloc(sizeof(*ap->ap_ata) * SILI_MAX_PMPORTS,
146 M_DEVBUF, M_INTWAIT | M_ZERO);
/* Pre-initialize every potential PM target so hot-plug scans work */
147 for (i = 0; i < SILI_MAX_PMPORTS; ++i) {
149 at->at_sili_port = ap;
151 at->at_probe = ATA_PROBE_NEED_INIT;
152 at->at_features |= ATA_PORT_F_RESCAN;
153 ksnprintf(at->at_name, sizeof(at->at_name),
154 "%s.%d", ap->ap_name, i);
/* Map this port's register window out of the controller BAR */
157 if (bus_space_subregion(sc->sc_piot, sc->sc_pioh,
158 SILI_PORT_REGION(port), SILI_PORT_SIZE,
160 device_printf(sc->sc_dev,
161 "unable to create register window for port %d\n",
168 ap->ap_probe = ATA_PROBE_NEED_INIT;
169 TAILQ_INIT(&ap->ap_ccb_free);
170 TAILQ_INIT(&ap->ap_ccb_pending);
171 lockinit(&ap->ap_ccb_lock, "silipo", 0, 0);
173 /* Disable port interrupts */
174 sili_pwrite(ap, SILI_PREG_INT_DISABLE, SILI_PREG_INT_MASK);
177 * Reset the port. This is similar to a Device Reset but far
178 * more invasive. We use Device Reset in our hardreset function.
179 * This function also does the same OOB initialization sequence
180 * that Device Reset does.
182 * NOTE: SILI_PREG_STATUS_READY will not be asserted unless and until
183 * a device is connected to the port, so we can't use it to
184 * verify that the port exists.
186 sili_pwrite(ap, SILI_PREG_CTL_SET, SILI_PREG_CTL_RESET);
/* READY asserted while RESET is set means the port refused the reset */
187 if (sili_pread(ap, SILI_PREG_STATUS) & SILI_PREG_STATUS_READY) {
188 device_printf(sc->sc_dev,
189 "Port %d will not go into reset\n", port);
193 sili_pwrite(ap, SILI_PREG_CTL_CLR, SILI_PREG_CTL_RESET);
196 * Allocate the SGE Table
198 ap->ap_dmamem_prbs = sili_dmamem_alloc(sc, sc->sc_tag_prbs);
199 if (ap->ap_dmamem_prbs == NULL) {
200 kprintf("%s: NOSGET\n", PORTNAME(ap));
205 * Set up the SGE table base address
207 ap->ap_prbs = (struct sili_prb *)SILI_DMA_KVA(ap->ap_dmamem_prbs);
210 * Allocate a CCB for each command slot
212 ap->ap_ccbs = kmalloc(sizeof(struct sili_ccb) * sc->sc_ncmds, M_DEVBUF,
214 if (ap->ap_ccbs == NULL) {
215 device_printf(sc->sc_dev,
216 "unable to allocate command list for port %d\n",
222 * Most structures are in the port BAR. Assign convenient
223 * pointers in the CCBs
225 for (i = 0; i < sc->sc_ncmds; i++) {
226 ccb = &ap->ap_ccbs[i];
228 error = bus_dmamap_create(sc->sc_tag_data, BUS_DMA_ALLOCNOW,
231 device_printf(sc->sc_dev,
232 "unable to create dmamap for port %d "
233 "ccb %d\n", port, i);
238 * WARNING!!! Access to the rfis is only allowed under very
239 * carefully controlled circumstances because it
240 * is located in the LRAM and reading from the
241 * LRAM has hardware issues which can blow the
242 * port up. I kid you not (from Linux, and
243 * verified by testing here).
245 callout_init(&ccb->ccb_timeout);
/* Host-memory PRB and its bus address for this command slot */
248 ccb->ccb_prb = &ap->ap_prbs[i];
249 ccb->ccb_prb_paddr = SILI_DMA_DVA(ap->ap_dmamem_prbs) +
250 sizeof(*ccb->ccb_prb) * i;
251 ccb->ccb_xa.fis = &ccb->ccb_prb->prb_h2d;
/* Per-slot window into the controller's on-chip LRAM PRB */
252 prb = bus_space_kva(ap->ap_sc->sc_iot, ap->ap_ioh,
253 SILI_PREG_LRAM_SLOT(i));
254 ccb->ccb_prb_lram = prb;
256 * Point our rfis to host-memory instead of the LRAM PRB.
257 * It will be copied back if ATA_F_AUTOSENSE is set. The
260 /*ccb->ccb_xa.rfis = &prb->prb_d2h;*/
261 ccb->ccb_xa.rfis = (void *)ccb->ccb_xa.fis;
263 ccb->ccb_xa.packetcmd = prb_packet(ccb->ccb_prb);
266 ccb->ccb_xa.state = ATA_S_COMPLETE;
269 * Reserve CCB[1] as the error CCB. It doesn't matter
270 * which one we use for the Sili controllers.
273 ap->ap_err_ccb = ccb;
278 * Do not call sili_port_init() here, the helper thread will
279 * call it for the parallel probe
281 sili_os_start_port(ap);
/* Failure path: tear down whatever was allocated above */
284 sili_port_free(sc, port);
289 * This is called once by the low level attach (from the helper thread)
290 * to get the port state machine rolling, and typically only called again
291 * on a hot-plug insertion event.
293 * This is called for PM attachments and hot-plug insertion events, and
294 * typically not called again until after an unplug/replug sequence.
296 * Returns 0 if a device is successfully detected.
299 sili_port_init(struct sili_port *ap)
302 * Do a very hard reset of the port
304 sili_pwrite(ap, SILI_PREG_CTL_SET, SILI_PREG_CTL_RESET);
306 sili_pwrite(ap, SILI_PREG_CTL_CLR, SILI_PREG_CTL_RESET);
309 * Register initialization
/* Program FIFO thresholds and disable 32-bit-DMA / auto-CC behavior */
311 sili_pwrite(ap, SILI_PREG_FIFO_CTL,
312 SILI_PREG_FIFO_CTL_ENCODE(1024, 1024));
313 sili_pwrite(ap, SILI_PREG_CTL_CLR, SILI_PREG_CTL_32BITDMA |
315 sili_pwrite(ap, SILI_PREG_CTL_SET, SILI_PREG_CTL_NOAUTOCC);
/* Clear any stale SNotification bits if the chip supports SSNTF */
316 if (ap->ap_sc->sc_flags & SILI_F_SSNTF)
317 sili_pwrite(ap, SILI_PREG_SNTF, -1);
/* Hand the state machine to the hard-reset stage next */
318 ap->ap_probe = ATA_PROBE_NEED_HARD_RESET;
320 sili_port_interrupt_enable(ap);
325 * Handle an errored port. This routine is called when the only
326 * commands left on the queue are expired, meaning we can safely
327 * go through a port init to clear its state.
329 * We complete the expired CCBs and then restart the queue.
333 sili_port_reinit(struct sili_port *ap)
335 struct sili_ccb *ccb;
/* Reentrant when called while the error CCB is already reserved */
342 reentrant = (ap->ap_flags & AP_F_ERR_CCB_RESERVED) ? 1 : 0;
344 if (bootverbose || 1) {
345 kprintf("%s: reiniting port after error reent=%d "
347 PORTNAME(ap), reentrant, ap->ap_expired);
351 * Clear port resume, clear bits 16:13 in the port device status
352 * register. This is from the data sheet.
354 * Data sheet does not specify a delay but it seems prudent.
356 sili_pwrite(ap, SILI_PREG_CTL_CLR, SILI_PREG_CTL_RESUME);
/* Clear stale per-target status/queue state for every PM slot */
358 for (target = 0; target < SILI_MAX_PMPORTS; ++target) {
359 data = sili_pread(ap, SILI_PREG_PM_STATUS(target));
360 data &= ~(SILI_PREG_PM_STATUS_SERVICE |
361 SILI_PREG_PM_STATUS_LEGACY |
362 SILI_PREG_PM_STATUS_NATIVE |
363 SILI_PREG_PM_STATUS_VBSY);
364 sili_pwrite(ap, SILI_PREG_PM_STATUS(target), data);
365 sili_pwrite(ap, SILI_PREG_PM_QACTIVE(target), 0);
369 * Issue a Port Initialize and wait for it to clear. This flushes
370 * commands but does not reset the port. Then wait for port ready.
372 sili_pwrite(ap, SILI_PREG_CTL_SET, SILI_PREG_CTL_INIT);
373 if (sili_pwait_clr_to(ap, 5000, SILI_PREG_STATUS, SILI_PREG_CTL_INIT)) {
374 kprintf("%s: Unable to reinit, port failed\n",
377 if (sili_pwait_set(ap, SILI_PREG_STATUS, SILI_PREG_STATUS_READY)) {
378 kprintf("%s: Unable to reinit, port will not come ready\n",
383 * If reentrant, stop here. Otherwise the state for the original
384 * ahci_port_reinit() will get ripped out from under it.
390 * Read the LOG ERROR page for targets that returned a specific
391 * D2H FIS with ERR set.
393 for (target = 0; target < SILI_MAX_PMPORTS; ++target) {
394 at = &ap->ap_ata[target];
395 if (at->at_features & ATA_PORT_F_READLOG) {
396 at->at_features &= ~ATA_PORT_F_READLOG;
397 sili_port_read_ncq_error(ap, target);
402 * Finally clean out the expired commands, we've probed the error
403 * status (or hopefully probed the error status). Well, ok,
404 * we probably didn't XXX.
/* Complete each expired slot with ATA_S_TIMEOUT */
406 while (ap->ap_expired) {
407 slot = ffs(ap->ap_expired) - 1;
408 ap->ap_expired &= ~(1 << slot);
409 KKASSERT(ap->ap_active & (1 << slot));
410 ap->ap_active &= ~(1 << slot);
412 ccb = &ap->ap_ccbs[slot];
413 ccb->ccb_xa.state = ATA_S_TIMEOUT;
415 ccb->ccb_xa.complete(&ccb->ccb_xa);
419 * Wow. All done. We can get the port moving again.
/* If the port died anyway, drain the pending queue as timeouts */
421 if (ap->ap_probe == ATA_PROBE_FAILED) {
422 kprintf("%s: reinit failed, port is dead\n", PORTNAME(ap));
423 while ((ccb = TAILQ_FIRST(&ap->ap_ccb_pending)) != NULL) {
424 TAILQ_REMOVE(&ap->ap_ccb_pending, ccb, ccb_entry);
425 ccb->ccb_xa.flags &= ~ATA_F_TIMEOUT_DESIRED;
426 ccb->ccb_xa.state = ATA_S_TIMEOUT;
428 ccb->ccb_xa.complete(&ccb->ccb_xa);
431 sili_issue_pending_commands(ap, NULL);
436 * Enable or re-enable interrupts on a port.
438 * This routine is called from the port initialization code or from the
439 * helper thread as the real interrupt may be forced to turn off certain
443 sili_port_interrupt_enable(struct sili_port *ap)
/* Standard set of per-port interrupt sources */
447 data = SILI_PREG_INT_CCOMPLETE | SILI_PREG_INT_CERROR |
448 SILI_PREG_INT_PHYRDYCHG | SILI_PREG_INT_DEVEXCHG |
449 SILI_PREG_INT_DECODE | SILI_PREG_INT_CRC |
450 SILI_PREG_INT_HANDSHK | SILI_PREG_INT_PMCHANGE;
/* Set Device Bits interrupt only when SNotification is supported */
451 if (ap->ap_sc->sc_flags & SILI_F_SSNTF)
452 data |= SILI_PREG_INT_SDB;
453 sili_pwrite(ap, SILI_PREG_INT_ENABLE, data);
/*
 * Mask this port's bit in the global interrupt control register.
 * NOTE(review): the read-modify-write masks to the port bits first,
 * then clears this port's bit before writing back.
 */
457 sili_port_interrupt_redisable(struct sili_port *ap)
461 data = sili_read(ap->ap_sc, SILI_REG_GCTL);
462 data &= SILI_REG_GINT_PORTMASK;
463 data &= ~(1 << ap->ap_num);
464 sili_write(ap->ap_sc, SILI_REG_GCTL, data);
/*
 * Re-enable this port's bit in the global interrupt control register
 * (inverse of sili_port_interrupt_redisable()).
 */
468 sili_port_interrupt_reenable(struct sili_port *ap)
472 data = sili_read(ap->ap_sc, SILI_REG_GCTL);
473 data &= SILI_REG_GINT_PORTMASK;
474 data |= (1 << ap->ap_num);
475 sili_write(ap->ap_sc, SILI_REG_GCTL, data);
479 * Run the port / target state machine from a main context.
481 * The state machine for the port is always run.
483 * If atx is non-NULL run the state machine for a particular target.
484 * If atx is NULL run the state machine for all targets.
487 sili_port_state_machine(struct sili_port *ap, int initial)
496 * State machine for port. Note that CAM is not yet associated
497 * during the initial parallel probe and the port's probe state
498 * will not get past ATA_PROBE_NEED_IDENT.
/* On a hot-plug insertion (not initial probe), allow spin-up time */
501 if (initial == 0 && ap->ap_probe <= ATA_PROBE_NEED_HARD_RESET) {
502 kprintf("%s: Waiting 7 seconds on insertion\n",
/* Walk the probe states in order; each step advances ap_probe */
507 if (ap->ap_probe == ATA_PROBE_NEED_INIT)
509 if (ap->ap_probe == ATA_PROBE_NEED_HARD_RESET)
510 sili_port_reset(ap, NULL, 1);
511 if (ap->ap_probe == ATA_PROBE_NEED_SOFT_RESET)
512 sili_port_reset(ap, NULL, 0);
513 if (ap->ap_probe == ATA_PROBE_NEED_IDENT)
514 sili_cam_probe(ap, NULL);
/* Non-PM ports report attach/detach to CAM directly */
516 if (ap->ap_type != ATA_PORT_T_PM) {
517 if (ap->ap_probe == ATA_PROBE_FAILED) {
518 sili_cam_changed(ap, NULL, 0);
519 } else if (ap->ap_probe >= ATA_PROBE_NEED_IDENT) {
520 sili_cam_changed(ap, NULL, 1);
526 * Port Multiplier state machine.
528 * Get a mask of changed targets and combine with any runnable
529 * states already present.
531 for (loop = 0; ;++loop) {
532 if (sili_pm_read(ap, 15, SATA_PMREG_EINFO, &data)) {
533 kprintf("%s: PM unable to read hot-plug bitmap\n",
539 * Do at least one loop, then stop if no more state changes
540 * have occured. The PM might not generate a new
541 * notification until we clear the entire bitmap.
543 if (loop && data == 0)
547 * New devices showing up in the bitmap require some spin-up
548 * time before we start probing them. Reset didsleep. The
549 * first new device we detect will sleep before probing.
551 * This only applies to devices whos change bit is set in
552 * the data, and does not apply to the initial boot-time
557 for (target = 0; target < ap->ap_pmcount; ++target) {
558 at = &ap->ap_ata[target];
561 * Check the target state for targets behind the PM
562 * which have changed state. This will adjust
563 * at_probe and set ATA_PORT_F_RESCAN
565 * We want to wait at least 10 seconds before probing
566 * a newly inserted device. If the check status
567 * indicates a device is present and in need of a
568 * hard reset, we make sure we have slept before
571 * We also need to wait at least 1 second for the
572 * PHY state to change after insertion, if we
573 * haven't already waited the 10 seconds.
575 * NOTE: When pm_check_good finds a good port it
576 * typically starts us in probe state
577 * NEED_HARD_RESET rather than INIT.
579 if (data & (1 << target)) {
580 if (initial == 0 && didsleep == 0)
582 sili_pm_check_good(ap, target);
583 if (initial == 0 && didsleep == 0 &&
584 at->at_probe <= ATA_PROBE_NEED_HARD_RESET
587 kprintf("%s: Waiting 10 seconds on insertion\n", PORTNAME(ap));
588 sili_os_sleep(10000);
593 * Report hot-plug events before the probe state
594 * really gets hot. Only actual events are reported
595 * here to reduce spew.
597 if (data & (1 << target)) {
598 kprintf("%s: HOTPLUG (PM) - ", ATANAME(ap, at));
599 switch(at->at_probe) {
600 case ATA_PROBE_NEED_INIT:
601 case ATA_PROBE_NEED_HARD_RESET:
602 kprintf("Device inserted\n");
604 case ATA_PROBE_FAILED:
605 kprintf("Device removed\n");
608 kprintf("Device probe in progress\n");
614 * Run through the state machine as necessary if
615 * the port is not marked failed.
617 * The state machine may stop at NEED_IDENT if
618 * CAM is not yet attached.
620 * Acquire exclusive access to the port while we
621 * are doing this. This prevents command-completion
622 * from queueing commands for non-polled targets
623 * inbetween our probe steps. We need to do this
624 * because the reset probes can generate severe PHY
625 * and protocol errors and soft-brick the port.
627 if (at->at_probe != ATA_PROBE_FAILED &&
628 at->at_probe != ATA_PROBE_GOOD) {
629 if (at->at_probe == ATA_PROBE_NEED_INIT)
630 sili_pm_port_init(ap, at);
631 if (at->at_probe == ATA_PROBE_NEED_HARD_RESET)
632 sili_port_reset(ap, at, 1);
633 if (at->at_probe == ATA_PROBE_NEED_SOFT_RESET)
634 sili_port_reset(ap, at, 0);
635 if (at->at_probe == ATA_PROBE_NEED_IDENT)
636 sili_cam_probe(ap, at);
640 * Add or remove from CAM
642 if (at->at_features & ATA_PORT_F_RESCAN) {
643 at->at_features &= ~ATA_PORT_F_RESCAN;
644 if (at->at_probe == ATA_PROBE_FAILED) {
645 sili_cam_changed(ap, at, 0);
646 } else if (at->at_probe >= ATA_PROBE_NEED_IDENT) {
647 sili_cam_changed(ap, at, 1);
/* Consume this target's change bit */
650 data &= ~(1 << target);
/* Any bits still set refer to targets past ap_pmcount */
653 kprintf("%s: WARNING (PM): extra bits set in "
654 "EINFO: %08x\n", PORTNAME(ap), data);
655 while (target < SILI_MAX_PMPORTS) {
656 sili_pm_check_good(ap, target);
664 * De-initialize and detach a port.
667 sili_port_free(struct sili_softc *sc, u_int port)
669 struct sili_port *ap = sc->sc_ports[port];
670 struct sili_ccb *ccb;
673 * Ensure port is disabled and its interrupts are all flushed.
676 sili_os_stop_port(ap);
677 sili_pwrite(ap, SILI_PREG_INT_DISABLE, SILI_PREG_INT_MASK);
678 sili_pwrite(ap, SILI_PREG_CTL_SET, SILI_PREG_CTL_RESET);
/* Mask this port's global interrupt status bit as well */
679 sili_write(ap->ap_sc, SILI_REG_GCTL,
680 sili_read(ap->ap_sc, SILI_REG_GCTL) &
681 ~SILI_REG_GINT_PORTST(ap->ap_num));
/* Destroy the dmamap of every free CCB, then of the error CCB */
685 while ((ccb = sili_get_ccb(ap)) != NULL) {
686 if (ccb->ccb_dmamap) {
687 bus_dmamap_destroy(sc->sc_tag_data,
689 ccb->ccb_dmamap = NULL;
692 if ((ccb = ap->ap_err_ccb) != NULL) {
693 if (ccb->ccb_dmamap) {
694 bus_dmamap_destroy(sc->sc_tag_data,
696 ccb->ccb_dmamap = NULL;
698 ap->ap_err_ccb = NULL;
700 kfree(ap->ap_ccbs, M_DEVBUF);
704 if (ap->ap_dmamem_prbs) {
705 sili_dmamem_free(sc, ap->ap_dmamem_prbs);
706 ap->ap_dmamem_prbs = NULL;
709 kfree(ap->ap_ata, M_DEVBUF);
712 if (ap->ap_err_scratch) {
713 kfree(ap->ap_err_scratch, M_DEVBUF);
714 ap->ap_err_scratch = NULL;
717 /* bus_space(9) says we dont free the subregions handle */
720 sc->sc_ports[port] = NULL;
726 * If hard is 0 perform a softreset of the port.
727 * If hard is 1 perform a hard reset of the port.
728 * If hard is 2 perform a hard reset of the port and cycle the phy.
730 * If at is non-NULL an indirect port via a port-multiplier is being
731 * reset, otherwise a direct port is being reset.
733 * NOTE: Indirect ports can only be soft-reset.
736 sili_port_reset(struct sili_port *ap, struct ata_port *at, int hard)
/* Dispatch: PM-target vs direct port, hard vs soft */
742 rc = sili_pm_hardreset(ap, at->at_target, hard);
744 rc = sili_port_hardreset(ap);
747 rc = sili_pm_softreset(ap, at->at_target);
749 rc = sili_port_softreset(ap);
755 * SILI soft reset, Section 10.4.1
757 * (at) will be NULL when soft-resetting a directly-attached device, and
758 * non-NULL when soft-resetting a device through a port multiplier.
760 * This function keeps port communications intact and attempts to generate
761 * a reset to the connected device using device commands.
764 sili_port_softreset(struct sili_port *ap)
766 struct sili_ccb *ccb = NULL;
767 struct sili_prb *prb;
774 kprintf("%s: START SOFTRESET\n", PORTNAME(ap));
777 ap->ap_state = AP_S_NORMAL;
780 * Prep the special soft-reset SII command.
/* Uses the reserved error CCB; polled, exclusive-access */
782 ccb = sili_get_err_ccb(ap);
783 ccb->ccb_done = sili_empty_done;
784 ccb->ccb_xa.flags = ATA_F_POLL | ATA_F_AUTOSENSE | ATA_F_EXCLUSIVE;
785 ccb->ccb_xa.complete = sili_dummy_done;
786 ccb->ccb_xa.at = NULL;
/* The SOFTRESET control bit does the work; the FIS itself is zeroed */
789 bzero(&prb->prb_h2d, sizeof(prb->prb_h2d));
790 prb->prb_h2d.flags = 0;
791 prb->prb_control = SILI_PRB_CTRL_SOFTRESET;
792 prb->prb_override = 0;
793 prb->prb_xfer_count = 0;
795 ccb->ccb_xa.state = ATA_S_PENDING;
798 * NOTE: Must use sili_quick_timeout() because we hold the err_ccb
800 if (sili_poll(ccb, 8000, sili_quick_timeout) != ATA_S_COMPLETE) {
801 kprintf("%s: First FIS failed\n", PORTNAME(ap));
/*
 * Assemble the device signature from the D2H response
 * (lba_high:lba_mid:lba_low:sector_count).
 */
805 sig = (prb->prb_d2h.lba_high << 24) |
806 (prb->prb_d2h.lba_mid << 16) |
807 (prb->prb_d2h.lba_low << 8) |
808 (prb->prb_d2h.sector_count);
810 kprintf("%s: SOFTRESET SIGNATURE %08x\n", PORTNAME(ap), sig);
813 * If the softreset is trying to clear a BSY condition after a
814 * normal portreset we assign the port type.
816 * If the softreset is being run first as part of the ccb error
817 * processing code then report if the device signature changed
820 if (ap->ap_type == ATA_PORT_T_NONE) {
821 ap->ap_type = sili_port_signature(ap, NULL, sig);
823 if (sili_port_signature(ap, NULL, sig) != ap->ap_type) {
824 kprintf("%s: device signature unexpectedly "
825 "changed\n", PORTNAME(ap));
826 error = EBUSY; /* XXX */
832 sili_put_err_ccb(ccb);
836 * If we failed to softreset make the port quiescent, otherwise
837 * make sure the port's start/stop state matches what it was on
840 * Don't kill the port if the softreset is on a port multiplier
841 * target, that would kill all the targets!
844 kprintf("%s: END SOFTRESET %d prob=%d state=%d\n",
845 PORTNAME(ap), error, ap->ap_probe, ap->ap_state);
848 sili_port_hardstop(ap);
849 /* ap_probe set to failed */
851 ap->ap_probe = ATA_PROBE_NEED_IDENT;
/* Clear accumulated SError bits before returning */
856 sili_pwrite(ap, SILI_PREG_SERR, -1);
858 kprintf("%s: END SOFTRESET\n", PORTNAME(ap));
864 * This function does a hard reset of the port. Note that the device
865 * connected to the port could still end-up hung. Phy detection is
866 * used to short-cut longer operations.
869 sili_port_hardreset(struct sili_port *ap)
876 kprintf("%s: START HARDRESET\n", PORTNAME(ap));
878 ap->ap_state = AP_S_NORMAL;
881 * Set SCTL up for any speed restrictions before issuing the
882 * device reset. This may also take us out of an INIT state
883 * (if we were previously in a continuous reset state from
884 * sili_port_listen()).
886 data = SILI_PREG_SCTL_SPM_NONE |
887 SILI_PREG_SCTL_IPM_NONE |
888 SILI_PREG_SCTL_SPD_NONE |
889 SILI_PREG_SCTL_DET_NONE;
/* Optional per-port tunable to force GEN1 link speed */
890 if (SiliForceGen1 & (1 << ap->ap_num)) {
891 data &= ~SILI_PREG_SCTL_SPD_NONE;
892 data |= SILI_PREG_SCTL_SPD_GEN1;
894 sili_pwrite(ap, SILI_PREG_SCTL, data);
897 * The transition from a continuous COMRESET state from
898 * sili_port_listen() back to device detect can take a
899 * few seconds. It's quite non-deterministic. Most of
900 * the time it takes far less. Use a polling loop to
905 data = sili_pread(ap, SILI_PREG_SSTS);
906 if (data & SILI_PREG_SSTS_DET)
908 loop -= sili_os_softsleep();
913 * Issue Device Reset, give the phy a little time to settle down.
915 * NOTE: Unlike Port Reset, the port ready signal will not
916 * go active unless a device is established to be on
919 sili_pwrite(ap, SILI_PREG_CTL_CLR, SILI_PREG_CTL_PMA);
920 sili_pwrite(ap, SILI_PREG_CTL_CLR, SILI_PREG_CTL_RESUME);
921 sili_pwrite(ap, SILI_PREG_CTL_SET, SILI_PREG_CTL_DEVRESET);
/* DEVRESET self-clears when the reset sequence completes */
922 if (sili_pwait_clr(ap, SILI_PREG_CTL_SET, SILI_PREG_CTL_DEVRESET)) {
923 kprintf("%s: hardreset failed to clear\n", PORTNAME(ap));
928 * Try to determine if there is a device on the port.
930 * Give the device 3/10 second to at least be detected.
934 data = sili_pread(ap, SILI_PREG_SSTS);
935 if (data & SILI_PREG_SSTS_DET)
937 loop -= sili_os_softsleep();
941 kprintf("%s: Port appears to be unplugged\n",
949 * There is something on the port. Give the device 3 seconds
952 if (sili_pwait_eq(ap, 3000, SILI_PREG_SSTS,
953 SILI_PREG_SSTS_DET, SILI_PREG_SSTS_DET_DEV)) {
955 kprintf("%s: Device may be powered down\n",
963 * We got something that definitely looks like a device. Give
964 * the device time to send us its first D2H FIS.
966 * This effectively waits for BSY to clear.
968 if (sili_pwait_set_to(ap, 3000, SILI_PREG_STATUS,
969 SILI_PREG_STATUS_READY)) {
977 * Do the PM port probe regardless of how things turned out above.
979 * If the PM port probe fails it will return the original error
982 if (ap->ap_sc->sc_flags & SILI_F_SPM) {
983 error = sili_pm_port_probe(ap, error);
/* PM ports are done; plain devices still need a soft reset pass */
992 if (ap->ap_type == ATA_PORT_T_PM)
993 ap->ap_probe = ATA_PROBE_GOOD;
995 ap->ap_probe = ATA_PROBE_NEED_SOFT_RESET;
999 * No device detected.
1001 data = sili_pread(ap, SILI_PREG_SSTS);
1003 switch(data & SATA_PM_SSTS_DET) {
1004 case SILI_PREG_SSTS_DET_DEV_NE:
1005 kprintf("%s: Device not communicating\n",
1008 case SILI_PREG_SSTS_DET_OFFLINE:
1009 kprintf("%s: PHY offline\n",
1013 kprintf("%s: No device detected\n",
1017 sili_port_hardstop(ap);
/* Fatal-error path: port stays stopped in listen mode */
1023 kprintf("%s: Device on port is bricked\n",
1025 sili_port_hardstop(ap);
/* Clear accumulated SError bits before returning */
1028 sili_pwrite(ap, SILI_PREG_SERR, -1);
1031 kprintf("%s: END HARDRESET %d\n", PORTNAME(ap), error);
1036 * Hard-stop on hot-swap device removal. See 10.10.1
1038 * Place the port in a mode that will allow it to detect hot-swap insertions.
1039 * This is a bit imprecise because just setting-up SCTL to DET_INIT doesn't
1040 * seem to do the job.
1043 sili_port_hardstop(struct sili_port *ap)
1045 struct sili_ccb *ccb;
1046 struct ata_port *at;
/* Mark the port dead before tearing anything down */
1050 ap->ap_state = AP_S_FATAL_ERROR;
1051 ap->ap_probe = ATA_PROBE_FAILED;
1052 ap->ap_type = ATA_PORT_T_NONE;
1055 * Clean up AT sub-ports on SATA port.
1057 for (i = 0; ap->ap_ata && i < SILI_MAX_PMPORTS; ++i) {
1058 at = &ap->ap_ata[i];
1059 at->at_type = ATA_PORT_T_NONE;
1060 at->at_probe = ATA_PROBE_FAILED;
1061 at->at_features &= ~ATA_PORT_F_READLOG;
1065 * Kill the port. Don't bother waiting for it to transition
1068 sili_pwrite(ap, SILI_PREG_CTL_SET, SILI_PREG_CTL_RESET);
1069 if (sili_pread(ap, SILI_PREG_STATUS) & SILI_PREG_STATUS_READY) {
1070 kprintf("%s: Port will not go into reset\n",
1074 sili_pwrite(ap, SILI_PREG_CTL_CLR, SILI_PREG_CTL_RESET);
1077 * Turn off port-multiplier control bit
1079 sili_pwrite(ap, SILI_PREG_CTL_CLR, SILI_PREG_CTL_PMA);
1082 * Clean up the command list.
/* Fail every active slot with ATA_S_TIMEOUT, stopping callouts */
1084 while (ap->ap_active) {
1085 slot = ffs(ap->ap_active) - 1;
1086 ap->ap_active &= ~(1 << slot);
1087 ap->ap_expired &= ~(1 << slot);
1088 --ap->ap_active_cnt;
1089 ccb = &ap->ap_ccbs[slot];
1090 if (ccb->ccb_xa.flags & ATA_F_TIMEOUT_RUNNING) {
1091 callout_stop(&ccb->ccb_timeout);
1092 ccb->ccb_xa.flags &= ~ATA_F_TIMEOUT_RUNNING;
1094 ccb->ccb_xa.flags &= ~(ATA_F_TIMEOUT_DESIRED |
1095 ATA_F_TIMEOUT_EXPIRED);
1096 ccb->ccb_xa.state = ATA_S_TIMEOUT;
1098 ccb->ccb_xa.complete(&ccb->ccb_xa);
/* Then drain the pending queue the same way */
1100 while ((ccb = TAILQ_FIRST(&ap->ap_ccb_pending)) != NULL) {
1101 TAILQ_REMOVE(&ap->ap_ccb_pending, ccb, ccb_entry);
1102 ccb->ccb_xa.state = ATA_S_TIMEOUT;
1103 ccb->ccb_xa.flags &= ~ATA_F_TIMEOUT_DESIRED;
1105 ccb->ccb_xa.complete(&ccb->ccb_xa);
1107 KKASSERT(ap->ap_active_cnt == 0);
1110 * Put the port into a listen mode, we want to get insertion/removal
1113 sili_port_listen(ap);
1117 * Place port into a listen mode for hotplug events only. The port has
1118 * already been reset and the command processor may not be ready due
1119 * to the lack of a device.
1122 sili_port_listen(struct sili_port *ap)
/* DET_INIT keeps the phy in continuous COMRESET, watching for devices */
1127 data = SILI_PREG_SCTL_SPM_NONE |
1128 SILI_PREG_SCTL_IPM_NONE |
1129 SILI_PREG_SCTL_SPD_NONE |
1130 SILI_PREG_SCTL_DET_INIT;
1131 if (SiliForceGen1 & (1 << ap->ap_num)) {
1132 data &= ~SILI_PREG_SCTL_SPD_NONE;
1133 data |= SILI_PREG_SCTL_SPD_GEN1;
/* Only phy-ready-change / device-exchange interrupts remain enabled */
1137 sili_pwrite(ap, SILI_PREG_SERR, -1);
1138 sili_pwrite(ap, SILI_PREG_INT_ENABLE, SILI_PREG_INT_PHYRDYCHG |
1139 SILI_PREG_INT_DEVEXCHG);
1143 * Figure out what type of device is connected to the port, ATAPI or
/*
 * Classify the SATA signature: ATAPI, port multiplier, or (default)
 * plain disk.  Only the high 16 bits of the signature are compared.
 */
1147 sili_port_signature(struct sili_port *ap, struct ata_port *at, u_int32_t sig)
1150 kprintf("%s: sig %08x\n", ATANAME(ap, at), sig);
1151 if ((sig & 0xffff0000) == (SATA_SIGNATURE_ATAPI & 0xffff0000)) {
1152 return(ATA_PORT_T_ATAPI);
1153 } else if ((sig & 0xffff0000) ==
1154 (SATA_SIGNATURE_PORT_MULTIPLIER & 0xffff0000)) {
1155 return(ATA_PORT_T_PM);
1157 return(ATA_PORT_T_DISK);
1162 * Load the DMA descriptor table for a CCB's buffer.
1164 * NOTE: ATA_F_PIO is auto-selected by sili part.
1167 sili_load_prb(struct sili_ccb *ccb)
1169 struct sili_port *ap = ccb->ccb_port;
1170 struct sili_softc *sc = ap->ap_sc;
1171 struct ata_xfer *xa = &ccb->ccb_xa;
1172 struct sili_prb *prb = ccb->ccb_prb;
1173 struct sili_sge *sge;
1174 bus_dmamap_t dmap = ccb->ccb_dmamap;
1178 * Set up the PRB. The PRB contains 2 SGE's (1 if it is an ATAPI
1179 * command). The SGE must be set up to link to the rest of our
1180 * SGE array, in blocks of four SGEs (a SGE table) starting at
1182 prb->prb_xfer_count = 0;
1183 prb->prb_control = 0;
1184 prb->prb_override = 0;
/* ATAPI commands use the packet SGE slot, ATA commands the normal one */
1185 sge = (ccb->ccb_xa.flags & ATA_F_PACKET) ?
1186 &prb->prb_sge_packet : &prb->prb_sge_normal;
/* Zero-length transfer: terminate immediately with a data-discard SGE */
1187 if (xa->datalen == 0) {
1188 sge->sge_flags = SILI_SGE_FLAGS_TRM | SILI_SGE_FLAGS_DRD;
1193 if (ccb->ccb_xa.flags & ATA_F_READ)
1194 prb->prb_control |= SILI_PRB_CTRL_READ;
1195 if (ccb->ccb_xa.flags & ATA_F_WRITE)
1196 prb->prb_control |= SILI_PRB_CTRL_WRITE;
/* Link SGE pointing at the start of the prb_sge[] array */
1197 sge->sge_flags = SILI_SGE_FLAGS_LNK;
1199 sge->sge_paddr = ccb->ccb_prb_paddr +
1200 offsetof(struct sili_prb, prb_sge[0]);
1203 * Load our sge array.
1205 error = bus_dmamap_load(sc->sc_tag_data, dmap,
1206 xa->data, xa->datalen,
1207 sili_load_prb_callback,
1209 ((xa->flags & ATA_F_NOWAIT) ?
1210 BUS_DMA_NOWAIT : BUS_DMA_WAITOK));
1212 kprintf("%s: error %d loading dmamap\n", PORTNAME(ap), error);
/* Sync the buffer for the upcoming DMA in the appropriate direction */
1216 bus_dmamap_sync(sc->sc_tag_data, dmap,
1217 (xa->flags & ATA_F_READ) ?
1218 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
/* Error/unwind path */
1224 bus_dmamap_unload(sc->sc_tag_data, dmap);
1230 * Callback from BUSDMA system to load the segment list.
1232 * The scatter/gather table is loaded by the sili chip in blocks of
1233 * four SGE's. If a continuance is required the last entry in each
1234 * block must point to the next block.
1238 sili_load_prb_callback(void *info, bus_dma_segment_t *segs, int nsegs,
1241 struct sili_ccb *ccb = info;
1242 struct sili_sge *sge;
1245 KKASSERT(nsegs <= SILI_MAX_SGET);
1248 sge = &ccb->ccb_prb->prb_sge[0];
/* Every fourth SGE becomes a link to the next 4-entry block */
1250 if ((sgi & 3) == 3) {
1251 sge->sge_paddr = htole64(ccb->ccb_prb_paddr +
1252 offsetof(struct sili_prb,
1255 sge->sge_flags = SILI_SGE_FLAGS_LNK;
/* SGE fields are little-endian on the wire */
1257 sge->sge_paddr = htole64(segs->ds_addr);
1258 sge->sge_count = htole32(segs->ds_len);
/* Mark the final data SGE as the terminator */
1267 sge->sge_flags |= SILI_SGE_FLAGS_TRM;
/*
 * Tear down the DMA mapping after a command completes and compute
 * the residual from the chip-reported transfer count.
 */
1271 sili_unload_prb(struct sili_ccb *ccb)
1273 struct sili_port *ap = ccb->ccb_port;
1274 struct sili_softc *sc = ap->ap_sc;
1275 struct ata_xfer *xa = &ccb->ccb_xa;
1276 bus_dmamap_t dmap = ccb->ccb_dmamap;
1278 if (xa->datalen != 0) {
1279 bus_dmamap_sync(sc->sc_tag_data, dmap,
1280 (xa->flags & ATA_F_READ) ?
1281 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1283 bus_dmamap_unload(sc->sc_tag_data, dmap);
/* NCQ commands are handled separately from the xfer-count residual */
1285 if (ccb->ccb_xa.flags & ATA_F_NCQ)
1288 xa->resid = xa->datalen -
1289 le32toh(ccb->ccb_prb->prb_xfer_count);
1294 * Start a command and poll for completion.
1296 * timeout is in ms and only counts once the command gets on-chip.
1298 * Returns ATA_S_* state, compare against ATA_S_COMPLETE to determine
1299 * that no error occurred.
1301 * NOTE: If the caller specifies a NULL timeout function the caller is
1302 * responsible for clearing hardware state on failure, but we will
1303 * deal with removing the ccb from any pending queue.
1305 * NOTE: NCQ should never be used with this function.
1307 * NOTE: If the port is in a failed state and stopped we do not try
1308 * to activate the ccb.
/*
 * Start a command and poll for completion (see block comment above:
 * timeout is in milliseconds and ATA_S_COMPLETE indicates success).
 *
 * Bails immediately with ATA_S_ERROR if the port is in a fatal state.
 * Otherwise it repeatedly runs the port interrupt handler and sleeps
 * in small increments, decrementing the timeout only while the command
 * is actually on-chip.  Because the callout cannot run while we hold
 * the port lock, expired timeouts are reaped manually via
 * sili_check_active_timeouts().
 *
 * NOTE(review): the `if (xxx AP_F_EXCLUSIVE_ACCESS)` line appears to
 * be disabled (#if 0) dead code in the original source — confirm
 * before treating it as live logic.  Extraction gaps: case labels and
 * the timeout_fn invocation path are missing from this view.
 */
1311 sili_poll(struct sili_ccb *ccb, int timeout,
1312 void (*timeout_fn)(struct sili_ccb *))
1314 struct sili_port *ap = ccb->ccb_port;
/* refuse to start anything on a dead port */
1316 if (ccb->ccb_port->ap_state == AP_S_FATAL_ERROR) {
1317 ccb->ccb_xa.state = ATA_S_ERROR;
1318 return(ccb->ccb_xa.state);
/* run completions inline since we are polling, blockable context */
1324 sili_port_intr(ap, 1);
1325 switch(ccb->ccb_xa.state) {
1327 timeout -= sili_os_softsleep();
1331 * The packet can get stuck on the pending queue
1332 * if the port refuses to come ready. XXX
1335 if (xxx AP_F_EXCLUSIVE_ACCESS)
1336 timeout -= sili_os_softsleep();
1339 sili_os_softsleep();
/* callout is deadlocked against the port lock; reap timeouts here */
1340 sili_check_active_timeouts(ap);
/* any other state means the command finished one way or another */
1343 return (ccb->ccb_xa.state);
1345 } while (timeout > 0);
1348 * Don't spew if this is a probe during hard reset
1350 if (ap->ap_probe != ATA_PROBE_NEED_HARD_RESET) {
1351 kprintf("%s: Poll timeout slot %d\n",
1352 ATANAME(ap, ccb->ccb_xa.at),
1358 return(ccb->ccb_xa.state);
1362 * When polling we have to check if the currently active CCB(s)
1363 * have timed out as the callout will be deadlocked while we
1364 * hold the port lock.
1367 sili_check_active_timeouts(struct sili_port *ap)
1369 struct sili_ccb *ccb;
1373 mask = ap->ap_active;
1375 tag = ffs(mask) - 1;
1376 mask &= ~(1 << tag);
1377 ccb = &ap->ap_ccbs[tag];
1378 if (ccb->ccb_xa.flags & ATA_F_TIMEOUT_EXPIRED) {
1379 sili_core_timeout(ccb, 0);
1387 sili_start_timeout(struct sili_ccb *ccb)
1389 if (ccb->ccb_xa.flags & ATA_F_TIMEOUT_DESIRED) {
1390 ccb->ccb_xa.flags |= ATA_F_TIMEOUT_RUNNING;
1391 callout_reset(&ccb->ccb_timeout,
1392 (ccb->ccb_xa.timeout * hz + 999) / 1000,
1393 sili_ata_cmd_timeout_unserialized, ccb);
/*
 * Hand a fully-prepared, ATA_S_PENDING CCB to the issue path.
 *
 * Pre-syncs the host-memory PRB/SGE DMA area so the chip sees the
 * just-built command, then defers to sili_issue_pending_commands()
 * which queues and issues it (see comment below: no fast path is
 * worth having here).
 *
 * NOTE(review): extraction gaps — some statements between the visible
 * lines are missing from this view.
 */
1398 sili_start(struct sili_ccb *ccb)
1400 struct sili_port *ap = ccb->ccb_port;
1402 struct sili_softc *sc = ap->ap_sc;
/* caller must have finished building the command first */
1405 KKASSERT(ccb->ccb_xa.state == ATA_S_PENDING);
1408 * Sync our SGE table and PRB
1410 bus_dmamap_sync(ap->ap_dmamem_prbs->adm_tag,
1411 ap->ap_dmamem_prbs->adm_map,
1412 BUS_DMASYNC_PREWRITE);
1415 * XXX dmamap for PRB XXX BUS_DMASYNC_PREWRITE
1419 * Controller will update shared memory!
1420 * XXX bus_dmamap_sync ... BUS_DMASYNC_PREREAD ...
1422 /* Prepare RFIS area for write by controller */
1425 * There's no point trying to optimize this, it only shaves a few
1426 * nanoseconds so just queue the command and call our generic issue.
1428 sili_issue_pending_commands(ap, ccb);
1433 * While holding the port lock acquire exclusive access to the port.
1435 * This is used when running the state machine to initialize and identify
1436 * targets over a port multiplier. Setting exclusive access prevents
1437 * sili_port_intr() from activating any requests sitting on the pending
1441 sili_beg_exclusive_access(struct sili_port *ap, struct ata_port *at)
1443 KKASSERT((ap->ap_flags & AP_F_EXCLUSIVE_ACCESS) == 0);
1444 ap->ap_flags |= AP_F_EXCLUSIVE_ACCESS;
1445 while (ap->ap_active) {
1446 sili_port_intr(ap, 1);
1447 sili_os_softsleep();
1452 sili_end_exclusive_access(struct sili_port *ap, struct ata_port *at)
1454 KKASSERT((ap->ap_flags & AP_F_EXCLUSIVE_ACCESS) != 0);
1455 ap->ap_flags &= ~AP_F_EXCLUSIVE_ACCESS;
1456 sili_issue_pending_commands(ap, NULL);
1461 * If ccb is not NULL enqueue and/or issue it.
1463 * If ccb is NULL issue whatever we can from the queue. However, nothing
1464 * new is issued if the exclusive access flag is set or expired ccb's are
1467 * If existing commands are still active (ap_active) we can only
1468 * issue matching new commands.
/*
 * Queue and/or issue commands (see block comment above for the
 * ccb==NULL convention).  New commands are appended to ap_ccb_pending
 * and then as many compatible queue entries as possible are pushed to
 * the chip: exclusivity (ATA_F_EXCLUSIVE/ATA_F_AUTOSENSE) and expired
 * commands gate issue, and a not-ready port arms the READY interrupt
 * instead of issuing.
 *
 * NOTE(review): extraction gaps — loop/branch boundaries between the
 * visible lines are missing from this view; the logic is annotated
 * only where visible.
 */
1471 sili_issue_pending_commands(struct sili_port *ap, struct sili_ccb *ccb)
1476 * If just running the queue and in exclusive access mode we
1477 * just return. Also in this case if there are any expired ccb's
1478 * we want to clear the queue so the port can be safely stopped.
1480 * XXX sili chip - expiration needs to be per-target if PM supports
/* a new ccb always goes on the tail of the pending queue first */
1484 TAILQ_INSERT_TAIL(&ap->ap_ccb_pending, ccb, ccb_entry);
1485 } else if (ap->ap_expired) {
1490 * Pull the next ccb off the queue and run it if possible.
1491 * If the port is not ready to accept commands enable the
1492 * ready interrupt instead of starting a new command.
1494 * XXX limit ncqdepth for attached devices behind PM
1496 while ((ccb = TAILQ_FIRST(&ap->ap_ccb_pending)) != NULL) {
1498 * Port may be wedged.
1500 if ((sili_pread(ap, SILI_PREG_STATUS) &
1501 SILI_PREG_STATUS_READY) == 0) {
1502 kprintf("%s: slot %d NOT READY\n",
1503 ATANAME(ap, ccb->ccb_xa.at), ccb->ccb_slot);
/* wait for the READY interrupt instead of issuing into a wedged port */
1504 sili_pwrite(ap, SILI_PREG_INT_ENABLE,
1505 SILI_PREG_INT_READY);
1510 * Handle exclusivity requirements. ATA_F_EXCLUSIVE is used
1511 * when we may have to access the rfis which is stored in
1512 * the LRAM PRB. Unfortunately reading the LRAM PRB is
1513 * highly problematic, so requests (like PM requests) which
1514 * need to access the rfis use exclusive mode and then
1515 * access the copy made by the port interrupt code back in
1518 if (ap->ap_active & ~ap->ap_expired) {
1520 * There may be multiple ccb's already running,
1521 * if any are running and ap_run_flags sets
1522 * one of these flags then we know only one is
1525 * XXX Current AUTOSENSE code forces exclusivity
1526 * to simplify the code.
1528 if (ap->ap_run_flags &
1529 (ATA_F_EXCLUSIVE | ATA_F_AUTOSENSE)) {
1534 * If the ccb we want to run is exclusive and ccb's
1535 * are still active on the port, we can't queue it
1538 * XXX Current AUTOSENSE code forces exclusivity
1539 * to simplify the code.
1541 if (ccb->ccb_xa.flags &
1542 (ATA_F_EXCLUSIVE | ATA_F_AUTOSENSE)) {
/* commit: move the ccb from pending to active and account for it */
1547 TAILQ_REMOVE(&ap->ap_ccb_pending, ccb, ccb_entry);
1548 ccb->ccb_xa.state = ATA_S_ONCHIP;
1549 ap->ap_active |= 1 << ccb->ccb_slot;
1550 ap->ap_active_cnt++;
1551 ap->ap_run_flags = ccb->ccb_xa.flags;
1554 * We can't use the CMD_FIFO method because it requires us
1555 * building the PRB in the LRAM, and the LRAM is buggy. So
1556 * we use host memory for the PRB.
/* write the 64-bit host PRB bus address, low then high dword */
1558 sili_pwrite(ap, SILI_PREG_CMDACT(ccb->ccb_slot),
1559 (u_int32_t)ccb->ccb_prb_paddr);
1560 sili_pwrite(ap, SILI_PREG_CMDACT(ccb->ccb_slot) + 4,
1561 (u_int32_t)(ccb->ccb_prb_paddr >> 32));
1562 /* sili_pwrite(ap, SILI_PREG_CMD_FIFO, ccb->ccb_slot); */
1563 sili_start_timeout(ccb);
/*
 * Top-level interrupt handler for the controller.
 *
 * Reads and acks the global interrupt status, then dispatches each
 * interrupting port.  Ports whose lock can be taken non-blocking are
 * serviced inline (blockable==0); otherwise the port's interrupts are
 * redisabled and the port helper thread is signalled to deal with it.
 *
 * NOTE(review): extraction gaps — local declarations (gint, port) and
 * some braces are missing from this view.
 */
1568 sili_intr(void *arg)
1570 struct sili_softc *sc = arg;
1571 struct sili_port *ap;
1576 * Check if the master enable is up, and whether any interrupts are
1579 * Clear the ints we got.
1581 if ((sc->sc_flags & SILI_F_INT_GOOD) == 0)
1583 gint = sili_read(sc, SILI_REG_GINT);
/* 0xffffffff typically means the device is gone (bus read of all 1s) */
1584 if (gint == 0 || gint == 0xffffffff)
1586 sili_write(sc, SILI_REG_GINT, gint);
1589 * Process interrupts for each port in a non-blocking fashion.
1591 while (gint & SILI_REG_GINT_PORTMASK) {
1592 port = ffs(gint) - 1;
1593 ap = sc->sc_ports[port];
1595 if (sili_os_lock_port_nb(ap) == 0) {
1596 sili_port_intr(ap, 0);
1597 sili_os_unlock_port(ap);
/* couldn't get the lock: mask the port and punt to the helper thread */
1599 sili_port_interrupt_redisable(ap);
1600 sili_os_signal_port_thread(ap, AP_SIGF_PORTINT);
1603 gint &= ~(1 << port);
1608 * Core called from helper thread.
/*
 * Per-port helper-thread core (see comment above).  Runs the work the
 * interrupt handler could not do in non-blocking context: reaping
 * expired timeouts (AP_SIGF_TIMEOUT) and blockable port interrupt
 * processing (AP_SIGF_PORTINT), re-enabling the port interrupts that
 * sili_intr() masked.  All work is done under the port lock.
 */
1611 sili_port_thread_core(struct sili_port *ap, int mask)
1614 * Process any expired timedouts.
1616 sili_os_lock_port(ap);
1617 if (mask & AP_SIGF_TIMEOUT) {
1618 sili_check_active_timeouts(ap);
1622 * Process port interrupts which require a higher level of
1625 if (mask & AP_SIGF_PORTINT) {
/* blockable==1: full processing allowed in thread context */
1626 sili_port_intr(ap, 1);
1627 sili_port_interrupt_reenable(ap);
1628 sili_os_unlock_port(ap);
1630 sili_os_unlock_port(ap);
1635 * Core per-port interrupt handler.
1637 * If blockable is 0 we cannot call sili_os_sleep() at all and we can only
1638 * deal with normal command completions which do not require blocking.
/*
 * Core per-port interrupt handler (see comment above re: blockable).
 *
 * Processing order as visible here:
 *  1. Read/ack INT_STATUS; punt blockable work to the helper thread
 *     when blockable == 0 or the port is in a fatal state.
 *  2. CERROR: extract the failing PM target, expire its CCBs via
 *     sili_core_timeout(), then RESUME the port for other targets.
 *  3. SDB / SNTF device notification.
 *  4. PHYRDYCHG/DEVEXCHG hot-plug detection (sets `need`).
 *  5. Remaining fatal errors: report and fail the port.
 *  6. Normal CCB completion: ap_active bits whose SLOTST bit cleared.
 *  7. READY interrupt, reinit/reissue, then hot-plug cleanup.
 *
 * NOTE(review): extraction gaps — many local declarations, braces and
 * some statements are missing from this view; annotations cover only
 * the visible lines.
 */
1641 sili_port_intr(struct sili_port *ap, int blockable)
1643 struct sili_softc *sc = ap->ap_sc;
1646 struct sili_ccb *ccb = NULL;
1647 struct ata_port *ccb_at = NULL;
/* interrupt causes that require blockable (thread) context */
1653 const u_int32_t blockable_mask = SILI_PREG_IST_PHYRDYCHG |
1654 SILI_PREG_IST_DEVEXCHG |
1655 SILI_PREG_IST_CERROR |
1656 SILI_PREG_IST_DECODE |
1658 SILI_PREG_IST_HANDSHK;
/* causes which, if unhandled, are fatal to the port */
1659 const u_int32_t fatal_mask = SILI_PREG_IST_PHYRDYCHG |
1660 SILI_PREG_IST_DEVEXCHG |
1661 SILI_PREG_IST_DECODE |
1663 SILI_PREG_IST_HANDSHK;
1665 enum { NEED_NOTHING, NEED_HOTPLUG_INSERT,
1666 NEED_HOTPLUG_REMOVE } need = NEED_NOTHING;
1669 * NOTE: CCOMPLETE was automatically cleared when we read INT_STATUS.
1671 is = sili_pread(ap, SILI_PREG_INT_STATUS);
1672 is &= SILI_PREG_IST_MASK;
1673 if (is & SILI_PREG_IST_CCOMPLETE)
1674 sili_pwrite(ap, SILI_PREG_INT_STATUS, SILI_PREG_IST_CCOMPLETE);
1677 * If we can't block then we can't handle these here. Disable
1678 * the interrupts in question so we don't live-lock, the helper
1679 * thread will re-enable them.
1681 * If the port is in a completely failed state we do not want
1682 * to drop through to failed-command-processing if blockable is 0,
1683 * just let the thread deal with it all.
1685 * Otherwise we fall through and still handle DHRS and any commands
1686 * which completed normally. Even if we are errored we haven't
1687 * stopped the port yet so CI/SACT are still good.
1689 if (blockable == 0) {
1690 if (ap->ap_state == AP_S_FATAL_ERROR) {
1691 sili_port_interrupt_redisable(ap);
1692 sili_os_signal_port_thread(ap, AP_SIGF_PORTINT);
1693 /*is &= ~blockable_mask;*/
1696 if (is & blockable_mask) {
1697 sili_port_interrupt_redisable(ap);
1698 sili_os_signal_port_thread(ap, AP_SIGF_PORTINT);
1699 /*is &= ~blockable_mask;*/
1704 if (is & SILI_PREG_IST_CERROR) {
1706 * Command failed (blockable).
1708 * This stops command processing. We can extract the PM
1709 * target from the PMP field in SILI_PREG_CONTEXT. The
1710 * tag is not necessarily valid so don't use that.
1712 * We must then expire all CCB's for that target and resume
1713 * processing if any other targets have active commands.
1714 * Particular error codes can be recovered by reading the LOG
1717 * The expire handling code will do the rest, which is
1718 * basically to reset the port once the only active
1719 * commands remaining are all expired.
1725 target = (sili_pread(ap, SILI_PREG_CONTEXT) >>
1726 SILI_PREG_CONTEXT_PMPORT_SHIFT) &
1727 SILI_PREG_CONTEXT_PMPORT_MASK;
1728 sili_pwrite(ap, SILI_PREG_INT_STATUS, SILI_PREG_IST_CERROR);
1729 active = ap->ap_active & ~ap->ap_expired;
1730 error = sili_pread(ap, SILI_PREG_CERROR);
1731 kprintf("%s.%d target error %d active=%08x hactive=%08x "
1733 PORTNAME(ap), target, error,
1734 active, sili_pread(ap, SILI_PREG_SLOTST),
1735 sili_pread(ap, SILI_PREG_SERR), SILI_PFMT_SERR);
/* walk the active slots looking for CCBs aimed at the failing target */
1738 slot = ffs(active) - 1;
1739 ccb = &ap->ap_ccbs[slot];
1740 if ((ccb_at = ccb->ccb_xa.at) == NULL)
1741 ccb_at = &ap->ap_ata[0];
1742 if (target == ccb_at->at_target) {
/* NCQ device/SDB errors are recoverable via READ LOG EXT page 10h */
1743 if (ccb->ccb_xa.flags & ATA_F_NCQ &&
1744 (error == SILI_PREG_CERROR_DEVICE ||
1745 error == SILI_PREG_CERROR_SDBERROR)) {
1746 ccb_at->at_features |= ATA_PORT_F_READLOG;
1748 if (sili_core_timeout(ccb, 1) == 0)
1751 active &= ~(1 << slot);
1755 * Resume will be 0 if the timeout reinited and restarted
1756 * the port. Otherwise we resume the port to allow other
1757 * commands to complete.
1760 sili_pwrite(ap, SILI_PREG_CTL_SET, SILI_PREG_CTL_RESUME);
1764 * Device notification to us (non-blocking)
1766 * This is interrupt status SILI_PREG_IST_SDB
1768 * NOTE! On some parts notification bits can get set without
1769 * generating an interrupt. It is unclear whether this is
1770 * a bug in the PM (sending a DTOH device setbits with 'N' set
1771 * and 'I' not set), or a bug in the host controller.
1773 * It only seems to occur under load.
1775 if (sc->sc_flags & SILI_F_SSNTF) {
1779 data = sili_pread(ap, SILI_PREG_SNTF);
1780 if (is & SILI_PREG_IST_SDB) {
1781 sili_pwrite(ap, SILI_PREG_INT_STATUS,
1783 is &= ~SILI_PREG_IST_SDB;
1784 xstr = " (no SDBS!)";
1789 kprintf("%s: NOTIFY %08x%s\n",
1790 PORTNAME(ap), data, xstr);
/* ack the notification bits and let CAM rescan */
1791 sili_pwrite(ap, SILI_PREG_SNTF, data);
1792 sili_cam_changed(ap, NULL, -1);
1797 * Port change (hot-plug) (blockable).
1799 * A PCS interrupt will occur on hot-plug once communication is
1802 * A PRCS interrupt will occur on hot-unplug (and possibly also
1805 * XXX We can then check the CPS (Cold Presence State) bit, if
1806 * supported, to determine if a device is plugged in or not and do
1809 * WARNING: A PCS interrupt is cleared by clearing DIAG_X, and
1810 * can also occur if an unsolicited COMINIT is received.
1811 * If this occurs command processing is automatically
1812 * stopped (CR goes inactive) and the port must be stopped
1815 if (is & (SILI_PREG_IST_PHYRDYCHG | SILI_PREG_IST_DEVEXCHG)) {
1817 sili_pwrite(ap, SILI_PREG_SERR,
1818 (SILI_PREG_SERR_DIAG_N | SILI_PREG_SERR_DIAG_X));
1819 sili_pwrite(ap, SILI_PREG_INT_STATUS,
1820 is & (SILI_PREG_IST_PHYRDYCHG | SILI_PREG_IST_DEVEXCHG));
1822 is &= ~(SILI_PREG_IST_PHYRDYCHG | SILI_PREG_IST_DEVEXCHG);
1823 kprintf("%s: Port change\n", PORTNAME(ap));
/* classify the plug event from the SATA detect state */
1825 switch (sili_pread(ap, SILI_PREG_SSTS) & SILI_PREG_SSTS_DET) {
1826 case SILI_PREG_SSTS_DET_DEV:
1827 if (ap->ap_type == ATA_PORT_T_NONE &&
1828 ap->ap_probe == ATA_PROBE_FAILED) {
1829 need = NEED_HOTPLUG_INSERT;
1834 kprintf("%s: Device lost\n", PORTNAME(ap));
1835 if (ap->ap_type != ATA_PORT_T_NONE) {
1836 need = NEED_HOTPLUG_REMOVE;
1844 * Check for remaining errors - they are fatal. (blockable)
1846 if (is & fatal_mask) {
1849 sili_pwrite(ap, SILI_PREG_INT_STATUS, is & fatal_mask);
1851 serr = sili_pread(ap, SILI_PREG_SERR);
1852 kprintf("%s: Unrecoverable errors (IS: %b, SERR: %b), "
1853 "disabling port.\n",
1855 is, SILI_PFMT_INT_STATUS,
1856 serr, SILI_PFMT_SERR
1859 /* XXX try recovery first */
1864 * Fail all outstanding commands if we know the port won't recover.
1866 * We may have a ccb_at if the failed command is known and was
1867 * being sent to a device over a port multiplier (PM). In this
1868 * case if the port itself has not completely failed we fail just
1869 * the commands related to that target.
1871 if (ap->ap_state == AP_S_FATAL_ERROR &&
1872 (ap->ap_active & ~ap->ap_expired)) {
1873 kprintf("%s: Fatal port error, expiring %08x\n",
1874 PORTNAME(ap), ap->ap_active & ~ap->ap_expired);
1876 ap->ap_state = AP_S_FATAL_ERROR;
1879 * Error all the active slots. If running across a PM
1880 * try to error out just the slots related to the target.
1882 active = ap->ap_active & ~ap->ap_expired;
1885 slot = ffs(active) - 1;
1886 active &= ~(1 << slot);
1887 ccb = &ap->ap_ccbs[slot];
1888 sili_core_timeout(ccb, 1);
1893 * CCB completion (non blocking).
1895 * CCB completion is detected by noticing the slot bit in
1896 * the port slot status register has cleared while the bit
1897 * is still set in our ap_active variable.
1899 * When completing expired events we must remember to reinit
1900 * the port once everything is clear.
1902 * Due to a single-level recursion when reading the log page,
1903 * it is possible for the slot to already have been cleared
1904 * for some expired tags, do not include expired tags in
1907 active = ap->ap_active & ~sili_pread(ap, SILI_PREG_SLOTST);
1908 active &= ~ap->ap_expired;
1912 slot = ffs(active) - 1;
1913 ccb = &ap->ap_ccbs[slot];
1915 DPRINTF(SILI_D_INTR, "%s: slot %d is complete%s\n",
1916 PORTNAME(ap), slot, ccb->ccb_xa.state == ATA_S_ERROR ?
1919 active &= ~(1 << slot);
1922 * XXX sync POSTREAD for return data?
1924 ap->ap_active &= ~(1 << ccb->ccb_slot);
1925 --ap->ap_active_cnt;
1928 * Complete the ccb. If the ccb was marked expired it
1929 * may or may not have been cleared from the port,
1930 * make sure we mark it as having timed out.
1932 * In a normal completion if AUTOSENSE is set we copy
1933 * the PRB LRAM rfis back to the rfis in host-memory.
1935 * XXX Currently AUTOSENSE also forces exclusivity so we
1936 * can safely work around a hardware bug when reading
1939 if (ap->ap_expired & (1 << ccb->ccb_slot)) {
1940 ap->ap_expired &= ~(1 << ccb->ccb_slot);
1941 ccb->ccb_xa.state = ATA_S_TIMEOUT;
1943 ccb->ccb_xa.complete(&ccb->ccb_xa);
1945 if (ccb->ccb_xa.state == ATA_S_ONCHIP) {
1946 ccb->ccb_xa.state = ATA_S_COMPLETE;
1947 if (ccb->ccb_xa.flags & ATA_F_AUTOSENSE) {
/* copy the D2H rfis out of chip LRAM into the host-memory copy */
1948 memcpy(ccb->ccb_xa.rfis,
1949 &ccb->ccb_prb_lram->prb_d2h,
1950 sizeof(ccb->ccb_prb_lram->prb_d2h));
1951 if (ccb->ccb_xa.state == ATA_S_TIMEOUT)
1952 ccb->ccb_xa.state = ATA_S_ERROR;
1958 if (is & SILI_PREG_IST_READY) {
1959 is &= ~SILI_PREG_IST_READY;
1960 sili_pwrite(ap, SILI_PREG_INT_DISABLE, SILI_PREG_INT_READY);
1961 sili_pwrite(ap, SILI_PREG_INT_STATUS, SILI_PREG_IST_READY);
1965 * If we had expired commands and were waiting for
1966 * remaining commands to complete, and they have now
1967 * completed, we can reinit the port.
1969 * This will also clean out the expired commands.
1970 * The timeout code also calls sili_port_reinit() if
1971 * the only commands remaining after a timeout are all
1972 * now expired commands.
1974 * Otherwise just reissue.
1976 if (ap->ap_expired && ap->ap_active == ap->ap_expired) {
1978 sili_port_reinit(ap);
1980 sili_issue_pending_commands(ap, NULL);
1984 * Cleanup. Will not be set if non-blocking.
1987 case NEED_HOTPLUG_INSERT:
1989 * A hot-plug insertion event has occurred and all
1990 * outstanding commands have already been revoked.
1992 * Don't recurse if this occurs while we are
1993 * resetting the port.
1995 * Place the port in a continuous COMRESET state
1996 * until the INIT code gets to it.
1998 kprintf("%s: HOTPLUG - Device inserted\n",
2000 ap->ap_probe = ATA_PROBE_NEED_INIT;
2001 sili_cam_changed(ap, NULL, -1);
2003 case NEED_HOTPLUG_REMOVE:
2005 * A hot-plug removal event has occurred and all
2006 * outstanding commands have already been revoked.
2008 * Don't recurse if this occurs while we are
2009 * resetting the port.
2011 kprintf("%s: HOTPLUG - Device removed\n",
2013 sili_port_hardstop(ap);
2014 /* ap_probe set to failed */
2015 sili_cam_changed(ap, NULL, -1);
/*
 * Allocate a CCB from the port's free list under ap_ccb_lock.
 * The returned CCB is moved to ATA_S_SETUP with no target attached.
 *
 * NOTE(review): extraction gaps — the NULL/empty-list branch is
 * missing from this view; presumably an empty free list returns NULL
 * (TODO confirm).
 */
2023 sili_get_ccb(struct sili_port *ap)
2025 struct sili_ccb *ccb;
2027 lockmgr(&ap->ap_ccb_lock, LK_EXCLUSIVE);
2028 ccb = TAILQ_FIRST(&ap->ap_ccb_free);
/* a free CCB must have been properly returned via sili_put_ccb() */
2030 KKASSERT(ccb->ccb_xa.state == ATA_S_PUT);
2031 TAILQ_REMOVE(&ap->ap_ccb_free, ccb, ccb_entry);
2032 ccb->ccb_xa.state = ATA_S_SETUP;
2033 ccb->ccb_xa.at = NULL;
2035 lockmgr(&ap->ap_ccb_lock, LK_RELEASE);
/*
 * Return a CCB to the port's free list.
 *
 * Sanity-checks that the CCB is in a terminal state (COMPLETE,
 * TIMEOUT, or ERROR) before marking it ATA_S_PUT and re-queueing it
 * under ap_ccb_lock.
 */
2041 sili_put_ccb(struct sili_ccb *ccb)
2043 struct sili_port *ap = ccb->ccb_port;
/* complain loudly if a CCB is freed mid-flight */
2046 if (ccb->ccb_xa.state != ATA_S_COMPLETE &&
2047 ccb->ccb_xa.state != ATA_S_TIMEOUT &&
2048 ccb->ccb_xa.state != ATA_S_ERROR) {
2049 kprintf("%s: invalid ata_xfer state %02x in sili_put_ccb, "
2051 PORTNAME(ccb->ccb_port), ccb->ccb_xa.state,
2056 ccb->ccb_xa.state = ATA_S_PUT;
2057 lockmgr(&ap->ap_ccb_lock, LK_EXCLUSIVE);
2058 TAILQ_INSERT_TAIL(&ap->ap_ccb_free, ccb, ccb_entry);
2059 lockmgr(&ap->ap_ccb_lock, LK_RELEASE);
/*
 * Hand out the port's single pre-reserved error-recovery CCB.
 *
 * Asserts the port is quiescent (no commands issued) and that the
 * error CCB is not already in use, then returns it with cleared flags
 * and a no-op done handler.
 */
2063 sili_get_err_ccb(struct sili_port *ap)
2065 struct sili_ccb *err_ccb;
/* recovery may only start with the chip idle and the slot unclaimed */
2067 KKASSERT(sili_pread(ap, SILI_PREG_CI) == 0);
2068 KKASSERT((ap->ap_flags & AP_F_ERR_CCB_RESERVED) == 0);
2069 ap->ap_flags |= AP_F_ERR_CCB_RESERVED;
2072 KKASSERT(ap->ap_err_busy == 0);
2073 ap->ap_err_busy = 1;
2076 * Grab a CCB to use for error recovery. This should never fail, as
2077 * we ask atascsi to reserve one for us at init time.
2079 err_ccb = ap->ap_err_ccb;
2080 KKASSERT(err_ccb != NULL);
2081 err_ccb->ccb_xa.flags = 0;
2082 err_ccb->ccb_done = sili_empty_done;
/*
 * Release the error-recovery CCB obtained via sili_get_err_ccb(),
 * clearing the busy flag and the reservation bit.  Asserts the caller
 * is returning exactly the reserved CCB.
 */
2088 sili_put_err_ccb(struct sili_ccb *ccb)
2090 struct sili_port *ap = ccb->ccb_port;
2093 KKASSERT(ap->ap_err_busy);
2095 KKASSERT((ap->ap_flags & AP_F_ERR_CCB_RESERVED) != 0);
2097 KKASSERT(ccb == ap->ap_err_ccb);
2100 ap->ap_err_busy = 0;
2102 ap->ap_flags &= ~AP_F_ERR_CCB_RESERVED;
2106 * Read log page to get NCQ error.
2108 * Return 0 on success
/*
 * Issue READ LOG EXT page 10h to recover the failed-command details
 * after an NCQ error (see comment above; returns 0 on success).
 *
 * Uses the reserved error CCB with a polled, quick-timeout command;
 * on success extracts the failing tag from the log page and rebuilds
 * that CCB's D2H rfis from the logged register contents.
 *
 * NOTE(review): extraction gaps — some declarations, braces and the
 * final return path are missing from this view.
 */
2111 sili_port_read_ncq_error(struct sili_port *ap, int target)
2113 struct sili_ccb *ccb;
2114 struct ata_fis_h2d *fis;
2117 DPRINTF(SILI_D_VERBOSE, "%s: read log page\n", PORTNAME(ap));
2119 /* Prep error CCB for READ LOG EXT, page 10h, 1 sector. */
2120 ccb = sili_get_err_ccb(ap);
2121 ccb->ccb_done = sili_empty_done;
2122 ccb->ccb_xa.flags = ATA_F_NOWAIT | ATA_F_READ | ATA_F_POLL;
2123 ccb->ccb_xa.data = ap->ap_err_scratch;
2124 ccb->ccb_xa.datalen = 512;
2125 ccb->ccb_xa.complete = sili_dummy_done;
2126 ccb->ccb_xa.at = &ap->ap_ata[target];
/* build the READ LOG EXT host-to-device FIS by hand */
2127 fis = &ccb->ccb_prb->prb_h2d;
2128 bzero(fis, sizeof(*fis));
2130 fis->type = ATA_FIS_TYPE_H2D;
2131 fis->flags = ATA_H2D_FLAGS_CMD | target;
2132 fis->command = ATA_C_READ_LOG_EXT;
2133 fis->lba_low = 0x10; /* queued error log page (10h) */
2134 fis->sector_count = 1; /* number of sectors (1) */
2135 fis->sector_count_exp = 0;
2136 fis->lba_mid = 0; /* starting offset */
2137 fis->lba_mid_exp = 0;
2141 * NOTE: Must use sili_quick_timeout() because we hold the err_ccb
2143 if (sili_load_prb(ccb) != 0) {
2144 status = ATA_S_ERROR;
2146 ccb->ccb_xa.state = ATA_S_PENDING;
2147 status = sili_poll(ccb, 1000, sili_quick_timeout);
2151 * Just spew if it fails, there isn't much we can do at this point.
2153 if (status != ATA_S_COMPLETE) {
2154 kprintf("%s: log page read failed, slot %d was still active.\n",
2155 ATANAME(ap, ccb->ccb_xa.at), ccb->ccb_slot);
2158 /* Done with the error CCB now. */
2159 sili_unload_prb(ccb);
2160 sili_put_err_ccb(ccb);
2162 /* Extract failed register set and tags from the scratch space. */
2163 if (status == ATA_S_COMPLETE) {
2164 struct ata_log_page_10h *log;
2167 log = (struct ata_log_page_10h *)ap->ap_err_scratch;
2168 if (log->err_regs.type & ATA_LOG_10H_TYPE_NOTQUEUED) {
2170 * Not queued bit was set - wasn't an NCQ error?
2172 * XXX This bit seems to be set a lot even for NCQ
2177 * Copy back the log record as a D2H register FIS.
2179 err_slot = log->err_regs.type &
2180 ATA_LOG_10H_TYPE_TAG_MASK;
2181 ccb = &ap->ap_ccbs[err_slot];
/* only trust the logged tag if that slot is one we expired */
2182 if (ap->ap_expired & (1 << ccb->ccb_slot)) {
2183 kprintf("%s: read NCQ error page slot=%d\n",
2184 ATANAME(ap, ccb->ccb_xa.at), err_slot
2186 memcpy(&ccb->ccb_prb->prb_d2h, &log->err_regs,
2187 sizeof(struct ata_fis_d2h));
2188 ccb->ccb_prb->prb_d2h.type = ATA_FIS_TYPE_D2H;
2189 ccb->ccb_prb->prb_d2h.flags = 0;
2190 if (ccb->ccb_xa.state == ATA_S_TIMEOUT)
2191 ccb->ccb_xa.state = ATA_S_ERROR;
2193 kprintf("%s: read NCQ error page slot=%d, "
2194 "slot does not match any cmds\n",
2195 ATANAME(ccb->ccb_port, ccb->ccb_xa.at),
2204 * Allocate memory for various structures DMAd by hardware. The maximum
2205 * number of segments for these tags is 1 so the DMA memory will have a
2206 * single physical base address.
/*
 * Allocate and bus_dmamap_load a hardware-DMA memory chunk (see the
 * comment above: single segment, single physical base address saved
 * via sili_dmamem_saveseg).  Returns the wrapper structure, with a
 * failure path that unwinds map/tag state and frees the wrapper.
 *
 * NOTE(review): extraction gaps — error-checking branches and the
 * success return are missing from this view.
 */
2208 struct sili_dmamem *
2209 sili_dmamem_alloc(struct sili_softc *sc, bus_dma_tag_t tag)
2211 struct sili_dmamem *adm;
2214 adm = kmalloc(sizeof(*adm), M_DEVBUF, M_INTWAIT | M_ZERO);
2216 error = bus_dmamem_alloc(tag, (void **)&adm->adm_kva,
2217 BUS_DMA_ZERO, &adm->adm_map);
2220 error = bus_dmamap_load(tag, adm->adm_map,
2222 bus_dma_tag_getmaxsize(tag),
2223 sili_dmamem_saveseg, &adm->adm_busaddr,
/* failure: unwind the map and hand the wrapper back */
2228 bus_dmamap_destroy(tag, adm->adm_map);
2229 adm->adm_map = NULL;
2230 adm->adm_tag = NULL;
2231 adm->adm_kva = NULL;
2233 kfree(adm, M_DEVBUF);
2241 sili_dmamem_saveseg(void *info, bus_dma_segment_t *segs, int nsegs, int error)
2243 KKASSERT(error == 0);
2244 KKASSERT(nsegs == 1);
2245 *(bus_addr_t *)info = segs->ds_addr;
/*
 * Release a sili_dmamem chunk: unload and destroy its DMA map, clear
 * the bookkeeping fields, and free the wrapper structure.
 *
 * NOTE(review): extraction gaps — the guard around the unload/destroy
 * (presumably `if (adm->adm_map)`) is missing from this view.
 */
2250 sili_dmamem_free(struct sili_softc *sc, struct sili_dmamem *adm)
2253 bus_dmamap_unload(adm->adm_tag, adm->adm_map);
2254 bus_dmamap_destroy(adm->adm_tag, adm->adm_map);
/* clear stale references so reuse after free is detectable */
2255 adm->adm_map = NULL;
2256 adm->adm_tag = NULL;
2257 adm->adm_kva = NULL;
2259 kfree(adm, M_DEVBUF);
2263 sili_read(struct sili_softc *sc, bus_size_t r)
2265 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
2266 BUS_SPACE_BARRIER_READ);
2267 return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, r));
2271 sili_write(struct sili_softc *sc, bus_size_t r, u_int32_t v)
2273 bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
2274 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
2275 BUS_SPACE_BARRIER_WRITE);
2279 sili_pread(struct sili_port *ap, bus_size_t r)
2281 bus_space_barrier(ap->ap_sc->sc_iot, ap->ap_ioh, r, 4,
2282 BUS_SPACE_BARRIER_READ);
2283 return (bus_space_read_4(ap->ap_sc->sc_iot, ap->ap_ioh, r));
2287 sili_pwrite(struct sili_port *ap, bus_size_t r, u_int32_t v)
2289 bus_space_write_4(ap->ap_sc->sc_iot, ap->ap_ioh, r, v);
2290 bus_space_barrier(ap->ap_sc->sc_iot, ap->ap_ioh, r, 4,
2291 BUS_SPACE_BARRIER_WRITE);
2295 * Wait up to (timeout) milliseconds for the masked port register to
2298 * Timeout is in milliseconds.
/*
 * Wait for (port register & mask) == target (see comment above;
 * timeout in milliseconds).  Spins hard for up to 100us first, then
 * falls back to soft sleeps until the timeout is exhausted.
 *
 * NOTE(review): extraction gaps — the success/failure return values
 * are not visible in this view.
 */
2301 sili_pwait_eq(struct sili_port *ap, int timeout,
2302 bus_size_t r, u_int32_t mask, u_int32_t target)
2307 * Loop hard up to 100uS
2309 for (t = 0; t < 100; ++t) {
2310 if ((sili_pread(ap, r) & mask) == target)
2312 sili_os_hardsleep(1); /* us */
/* then poll slowly, charging the sleep time against the timeout */
2316 timeout -= sili_os_softsleep();
2317 if ((sili_pread(ap, r) & mask) == target)
2319 } while (timeout > 0);
/*
 * Wait for (global register & mask) != target: the inverse condition
 * of sili_pwait_eq(), against a controller-global register.  Spins
 * hard for up to 100us, then soft-sleeps.
 *
 * NOTE(review): in the visible slow loop the decrement is applied to
 * `t` rather than a timeout variable — extraction gaps make the loop
 * bound unclear here; confirm against the full source.
 */
2324 sili_wait_ne(struct sili_softc *sc, bus_size_t r, u_int32_t mask,
2330 * Loop hard up to 100uS
2332 for (t = 0; t < 100; ++t) {
2333 if ((sili_read(sc, r) & mask) != target)
2335 sili_os_hardsleep(1); /* us */
2339 * And one millisecond the slow way
2343 t -= sili_os_softsleep();
2344 if ((sili_read(sc, r) & mask) != target)
2353 * Acquire an ata transfer.
2355 * Pass a NULL at for direct-attached transfers, and a non-NULL at for
2356 * targets that go through the port multiplier.
/*
 * Acquire an ata transfer (see comment above: at == NULL for
 * direct-attached targets, non-NULL for devices behind a PM).
 *
 * Wraps sili_get_ccb() and pre-initializes the embedded xfer's FIS
 * as an H2D command frame.
 *
 * NOTE(review): extraction gaps — the NULL-ccb handling branch is
 * incomplete in this view.
 */
2359 sili_ata_get_xfer(struct sili_port *ap, struct ata_port *at)
2361 struct sili_ccb *ccb;
2363 ccb = sili_get_ccb(ap);
2365 DPRINTF(SILI_D_XFER, "%s: sili_ata_get_xfer: NULL ccb\n",
2370 DPRINTF(SILI_D_XFER, "%s: sili_ata_get_xfer got slot %d\n",
2371 PORTNAME(ap), ccb->ccb_slot);
2373 bzero(ccb->ccb_xa.fis, sizeof(*ccb->ccb_xa.fis));
2374 ccb->ccb_xa.at = at;
2375 ccb->ccb_xa.fis->type = ATA_FIS_TYPE_H2D;
2377 return (&ccb->ccb_xa);
/*
 * Return an ata transfer; the xfer is embedded in its CCB so this
 * recovers the CCB by cast and hands it back to the free list.
 *
 * NOTE(review): the trailing sili_put_ccb(ccb) call is missing from
 * this extraction view.
 */
2381 sili_ata_put_xfer(struct ata_xfer *xa)
2383 struct sili_ccb *ccb = (struct sili_ccb *)xa;
2385 DPRINTF(SILI_D_XFER, "sili_ata_put_xfer slot %d\n", ccb->ccb_slot);
/*
 * Submit a prepared ata transfer.
 *
 * Refuses work on a fatally-errored port, loads the PRB/DMA state,
 * marks the xfer pending, and either polls to completion (ATA_F_POLL)
 * or queues it with a timeout requested.  The error exit path forces
 * the xfer to ATA_S_ERROR and returns that state.
 *
 * NOTE(review): extraction gaps — some statements (e.g. the queued
 * submission call and error-path labels) are missing from this view.
 */
2391 sili_ata_cmd(struct ata_xfer *xa)
2393 struct sili_ccb *ccb = (struct sili_ccb *)xa;
2395 KKASSERT(xa->state == ATA_S_SETUP);
/* don't bother starting commands on a dead port */
2397 if (ccb->ccb_port->ap_state == AP_S_FATAL_ERROR)
2400 kprintf("%s: started std command %b ccb %d ccb_at %p %d\n",
2401 ATANAME(ccb->ccb_port, ccb->ccb_xa.at),
2402 sili_pread(ccb->ccb_port, SILI_PREG_CMD), SILI_PFMT_CMD,
2405 ccb->ccb_xa.at ? ccb->ccb_xa.at->at_target : -1);
2408 ccb->ccb_done = sili_ata_cmd_done;
2410 if (sili_load_prb(ccb) != 0)
2413 xa->state = ATA_S_PENDING;
/* polled commands complete synchronously with the standard timeout */
2415 if (xa->flags & ATA_F_POLL)
2416 return (sili_poll(ccb, xa->timeout, sili_ata_cmd_timeout));
2419 KKASSERT((xa->flags & ATA_F_TIMEOUT_EXPIRED) == 0);
2420 xa->flags |= ATA_F_TIMEOUT_DESIRED;
/* error exit path: fail the xfer */
2427 xa->state = ATA_S_ERROR;
2430 return (ATA_S_ERROR);
/*
 * ccb_done handler for standard commands.
 *
 * Stops the (possibly racing) timeout callout, clears the timeout
 * flags, unloads the DMA state, sanity-checks the terminal state,
 * and (visible below) invokes the xfer completion unless the command
 * timed out.
 *
 * NOTE(review): extraction gaps — the statements between the visible
 * lines (state fixups, the complete() call) are missing from this
 * view.
 */
2434 sili_ata_cmd_done(struct sili_ccb *ccb)
2436 struct ata_xfer *xa = &ccb->ccb_xa;
2439 * NOTE: callout does not lock port and may race us modifying
2440 * the flags, so make sure its stopped.
2442 if (xa->flags & ATA_F_TIMEOUT_RUNNING) {
2443 callout_stop(&ccb->ccb_timeout);
2444 xa->flags &= ~ATA_F_TIMEOUT_RUNNING;
2446 xa->flags &= ~(ATA_F_TIMEOUT_DESIRED | ATA_F_TIMEOUT_EXPIRED);
2448 KKASSERT(xa->state != ATA_S_ONCHIP);
2449 sili_unload_prb(ccb);
2452 else if (xa->state != ATA_S_ERROR && xa->state != ATA_S_TIMEOUT)
2453 kprintf("%s: invalid ata_xfer state %02x in sili_ata_cmd_done, "
2455 PORTNAME(ccb->ccb_port), xa->state, ccb->ccb_slot);
2457 if (xa->state != ATA_S_TIMEOUT)
2462 * Timeout from callout, MPSAFE - nothing can mess with the CCB's flags
2463 * while the callout is running.
2465 * We can't safely get the port lock here or delay, we could block
2466 * the callout thread.
2469 sili_ata_cmd_timeout_unserialized(void *arg)
2471 struct sili_ccb *ccb = arg;
2472 struct sili_port *ap = ccb->ccb_port;
2474 ccb->ccb_xa.flags &= ~ATA_F_TIMEOUT_RUNNING;
2475 ccb->ccb_xa.flags |= ATA_F_TIMEOUT_EXPIRED;
2476 sili_os_signal_port_thread(ap, AP_SIGF_TIMEOUT);
/*
 * Timeout entry point used by sili_poll() for standard commands;
 * simply runs the core timeout path with really_error == 0.
 */
static void
sili_ata_cmd_timeout(struct sili_ccb *ccb)
{
	sili_core_timeout(ccb, 0);
}
2486 * Timeout code, typically called when the port command processor is running.
2488 * Returns 0 if all timeout processing completed, non-zero if it is still
/*
 * Core timeout/error processing (see comment above: returns 0 when
 * processing completed, non-zero when deferred).
 *
 * Stops the callout, then handles the CCB by state: a still-PENDING
 * CCB is simply dequeued and failed; an ONCHIP CCB is marked expired,
 * and the port is reinitialized once no non-expired commands remain.
 *
 * NOTE(review): extraction gaps — return statements and some braces
 * between the visible lines are missing from this view.
 */
2493 sili_core_timeout(struct sili_ccb *ccb, int really_error)
2495 struct ata_xfer *xa = &ccb->ccb_xa;
2496 struct sili_port *ap = ccb->ccb_port;
2497 struct ata_port *at;
2499 at = ccb->ccb_xa.at;
2501 kprintf("%s: CMD %s state=%d slot=%d\n"
2506 (really_error ? "ERROR" : "TIMEOUT"),
2507 ccb->ccb_xa.state, ccb->ccb_slot,
2510 sili_pread(ap, SILI_PREG_SLOTST)
2514 * NOTE: Timeout will not be running if the command was polled.
2515 * If we got here at least one of these flags should be set.
2517 * However, it might be running if we are called from the
2518 * interrupt error handling code.
2520 KKASSERT(xa->flags & (ATA_F_POLL | ATA_F_TIMEOUT_DESIRED |
2521 ATA_F_TIMEOUT_RUNNING));
2522 if (xa->flags & ATA_F_TIMEOUT_RUNNING) {
2523 callout_stop(&ccb->ccb_timeout);
2524 xa->flags &= ~ATA_F_TIMEOUT_RUNNING;
2526 xa->flags &= ~ATA_F_TIMEOUT_EXPIRED;
/* never reached the chip: just dequeue, fail, and kick the queue */
2528 if (ccb->ccb_xa.state == ATA_S_PENDING) {
2529 TAILQ_REMOVE(&ap->ap_ccb_pending, ccb, ccb_entry);
2530 ccb->ccb_xa.state = ATA_S_TIMEOUT;
2533 sili_issue_pending_commands(ap, NULL);
2536 if (ccb->ccb_xa.state != ATA_S_ONCHIP) {
2537 kprintf("%s: Unexpected state during timeout: %d\n",
2538 ATANAME(ap, at), ccb->ccb_xa.state);
2543 * We can't process timeouts while other commands are running.
2545 ap->ap_expired |= 1 << ccb->ccb_slot;
2547 if (ap->ap_active != ap->ap_expired) {
2548 kprintf("%s: Deferred timeout until its safe, slot %d\n",
2549 ATANAME(ap, at), ccb->ccb_slot);
2554 * We have to issue a Port reinit. We don't read an error log
2555 * page for timeouts. Reiniting the port will clear all pending
2558 sili_port_reinit(ap);
2563 * Used by the softreset, pm_port_probe, and read_ncq_error only, in very
2564 * specialized, controlled circumstances.
/*
 * Minimal timeout handler (see comment above: softreset,
 * pm_port_probe and read_ncq_error only).  PENDING CCBs are dequeued
 * and failed; an ONCHIP CCB must be the only non-expired active
 * command, and processing it forces a port reinit.  Any other state
 * is a panic.
 *
 * NOTE(review): extraction gaps — the switch case labels are missing
 * from this view; the groupings below are inferred from the visible
 * statements (TODO confirm).
 */
2567 sili_quick_timeout(struct sili_ccb *ccb)
2569 struct sili_port *ap = ccb->ccb_port;
2571 switch (ccb->ccb_xa.state) {
/* presumably ATA_S_PENDING: never issued, just dequeue and fail */
2573 TAILQ_REMOVE(&ap->ap_ccb_pending, ccb, ccb_entry);
2574 ccb->ccb_xa.state = ATA_S_TIMEOUT;
/* presumably ATA_S_ONCHIP: must be the sole non-expired command */
2577 KKASSERT((ap->ap_active & ~ap->ap_expired) ==
2578 (1 << ccb->ccb_slot));
2579 ccb->ccb_xa.state = ATA_S_TIMEOUT;
2580 ap->ap_active &= ~(1 << ccb->ccb_slot);
2581 KKASSERT(ap->ap_active_cnt > 0);
2582 --ap->ap_active_cnt;
2583 sili_port_reinit(ap);
2586 panic("%s: sili_quick_timeout: ccb in bad state %d",
2587 ATANAME(ap, ccb->ccb_xa.at), ccb->ccb_xa.state);
/* Intentionally empty xa->complete handler for internal commands. */
2592 sili_dummy_done(struct ata_xfer *xa)
2597 sili_empty_done(struct sili_ccb *ccb)