2 * Copyright (c) 2003 Hidetoshi Shimokawa
3 * Copyright (c) 1998-2002 Katsushi Kobayashi and Hidetoshi Shimokawa
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. All advertising materials mentioning features or use of this software
15 * must display the acknowledgement as bellow:
17 * This product includes software developed by K. Kobayashi and H. Shimokawa
19 * 4. The name of the author may not be used to endorse or promote products
20 * derived from this software without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
24 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
25 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
26 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
27 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
28 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
30 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
31 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 * POSSIBILITY OF SUCH DAMAGE.
34 * $FreeBSD: src/sys/dev/firewire/fwohci.c,v 1.1.2.19 2003/05/01 06:24:37 simokawa Exp $
45 #include <sys/param.h>
47 #include <sys/systm.h>
48 #include <sys/types.h>
51 #include <sys/socket.h>
52 #include <sys/socketvar.h>
53 #include <sys/signalvar.h>
54 #include <sys/malloc.h>
55 #include <sys/sockio.h>
57 #include <sys/kernel.h>
59 #include <sys/endian.h>
61 #include <machine/bus.h>
62 #include <machine/resource.h>
65 #include <machine/cpufunc.h> /* for rdtsc proto for clock.h below */
66 #include <machine/clock.h>
67 #include <pci/pcivar.h>
68 #include <pci/pcireg.h>
70 #include <dev/firewire/firewire.h>
71 #include <dev/firewire/firewirereg.h>
72 #include <dev/firewire/fwdma.h>
73 #include <dev/firewire/fwohcireg.h>
74 #include <dev/firewire/fwohcivar.h>
75 #include <dev/firewire/firewire_phy.h>
77 #include <dev/firewire/iec68113.h>
/*
 * Mnemonic names for OHCI descriptor command codes (4-bit field);
 * only the first eight codes are defined by the spec, the remaining
 * slots stay zero-initialized (empty strings).
 * Fix: stripped the stray line-number prefixes that were pasted into
 * this listing and broke compilation.
 */
static char dbcode[16][0x10] = {
	"OUTM", "OUTL", "INPM", "INPL",
	"STOR", "LOAD", "NOP ", "STOP",
};
/*
 * Mnemonic names for the 3-bit OHCI descriptor key field.
 * Fix: stripped stray line-number prefixes from the listing.
 */
static char dbkey[8][0x10] = {
	"ST0", "ST1", "ST2", "ST3",
	"UNDEF", "REG", "SYS", "DEV",
};
86 static char dbcond[4][0x10]={"NEV","C=1", "C=0", "ALL"};
/*
 * Human-readable names for OHCI event/ack status codes (5-bit field
 * reported in the descriptor xferStatus), indexed by event code.
 * Shared (non-static) — referenced by other files in the driver.
 * Fix: stripped stray line-number prefixes from the listing.
 */
char fwohcicode[32][0x20] = {
	"No stat", "Undef", "long", "miss Ack err",
	"underrun", "overrun", "desc err", "data read err",
	"data write err", "bus reset", "timeout", "tcode err",
	"Undef", "Undef", "unknown event", "flushed",
	"Undef", "ack complete", "ack pend", "Undef",
	"ack busy_X", "ack busy_A", "ack busy_B", "Undef",
	"Undef", "Undef", "Undef", "ack tardy",
	"Undef", "ack data_err", "ack type_err", "",
};
98 extern char linkspeed[MAX_SPEED+1][0x10];
99 u_int32_t tagbit[4] = { 1 << 28, 1 << 29, 1 << 30, 1 << 31};
/*
 * Per-tcode transmit header information: header length in bytes plus
 * request/response and block-transfer flags, indexed by the IEEE 1394
 * transaction code.
 * NOTE(review): this listing is missing the table entries for tcodes
 * 3, c, d and f and the closing "};" — the array is unterminated as
 * shown; restore the dropped lines before building.
 */
101 static struct tcode_info tinfo[] = {
102 /* hdr_len block flag*/
103 /* 0 WREQQ */ {16, FWTI_REQ | FWTI_TLABEL},
104 /* 1 WREQB */ {16, FWTI_REQ | FWTI_TLABEL | FWTI_BLOCK_ASY},
105 /* 2 WRES */ {12, FWTI_RES},
107 /* 4 RREQQ */ {12, FWTI_REQ | FWTI_TLABEL},
108 /* 5 RREQB */ {16, FWTI_REQ | FWTI_TLABEL},
109 /* 6 RRESQ */ {16, FWTI_RES},
110 /* 7 RRESB */ {16, FWTI_RES | FWTI_BLOCK_ASY},
111 /* 8 CYCS */ { 0, 0},
112 /* 9 LREQ */ {16, FWTI_REQ | FWTI_TLABEL | FWTI_BLOCK_ASY},
113 /* a STREAM */ { 4, FWTI_REQ | FWTI_BLOCK_STR},
114 /* b LRES */ {16, FWTI_RES | FWTI_BLOCK_ASY},
117 /* e PHY */ {12, FWTI_REQ},
/*
 * Masks for the signature (upper 16) bits of OHCI register accesses.
 * Fix: stripped stray line-number prefixes from the listing.
 */
#define OHCI_WRITE_SIGMASK	0xffff0000
#define OHCI_READ_SIGMASK	0xffff0000
/*
 * Register access helpers: 32-bit bus_space read/write through the
 * softc's bus tag/handle.
 * Fix: stripped stray line-number prefixes from the listing.
 */
#define OWRITE(sc, r, x) bus_space_write_4((sc)->bst, (sc)->bsh, (r), (x))
#define OREAD(sc, r) bus_space_read_4((sc)->bst, (sc)->bsh, (r))
127 static void fwohci_ibr __P((struct firewire_comm *));
128 static void fwohci_db_init __P((struct fwohci_softc *, struct fwohci_dbch *));
129 static void fwohci_db_free __P((struct fwohci_dbch *));
130 static void fwohci_arcv __P((struct fwohci_softc *, struct fwohci_dbch *, int));
131 static void fwohci_txd __P((struct fwohci_softc *, struct fwohci_dbch *));
132 static void fwohci_start_atq __P((struct firewire_comm *));
133 static void fwohci_start_ats __P((struct firewire_comm *));
134 static void fwohci_start __P((struct fwohci_softc *, struct fwohci_dbch *));
135 static u_int32_t fwphy_wrdata __P(( struct fwohci_softc *, u_int32_t, u_int32_t));
136 static u_int32_t fwphy_rddata __P(( struct fwohci_softc *, u_int32_t));
137 static int fwohci_rx_enable __P((struct fwohci_softc *, struct fwohci_dbch *));
138 static int fwohci_tx_enable __P((struct fwohci_softc *, struct fwohci_dbch *));
139 static int fwohci_irx_enable __P((struct firewire_comm *, int));
140 static int fwohci_irx_disable __P((struct firewire_comm *, int));
141 #if BYTE_ORDER == BIG_ENDIAN
142 static void fwohci_irx_post __P((struct firewire_comm *, u_int32_t *));
144 static int fwohci_itxbuf_enable __P((struct firewire_comm *, int));
145 static int fwohci_itx_disable __P((struct firewire_comm *, int));
146 static void fwohci_timeout __P((void *));
147 static void fwohci_poll __P((struct firewire_comm *, int, int));
148 static void fwohci_set_intr __P((struct firewire_comm *, int));
150 static int fwohci_add_rx_buf __P((struct fwohci_dbch *, struct fwohcidb_tr *, int, struct fwdma_alloc *));
151 static int fwohci_add_tx_buf __P((struct fwohci_dbch *, struct fwohcidb_tr *, int));
152 static void dump_db __P((struct fwohci_softc *, u_int32_t));
153 static void print_db __P((struct fwohcidb_tr *, volatile struct fwohcidb *, u_int32_t , u_int32_t));
154 static void dump_dma __P((struct fwohci_softc *, u_int32_t));
155 static u_int32_t fwohci_cyctimer __P((struct firewire_comm *));
156 static void fwohci_rbuf_update __P((struct fwohci_softc *, int));
157 static void fwohci_tbuf_update __P((struct fwohci_softc *, int));
158 void fwohci_txbufdb __P((struct fwohci_softc *, int , struct fw_bulkxfer *));
160 static void fwohci_complete(void *, int);
/*
 * Memory allocated for DMA programs.
 * Fix: stripped stray line-number prefixes and restored the comment
 * delimiters that were dropped from this listing.
 */
#define DMA_PROG_ALLOC		(8 * PAGE_SIZE)

/* #define NDB 1024 */
#define NDB FWMAXQUEUE
#define NDVDB (DVBUF * NDB)
/*
 * OHCI register offsets and BusOptions capability bits.
 * Fixes: stripped stray line-number prefixes; use unsigned shift
 * literals ("1 << 31" is undefined behavior for signed int); wrap the
 * OHCI_BUSFNC expansion in parentheses so it is safe inside larger
 * expressions.
 */
#define	OHCI_VERSION		0x00
#define	OHCI_ATRETRY		0x08
#define	OHCI_CROMHDR		0x18
#define	OHCI_BUS_OPT		0x20
#define	OHCI_BUSIRMC		(1U << 31)
#define	OHCI_BUSCMC		(1U << 30)
#define	OHCI_BUSISC		(1U << 29)
#define	OHCI_BUSBMC		(1U << 28)
#define	OHCI_BUSPMC		(1U << 27)
#define	OHCI_BUSFNC		(OHCI_BUSIRMC | OHCI_BUSCMC | OHCI_BUSISC | \
				 OHCI_BUSBMC | OHCI_BUSPMC)
/*
 * OHCI register map offsets (1394 OHCI spec, section 5) and the
 * generic per-context DMA register layout: CommandPtr at +0xc and
 * ContextMatch at +0x10 relative to ContextControlSet.
 * Fixes: stripped stray line-number prefixes; unsigned literal for
 * OHCI_SID_ERR (bit 31 — "1 << 31" is UB on signed int); parenthesize
 * the "off" argument in the OHCI_DMA* helper macros.
 */
#define	OHCI_EUID_HI		0x24
#define	OHCI_EUID_LO		0x28

#define	OHCI_CROMPTR		0x34
#define	OHCI_HCCCTL		0x50
#define	OHCI_HCCCTLCLR		0x54
#define	OHCI_AREQHI		0x100
#define	OHCI_AREQHICLR		0x104
#define	OHCI_AREQLO		0x108
#define	OHCI_AREQLOCLR		0x10c
#define	OHCI_PREQHI		0x110
#define	OHCI_PREQHICLR		0x114
#define	OHCI_PREQLO		0x118
#define	OHCI_PREQLOCLR		0x11c
#define	OHCI_PREQUPPER		0x120

#define	OHCI_SID_BUF		0x64
#define	OHCI_SID_CNT		0x68
#define	OHCI_SID_ERR		(1U << 31)
#define	OHCI_SID_CNT_MASK	0xffc

#define	OHCI_IT_STAT		0x90
#define	OHCI_IT_STATCLR		0x94
#define	OHCI_IT_MASK		0x98
#define	OHCI_IT_MASKCLR		0x9c

#define	OHCI_IR_STAT		0xa0
#define	OHCI_IR_STATCLR		0xa4
#define	OHCI_IR_MASK		0xa8
#define	OHCI_IR_MASKCLR		0xac

#define	OHCI_LNKCTL		0xe0
#define	OHCI_LNKCTLCLR		0xe4

#define	OHCI_PHYACCESS		0xec
#define	OHCI_CYCLETIMER		0xf0

#define	OHCI_DMACTL(off)	(off)
#define	OHCI_DMACTLCLR(off)	((off) + 4)
#define	OHCI_DMACMD(off)	((off) + 0xc)
#define	OHCI_DMAMATCH(off)	((off) + 0x10)
/*
 * Per-context register bases: async TX request/response (ATQ/ATS),
 * async RX request/response (ARQ/ARS), and the isochronous transmit
 * (0x200 + 0x10*n) and receive (0x400 + 0x20*n) context arrays.
 * Fix: stripped stray line-number prefixes from the listing.
 */
#define	OHCI_ATQOFF		0x180
#define	OHCI_ATQCTL		OHCI_ATQOFF
#define	OHCI_ATQCTLCLR		(OHCI_ATQOFF + 4)
#define	OHCI_ATQCMD		(OHCI_ATQOFF + 0xc)
#define	OHCI_ATQMATCH		(OHCI_ATQOFF + 0x10)

#define	OHCI_ATSOFF		0x1a0
#define	OHCI_ATSCTL		OHCI_ATSOFF
#define	OHCI_ATSCTLCLR		(OHCI_ATSOFF + 4)
#define	OHCI_ATSCMD		(OHCI_ATSOFF + 0xc)
#define	OHCI_ATSMATCH		(OHCI_ATSOFF + 0x10)

#define	OHCI_ARQOFF		0x1c0
#define	OHCI_ARQCTL		OHCI_ARQOFF
#define	OHCI_ARQCTLCLR		(OHCI_ARQOFF + 4)
#define	OHCI_ARQCMD		(OHCI_ARQOFF + 0xc)
#define	OHCI_ARQMATCH		(OHCI_ARQOFF + 0x10)

#define	OHCI_ARSOFF		0x1e0
#define	OHCI_ARSCTL		OHCI_ARSOFF
#define	OHCI_ARSCTLCLR		(OHCI_ARSOFF + 4)
#define	OHCI_ARSCMD		(OHCI_ARSOFF + 0xc)
#define	OHCI_ARSMATCH		(OHCI_ARSOFF + 0x10)

#define	OHCI_ITOFF(CH)		(0x200 + 0x10 * (CH))
#define	OHCI_ITCTL(CH)		(OHCI_ITOFF(CH))
#define	OHCI_ITCTLCLR(CH)	(OHCI_ITOFF(CH) + 4)
#define	OHCI_ITCMD(CH)		(OHCI_ITOFF(CH) + 0xc)

#define	OHCI_IROFF(CH)		(0x400 + 0x20 * (CH))
#define	OHCI_IRCTL(CH)		(OHCI_IROFF(CH))
#define	OHCI_IRCTLCLR(CH)	(OHCI_IROFF(CH) + 4)
#define	OHCI_IRCMD(CH)		(OHCI_IROFF(CH) + 0xc)
#define	OHCI_IRMATCH(CH)	(OHCI_IROFF(CH) + 0x10)
261 d_ioctl_t fwohci_ioctl;
264 * Communication with PHY device
/*
 * fwphy_wrdata: write "data" to PHY register "addr" through the OHCI
 * PhyControl register, then return the value read back from the same
 * register.
 * NOTE(review): this span is missing lines from the original listing
 * (return-type line, braces, local declaration of "fun", and the delay
 * between write and read-back); restore them before building.
 */
267 fwphy_wrdata( struct fwohci_softc *sc, u_int32_t addr, u_int32_t data)
274 fun = (PHYDEV_WRCMD | (addr << PHYDEV_REGADDR) | (data << PHYDEV_WRDATA));
275 OWRITE(sc, OHCI_PHYACCESS, fun);
278 return(fwphy_rddata( sc, addr));
/*
 * fwohci_set_bus_manager: try to become 1394 bus manager via the
 * OHCI CSR compare-swap registers (BUS_MANAGER_ID), polling until the
 * controller signals completion, then report old -> new manager id.
 * NOTE(review): missing lines in this listing (return type, braces,
 * declarations of "bm"/"i", and the tail of the function).
 */
282 fwohci_set_bus_manager(struct firewire_comm *fc, u_int node)
284 struct fwohci_softc *sc = (struct fwohci_softc *)fc;
288 #define OHCI_CSR_DATA 0x0c
289 #define OHCI_CSR_COMP 0x10
290 #define OHCI_CSR_CONT 0x14
291 #define OHCI_BUS_MANAGER_ID 0
293 OWRITE(sc, OHCI_CSR_DATA, node);
294 OWRITE(sc, OHCI_CSR_COMP, 0x3f);
295 OWRITE(sc, OHCI_CSR_CONT, OHCI_BUS_MANAGER_ID);
/* NOTE(review): "(1<<31)" below is a signed shift into the sign bit (UB);
 * should be (1U << 31). Left untouched here — code-identical doc pass. */
296 for (i = 0; !(OREAD(sc, OHCI_CSR_CONT) & (1<<31)) && (i < 1000); i++)
298 bm = OREAD(sc, OHCI_CSR_DATA);
299 if((bm & 0x3f) == 0x3f)
302 device_printf(sc->fc.dev,
303 "fw_set_bus_manager: %d->%d (loop=%d)\n", bm, node, i);
/*
 * fwphy_rddata: read PHY register "addr" via the OHCI PhyControl
 * register.  Issues a read command, polls up to MAX_RETRY times for
 * completion, then validates that SCLK is running (no REG_FAIL
 * interrupt) and that the returned register address matches; retries
 * the whole sequence on failure.  Returns the 8-bit register value.
 * NOTE(review): missing lines in this listing (return type, braces,
 * local declarations, DELAY() calls, and the retry "goto" targets).
 */
309 fwphy_rddata(struct fwohci_softc *sc, u_int addr)
315 #define MAX_RETRY 100
317 OWRITE(sc, FWOHCI_INTSTATCLR, OHCI_INT_REG_FAIL);
318 fun = PHYDEV_RDCMD | (addr << PHYDEV_REGADDR);
319 OWRITE(sc, OHCI_PHYACCESS, fun);
320 for ( i = 0 ; i < MAX_RETRY ; i ++ ){
321 fun = OREAD(sc, OHCI_PHYACCESS);
322 if ((fun & PHYDEV_RDCMD) == 0 && (fun & PHYDEV_RDDONE) != 0)
328 device_printf(sc->fc.dev, "phy read failed(1).\n");
329 if (++retry < MAX_RETRY) {
334 /* Make sure that SCLK is started */
335 stat = OREAD(sc, FWOHCI_INTSTAT);
336 if ((stat & OHCI_INT_REG_FAIL) != 0 ||
337 ((fun >> PHYDEV_REGADDR) & 0xf) != addr) {
339 device_printf(sc->fc.dev, "phy read failed(2).\n");
340 if (++retry < MAX_RETRY) {
345 if (bootverbose || retry >= MAX_RETRY)
346 device_printf(sc->fc.dev, 
347 "fwphy_rddata: loop=%d, retry=%d\n", i, retry);
349 return((fun >> PHYDEV_RDDATA )& 0xff);
351 /* Device specific ioctl. */
/*
 * fwohci_ioctl: debug ioctls — write/read an OHCI register (bounded by
 * OHCI_MAX_REG) or dump a DMA context's descriptors.
 * NOTE(review): the switch(cmd) skeleton, case labels, error returns
 * and braces were dropped from this listing; the remaining lines are
 * the bodies of the individual cases.
 */
353 fwohci_ioctl (dev_t dev, u_long cmd, caddr_t data, int flag, fw_proc *td)
355 struct firewire_softc *sc;
356 struct fwohci_softc *fc;
357 int unit = DEV2UNIT(dev);
359 struct fw_reg_req_t *reg = (struct fw_reg_req_t *) data;
360 u_int32_t *dmach = (u_int32_t *) data;
362 sc = devclass_get_softc(firewire_devclass, unit);
366 fc = (struct fwohci_softc *)sc->fc;
373 #define OHCI_MAX_REG 0x800
/* write then read back the register */
374 if(reg->addr <= OHCI_MAX_REG){
375 OWRITE(fc, reg->addr, reg->data);
376 reg->data = OREAD(fc, reg->addr);
/* read-only variant */
382 if(reg->addr <= OHCI_MAX_REG){
383 reg->data = OREAD(fc, reg->addr);
388 /* Read DMA descriptors for debug */
390 if(*dmach <= OHCI_MAX_DMA_CH ){
391 dump_dma(fc, *dmach);
/*
 * fwohci_probe_phy: bring up SCLK (LPS), read the PHY registers to
 * determine 1394 vs 1394a compliance, port count and top speed, and
 * enable the 1394a enhancements when available.
 * NOTE(review): missing lines in this listing (return type, braces,
 * declarations of reg/reg2/e1394a, #if/#endif pairs and else arms).
 */
404 fwohci_probe_phy(struct fwohci_softc *sc, device_t dev)
409 * probe PHY parameters
410 * 0. to prove PHY version, whether compliance of 1394a.
411 * 1. to probe maximum speed supported by the PHY and
412 * number of port supported by core-logic.
413 * It is not actually available port on your PC .
415 OWRITE(sc, OHCI_HCCCTL, OHCI_HCC_LPS);
416 reg = fwphy_rddata(sc, FW_PHY_SPD_REG);
418 if((reg >> 5) != 7 ){
419 sc->fc.mode &= ~FWPHYASYST;
420 sc->fc.nport = reg & FW_PHY_NP;
/* NOTE(review): ">>" binds tighter than "&", so this computes
 * reg & (FW_PHY_SPD >> 6); the intent is presumably
 * (reg & FW_PHY_SPD) >> 6 — confirm against fwohcireg.h. */
421 sc->fc.speed = reg & FW_PHY_SPD >> 6;
422 if (sc->fc.speed > MAX_SPEED) {
423 device_printf(dev, "invalid speed %d (fixed to %d).\n",
424 sc->fc.speed, MAX_SPEED);
425 sc->fc.speed = MAX_SPEED;
428 "Phy 1394 only %s, %d ports.\n",
429 linkspeed[sc->fc.speed], sc->fc.nport);
/* 1394a-capable PHY: extended speed register */
431 reg2 = fwphy_rddata(sc, FW_PHY_ESPD_REG);
432 sc->fc.mode |= FWPHYASYST;
433 sc->fc.nport = reg & FW_PHY_NP;
434 sc->fc.speed = (reg2 & FW_PHY_ESPD) >> 5;
435 if (sc->fc.speed > MAX_SPEED) {
436 device_printf(dev, "invalid speed %d (fixed to %d).\n",
437 sc->fc.speed, MAX_SPEED);
438 sc->fc.speed = MAX_SPEED;
441 "Phy 1394a available %s, %d ports.\n",
442 linkspeed[sc->fc.speed], sc->fc.nport);
444 /* check programPhyEnable */
445 reg2 = fwphy_rddata(sc, 5);
447 if (e1394a && (OREAD(sc, OHCI_HCCCTL) & OHCI_HCC_PRPHY)) {
448 #else /* XXX force to enable 1394a */
453 "Enable 1394a Enhancements\n");
456 /* set aPhyEnhanceEnable */
457 OWRITE(sc, OHCI_HCCCTL, OHCI_HCC_PHYEN);
458 OWRITE(sc, OHCI_HCCCTLCLR, OHCI_HCC_PRPHY);
463 reg2 = fwphy_wrdata(sc, 5, reg2);
/* re-read after enabling enhancements */
466 reg = fwphy_rddata(sc, FW_PHY_SPD_REG);
467 if((reg >> 5) == 7 ){
468 reg = fwphy_rddata(sc, 4);
470 fwphy_wrdata(sc, 4, reg);
471 reg = fwphy_rddata(sc, 4);
/*
 * fwohci_reset: full controller (re)initialization — mask interrupts,
 * stop every DMA context, soft-reset the chip, probe the PHY, program
 * BusOptions/max_rec, load config ROM and SID buffer pointers, enable
 * the link, restart the async RX/TX contexts and re-enable interrupts.
 * NOTE(review): missing lines in this listing (braces, declarations of
 * reg/reg2, delay loops, and descriptor re-init inside the two atrq/
 * atrs loops).
 */
478 fwohci_reset(struct fwohci_softc *sc, device_t dev)
480 int i, max_rec, speed;
482 struct fwohcidb_tr *db_tr;
484 /* Disable interrupt */
485 OWRITE(sc, FWOHCI_INTMASKCLR, ~0);
487 /* Now stopping all DMA channel */
488 OWRITE(sc, OHCI_ARQCTLCLR, OHCI_CNTL_DMA_RUN);
489 OWRITE(sc, OHCI_ARSCTLCLR, OHCI_CNTL_DMA_RUN);
490 OWRITE(sc, OHCI_ATQCTLCLR, OHCI_CNTL_DMA_RUN);
491 OWRITE(sc, OHCI_ATSCTLCLR, OHCI_CNTL_DMA_RUN);
493 OWRITE(sc, OHCI_IR_MASKCLR, ~0);
494 for( i = 0 ; i < sc->fc.nisodma ; i ++ ){
495 OWRITE(sc, OHCI_IRCTLCLR(i), OHCI_CNTL_DMA_RUN);
496 OWRITE(sc, OHCI_ITCTLCLR(i), OHCI_CNTL_DMA_RUN);
499 /* FLUSH FIFO and reset Transmitter/Reciever */
500 OWRITE(sc, OHCI_HCCCTL, OHCI_HCC_RESET);
502 device_printf(dev, "resetting OHCI...");
504 while(OREAD(sc, OHCI_HCCCTL) & OHCI_HCC_RESET) {
505 if (i++ > 100) break;
509 printf("done (loop=%d)\n", i);
512 fwohci_probe_phy(sc, dev);
515 reg = OREAD(sc, OHCI_BUS_OPT);
516 reg2 = reg | OHCI_BUSFNC;
517 max_rec = (reg & 0x0000f000) >> 12;
518 speed = (reg & 0x00000007);
519 device_printf(dev, "Link %s, max_rec %d bytes.\n",
520 linkspeed[speed], MAXREC(max_rec));
521 /* XXX fix max_rec */
522 sc->fc.maxrec = sc->fc.speed + 8;
523 if (max_rec != sc->fc.maxrec) {
524 reg2 = (reg2 & 0xffff0fff) | (sc->fc.maxrec << 12);
525 device_printf(dev, "max_rec %d -> %d\n",
526 MAXREC(max_rec), MAXREC(sc->fc.maxrec));
529 device_printf(dev, "BUS_OPT 0x%x -> 0x%x\n", reg, reg2);
530 OWRITE(sc, OHCI_BUS_OPT, reg2);
532 /* Initialize registers */
533 OWRITE(sc, OHCI_CROMHDR, sc->fc.config_rom[0]);
534 OWRITE(sc, OHCI_CROMPTR, sc->crom_dma.bus_addr);
535 OWRITE(sc, OHCI_HCCCTLCLR, OHCI_HCC_BIGEND);
536 OWRITE(sc, OHCI_HCCCTL, OHCI_HCC_POSTWR);
537 OWRITE(sc, OHCI_SID_BUF, sc->sid_dma.bus_addr);
538 OWRITE(sc, OHCI_LNKCTL, OHCI_CNTL_SID);
539 fw_busreset(&sc->fc);
542 OWRITE(sc, OHCI_HCCCTL, OHCI_HCC_LINKEN);
544 /* Force to start async RX DMA */
545 sc->arrq.xferq.flag &= ~FWXFERQ_RUNNING;
546 sc->arrs.xferq.flag &= ~FWXFERQ_RUNNING;
547 fwohci_rx_enable(sc, &sc->arrq);
548 fwohci_rx_enable(sc, &sc->arrs);
550 /* Initialize async TX */
551 OWRITE(sc, OHCI_ATQCTLCLR, OHCI_CNTL_DMA_RUN | OHCI_CNTL_DMA_DEAD);
552 OWRITE(sc, OHCI_ATSCTLCLR, OHCI_CNTL_DMA_RUN | OHCI_CNTL_DMA_DEAD);
554 OWRITE(sc, FWOHCI_RETRY,
555 /* CycleLimit PhyRespRetries ATRespRetries ATReqRetries */
556 (0xffff << 16 ) | (0x0f << 8) | (0x0f << 4) | 0x0f) ;
557 for( i = 0, db_tr = sc->atrq.top; i < sc->atrq.ndb ;
558 i ++, db_tr = STAILQ_NEXT(db_tr, link)){
561 for( i = 0, db_tr = sc->atrs.top; i < sc->atrs.ndb ;
562 i ++, db_tr = STAILQ_NEXT(db_tr, link)){
567 /* Enable interrupt */
568 OWRITE(sc, FWOHCI_INTMASK,
569 OHCI_INT_ERR | OHCI_INT_PHY_SID
570 | OHCI_INT_DMA_ATRQ | OHCI_INT_DMA_ATRS
571 | OHCI_INT_DMA_PRRQ | OHCI_INT_DMA_PRRS
572 | OHCI_INT_PHY_BUS_R | OHCI_INT_PW_ERR);
573 fwohci_set_intr(&sc->fc, 1);
/*
 * fwohci_init: one-time attach-time setup — report the OHCI version,
 * probe the number of isochronous contexts, wire up the four async
 * xfer queues and their callbacks, build a minimal config ROM, allocate
 * the SID/dummy DMA buffers and descriptor rings, read the EUI64, fill
 * in the firewire_comm method table, and finally call fwohci_reset().
 * NOTE(review): missing lines in this listing (declarations of
 * reg/i/ui[], braces, error-return paths after the alloc failures, and
 * the "#else"/"#endif" of the BYTE_ORDER conditional near the irx_post
 * assignment).
 */
578 fwohci_init(struct fwohci_softc *sc, device_t dev)
585 TASK_INIT(&sc->fwohci_task_complete, 0, fwohci_complete, sc);
588 reg = OREAD(sc, OHCI_VERSION);
589 device_printf(dev, "OHCI version %x.%x (ROM=%d)\n",
590 (reg>>16) & 0xff, reg & 0xff, (reg>>24) & 1);
592 /* Available Isochrounous DMA channel probe */
593 OWRITE(sc, OHCI_IT_MASK, 0xffffffff);
594 OWRITE(sc, OHCI_IR_MASK, 0xffffffff);
595 reg = OREAD(sc, OHCI_IT_MASK) & OREAD(sc, OHCI_IR_MASK);
596 OWRITE(sc, OHCI_IT_MASKCLR, 0xffffffff);
597 OWRITE(sc, OHCI_IR_MASKCLR, 0xffffffff);
598 for (i = 0; i < 0x20; i++)
599 if ((reg & (1 << i)) == 0)
602 device_printf(dev, "No. of Isochronous channel is %d.\n", i);
604 sc->fc.arq = &sc->arrq.xferq;
605 sc->fc.ars = &sc->arrs.xferq;
606 sc->fc.atq = &sc->atrq.xferq;
607 sc->fc.ats = &sc->atrs.xferq;
609 sc->arrq.xferq.psize = roundup2(FWPMAX_S400, PAGE_SIZE);
610 sc->arrs.xferq.psize = roundup2(FWPMAX_S400, PAGE_SIZE);
611 sc->atrq.xferq.psize = roundup2(FWPMAX_S400, PAGE_SIZE);
612 sc->atrs.xferq.psize = roundup2(FWPMAX_S400, PAGE_SIZE);
614 sc->arrq.xferq.start = NULL;
615 sc->arrs.xferq.start = NULL;
616 sc->atrq.xferq.start = fwohci_start_atq;
617 sc->atrs.xferq.start = fwohci_start_ats;
619 sc->arrq.xferq.buf = NULL;
620 sc->arrs.xferq.buf = NULL;
621 sc->atrq.xferq.buf = NULL;
622 sc->atrs.xferq.buf = NULL;
626 sc->atrq.ndesc = 8; /* equal to maximum of mbuf chains */
630 sc->arrs.ndb = NDB / 2;
632 sc->atrs.ndb = NDB / 2;
634 for( i = 0 ; i < sc->fc.nisodma ; i ++ ){
635 sc->fc.it[i] = &sc->it[i].xferq;
636 sc->fc.ir[i] = &sc->ir[i].xferq;
641 sc->fc.tcode = tinfo;
644 sc->fc.config_rom = fwdma_malloc(&sc->fc, CROMSIZE, CROMSIZE,
645 &sc->crom_dma, BUS_DMA_WAITOK);
646 if(sc->fc.config_rom == NULL){
647 device_printf(dev, "config_rom alloc failed.");
/* minimal bus-info block: "1394", EUI64, CRC */
652 sc->fc.config_rom[1] = 0x31333934;
653 sc->fc.config_rom[2] = 0xf000a002;
654 sc->fc.config_rom[3] = OREAD(sc, OHCI_EUID_HI);
655 sc->fc.config_rom[4] = OREAD(sc, OHCI_EUID_LO);
656 sc->fc.config_rom[5] = 0;
657 sc->fc.config_rom[0] = (4 << 24) | (5 << 16);
659 sc->fc.config_rom[0] |= fw_crc16(&sc->fc.config_rom[1], 5*4);
663 /* SID recieve buffer must allign 2^11 */
664 #define OHCI_SIDSIZE (1 << 11)
665 sc->sid_buf = fwdma_malloc(&sc->fc, OHCI_SIDSIZE, OHCI_SIDSIZE,
666 &sc->sid_dma, BUS_DMA_WAITOK);
667 if (sc->sid_buf == NULL) {
668 device_printf(dev, "sid_buf alloc failed.");
672 fwdma_malloc(&sc->fc, sizeof(u_int32_t), sizeof(u_int32_t),
673 &sc->dummy_dma, BUS_DMA_WAITOK);
675 if (sc->dummy_dma.v_addr == NULL) {
676 device_printf(dev, "dummy_dma alloc failed.");
680 fwohci_db_init(sc, &sc->arrq);
681 if ((sc->arrq.flags & FWOHCI_DBCH_INIT) == 0)
684 fwohci_db_init(sc, &sc->arrs);
685 if ((sc->arrs.flags & FWOHCI_DBCH_INIT) == 0)
688 fwohci_db_init(sc, &sc->atrq);
689 if ((sc->atrq.flags & FWOHCI_DBCH_INIT) == 0)
692 fwohci_db_init(sc, &sc->atrs);
693 if ((sc->atrs.flags & FWOHCI_DBCH_INIT) == 0)
696 sc->fc.eui.hi = OREAD(sc, FWOHCIGUID_H);
697 sc->fc.eui.lo = OREAD(sc, FWOHCIGUID_L);
698 for( i = 0 ; i < 8 ; i ++)
699 ui[i] = FW_EUI64_BYTE(&sc->fc.eui,i);
700 device_printf(dev, "EUI64 %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
701 ui[0], ui[1], ui[2], ui[3], ui[4], ui[5], ui[6], ui[7]);
703 sc->fc.ioctl = fwohci_ioctl;
704 sc->fc.cyctimer = fwohci_cyctimer;
705 sc->fc.set_bmr = fwohci_set_bus_manager;
706 sc->fc.ibr = fwohci_ibr;
707 sc->fc.irx_enable = fwohci_irx_enable;
708 sc->fc.irx_disable = fwohci_irx_disable;
710 sc->fc.itx_enable = fwohci_itxbuf_enable;
711 sc->fc.itx_disable = fwohci_itx_disable;
712 #if BYTE_ORDER == BIG_ENDIAN
713 sc->fc.irx_post = fwohci_irx_post;
715 sc->fc.irx_post = NULL;
717 sc->fc.itx_post = NULL;
718 sc->fc.timeout = fwohci_timeout;
719 sc->fc.poll = fwohci_poll;
720 sc->fc.set_intr = fwohci_set_intr;
722 sc->intmask = sc->irstat = sc->itstat = 0;
725 fwohci_reset(sc, dev);
/*
 * fwohci_timeout: firewire_comm timeout callback (installed in
 * fwohci_init).  In this driver revision it only recovers the softc
 * from the opaque argument; the body is otherwise empty.
 * Fix: stripped stray line-number prefixes and restored the dropped
 * return-type line and braces (signature grounded by the prototype
 * "static void fwohci_timeout __P((void *))").
 */
static void
fwohci_timeout(void *arg)
{
	struct fwohci_softc *sc;

	sc = (struct fwohci_softc *)arg;
}
739 fwohci_cyctimer(struct firewire_comm *fc)
741 struct fwohci_softc *sc = (struct fwohci_softc *)fc;
742 return(OREAD(sc, OHCI_CYCLETIMER));
/*
 * fwohci_detach: release attach-time resources — the SID and config
 * ROM DMA buffers and every descriptor ring (async and per-channel
 * isochronous).
 * NOTE(review): missing lines in this listing (return type, braces,
 * declaration of "i", and the trailing return).
 */
746 fwohci_detach(struct fwohci_softc *sc, device_t dev)
750 if (sc->sid_buf != NULL)
751 fwdma_free(&sc->fc, &sc->sid_dma);
752 if (sc->fc.config_rom != NULL)
753 fwdma_free(&sc->fc, &sc->crom_dma);
755 fwohci_db_free(&sc->arrq);
756 fwohci_db_free(&sc->arrs);
758 fwohci_db_free(&sc->atrq);
759 fwohci_db_free(&sc->atrs);
761 for( i = 0 ; i < sc->fc.nisodma ; i ++ ){
762 fwohci_db_free(&sc->it[i]);
763 fwohci_db_free(&sc->ir[i]);
/*
 * LAST_DB(dbtr, db): point "db" at the last descriptor of descriptor
 * block "dbtr" (or at descriptor 0 when the block holds two or fewer).
 * Fix: stripped stray line-number prefixes and restored the dropped
 * "} while (0)" terminator — the do-while macro was left unterminated
 * in this listing, which would swallow the following source line.
 */
#define LAST_DB(dbtr, db) do {					\
	struct fwohcidb_tr *_dbtr = (dbtr);			\
	int _cnt = _dbtr->dbcnt;				\
	db = &_dbtr->db[(_cnt > 2) ? (_cnt - 1) : 0];		\
} while (0)
/*
 * fwohci_execute_db: bus_dmamap_load callback — append one OUTPUT
 * descriptor per DMA segment to the transfer's descriptor block
 * (address, length, cleared status).  Reports unexpected load errors.
 * NOTE(review): missing lines in this listing (return type, braces,
 * the early-return on error, "s = &segs[i]" and the dbcnt/db advance
 * inside the loop).
 */
776 fwohci_execute_db(void *arg, bus_dma_segment_t *segs, int nseg, int error)
778 struct fwohcidb_tr *db_tr;
779 volatile struct fwohcidb *db;
780 bus_dma_segment_t *s;
783 db_tr = (struct fwohcidb_tr *)arg;
784 db = &db_tr->db[db_tr->dbcnt];
786 if (firewire_debug || error != EFBIG)
787 printf("fwohci_execute_db: error=%d\n", error);
790 for (i = 0; i < nseg; i++) {
792 FWOHCI_DMA_WRITE(db->db.desc.addr, s->ds_addr);
793 FWOHCI_DMA_WRITE(db->db.desc.cmd, s->ds_len);
794 FWOHCI_DMA_WRITE(db->db.desc.res, 0);
801 fwohci_execute_db2(void *arg, bus_dma_segment_t *segs, int nseg,
802 bus_size_t size, int error)
804 fwohci_execute_db(arg, segs, nseg, error);
/*
 * fwohci_start: dequeue packets from the async TX queue (atrq or atrs)
 * and translate each into an OHCI descriptor block: an immediate
 * OUTPUT_MORE header descriptor followed by data descriptors mapped
 * with bus_dma (flat buffer or mbuf chain, with an EFBIG fallback that
 * linearizes the chain into a fresh cluster).  The last descriptor gets
 * OUTPUT_LAST | interrupt | branch, the previous program is patched to
 * chain into this one, and the context is started or woken.
 * NOTE(review): this listing is heavily fragmented — missing return
 * type, braces, several declarations (off, fp, m0, pl_addr, fsegment,
 * err, i), loop/goto structure, and the out/kick labels.  Do not edit
 * without restoring the dropped lines.
 */
808 fwohci_start(struct fwohci_softc *sc, struct fwohci_dbch *dbch)
811 int tcode, hdr_len, pl_off, pl_len;
814 struct fw_xfer *xfer;
816 volatile struct fwohci_txpkthdr *ohcifp;
817 struct fwohcidb_tr *db_tr;
818 volatile struct fwohcidb *db;
819 struct tcode_info *info;
820 static int maxdesc=0;
822 if(&sc->atrq == dbch){
824 }else if(&sc->atrs == dbch){
830 if (dbch->flags & FWOHCI_DBCH_FULL)
836 xfer = STAILQ_FIRST(&dbch->xferq.q);
840 if(dbch->xferq.queued == 0 ){
841 device_printf(sc->fc.dev, "TX queue empty\n");
843 STAILQ_REMOVE_HEAD(&dbch->xferq.q, link);
845 xfer->state = FWXF_START;
847 fp = (struct fw_pkt *)xfer->send.buf;
848 tcode = fp->mode.common.tcode;
850 ohcifp = (volatile struct fwohci_txpkthdr *) db_tr->db[1].db.immed;
851 info = &tinfo[tcode];
852 hdr_len = pl_off = info->hdr_len;
853 for( i = 0 ; i < pl_off ; i+= 4){
854 ohcifp->mode.ld[i/4] = fp->mode.ld[i/4];
856 ohcifp->mode.common.spd = xfer->spd;
857 if (tcode == FWTCODE_STREAM ){
859 ohcifp->mode.stream.len = fp->mode.stream.len;
860 } else if (tcode == FWTCODE_PHY) {
862 ohcifp->mode.ld[1] = fp->mode.ld[1];
863 ohcifp->mode.ld[2] = fp->mode.ld[2];
864 ohcifp->mode.common.spd = 0;
865 ohcifp->mode.common.tcode = FWOHCITCODE_PHY;
867 ohcifp->mode.asycomm.dst = fp->mode.hdr.dst;
868 ohcifp->mode.asycomm.srcbus = OHCI_ASYSRCBUS;
869 ohcifp->mode.asycomm.tlrt |= FWRETRY_X;
872 FWOHCI_DMA_WRITE(db->db.desc.cmd,
873 OHCI_OUTPUT_MORE | OHCI_KEY_ST2 | hdr_len);
874 FWOHCI_DMA_WRITE(db->db.desc.res, 0);
875 /* Specify bound timer of asy. responce */
876 if(&sc->atrs == dbch){
877 FWOHCI_DMA_WRITE(db->db.desc.res,
878 (OREAD(sc, OHCI_CYCLETIMER) >> 12) + (1 << 13));
880 #if BYTE_ORDER == BIG_ENDIAN
881 if (tcode == FWTCODE_WREQQ || tcode == FWTCODE_RRESQ)
883 for (i = 0; i < hdr_len/4; i ++)
884 FWOHCI_DMA_WRITE(ohcifp->mode.ld[i], ohcifp->mode.ld[i]);
889 db = &db_tr->db[db_tr->dbcnt];
890 pl_len = xfer->send.len - pl_off;
894 if (xfer->mbuf == NULL) {
897 pl_addr = xfer->send.buf + pl_off;
898 err = bus_dmamap_load(dbch->dmat, db_tr->dma_map,
900 fwohci_execute_db, db_tr,
903 /* XXX we can handle only 6 (=8-2) mbuf chains */
904 err = bus_dmamap_load_mbuf(dbch->dmat, db_tr->dma_map,
906 fwohci_execute_db2, db_tr,
/* EFBIG fallback: copy the whole chain into one cluster mbuf */
912 device_printf(sc->fc.dev, "EFBIG.\n");
913 m0 = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
915 m_copydata(xfer->mbuf, 0,
916 xfer->mbuf->m_pkthdr.len,
918 m0->m_len = m0->m_pkthdr.len =
919 xfer->mbuf->m_pkthdr.len;
924 device_printf(sc->fc.dev, "m_getcl failed.\n");
928 printf("dmamap_load: err=%d\n", err);
929 bus_dmamap_sync(dbch->dmat, db_tr->dma_map,
930 BUS_DMASYNC_PREWRITE);
931 #if 0 /* OHCI_OUTPUT_MODE == 0 */
932 for (i = 2; i < db_tr->dbcnt; i++)
933 FWOHCI_DMA_SET(db_tr->db[i].db.desc.cmd,
937 if (maxdesc < db_tr->dbcnt) {
938 maxdesc = db_tr->dbcnt;
940 device_printf(sc->fc.dev, "maxdesc: %d\n", maxdesc);
/* terminate the program and chain to the next block */
944 FWOHCI_DMA_SET(db->db.desc.cmd,
945 OHCI_OUTPUT_LAST | OHCI_INTERRUPT_ALWAYS | OHCI_BRANCH_ALWAYS);
946 FWOHCI_DMA_WRITE(db->db.desc.depend,
947 STAILQ_NEXT(db_tr, link)->bus_addr);
950 fsegment = db_tr->dbcnt;
951 if (dbch->pdb_tr != NULL) {
952 LAST_DB(dbch->pdb_tr, db);
953 FWOHCI_DMA_SET(db->db.desc.depend, db_tr->dbcnt);
955 dbch->pdb_tr = db_tr;
956 db_tr = STAILQ_NEXT(db_tr, link);
957 if(db_tr != dbch->bottom){
960 device_printf(sc->fc.dev, "fwohci_start: lack of db_trq\n");
961 dbch->flags |= FWOHCI_DBCH_FULL;
965 fwdma_sync_multiseg_all(dbch->am, BUS_DMASYNC_PREREAD);
966 fwdma_sync_multiseg_all(dbch->am, BUS_DMASYNC_PREWRITE);
968 if(dbch->xferq.flag & FWXFERQ_RUNNING) {
969 OWRITE(sc, OHCI_DMACTL(off), OHCI_CNTL_DMA_WAKE);
972 device_printf(sc->fc.dev, "start AT DMA status=%x\n",
973 OREAD(sc, OHCI_DMACTL(off)));
974 OWRITE(sc, OHCI_DMACMD(off), dbch->top->bus_addr | fsegment);
975 OWRITE(sc, OHCI_DMACTL(off), OHCI_CNTL_DMA_RUN);
976 dbch->xferq.flag |= FWXFERQ_RUNNING;
985 fwohci_start_atq(struct firewire_comm *fc)
987 struct fwohci_softc *sc = (struct fwohci_softc *)fc;
988 fwohci_start( sc, &(sc->atrq));
993 fwohci_start_ats(struct firewire_comm *fc)
995 struct fwohci_softc *sc = (struct fwohci_softc *)fc;
996 fwohci_start( sc, &(sc->atrs));
/*
 * fwohci_txd: reap completed async TX descriptor blocks — walk from
 * dbch->bottom while transfers are queued, read each block's xferStatus,
 * unmap its DMA buffer, translate the 5-bit event/ack code into an
 * errno, recover a dead context by toggling LINKEN, and complete or
 * retry the attached fw_xfer.  Re-kicks fwohci_start() when the FULL
 * flag was set and slots were freed.
 * NOTE(review): heavily fragmented listing — missing return type,
 * braces, declarations (off, err, stat, status, packets), the switch
 * statement around the FWOHCIEV_* case labels, the err assignments in
 * each case, and the fw_xfer_done/unqueue tail.
 */
1001 fwohci_txd(struct fwohci_softc *sc, struct fwohci_dbch *dbch)
1004 struct fwohcidb_tr *tr;
1005 volatile struct fwohcidb *db;
1006 struct fw_xfer *xfer;
1010 struct firewire_comm *fc = (struct firewire_comm *)sc;
1012 if(&sc->atrq == dbch){
1015 }else if(&sc->atrs == dbch){
1024 fwdma_sync_multiseg_all(dbch->am, BUS_DMASYNC_POSTREAD);
1025 fwdma_sync_multiseg_all(dbch->am, BUS_DMASYNC_POSTWRITE);
1026 while(dbch->xferq.queued > 0){
1028 status = FWOHCI_DMA_READ(db->db.desc.res) >> OHCI_STATUS_SHIFT;
1029 if(!(status & OHCI_CNTL_DMA_ACTIVE)){
1030 if (fc->status != FWBUSRESET)
1031 /* maybe out of order?? */
1034 bus_dmamap_sync(dbch->dmat, tr->dma_map,
1035 BUS_DMASYNC_POSTWRITE);
1036 bus_dmamap_unload(dbch->dmat, tr->dma_map);
1040 if(status & OHCI_CNTL_DMA_DEAD) {
/* dead context: reset the AT FIFO by cycling the link enable bit */
1042 OWRITE(sc, OHCI_DMACTLCLR(off), OHCI_CNTL_DMA_RUN);
1043 device_printf(sc->fc.dev, "force reset AT FIFO\n");
1044 OWRITE(sc, OHCI_HCCCTLCLR, OHCI_HCC_LINKEN);
1045 OWRITE(sc, OHCI_HCCCTL, OHCI_HCC_LPS | OHCI_HCC_LINKEN);
1046 OWRITE(sc, OHCI_DMACTLCLR(off), OHCI_CNTL_DMA_RUN);
1048 stat = status & FWOHCIEV_MASK;
1050 case FWOHCIEV_ACKPEND:
1051 case FWOHCIEV_ACKCOMPL:
1054 case FWOHCIEV_ACKBSA:
1055 case FWOHCIEV_ACKBSB:
1056 case FWOHCIEV_ACKBSX:
1057 device_printf(sc->fc.dev, "txd err=%2x %s\n", stat, fwohcicode[stat]);
1060 case FWOHCIEV_FLUSHED:
1061 case FWOHCIEV_ACKTARD:
1062 device_printf(sc->fc.dev, "txd err=%2x %s\n", stat, fwohcicode[stat]);
1065 case FWOHCIEV_MISSACK:
1066 case FWOHCIEV_UNDRRUN:
1067 case FWOHCIEV_OVRRUN:
1068 case FWOHCIEV_DESCERR:
1069 case FWOHCIEV_DTRDERR:
1070 case FWOHCIEV_TIMEOUT:
1071 case FWOHCIEV_TCODERR:
1072 case FWOHCIEV_UNKNOWN:
1073 case FWOHCIEV_ACKDERR:
1074 case FWOHCIEV_ACKTERR:
1076 device_printf(sc->fc.dev, "txd err=%2x %s\n",
1077 stat, fwohcicode[stat]);
1081 if (tr->xfer != NULL) {
1083 if (xfer->state == FWXF_RCVD) {
1085 printf("already rcvd\n");
1088 xfer->state = FWXF_SENT;
1089 if (err == EBUSY && fc->status != FWBUSRESET) {
1090 xfer->state = FWXF_BUSY;
1092 if (xfer->retry_req != NULL)
1093 xfer->retry_req(xfer);
1098 } else if (stat != FWOHCIEV_ACKPEND) {
1099 if (stat != FWOHCIEV_ACKCOMPL)
1100 xfer->state = FWXF_SENTERR;
1107 * The watchdog timer takes care of split
1108 * transcation timeout for ACKPEND case.
1111 printf("this shouldn't happen\n");
1113 dbch->xferq.queued --;
1117 tr = STAILQ_NEXT(tr, link);
1119 if (dbch->bottom == dbch->top) {
1120 /* we reaches the end of context program */
1121 if (firewire_debug && dbch->xferq.queued > 0)
1122 printf("queued > 0\n");
1127 if ((dbch->flags & FWOHCI_DBCH_FULL) && packets > 0) {
1128 printf("make free slot\n");
1129 dbch->flags &= ~FWOHCI_DBCH_FULL;
1130 fwohci_start(sc, dbch);
/*
 * fwohci_db_free: tear down a DMA channel's descriptor ring — free any
 * driver-owned packet buffers (unless externally supplied via
 * FWXFERQ_EXTBUF), destroy per-entry dmamaps, release the multiseg
 * descriptor memory, and clear the INIT flag.
 * NOTE(review): missing lines in this listing (return type, braces,
 * declaration of "idb", the early return, and the free() of the
 * db_tr array).
 */
1136 fwohci_db_free(struct fwohci_dbch *dbch)
1138 struct fwohcidb_tr *db_tr;
1141 if ((dbch->flags & FWOHCI_DBCH_INIT) == 0)
1144 for(db_tr = STAILQ_FIRST(&dbch->db_trq), idb = 0; idb < dbch->ndb;
1145 db_tr = STAILQ_NEXT(db_tr, link), idb++){
1146 if ((dbch->xferq.flag & FWXFERQ_EXTBUF) == 0 &&
1147 db_tr->buf != NULL) {
1148 fwdma_free_size(dbch->dmat, db_tr->dma_map,
1149 db_tr->buf, dbch->xferq.psize);
1151 } else if (db_tr->dma_map != NULL)
1152 bus_dmamap_destroy(dbch->dmat, db_tr->dma_map);
1155 db_tr = STAILQ_FIRST(&dbch->db_trq);
1156 fwdma_free_multiseg(dbch->am);
1158 STAILQ_INIT(&dbch->db_trq);
1159 dbch->flags &= ~FWOHCI_DBCH_INIT;
/*
 * fwohci_db_init: build a DMA channel's descriptor ring — create the
 * buffer dma_tag, allocate the db_tr bookkeeping array and the 16-byte
 * aligned descriptor blocks (fwdma multiseg), create a dmamap per
 * entry, link the entries into a circular STAILQ, and mark the channel
 * initialized.  For external-buffer (isochronous) queues it also
 * records bulkxfer start/end markers every bnpacket entries.
 * NOTE(review): missing lines in this listing (return type, braces,
 * declaration of "idb", error returns after the tag/malloc failures,
 * and the db_tr++ advance inside the attach loop).
 */
1163 fwohci_db_init(struct fwohci_softc *sc, struct fwohci_dbch *dbch)
1166 struct fwohcidb_tr *db_tr;
1168 if ((dbch->flags & FWOHCI_DBCH_INIT) != 0)
1171 /* create dma_tag for buffers */
1172 #define MAX_REQCOUNT 0xffff
1173 if (bus_dma_tag_create(/*parent*/ sc->fc.dmat,
1174 /*alignment*/ 1, /*boundary*/ 0,
1175 /*lowaddr*/ BUS_SPACE_MAXADDR_32BIT,
1176 /*highaddr*/ BUS_SPACE_MAXADDR,
1177 /*filter*/NULL, /*filterarg*/NULL,
1178 /*maxsize*/ dbch->xferq.psize,
1179 /*nsegments*/ dbch->ndesc > 3 ? dbch->ndesc - 2 : 1,
1180 /*maxsegsz*/ MAX_REQCOUNT,
1181 /*flags*/ 0, &dbch->dmat))
1184 /* allocate DB entries and attach one to each DMA channels */
1185 /* DB entry must start at 16 bytes bounary. */
1186 STAILQ_INIT(&dbch->db_trq);
1187 db_tr = (struct fwohcidb_tr *)
1188 malloc(sizeof(struct fwohcidb_tr) * dbch->ndb,
1189 M_FW, M_WAITOK | M_ZERO);
1191 printf("fwohci_db_init: malloc(1) failed\n");
1195 #define DB_SIZE(x) (sizeof(struct fwohcidb) * (x)->ndesc)
1196 dbch->am = fwdma_malloc_multiseg(&sc->fc, DB_SIZE(dbch),
1197 DB_SIZE(dbch), dbch->ndb, BUS_DMA_WAITOK);
1198 if (dbch->am == NULL) {
1199 printf("fwohci_db_init: fwdma_malloc_multiseg failed\n");
1202 /* Attach DB to DMA ch. */
1203 for(idb = 0 ; idb < dbch->ndb ; idb++){
1205 db_tr->db = (struct fwohcidb *)fwdma_v_addr(dbch->am, idb);
1206 db_tr->bus_addr = fwdma_bus_addr(dbch->am, idb);
1207 /* create dmamap for buffers */
1208 /* XXX do we need 4bytes alignment tag? */
1209 /* XXX don't alloc dma_map for AR */
1210 if (bus_dmamap_create(dbch->dmat, 0, &db_tr->dma_map) != 0) {
1211 printf("bus_dmamap_create failed\n");
1212 dbch->flags = FWOHCI_DBCH_INIT; /* XXX fake */
1213 fwohci_db_free(dbch);
1216 STAILQ_INSERT_TAIL(&dbch->db_trq, db_tr, link);
1217 if (dbch->xferq.flag & FWXFERQ_EXTBUF) {
1218 if (idb % dbch->xferq.bnpacket == 0)
1219 dbch->xferq.bulkxfer[idb / dbch->xferq.bnpacket
1220 ].start = (caddr_t)db_tr;
1221 if ((idb + 1) % dbch->xferq.bnpacket == 0)
1222 dbch->xferq.bulkxfer[idb / dbch->xferq.bnpacket
1223 ].end = (caddr_t)db_tr;
/* close the ring: last entry points back to the first */
1227 STAILQ_LAST(&dbch->db_trq, fwohcidb_tr,link)->link.stqe_next
1228 = STAILQ_FIRST(&dbch->db_trq);
1230 dbch->xferq.queued = 0;
1231 dbch->pdb_tr = NULL;
1232 dbch->top = STAILQ_FIRST(&dbch->db_trq);
1233 dbch->bottom = dbch->top;
1234 dbch->flags = FWOHCI_DBCH_INIT;
/*
 * fwohci_itx_disable: stop isochronous transmit channel "dmach" —
 * clear RUN/cycle-match, mask and acknowledge its interrupt, wait one
 * second for the DMA engine to drain, then free the descriptor ring
 * and clear the RUNNING flag.
 * NOTE(review): missing lines in this listing (return type, braces,
 * declaration of "sleepch", and the trailing return).
 */
1238 fwohci_itx_disable(struct firewire_comm *fc, int dmach)
1240 struct fwohci_softc *sc = (struct fwohci_softc *)fc;
1243 OWRITE(sc, OHCI_ITCTLCLR(dmach),
1244 OHCI_CNTL_DMA_RUN | OHCI_CNTL_CYCMATCH_S);
1245 OWRITE(sc, OHCI_IT_MASKCLR, 1 << dmach);
1246 OWRITE(sc, OHCI_IT_STATCLR, 1 << dmach);
1247 /* XXX we cannot free buffers until the DMA really stops */
1248 tsleep((void *)&sleepch, FWPRI, "fwitxd", hz);
1249 fwohci_db_free(&sc->it[dmach]);
1250 sc->it[dmach].xferq.flag &= ~FWXFERQ_RUNNING;
/*
 * Disable isochronous-receive DMA channel `dmach`.
 * Mirror image of fwohci_itx_disable(): stop the IR context, mask/ack
 * its interrupt, wait a second for the engine to drain, free the
 * descriptor ring, and clear RUNNING.
 * NOTE(review): original lines are missing from this extract.
 */
1255 fwohci_irx_disable(struct firewire_comm *fc, int dmach)
1257 	struct fwohci_softc *sc = (struct fwohci_softc *)fc;
1260 	OWRITE(sc, OHCI_IRCTLCLR(dmach), OHCI_CNTL_DMA_RUN);
1261 	OWRITE(sc, OHCI_IR_MASKCLR, 1 << dmach);
1262 	OWRITE(sc, OHCI_IR_STATCLR, 1 << dmach);
1263 	/* XXX we cannot free buffers until the DMA really stops */
1264 	tsleep((void *)&sleepch, FWPRI, "fwirxd", hz);
1265 	fwohci_db_free(&sc->ir[dmach]);
1266 	sc->ir[dmach].xferq.flag &= ~FWXFERQ_RUNNING;
/*
 * Big-endian-only post-receive hook: byte-swap the first quadlet of a
 * received isochronous packet in place (FWOHCI_DMA_READ converts from
 * the controller's little-endian layout to host order).
 */
1270 #if BYTE_ORDER == BIG_ENDIAN
1272 fwohci_irx_post (struct firewire_comm *fc , u_int32_t *qld)
1274 	qld[0] = FWOHCI_DMA_READ(qld[0]);
/*
 * Link the IT descriptor ring for `dbch` and mark the channel RUNNING.
 * Finds the IT context offset for this channel, then walks every
 * descriptor block, points its `depend` branch at the next block's bus
 * address, and (for external-buffer queues) requests an interrupt at
 * each chunk boundary.
 * NOTE(review): several original lines are missing from this extract
 * (error returns, #else branches, closing braces).
 */
1280 fwohci_tx_enable(struct fwohci_softc *sc, struct fwohci_dbch *dbch)
1283 	int idb, z, i, dmach = 0, ldesc;
/* NOTE(review): NULL assigned to an integer; should be 0 — fix upstream. */
1284 	u_int32_t off = NULL;
1285 	struct fwohcidb_tr *db_tr;
1286 	volatile struct fwohcidb *db;
1288 	if(!(dbch->xferq.flag & FWXFERQ_EXTBUF)){
/* Locate which IT DMA context this dbch corresponds to. */
1293 	for(dmach = 0 ; dmach < sc->fc.nisodma ; dmach++){
1294 		if( &sc->it[dmach] == dbch){
1295 			off = OHCI_ITOFF(dmach);
1303 	if(dbch->xferq.flag & FWXFERQ_RUNNING)
1305 	dbch->xferq.flag |= FWXFERQ_RUNNING;
/* Advance `bottom` to the last descriptor block in the ring. */
1306 	for( i = 0, dbch->bottom = dbch->top; i < (dbch->ndb - 1); i++){
1307 		dbch->bottom = STAILQ_NEXT(dbch->bottom, link);
1310 	for (idb = 0; idb < dbch->ndb; idb ++) {
1311 		fwohci_add_tx_buf(dbch, db_tr, idb);
1312 		if(STAILQ_NEXT(db_tr, link) == NULL){
1316 		ldesc = db_tr->dbcnt - 1;
/* Branch both first and last descriptors to the next block (|z = Z count). */
1317 		FWOHCI_DMA_WRITE(db[0].db.desc.depend,
1318 			STAILQ_NEXT(db_tr, link)->bus_addr | z);
1319 		db[ldesc].db.desc.depend = db[0].db.desc.depend;
1320 		if(dbch->xferq.flag & FWXFERQ_EXTBUF){
1321 			if(((idb + 1 ) % dbch->xferq.bnpacket) == 0){
1323 					db[ldesc].db.desc.cmd,
1324 					 OHCI_INTERRUPT_ALWAYS);
1325 				/* OHCI 1.1 and above */
1328 					 OHCI_INTERRUPT_ALWAYS);
1331 		db_tr = STAILQ_NEXT(db_tr, link);
/* Terminate the ring: clear the Z field of the last block's branch. */
1334 		dbch->bottom->db[dbch->bottom->dbcnt - 1].db.desc.depend, 0xf);
/*
 * Link the receive descriptor ring for `dbch` (AR request/response or an
 * IR context) and start the DMA engine: chain each descriptor block to
 * the next, sync the ring, program CommandPtr with the top block, and
 * set RUN.
 * NOTE(review): original lines are missing from this extract.
 */
1339 fwohci_rx_enable(struct fwohci_softc *sc, struct fwohci_dbch *dbch)
1342 	int idb, z, i, dmach = 0, ldesc;
/* NOTE(review): NULL assigned to an integer; should be 0 — fix upstream. */
1343 	u_int32_t off = NULL;
1344 	struct fwohcidb_tr *db_tr;
1345 	volatile struct fwohcidb *db;
/* Select register offset: AR request, AR response, or an IR context. */
1348 	if(&sc->arrq == dbch){
1350 	}else if(&sc->arrs == dbch){
1353 		for(dmach = 0 ; dmach < sc->fc.nisodma ; dmach++){
1354 			if( &sc->ir[dmach] == dbch){
1355 				off = OHCI_IROFF(dmach);
1364 	if(dbch->xferq.flag & FWXFERQ_STREAM){
1365 		if(dbch->xferq.flag & FWXFERQ_RUNNING)
1368 		if(dbch->xferq.flag & FWXFERQ_RUNNING){
1373 	dbch->xferq.flag |= FWXFERQ_RUNNING;
1374 	dbch->top = STAILQ_FIRST(&dbch->db_trq);
1375 	for( i = 0, dbch->bottom = dbch->top; i < (dbch->ndb - 1); i++){
1376 		dbch->bottom = STAILQ_NEXT(dbch->bottom, link);
1379 	for (idb = 0; idb < dbch->ndb; idb ++) {
1380 		fwohci_add_rx_buf(dbch, db_tr, idb, &sc->dummy_dma);
1381 		if (STAILQ_NEXT(db_tr, link) == NULL)
1384 		ldesc = db_tr->dbcnt - 1;
/* Point the last descriptor's branch at the next block (|z = Z count). */
1385 		FWOHCI_DMA_WRITE(db[ldesc].db.desc.depend,
1386 			STAILQ_NEXT(db_tr, link)->bus_addr | z);
1387 		if(dbch->xferq.flag & FWXFERQ_EXTBUF){
1388 			if(((idb + 1 ) % dbch->xferq.bnpacket) == 0){
1390 					db[ldesc].db.desc.cmd,
1391 					 OHCI_INTERRUPT_ALWAYS);
1393 					db[ldesc].db.desc.depend,
1397 		db_tr = STAILQ_NEXT(db_tr, link);
/* NOTE(review): uses db_tr->dbcnt here where tx_enable uses bottom->dbcnt
 * — presumably equivalent at loop exit; verify against full source. */
1400 		dbch->bottom->db[db_tr->dbcnt - 1].db.desc.depend, 0xf);
1401 	dbch->buf_offset = 0;
1402 	fwdma_sync_multiseg_all(dbch->am, BUS_DMASYNC_PREREAD);
1403 	fwdma_sync_multiseg_all(dbch->am, BUS_DMASYNC_PREWRITE);
1404 	if(dbch->xferq.flag & FWXFERQ_STREAM){
/* Program CommandPtr with the first block, then start the context. */
1407 		OWRITE(sc, OHCI_DMACMD(off), dbch->top->bus_addr | z);
1409 	OWRITE(sc, OHCI_DMACTL(off), OHCI_CNTL_DMA_RUN);
/*
 * Compute the cycle-match value for starting IT DMA: take the current
 * cycle timer (2-bit seconds + 13-bit cycle), add a safety delay, round
 * up to a CYCLE_MOD boundary, carry into the seconds field on 8000-cycle
 * overflow, and return the 15-bit match value.
 * NOTE(review): the #if/#else selecting between the two CYCLE_DELAY
 * values is partially missing from this extract.
 */
1414 fwohci_next_cycle(struct firewire_comm *fc, int cycle_now)
1416 	int sec, cycle, cycle_match;
1418 	cycle = cycle_now & 0x1fff;
1419 	sec = cycle_now >> 13;
1420 #define CYCLE_MOD	0x10
1422 #define CYCLE_DELAY	8	/* min delay to start DMA */
1424 #define CYCLE_DELAY	7000	/* min delay to start DMA */
1426 	cycle = cycle + CYCLE_DELAY;
/* 8000 cycles per second (125us each); carry into `sec` on overflow. */
1427 	if (cycle >= 8000) {
1431 	cycle = roundup2(cycle, CYCLE_MOD);
1432 	if (cycle >= 8000) {
1439 	cycle_match = ((sec << 13) | cycle) & 0x7ffff;
1441 	return(cycle_match);
/*
 * Queue pending isochronous-transmit chunks on channel `dmach` and kick
 * the IT DMA context, starting it on a future cycle-match boundary when
 * it is not already running.
 * Flow: lazily init the descriptor ring on first use; move every chunk
 * from stvalid to stdma, chaining each onto the previous chunk's branch
 * descriptors; then either wake a running context or program
 * cycle-match + RUN to start it.
 * NOTE(review): this extract is missing original lines (error paths,
 * braces, #else branches); comments below describe only visible code.
 */
1445 fwohci_itxbuf_enable(struct firewire_comm *fc, int dmach)
1447 	struct fwohci_softc *sc = (struct fwohci_softc *)fc;
1449 	unsigned short tag, ich;
1450 	struct fwohci_dbch *dbch;
1451 	int cycle_match, cycle_now, s, ldesc;
1453 	struct fw_bulkxfer *first, *chunk, *prev;
1454 	struct fw_xferq *it;
1456 	dbch = &sc->it[dmach];
/* flag encodes the iso tag (bits 7:6) and channel number (bits 5:0). */
1459 	tag = (it->flag >> 6) & 3;
1460 	ich = it->flag & 0x3f;
/* First use: size and build the descriptor ring for this channel. */
1461 	if ((dbch->flags & FWOHCI_DBCH_INIT) == 0) {
1462 		dbch->ndb = it->bnpacket * it->bnchunk;
1464 		fwohci_db_init(sc, dbch);
1465 		if ((dbch->flags & FWOHCI_DBCH_INIT) == 0)
1467 		err = fwohci_tx_enable(sc, dbch);
1472 	ldesc = dbch->ndesc - 1;
1474 	prev = STAILQ_LAST(&it->stdma, fw_bulkxfer, link);
/* Move every valid chunk onto the DMA list, linking it after `prev`. */
1475 	while  ((chunk = STAILQ_FIRST(&it->stvalid)) != NULL) {
1476 		volatile struct fwohcidb *db;
1478 		fwdma_sync_multiseg(it->buf, chunk->poffset, it->bnpacket,
1479 					BUS_DMASYNC_PREWRITE);
1480 		fwohci_txbufdb(sc, dmach, chunk);
1482 			db = ((struct fwohcidb_tr *)(prev->end))->db;
1483 #if 0 /* XXX necessary? */
1484 			FWOHCI_DMA_SET(db[ldesc].db.desc.cmd,
1485 						OHCI_BRANCH_ALWAYS);
1487 #if 0 /* if bulkxfer->npacket changes */
1488 			db[ldesc].db.desc.depend = db[0].db.desc.depend =
1489 				((struct fwohcidb_tr *)
1490 					(chunk->start))->bus_addr | dbch->ndesc;
/* Set the Z field so the controller follows prev's branch into chunk. */
1492 			FWOHCI_DMA_SET(db[0].db.desc.depend, dbch->ndesc);
1493 			FWOHCI_DMA_SET(db[ldesc].db.desc.depend, dbch->ndesc);
1496 		STAILQ_REMOVE_HEAD(&it->stvalid, link);
1497 		STAILQ_INSERT_TAIL(&it->stdma, chunk, link);
1500 	fwdma_sync_multiseg_all(dbch->am, BUS_DMASYNC_PREWRITE);
1501 	fwdma_sync_multiseg_all(dbch->am, BUS_DMASYNC_PREREAD);
1503 	stat = OREAD(sc, OHCI_ITCTL(dmach));
1504 	if (firewire_debug && (stat & OHCI_CNTL_CYCMATCH_S))
1505 		printf("stat 0x%x\n", stat);
1507 	if (stat & (OHCI_CNTL_DMA_ACTIVE | OHCI_CNTL_CYCMATCH_S))
/* Context idle: (re)arm it from scratch. */
1511 	OWRITE(sc, OHCI_ITCTLCLR(dmach), OHCI_CNTL_DMA_RUN);
1513 	OWRITE(sc, OHCI_IT_MASKCLR, 1 << dmach);
1514 	OWRITE(sc, OHCI_IT_STATCLR, 1 << dmach);
1515 	OWRITE(sc, OHCI_IT_MASK, 1 << dmach);
1516 	OWRITE(sc, FWOHCI_INTMASK, OHCI_INT_DMA_IT);
1518 	first = STAILQ_FIRST(&it->stdma);
1519 	OWRITE(sc, OHCI_ITCMD(dmach),
1520 		((struct fwohcidb_tr *)(first->start))->bus_addr | dbch->ndesc);
1521 	if (firewire_debug) {
1522 		printf("fwohci_itxbuf_enable: kick 0x%08x\n", stat);
1524 		dump_dma(sc, ITX_CH + dmach);
1527 	if ((stat & OHCI_CNTL_DMA_RUN) == 0) {
1529 		/* Don't start until all chunks are buffered */
1530 		if (STAILQ_FIRST(&it->stfree) != NULL)
1534 		/* Clear cycle match counter bits */
1535 		OWRITE(sc, OHCI_ITCTLCLR(dmach), 0xffff0000);
1537 		/* 2bit second + 13bit cycle */
1538 		cycle_now = (fc->cyctimer(fc) >> 12) & 0x7fff;
1539 		cycle_match = fwohci_next_cycle(fc, cycle_now);
1541 		OWRITE(sc, OHCI_ITCTL(dmach),
1542 				OHCI_CNTL_CYCMATCH_S | (cycle_match << 16)
1543 				| OHCI_CNTL_DMA_RUN);
1545 		OWRITE(sc, OHCI_ITCTL(dmach), OHCI_CNTL_DMA_RUN);
1547 	if (firewire_debug) {
1548 		printf("cycle_match: 0x%04x->0x%04x\n",
1549 				cycle_now, cycle_match);
1550 		dump_dma(sc, ITX_CH + dmach);
1551 		dump_db(sc, ITX_CH + dmach);
/* Running but cycle-match cleared: the controller underran; wake it. */
1553 	} else if ((stat & OHCI_CNTL_CYCMATCH_S) == 0) {
1554 		device_printf(sc->fc.dev,
1555 			"IT DMA underrun (0x%08x)\n", stat);
1556 		OWRITE(sc, OHCI_ITCTL(dmach), OHCI_CNTL_DMA_WAKE);
/*
 * Enable isochronous receive on channel `dmach`: lazily build the
 * descriptor ring, move free chunks onto the DMA list (loading mbufs
 * directly for the if_fwe case), then start the IR context with
 * iso-header stripping enabled.
 * NOTE(review): original lines are missing from this extract; comments
 * describe only the visible statements.
 */
1563 fwohci_irx_enable(struct firewire_comm *fc, int dmach)
1565 	struct fwohci_softc *sc = (struct fwohci_softc *)fc;
1566 	int err = 0, s, ldesc;
1567 	unsigned short tag, ich;
1569 	struct fwohci_dbch *dbch;
1570 	struct fwohcidb_tr *db_tr;
1571 	struct fw_bulkxfer *first, *prev, *chunk;
1572 	struct fw_xferq *ir;
1574 	dbch = &sc->ir[dmach];
1577 	if ((ir->flag & FWXFERQ_RUNNING) == 0) {
/* Program the context-match register with the tag bits and channel. */
1578 		tag = (ir->flag >> 6) & 3;
1579 		ich = ir->flag & 0x3f;
1580 		OWRITE(sc, OHCI_IRMATCH(dmach), tagbit[tag] | ich);
1583 		dbch->ndb = ir->bnpacket * ir->bnchunk;
1585 		fwohci_db_init(sc, dbch);
1586 		if ((dbch->flags & FWOHCI_DBCH_INIT) == 0)
1588 		err = fwohci_rx_enable(sc, dbch);
1593 	first = STAILQ_FIRST(&ir->stfree);
1594 	if (first == NULL) {
1595 		device_printf(fc->dev, "IR DMA no free chunk\n");
1599 	ldesc = dbch->ndesc - 1;
1601 	prev = STAILQ_LAST(&ir->stdma, fw_bulkxfer, link);
/* Hand every free chunk to the DMA engine, chaining after `prev`. */
1602 	while  ((chunk = STAILQ_FIRST(&ir->stfree)) != NULL) {
1603 		volatile struct fwohcidb *db;
1605 #if 1 /* XXX for if_fwe */
/* Ethernet-over-1394 path: DMA straight into the chunk's mbuf. */
1606 		if (chunk->mbuf != NULL) {
1607 			db_tr = (struct fwohcidb_tr *)(chunk->start);
1609 			err = bus_dmamap_load_mbuf(dbch->dmat, db_tr->dma_map,
1610 					chunk->mbuf, fwohci_execute_db2, db_tr,
1612 			FWOHCI_DMA_SET(db_tr->db[1].db.desc.cmd,
1613 				OHCI_UPDATE | OHCI_INPUT_LAST |
1614 				OHCI_INTERRUPT_ALWAYS | OHCI_BRANCH_ALWAYS);
/* Reset residual count and terminate the chunk's branch. */
1617 		db = ((struct fwohcidb_tr *)(chunk->end))->db;
1618 		FWOHCI_DMA_WRITE(db[ldesc].db.desc.res, 0);
1619 		FWOHCI_DMA_CLEAR(db[ldesc].db.desc.depend, 0xf);
/* Link previous chunk's branch into this one (Z = ndesc). */
1621 			db = ((struct fwohcidb_tr *)(prev->end))->db;
1622 			FWOHCI_DMA_SET(db[ldesc].db.desc.depend, dbch->ndesc);
1624 		STAILQ_REMOVE_HEAD(&ir->stfree, link);
1625 		STAILQ_INSERT_TAIL(&ir->stdma, chunk, link);
1628 	fwdma_sync_multiseg_all(dbch->am, BUS_DMASYNC_PREWRITE);
1629 	fwdma_sync_multiseg_all(dbch->am, BUS_DMASYNC_PREREAD);
1631 	stat = OREAD(sc, OHCI_IRCTL(dmach));
1632 	if (stat & OHCI_CNTL_DMA_ACTIVE)
1634 	if (stat & OHCI_CNTL_DMA_RUN) {
1635 		OWRITE(sc, OHCI_IRCTLCLR(dmach), OHCI_CNTL_DMA_RUN);
1636 		device_printf(sc->fc.dev, "IR DMA overrun (0x%08x)\n", stat);
1640 		printf("start IR DMA 0x%x\n", stat);
1641 	OWRITE(sc, OHCI_IR_MASKCLR, 1 << dmach);
1642 	OWRITE(sc, OHCI_IR_STATCLR, 1 << dmach);
1643 	OWRITE(sc, OHCI_IR_MASK, 1 << dmach);
1644 	OWRITE(sc, OHCI_IRCTLCLR(dmach), 0xf0000000);
1645 	OWRITE(sc, OHCI_IRCTL(dmach), OHCI_CNTL_ISOHDR);
1646 	OWRITE(sc, OHCI_IRCMD(dmach),
1647 		((struct fwohcidb_tr *)(first->start))->bus_addr
1649 	OWRITE(sc, OHCI_IRCTL(dmach), OHCI_CNTL_DMA_RUN);
1650 	OWRITE(sc, FWOHCI_INTMASK, OHCI_INT_DMA_IR);
1652 		dump_db(sc, IRX_CH + dmach);
/*
 * Quiesce the controller for detach/suspend: stop all async and iso DMA
 * contexts, soft-reset the link layer (flushes FIFOs), and mask the
 * interrupt sources this driver enabled.
 * NOTE(review): original lines are missing from this extract.
 */
1658 fwohci_stop(struct fwohci_softc *sc, device_t dev)
1662 /* Now stopping all DMA channel */
1663 	OWRITE(sc, OHCI_ARQCTLCLR, OHCI_CNTL_DMA_RUN);
1664 	OWRITE(sc, OHCI_ARSCTLCLR, OHCI_CNTL_DMA_RUN);
1665 	OWRITE(sc, OHCI_ATQCTLCLR, OHCI_CNTL_DMA_RUN);
1666 	OWRITE(sc, OHCI_ATSCTLCLR, OHCI_CNTL_DMA_RUN);
1668 	for( i = 0 ; i < sc->fc.nisodma ; i ++ ){
1669 		OWRITE(sc, OHCI_IRCTLCLR(i), OHCI_CNTL_DMA_RUN);
1670 		OWRITE(sc, OHCI_ITCTLCLR(i), OHCI_CNTL_DMA_RUN);
1673 /* FLUSH FIFO and reset Transmitter/Reciever */
1674 	OWRITE(sc, OHCI_HCCCTL, OHCI_HCC_RESET);
1676 /* Stop interrupt */
1677 	OWRITE(sc, FWOHCI_INTMASKCLR,
1678 			OHCI_INT_EN | OHCI_INT_ERR | OHCI_INT_PHY_SID
1680 			| OHCI_INT_DMA_ATRQ | OHCI_INT_DMA_ATRS
1681 			| OHCI_INT_DMA_PRRQ | OHCI_INT_DMA_PRRS
1682 			| OHCI_INT_DMA_ARRQ | OHCI_INT_DMA_ARRS
1683 			| OHCI_INT_PHY_BUS_R);
1684 /* XXX Link down?  Bus reset? */
/*
 * Power-management resume: reinitialize the controller, restart any
 * isochronous-receive channels that were RUNNING before suspend (by
 * clearing the flag and re-enabling them), resume children, and
 * initiate a bus reset so peers rediscover us.
 * XXX (from original): iso transmit is not resumed automatically.
 */
1689 fwohci_resume(struct fwohci_softc *sc, device_t dev)
1693 	fwohci_reset(sc, dev);
1694 	/* XXX resume isochronus receive automatically. (how about TX?) */
1695 	for(i = 0; i < sc->fc.nisodma; i ++) {
1696 		if((sc->ir[i].xferq.flag & FWXFERQ_RUNNING) != 0) {
1697 			device_printf(sc->fc.dev,
1698 				"resume iso receive ch: %d\n", i);
/* Clear RUNNING so irx_enable re-programs the context from scratch. */
1699 			sc->ir[i].xferq.flag &= ~FWXFERQ_RUNNING;
1700 			sc->fc.irx_enable(&sc->fc, i);
1704 	bus_generic_resume(dev);
1705 	sc->fc.ibr(&sc->fc);
/*
 * Main interrupt dispatcher: examine `stat` (already read and acked by
 * the caller) and service each asserted source in turn — bus reset,
 * iso RX/TX completion, async RX (request/response), self-ID reception,
 * async TX completion, and error conditions.  `count` bounds how many
 * AR packets are consumed per call (-1 = unlimited).
 * NOTE(review): this extract is missing many original lines (braces,
 * #else branches, error paths); comments describe visible code only.
 */
1711 fwohci_intr_body(struct fwohci_softc *sc, u_int32_t stat, int count)
1713 	u_int32_t irstat, itstat;
1715 	struct firewire_comm *fc = (struct firewire_comm *)sc;
/* Debug: decode every interrupt bit by name. */
1718 	if(stat & OREAD(sc, FWOHCI_INTMASK))
1719 		device_printf(fc->dev, "INTERRUPT < %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s> 0x%08x, 0x%08x\n",
1720 			stat & OHCI_INT_EN ? "DMA_EN ":"",
1721 			stat & OHCI_INT_PHY_REG ? "PHY_REG ":"",
1722 			stat & OHCI_INT_CYC_LONG ? "CYC_LONG ":"",
1723 			stat & OHCI_INT_ERR ? "INT_ERR ":"",
1724 			stat & OHCI_INT_CYC_ERR ? "CYC_ERR ":"",
1725 			stat & OHCI_INT_CYC_LOST ? "CYC_LOST ":"",
1726 			stat & OHCI_INT_CYC_64SECOND ? "CYC_64SECOND ":"",
1727 			stat & OHCI_INT_CYC_START ? "CYC_START ":"",
1728 			stat & OHCI_INT_PHY_INT ? "PHY_INT ":"",
1729 			stat & OHCI_INT_PHY_BUS_R ? "BUS_RESET ":"",
1730 			stat & OHCI_INT_PHY_SID ? "SID ":"",
1731 			stat & OHCI_INT_LR_ERR ? "DMA_LR_ERR ":"",
1732 			stat & OHCI_INT_PW_ERR ? "DMA_PW_ERR ":"",
1733 			stat & OHCI_INT_DMA_IR ? "DMA_IR ":"",
1734 			stat & OHCI_INT_DMA_IT  ? "DMA_IT " :"",
1735 			stat & OHCI_INT_DMA_PRRS  ? "DMA_PRRS " :"",
1736 			stat & OHCI_INT_DMA_PRRQ  ? "DMA_PRRQ " :"",
1737 			stat & OHCI_INT_DMA_ARRS  ? "DMA_ARRS " :"",
1738 			stat & OHCI_INT_DMA_ARRQ  ? "DMA_ARRQ " :"",
1739 			stat & OHCI_INT_DMA_ATRS  ? "DMA_ATRS " :"",
1740 			stat & OHCI_INT_DMA_ATRQ  ? "DMA_ATRQ " :"",
1741 			stat, OREAD(sc, FWOHCI_INTMASK)
/* --- Bus reset: stop AT contexts; further resets masked until SID. --- */
1745 	if(stat & OHCI_INT_PHY_BUS_R ){
1746 		if (fc->status == FWBUSRESET)
1748 		/* Disable bus reset interrupt until sid recv. */
1749 		OWRITE(sc, FWOHCI_INTMASKCLR,  OHCI_INT_PHY_BUS_R);
1751 			device_printf(fc->dev, "BUS reset\n");
1752 		OWRITE(sc, FWOHCI_INTMASKCLR,  OHCI_INT_CYC_LOST);
1753 		OWRITE(sc, OHCI_LNKCTLCLR, OHCI_CNTL_CYCSRC);
1755 		OWRITE(sc, OHCI_ATQCTLCLR, OHCI_CNTL_DMA_RUN);
1756 		sc->atrq.xferq.flag &= ~FWXFERQ_RUNNING;
1757 		OWRITE(sc, OHCI_ATSCTLCLR, OHCI_CNTL_DMA_RUN);
1758 		sc->atrs.xferq.flag &= ~FWXFERQ_RUNNING;
1761 		OWRITE(sc, FWOHCI_INTSTATCLR, OHCI_INT_PHY_BUS_R);
/* --- Iso receive completion: update each flagged IR channel. --- */
1766 	if((stat & OHCI_INT_DMA_IR )){
1768 		OWRITE(sc, FWOHCI_INTSTATCLR, OHCI_INT_DMA_IR);
1770 #if __FreeBSD_version >= 500000
1771 		irstat = atomic_readandclear_int(&sc->irstat);
1773 		irstat = sc->irstat;
1776 		for(i = 0; i < fc->nisodma ; i++){
1777 			struct fwohci_dbch *dbch;
1779 			if((irstat & (1 << i)) != 0){
1781 				if ((dbch->xferq.flag & FWXFERQ_OPEN) == 0) {
1782 					device_printf(sc->fc.dev,
1783 						"dma(%d) not active\n", i);
1786 				fwohci_rbuf_update(sc, i);
/* --- Iso transmit completion: update each flagged IT channel. --- */
1790 	if((stat & OHCI_INT_DMA_IT )){
1792 		OWRITE(sc, FWOHCI_INTSTATCLR, OHCI_INT_DMA_IT);
1794 #if __FreeBSD_version >= 500000
1795 		itstat = atomic_readandclear_int(&sc->itstat);
1797 		itstat = sc->itstat;
1800 		for(i = 0; i < fc->nisodma ; i++){
1801 			if((itstat & (1 << i)) != 0){
1802 				fwohci_tbuf_update(sc, i);
/* --- Async receive: response then request contexts. --- */
1806 	if((stat & OHCI_INT_DMA_PRRS )){
1808 		OWRITE(sc, FWOHCI_INTSTATCLR, OHCI_INT_DMA_PRRS);
1811 		dump_dma(sc, ARRS_CH);
1812 		dump_db(sc, ARRS_CH);
1814 		fwohci_arcv(sc, &sc->arrs, count);
1816 	if((stat & OHCI_INT_DMA_PRRQ )){
1818 		OWRITE(sc, FWOHCI_INTSTATCLR, OHCI_INT_DMA_PRRQ);
1821 		dump_dma(sc, ARRQ_CH);
1822 		dump_db(sc, ARRQ_CH);
1824 		fwohci_arcv(sc, &sc->arrq, count);
/* --- Self-ID packet received: post-bus-reset reconfiguration. --- */
1826 	if(stat & OHCI_INT_PHY_SID){
1827 		u_int32_t *buf, node_id;
1831 		OWRITE(sc, FWOHCI_INTSTATCLR, OHCI_INT_PHY_SID);
1833 		/* Enable bus reset interrupt */
1834 		OWRITE(sc, FWOHCI_INTMASK,  OHCI_INT_PHY_BUS_R);
1835 		/* Allow async. request to us */
1836 		OWRITE(sc, OHCI_AREQHI, 1 << 31);
1837 		/* XXX insecure ?? */
/* Physical-request filter opened wide (see XXX above). */
1838 		OWRITE(sc, OHCI_PREQHI, 0x7fffffff);
1839 		OWRITE(sc, OHCI_PREQLO, 0xffffffff);
1840 		OWRITE(sc, OHCI_PREQUPPER, 0x10000);
1841 		/* Set ATRetries register */
1842 		OWRITE(sc, OHCI_ATRETRY, 1<<(13+16) | 0xfff);
1844 		** Checking whether the node is root or not. If root, turn on
1847 		node_id = OREAD(sc, FWOHCI_NODEID);
1848 		plen = OREAD(sc, OHCI_SID_CNT);
1850 		device_printf(fc->dev, "node_id=0x%08x, gen=%d, ",
1851 			node_id, (plen >> 16) & 0xff);
1852 		if (!(node_id & OHCI_NODE_VALID)) {
1853 			printf("Bus reset failure\n");
/* Root node drives the cycle master; others only run the cycle timer. */
1856 		if (node_id & OHCI_NODE_ROOT) {
1857 			printf("CYCLEMASTER mode\n");
1858 			OWRITE(sc, OHCI_LNKCTL,
1859 				OHCI_CNTL_CYCMTR | OHCI_CNTL_CYCTIMER);
1861 			printf("non CYCLEMASTER mode\n");
1862 			OWRITE(sc, OHCI_LNKCTLCLR, OHCI_CNTL_CYCMTR);
1863 			OWRITE(sc, OHCI_LNKCTL, OHCI_CNTL_CYCTIMER);
1865 		fc->nodeid = node_id & 0x3f;
1867 		if (plen & OHCI_SID_ERR) {
1868 			device_printf(fc->dev, "SID Error\n");
1871 		plen &= OHCI_SID_CNT_MASK;
1872 		if (plen < 4 || plen > OHCI_SIDSIZE) {
1873 			device_printf(fc->dev, "invalid SID len = %d\n", plen);
1876 		plen -= 4; /* chop control info */
1877 		buf = (u_int32_t *)malloc(OHCI_SIDSIZE, M_FW, M_NOWAIT);
1879 			device_printf(fc->dev, "malloc failed\n");
/* Copy self-ID quadlets, skipping the leading generation word. */
1882 		for (i = 0; i < plen / 4; i ++)
1883 			buf[i] = FWOHCI_DMA_READ(sc->sid_buf[i+1]);
1885 		/* pending all pre-bus_reset packets */
1886 		fwohci_txd(sc, &sc->atrq);
1887 		fwohci_txd(sc, &sc->atrs);
1888 		fwohci_arcv(sc, &sc->arrs, -1);
1889 		fwohci_arcv(sc, &sc->arrq, -1);
1892 		fw_sidrcv(fc, buf, plen);
/* --- Async transmit completion and error sources. --- */
1896 	if((stat & OHCI_INT_DMA_ATRQ )){
1898 		OWRITE(sc, FWOHCI_INTSTATCLR, OHCI_INT_DMA_ATRQ);
1900 		fwohci_txd(sc, &(sc->atrq));
1902 	if((stat & OHCI_INT_DMA_ATRS )){
1904 		OWRITE(sc, FWOHCI_INTSTATCLR, OHCI_INT_DMA_ATRS);
1906 		fwohci_txd(sc, &(sc->atrs));
1908 	if((stat & OHCI_INT_PW_ERR )){
1910 		OWRITE(sc, FWOHCI_INTSTATCLR, OHCI_INT_PW_ERR);
1912 		device_printf(fc->dev, "posted write error\n");
1914 	if((stat & OHCI_INT_ERR )){
1916 		OWRITE(sc, FWOHCI_INTSTATCLR, OHCI_INT_ERR);
1918 		device_printf(fc->dev, "unrecoverable error\n");
1920 	if((stat & OHCI_INT_PHY_INT)) {
1922 		OWRITE(sc, FWOHCI_INTSTATCLR, OHCI_INT_PHY_INT);
1924 		device_printf(fc->dev, "phy int\n");
/*
 * Taskqueue deferred handler (only built when FWOHCI_TASKQUEUE):
 * atomically fetch the interrupt bits accumulated by fwohci_intr() and
 * run the full dispatcher with no packet-count limit.
 */
1930 #if FWOHCI_TASKQUEUE
1932 fwohci_complete(void *arg, int pending)
1934 	struct fwohci_softc *sc = (struct fwohci_softc *)arg;
1938 	stat = atomic_readandclear_int(&sc->intstat);
1940 		fwohci_intr_body(sc, stat, -1);
/*
 * Read and acknowledge the global interrupt status register, plus the
 * per-channel IR/IT status registers when their summary bits are set
 * (latching those into sc->irstat/itstat for the dispatcher).  Returns
 * the status word; 0xffffffff means the device has likely been ejected.
 * NOTE(review): the function name is misspelled ("fwochi") in the
 * original source; callers use the same spelling, so leave as-is.
 */
1948 fwochi_check_stat(struct fwohci_softc *sc)
1950 	u_int32_t stat, irstat, itstat;
1952 	stat = OREAD(sc, FWOHCI_INTSTAT);
1953 	if (stat == 0xffffffff) {
1954 		device_printf(sc->fc.dev,
1955 			"device physically ejected?\n");
1960 		OWRITE(sc, FWOHCI_INTSTATCLR, stat);
1962 	if (stat & OHCI_INT_DMA_IR) {
1963 		irstat = OREAD(sc, OHCI_IR_STAT);
1964 		OWRITE(sc, OHCI_IR_STATCLR, irstat);
1965 		atomic_set_int(&sc->irstat, irstat);
1967 	if (stat & OHCI_INT_DMA_IT) {
1968 		itstat = OREAD(sc, OHCI_IT_STAT);
1969 		OWRITE(sc, OHCI_IT_STATCLR, itstat);
1970 		atomic_set_int(&sc->itstat, itstat);
/*
 * Hardware interrupt entry point.  Bails out if interrupts are not
 * enabled in the soft mask; otherwise reads/acks status and either
 * defers to the taskqueue (FWOHCI_TASKQUEUE build) or loops calling
 * fwohci_intr_body() directly, taking care not to consume a bus-reset
 * event twice while the reset phase is in progress.
 * NOTE(review): original lines are missing from this extract.
 */
1976 fwohci_intr(void *arg)
1978 	struct fwohci_softc *sc = (struct fwohci_softc *)arg;
1980 #if !FWOHCI_TASKQUEUE
1981 	u_int32_t bus_reset = 0;
1984 	if (!(sc->intmask & OHCI_INT_EN)) {
1989 #if !FWOHCI_TASKQUEUE
1992 		stat = fwochi_check_stat(sc);
1993 		if (stat == 0 || stat == 0xffffffff)
1995 #if FWOHCI_TASKQUEUE
1996 		atomic_set_int(&sc->intstat, stat);
1997 		/* XXX mask bus reset intr. during bus reset phase */
1999 		taskqueue_enqueue(taskqueue_swi_giant, &sc->fwohci_task_complete);
2001 		/* We cannot clear bus reset event during bus reset phase */
2002 		if ((stat & ~bus_reset) == 0)
/* Remember the bus-reset bit so a re-read doesn't re-dispatch it. */
2004 		bus_reset = stat & OHCI_INT_PHY_BUS_R;
2005 		fwohci_intr_body(sc, stat, -1);
/*
 * Polled-mode service (e.g. for kernel debugger / no-interrupt paths):
 * check status and dispatch up to `count` packets per DMA source.
 * NOTE(review): the initial `stat` mask built here appears unused in the
 * visible code (overwritten by fwochi_check_stat) — original lines are
 * missing; confirm against full source.
 */
2011 fwohci_poll(struct firewire_comm *fc, int quick, int count)
2015 	struct fwohci_softc *sc;
2018 	sc = (struct fwohci_softc *)fc;
2019 	stat = OHCI_INT_DMA_IR | OHCI_INT_DMA_IT |
2020 		OHCI_INT_DMA_PRRS | OHCI_INT_DMA_PRRQ |
2021 		OHCI_INT_DMA_ATRQ | OHCI_INT_DMA_ATRS;
2027 	stat = fwochi_check_stat(sc);
2028 	if (stat == 0 || stat == 0xffffffff)
2032 	fwohci_intr_body(sc, stat, count);
/*
 * Globally enable or disable the controller's interrupt generation,
 * keeping the soft copy (sc->intmask) in sync with the hardware
 * master-enable bit.
 */
2037 fwohci_set_intr(struct firewire_comm *fc, int enable)
2039 	struct fwohci_softc *sc;
2041 	sc = (struct fwohci_softc *)fc;
2043 		device_printf(sc->fc.dev, "fwohci_set_intr: %d\n", enable);
2045 		sc->intmask |= OHCI_INT_EN;
2046 		OWRITE(sc, FWOHCI_INTMASK, OHCI_INT_EN);
2048 		sc->intmask &= ~OHCI_INT_EN;
2049 		OWRITE(sc, FWOHCI_INTMASKCLR, OHCI_INT_EN);
/*
 * Reap completed isochronous-transmit chunks on channel `dmach`: for
 * each chunk on the stdma list whose last descriptor reports a status,
 * log non-ACKCOMPL events and move the chunk back to the free list.
 * NOTE(review): original lines are missing from this extract (including
 * the loop-exit condition for still-pending chunks).
 */
2054 fwohci_tbuf_update(struct fwohci_softc *sc, int dmach)
2056 	struct firewire_comm *fc = &sc->fc;
2057 	volatile struct fwohcidb *db;
2058 	struct fw_bulkxfer *chunk;
2059 	struct fw_xferq *it;
2060 	u_int32_t stat, count;
2064 	ldesc = sc->it[dmach].ndesc - 1;
2065 	s = splfw(); /* unnecessary ? */
2066 	fwdma_sync_multiseg_all(sc->it[dmach].am, BUS_DMASYNC_POSTREAD);
2067 	while ((chunk = STAILQ_FIRST(&it->stdma)) != NULL) {
/* Completion status lives in the last descriptor of the chunk. */
2068 		db = ((struct fwohcidb_tr *)(chunk->end))->db;
2069 		stat = FWOHCI_DMA_READ(db[ldesc].db.desc.res)
2070 				>> OHCI_STATUS_SHIFT;
2071 		db = ((struct fwohcidb_tr *)(chunk->start))->db;
2072 		count = FWOHCI_DMA_READ(db[ldesc].db.desc.res)
2076 		STAILQ_REMOVE_HEAD(&it->stdma, link);
2077 		switch (stat & FWOHCIEV_MASK){
2078 		case FWOHCIEV_ACKCOMPL:
2080 				device_printf(fc->dev, "0x%08x\n", count);
2084 			device_printf(fc->dev,
2085 				"Isochronous transmit err %02x(%s)\n",
2086 					stat, fwohcicode[stat & 0x1f]);
2088 		STAILQ_INSERT_TAIL(&it->stfree, chunk, link);
/*
 * Reap completed isochronous-receive chunks on channel `dmach`: sync or
 * unload each finished chunk's DMA mapping (mbuf or multiseg buffer),
 * move it from stdma to stvalid, flag errors, and finally invoke the
 * queue's handler if one is registered.
 * NOTE(review): original lines are missing from this extract; the
 * "shouldn't happend" typo below is a runtime string, left untouched.
 */
2097 fwohci_rbuf_update(struct fwohci_softc *sc, int dmach)
2099 	struct firewire_comm *fc = &sc->fc;
2100 	volatile struct fwohcidb_tr *db_tr;
2101 	struct fw_bulkxfer *chunk;
2102 	struct fw_xferq *ir;
2107 	ldesc = sc->ir[dmach].ndesc - 1;
2112 	fwdma_sync_multiseg_all(sc->ir[dmach].am, BUS_DMASYNC_POSTREAD);
2113 	while ((chunk = STAILQ_FIRST(&ir->stdma)) != NULL) {
2114 		db_tr = (struct fwohcidb_tr *)chunk->end;
2115 		stat = FWOHCI_DMA_READ(db_tr->db[ldesc].db.desc.res)
2116 				>> OHCI_STATUS_SHIFT;
/* Post-DMA sync: mbuf path unloads the map; buffer path syncs segs. */
2120 		if (chunk->mbuf != NULL) {
2121 			bus_dmamap_sync(sc->ir[dmach].dmat, db_tr->dma_map,
2122 						BUS_DMASYNC_POSTREAD);
2123 			bus_dmamap_unload(sc->ir[dmach].dmat, db_tr->dma_map);
2124 		} else if (ir->buf != NULL) {
2125 			fwdma_sync_multiseg(ir->buf, chunk->poffset,
2126 				ir->bnpacket, BUS_DMASYNC_POSTREAD);
2129 			printf("fwohci_rbuf_update: this shouldn't happend\n");
2132 		STAILQ_REMOVE_HEAD(&ir->stdma, link);
2133 		STAILQ_INSERT_TAIL(&ir->stvalid, chunk, link);
2134 		switch (stat & FWOHCIEV_MASK) {
2135 		case FWOHCIEV_ACKCOMPL:
2139 			chunk->resp = EINVAL;
2140 			device_printf(fc->dev,
2141 				"Isochronous receive err %02x(%s)\n",
2142 					stat, fwohcicode[stat & 0x1f]);
2148 	if (ir->flag & FWXFERQ_HANDLER)
/*
 * Debug helper: print the control/command/match registers of DMA
 * context `ch` (async, IT, or IR selected by range) and decode the
 * context-control status bits by name.
 * NOTE(review): original lines are missing from this extract.
 */
2156 dump_dma(struct fwohci_softc *sc, u_int32_t ch)
2158 	u_int32_t off, cntl, stat, cmd, match;
2168 	}else if(ch < IRX_CH){
2169 		off = OHCI_ITCTL(ch - ITX_CH);
2171 		off = OHCI_IRCTL(ch - IRX_CH);
2173 	cntl = stat = OREAD(sc, off);
2174 	cmd = OREAD(sc, off + 0xc);
2175 	match = OREAD(sc, off + 0x10);
2177 	device_printf(sc->fc.dev, "ch %1x cntl:0x%08x cmd:0x%08x match:0x%08x\n",
2184 	device_printf(sc->fc.dev, "dma %d ch:%s%s%s%s%s%s %s(%x)\n",
2186 		stat & OHCI_CNTL_DMA_RUN ? "RUN," : "",
2187 		stat & OHCI_CNTL_DMA_WAKE ? "WAKE," : "",
2188 		stat & OHCI_CNTL_DMA_DEAD ? "DEAD," : "",
2189 		stat & OHCI_CNTL_DMA_ACTIVE ? "ACTIVE," : "",
2190 		stat & OHCI_CNTL_DMA_BT ? "BRANCH," : "",
2191 		stat & OHCI_CNTL_DMA_BAD ? "BADDMA," : "",
2192 		fwohcicode[stat & 0x1f],
2196 	device_printf(sc->fc.dev, "dma %d ch: Nostat\n", ch);
/*
 * Debug helper: walk the descriptor ring of channel `ch`, locate the
 * block the controller's CommandPtr currently references, and print the
 * previous, current, and next descriptor blocks via print_db().
 * NOTE(review): original lines are missing from this extract.
 */
2201 dump_db(struct fwohci_softc *sc, u_int32_t ch)
2203 	struct fwohci_dbch *dbch;
2204 	struct fwohcidb_tr *cp = NULL, *pp, *np = NULL;
2205 	volatile struct fwohcidb *curr = NULL, *prev, *next = NULL;
2220 	}else if(ch < IRX_CH){
2221 		off = OHCI_ITCTL(ch - ITX_CH);
2222 		dbch = &sc->it[ch - ITX_CH];
2224 		off = OHCI_IRCTL(ch - IRX_CH);
2225 		dbch = &sc->ir[ch - IRX_CH];
2227 	cmd = OREAD(sc, off + 0xc);
2229 	if( dbch->ndb == 0 ){
2230 		device_printf(sc->fc.dev, "No DB is attached ch=%d\n", ch);
2235 	for(idb = 0 ; idb < dbch->ndb ; idb ++ ){
2240 		cp = STAILQ_NEXT(pp, link);
2245 		np = STAILQ_NEXT(cp, link);
2246 		for(jdb = 0 ; jdb < dbch->ndesc ; jdb ++ ){
/* CommandPtr's low 4 bits are the Z count; compare the address part. */
2247 			if ((cmd  & 0xfffffff0) == cp->bus_addr) {
2257 		pp = STAILQ_NEXT(pp, link);
2263 		printf("Prev DB %d\n", ch);
2264 		print_db(pp, prev, ch, dbch->ndesc);
2266 		printf("Current DB %d\n", ch);
2267 		print_db(cp, curr, ch, dbch->ndesc);
2269 		printf("Next DB %d\n", ch);
2270 		print_db(np, next, ch, dbch->ndesc);
2273 		printf("dbdump err ch = %d cmd = 0x%08x\n", ch, cmd);
/*
 * Debug helper: pretty-print up to `max`+1 descriptors of one block —
 * command/key/condition fields, addresses, branch targets, counts, and
 * decoded completion status; immediate-data quadlets are shown for
 * OHCI_KEY_ST2 descriptors.
 * NOTE(review): original lines are missing from this extract (the format
 * header strings, loop-termination logic for KEY_DEVICE, etc.).
 */
2279 print_db(struct fwohcidb_tr *db_tr, volatile struct fwohcidb *db,
2280 		u_int32_t ch, u_int32_t max)
2287 		printf("No Descriptor is found\n");
2291 	printf("ch = %d\n%8s %s %s %s %s %4s %8s %8s %4s:%4s\n",
2303 	for( i = 0 ; i <= max ; i ++){
2304 		cmd = FWOHCI_DMA_READ(db[i].db.desc.cmd);
2305 		res = FWOHCI_DMA_READ(db[i].db.desc.res);
2306 		key = cmd & OHCI_KEY_MASK;
2307 		stat = res >> OHCI_STATUS_SHIFT;
2308 #if __FreeBSD_version >= 500000
2309 		printf("%08jx %s %s %s %s %5d %08x %08x %04x:%04x",
2310 				(uintmax_t)db_tr->bus_addr,
2312 		printf("%08x %s %s %s %s %5d %08x %08x %04x:%04x",
2315 				dbcode[(cmd >> 28) & 0xf],
2316 				dbkey[(cmd >> 24) & 0x7],
2317 				dbcond[(cmd >> 20) & 0x3],
2318 				dbcond[(cmd >> 18) & 0x3],
2319 				cmd & OHCI_COUNT_MASK,
2320 				FWOHCI_DMA_READ(db[i].db.desc.addr),
2321 				FWOHCI_DMA_READ(db[i].db.desc.depend),
2323 				res & OHCI_COUNT_MASK);
2325 			printf(" %s%s%s%s%s%s %s(%x)\n",
2326 				stat & OHCI_CNTL_DMA_RUN ? "RUN," : "",
2327 				stat & OHCI_CNTL_DMA_WAKE ? "WAKE," : "",
2328 				stat & OHCI_CNTL_DMA_DEAD ? "DEAD," : "",
2329 				stat & OHCI_CNTL_DMA_ACTIVE ? "ACTIVE," : "",
2330 				stat & OHCI_CNTL_DMA_BT ? "BRANCH," : "",
2331 				stat & OHCI_CNTL_DMA_BAD ? "BADDMA," : "",
2332 				fwohcicode[stat & 0x1f],
2336 			printf(" Nostat\n");
/* ST2 descriptors carry four quadlets of immediate packet-header data. */
2338 		if(key == OHCI_KEY_ST2 ){
2339 			printf("0x%08x 0x%08x 0x%08x 0x%08x\n",
2340 				FWOHCI_DMA_READ(db[i+1].db.immed[0]),
2341 				FWOHCI_DMA_READ(db[i+1].db.immed[1]),
2342 				FWOHCI_DMA_READ(db[i+1].db.immed[2]),
2343 				FWOHCI_DMA_READ(db[i+1].db.immed[3]));
2345 		if(key == OHCI_KEY_DEVICE){
2348 		if((cmd & OHCI_BRANCH_MASK)
2349 				== OHCI_BRANCH_ALWAYS){
2352 		if((cmd & OHCI_CMD_MASK)
2353 				== OHCI_OUTPUT_LAST){
2356 		if((cmd & OHCI_CMD_MASK)
2357 				== OHCI_INPUT_LAST){
2360 		if(key == OHCI_KEY_ST2 ){
/*
 * Initiate a 1394 bus reset via the PHY: set the root-hold-off bit so a
 * non-cyclemaster-capable node cannot become root, then trigger either a
 * long (IBR) or short (ISBR) reset depending on the build-time #if
 * (whose condition line is missing from this extract).
 */
2368 fwohci_ibr(struct firewire_comm *fc)
2370 	struct fwohci_softc *sc;
2373 		device_printf(fc->dev, "Initiate bus reset\n");
2374 	sc = (struct fwohci_softc *)fc;
2377 	 * Set root hold-off bit so that non cyclemaster capable node
2378 	 * shouldn't became the root node.
2381 	fun = fwphy_rddata(sc, FW_PHY_IBR_REG);
2382 	fun |= FW_PHY_IBR | FW_PHY_RHB;
2383 	fun = fwphy_wrdata(sc, FW_PHY_IBR_REG, fun);
2384 #else	/* Short bus reset */
2385 	fun = fwphy_rddata(sc, FW_PHY_ISBR_REG);
2386 	fun |= FW_PHY_ISBR | FW_PHY_RHB;
2387 	fun = fwphy_wrdata(sc, FW_PHY_ISBR_REG, fun);
/*
 * Fill in the IT descriptor blocks for one bulkxfer chunk: for each
 * packet, build the OHCI stream-packet header in the immediate
 * descriptor (tcode 0xa, channel/tag from the queue flag) and program
 * the payload OUTPUT_LAST descriptor's length; then clear the Z fields
 * on the chunk's final block so the chain terminates there.
 * NOTE(review): original lines are missing from this extract.
 */
2392 fwohci_txbufdb(struct fwohci_softc *sc, int dmach, struct fw_bulkxfer *bulkxfer)
2394 	struct fwohcidb_tr *db_tr, *fdb_tr;
2395 	struct fwohci_dbch *dbch;
2396 	volatile struct fwohcidb *db;
2398 	volatile struct fwohci_txpkthdr *ohcifp;
2399 	unsigned short chtag;
2402 	dbch = &sc->it[dmach];
2403 	chtag = sc->it[dmach].xferq.flag & 0xff;
2405 	db_tr = (struct fwohcidb_tr *)(bulkxfer->start);
2406 	fdb_tr = (struct fwohcidb_tr *)(bulkxfer->end);
2408 	device_printf(sc->fc.dev, "DB %08x %08x %08x\n", bulkxfer, db_tr->bus_addr, fdb_tr->bus_addr);
2410 	for (idb = 0; idb < dbch->xferq.bnpacket; idb ++) {
2412 		fp = (struct fw_pkt *)db_tr->buf;
/* db[1] holds the immediate stream-packet header written by the host. */
2413 		ohcifp = (volatile struct fwohci_txpkthdr *) db[1].db.immed;
2414 		ohcifp->mode.ld[0] = fp->mode.ld[0];
2415 		ohcifp->mode.stream.len = fp->mode.stream.len;
2416 		ohcifp->mode.stream.chtag = chtag;
2417 		ohcifp->mode.stream.tcode = 0xa;
2418 		ohcifp->mode.stream.spd = 0;
2419 #if BYTE_ORDER == BIG_ENDIAN
2420 		FWOHCI_DMA_WRITE(db[1].db.immed[0], db[1].db.immed[0]);
2421 		FWOHCI_DMA_WRITE(db[1].db.immed[1], db[1].db.immed[1]);
/* db[2] is the payload OUTPUT_LAST descriptor; patch its length. */
2424 		FWOHCI_DMA_CLEAR(db[2].db.desc.cmd, OHCI_COUNT_MASK);
2425 		FWOHCI_DMA_SET(db[2].db.desc.cmd, fp->mode.stream.len);
2426 		FWOHCI_DMA_WRITE(db[2].db.desc.res, 0);
2427 #if 0 /* if bulkxfer->npackets changes */
2428 		db[2].db.desc.cmd = OHCI_OUTPUT_LAST
2430 			| OHCI_BRANCH_ALWAYS;
2431 		db[0].db.desc.depend =
2432 			= db[dbch->ndesc - 1].db.desc.depend
2433 			= STAILQ_NEXT(db_tr, link)->bus_addr | dbch->ndesc;
2435 		FWOHCI_DMA_SET(db[0].db.desc.depend, dbch->ndesc);
2436 		FWOHCI_DMA_SET(db[dbch->ndesc - 1].db.desc.depend, dbch->ndesc);
2438 	bulkxfer->end = (caddr_t)db_tr;
2439 	db_tr = STAILQ_NEXT(db_tr, link);
/* Terminate the chunk: zero the Z fields of its last block's branches. */
2441 	db = ((struct fwohcidb_tr *)bulkxfer->end)->db;
2442 	FWOHCI_DMA_CLEAR(db[0].db.desc.depend, 0xf);
2443 	FWOHCI_DMA_CLEAR(db[dbch->ndesc - 1].db.desc.depend, 0xf);
2444 #if 0 /* if bulkxfer->npackets changes */
2445 	db[dbch->ndesc - 1].db.desc.control |= OHCI_INTERRUPT_ALWAYS;
2446 	/* OHCI 1.1 and above */
2447 	db[0].db.desc.control |= OHCI_INTERRUPT_ALWAYS;
2450 	db_tr = (struct fwohcidb_tr *)bulkxfer->start;
2451 	fdb_tr = (struct fwohcidb_tr *)bulkxfer->end;
2452 	device_printf(sc->fc.dev, "DB %08x %3d %08x %08x\n", bulkxfer, bulkxfer->npacket, db_tr->bus_addr, fdb_tr->bus_addr);
/*
 * Initialize one IT descriptor block for transmit at packet slot
 * `poffset` of the queue's multiseg buffer: db[0] is an OUTPUT_MORE
 * immediate (8-byte header, KEY_ST2), db[2] the OUTPUT_LAST payload
 * descriptor pointing just past the header quadlet.
 * NOTE(review): original lines are missing from this extract.
 */
2458 fwohci_add_tx_buf(struct fwohci_dbch *dbch, struct fwohcidb_tr *db_tr,
2461 	volatile struct fwohcidb *db = db_tr->db;
2462 	struct fw_xferq *it;
2470 	db_tr->buf = fwdma_v_addr(it->buf, poffset);
2473 	FWOHCI_DMA_WRITE(db[0].db.desc.cmd,
2474 		OHCI_OUTPUT_MORE | OHCI_KEY_ST2 | 8);
/* Payload starts one quadlet in (skips the stored packet header word). */
2475 	FWOHCI_DMA_WRITE(db[2].db.desc.addr,
2476 	fwdma_bus_addr(it->buf, poffset) + sizeof(u_int32_t));
2478 	FWOHCI_DMA_WRITE(db[2].db.desc.cmd,
2479 		OHCI_OUTPUT_LAST | OHCI_UPDATE | OHCI_BRANCH_ALWAYS);
2481 	FWOHCI_DMA_WRITE(db[0].db.desc.res, 0);
2482 	FWOHCI_DMA_WRITE(db[2].db.desc.res, 0);
/*
 * Initialize one receive descriptor block: allocate (or reference) the
 * packet buffer, optionally prepend a 4-byte dummy segment, and program
 * INPUT_MORE descriptors for each segment; for stream (iso) queues the
 * last descriptor gets UPDATE/INPUT_LAST, otherwise BRANCH_ALWAYS.
 * NOTE(review): original lines are missing from this extract.
 */
2488 fwohci_add_rx_buf(struct fwohci_dbch *dbch, struct fwohcidb_tr *db_tr,
2489 		int poffset, struct fwdma_alloc *dummy_dma)
2491 	volatile struct fwohcidb *db = db_tr->db;
2492 	struct fw_xferq *ir;
/* No external buffer: allocate a private DMA buffer per descriptor. */
2498 	if (ir->buf == NULL && (dbch->xferq.flag & FWXFERQ_EXTBUF) == 0) {
2499 		db_tr->buf = fwdma_malloc_size(dbch->dmat, &db_tr->dma_map,
2500 		    ir->psize, &dbuf[0], BUS_DMA_NOWAIT);
2501 		if (db_tr->buf == NULL)
2504 		dsiz[0] = ir->psize;
2505 		bus_dmamap_sync(dbch->dmat, db_tr->dma_map,
2506 			BUS_DMASYNC_PREREAD);
2509 		if (dummy_dma != NULL) {
2510 			dsiz[db_tr->dbcnt] = sizeof(u_int32_t);
2511 			dbuf[db_tr->dbcnt++] = dummy_dma->bus_addr;
2513 		dsiz[db_tr->dbcnt] = ir->psize;
2514 		if (ir->buf != NULL) {
2515 			db_tr->buf = fwdma_v_addr(ir->buf, poffset);
2516 			dbuf[db_tr->dbcnt] = fwdma_bus_addr( ir->buf, poffset);
2520 	for(i = 0 ; i < db_tr->dbcnt ; i++){
2521 		FWOHCI_DMA_WRITE(db[i].db.desc.addr, dbuf[i]);
2522 		FWOHCI_DMA_WRITE(db[i].db.desc.cmd, OHCI_INPUT_MORE | dsiz[i]);
2523 		if (ir->flag & FWXFERQ_STREAM) {
2524 			FWOHCI_DMA_SET(db[i].db.desc.cmd, OHCI_UPDATE);
/* Residual count starts equal to the segment size. */
2526 		FWOHCI_DMA_WRITE(db[i].db.desc.res, dsiz[i]);
2528 	ldesc = db_tr->dbcnt - 1;
2529 	if (ir->flag & FWXFERQ_STREAM) {
2530 		FWOHCI_DMA_SET(db[ldesc].db.desc.cmd, OHCI_INPUT_LAST);
2532 	FWOHCI_DMA_SET(db[ldesc].db.desc.cmd, OHCI_BRANCH_ALWAYS);
/*
 * Determine the header length of a received async packet from its first
 * quadlet's tcode and, on big-endian hosts, byte-swap the header
 * quadlets in place.  Returns the header length (`slen`), or a negative
 * value for an unknown tcode (per the visible error branch).
 * NOTE(review): most tcode cases and the slen assignments are missing
 * from this extract.
 */
2538 fwohci_arcv_swap(struct fw_pkt *fp, int len)
2543 #if BYTE_ORDER == BIG_ENDIAN
/* Read the first quadlet in host order to inspect the tcode. */
2547 	ld0 = FWOHCI_DMA_READ(fp->mode.ld[0]);
2549 		printf("ld0: x%08x\n", ld0);
2551 	fp0 = (struct fw_pkt *)&ld0;
2552 	switch (fp0->mode.common.tcode) {
2557 	case FWOHCITCODE_PHY:
2568 		printf("Unknown tcode %d\n", fp0->mode.common.tcode);
2573 			printf("splitted header\n");
2576 #if BYTE_ORDER == BIG_ENDIAN
2577 	for(i = 0; i < slen/4; i ++)
2578 		fp->mode.ld[i] = FWOHCI_DMA_READ(fp->mode.ld[i]);
/*
 * Compute the total on-wire length of a received async packet: header
 * size per tcode, plus quadlet-rounded payload for block/lock forms,
 * plus one trailing quadlet for the OHCI status trailer.  Rejects
 * unknown tcodes and lengths exceeding the queue's packet size.
 * NOTE(review): the case labels and return statements between the
 * visible lines are missing from this extract.
 */
2583 #define PLEN(x)	roundup2(x, sizeof(u_int32_t))
2585 fwohci_get_plen(struct fwohci_softc *sc, struct fwohci_dbch *dbch, struct fw_pkt *fp)
2589 	switch(fp->mode.common.tcode){
2591 		r = sizeof(fp->mode.rreqq) + sizeof(u_int32_t);
2594 		r = sizeof(fp->mode.wres) + sizeof(u_int32_t);
2597 		r = sizeof(fp->mode.wreqq) + sizeof(u_int32_t);
2600 		r = sizeof(fp->mode.rreqb) + sizeof(u_int32_t);
2603 		r = sizeof(fp->mode.rresq) + sizeof(u_int32_t);
2606 		r = sizeof(struct fw_asyhdr) + PLEN(fp->mode.wreqb.len)
2607 						+ sizeof(u_int32_t);
2610 		r = sizeof(struct fw_asyhdr) + PLEN(fp->mode.lreq.len)
2611 						+ sizeof(u_int32_t);
2614 		r = sizeof(struct fw_asyhdr) + PLEN(fp->mode.rresb.len)
2615 						+ sizeof(u_int32_t);
2618 		r = sizeof(struct fw_asyhdr) + PLEN(fp->mode.lres.len)
2619 						+ sizeof(u_int32_t);
2621 	case FWOHCITCODE_PHY:
2625 		device_printf(sc->fc.dev, "Unknown tcode %d\n",
2626 						fp->mode.common.tcode);
/* Sanity: a packet can never exceed the receive buffer's packet size. */
2629 	if (r > dbch->xferq.psize) {
2630 		device_printf(sc->fc.dev, "Invalid packet length %d\n", r);
/*
 * Recycle a consumed AR descriptor block: clear its branch Z field,
 * reset the residual count to a full buffer, re-link it as the new
 * bottom of the ring (Z=1 on the old bottom's branch so the controller
 * can advance into it), and sync the ring for the device.
 */
2637 fwohci_arcv_free_buf(struct fwohci_dbch *dbch, struct fwohcidb_tr *db_tr)
2639 	volatile struct fwohcidb *db = &db_tr->db[0];
2641 	FWOHCI_DMA_CLEAR(db->db.desc.depend, 0xf);
2642 	FWOHCI_DMA_WRITE(db->db.desc.res, dbch->xferq.psize);
2643 	FWOHCI_DMA_SET(dbch->bottom->db[0].db.desc.depend, 1);
2644 	fwdma_sync_multiseg_all(dbch->am, BUS_DMASYNC_PREWRITE);
2645 	dbch->bottom = db_tr;
2649 fwohci_arcv(struct fwohci_softc *sc, struct fwohci_dbch *dbch, int count)
2651 struct fwohcidb_tr *db_tr;
2652 struct iovec vec[2];
2653 struct fw_pkt pktbuf;
2657 u_int32_t stat, off, status;
2659 int len, plen, hlen, pcnt, offset;
2664 if(&sc->arrq == dbch){
2666 }else if(&sc->arrs == dbch){
2675 /* XXX we cannot handle a packet which lies in more than two buf */
2676 fwdma_sync_multiseg_all(dbch->am, BUS_DMASYNC_POSTREAD);
2677 fwdma_sync_multiseg_all(dbch->am, BUS_DMASYNC_POSTWRITE);
2678 status = FWOHCI_DMA_READ(db_tr->db[0].db.desc.res) >> OHCI_STATUS_SHIFT;
2679 resCount = FWOHCI_DMA_READ(db_tr->db[0].db.desc.res) & OHCI_COUNT_MASK;
2681 printf("status 0x%04x, resCount 0x%04x\n", status, resCount);
2683 while (status & OHCI_CNTL_DMA_ACTIVE) {
2684 len = dbch->xferq.psize - resCount;
2685 ld = (u_int8_t *)db_tr->buf;
2686 if (dbch->pdb_tr == NULL) {
2687 len -= dbch->buf_offset;
2688 ld += dbch->buf_offset;
2691 bus_dmamap_sync(dbch->dmat, db_tr->dma_map,
2692 BUS_DMASYNC_POSTREAD);
2694 if (count >= 0 && count-- == 0)
2696 if(dbch->pdb_tr != NULL){
2697 /* we have a fragment in previous buffer */
2700 offset = dbch->buf_offset;
2703 buf = dbch->pdb_tr->buf + offset;
2704 rlen = dbch->xferq.psize - offset;
2706 printf("rlen=%d, offset=%d\n",
2707 rlen, dbch->buf_offset);
2708 if (dbch->buf_offset < 0) {
2709 /* splitted in header, pull up */
2712 p = (char *)&pktbuf;
2713 bcopy(buf, p, rlen);
2715 /* this must be too long but harmless */
2716 rlen = sizeof(pktbuf) - rlen;
2718 printf("why rlen < 0\n");
2719 bcopy(db_tr->buf, p, rlen);
2722 hlen = fwohci_arcv_swap(&pktbuf, sizeof(pktbuf));
2724 printf("hlen < 0 shouldn't happen");
2726 offset = sizeof(pktbuf);
2727 vec[0].iov_base = (char *)&pktbuf;
2728 vec[0].iov_len = offset;
2730 /* splitted in payload */
2732 vec[0].iov_base = buf;
2733 vec[0].iov_len = rlen;
2735 fp=(struct fw_pkt *)vec[0].iov_base;
2738 /* no fragment in previous buffer */
2739 fp=(struct fw_pkt *)ld;
2740 hlen = fwohci_arcv_swap(fp, len);
2742 /* XXX need reset */
2745 dbch->pdb_tr = db_tr;
2746 dbch->buf_offset = - dbch->buf_offset;
2749 printf("resCount != 0 !?\n");
2755 plen = fwohci_get_plen(sc, dbch, fp) - offset;
2757 /* minimum header size + trailer
2758 = sizeof(fw_pkt) so this shouldn't happens */
2759 printf("plen is negative! offset=%d\n", offset);
2765 dbch->pdb_tr = db_tr;
2767 printf("splitted payload\n");
2770 printf("resCount != 0 !?\n");
2773 vec[nvec].iov_base = ld;
2774 vec[nvec].iov_len = plen;
2778 dbch->buf_offset = ld - (u_int8_t *)db_tr->buf;
2780 printf("nvec == 0\n");
2782 /* DMA result-code will be written at the tail of packet */
2783 #if BYTE_ORDER == BIG_ENDIAN
2784 stat = FWOHCI_DMA_READ(((struct fwohci_trailer *)(ld - sizeof(struct fwohci_trailer)))->stat) >> 16;
2786 stat = ((struct fwohci_trailer *)(ld - sizeof(struct fwohci_trailer)))->stat;
2789 printf("plen: %d, stat %x\n", plen ,stat);
2791 spd = (stat >> 5) & 0x3;
2794 case FWOHCIEV_ACKPEND:
2796 printf("fwohci_arcv: ack pending tcode=0x%x..\n", fp->mode.common.tcode);
2799 case FWOHCIEV_ACKCOMPL:
2800 if ((vec[nvec-1].iov_len -=
2801 sizeof(struct fwohci_trailer)) == 0)
2803 fw_rcv(&sc->fc, vec, nvec, 0, spd);
2805 case FWOHCIEV_BUSRST:
2806 if (sc->fc.status != FWBUSRESET)
2807 printf("got BUSRST packet!?\n");
2810 device_printf(sc->fc.dev, "Async DMA Receive error err = %02x %s\n", stat, fwohcicode[stat]);
2817 if (dbch->pdb_tr != NULL) {
2818 fwohci_arcv_free_buf(dbch, dbch->pdb_tr);
2819 dbch->pdb_tr = NULL;
2824 if (resCount == 0) {
2825 /* done on this buffer */
2826 if (dbch->pdb_tr == NULL) {
2827 fwohci_arcv_free_buf(dbch, db_tr);
2828 dbch->buf_offset = 0;
2830 if (dbch->pdb_tr != db_tr)
2831 printf("pdb_tr != db_tr\n");
2832 db_tr = STAILQ_NEXT(db_tr, link);
2833 status = FWOHCI_DMA_READ(db_tr->db[0].db.desc.res)
2834 >> OHCI_STATUS_SHIFT;
2835 resCount = FWOHCI_DMA_READ(db_tr->db[0].db.desc.res)
2837 /* XXX check buffer overrun */
2840 dbch->buf_offset = dbch->xferq.psize - resCount;
2843 /* XXX make sure DMA is not dead */
2847 printf("fwohci_arcv: no packets\n");