2 * Copyright (c) 1995,1996 Matt Thomas <matt@3am-software.com>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. The name of the author may not be used to endorse or promote products
11 * derived from this software without specific prior written permission
13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
14 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
15 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
16 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
17 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
18 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
19 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
20 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
21 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
22 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 * $FreeBSD: src/sys/dev/pdq/pdq.c,v 1.5 1999/08/28 00:42:19 peter Exp $
25 * $DragonFly: src/sys/dev/netif/pdq_layer/Attic/pdq.c,v 1.4 2004/02/13 02:44:48 joerg Exp $
30 * DEC PDQ FDDI Controller O/S independent code
32 * This module should work with any PDQ based board. Note that changes for
33 * MIPS and Alpha architectures (or any other architecture which requires
34 * a flushing of memory or write buffers and/or has incoherent caches)
35 * have yet to be made.
37 * However, it is expected that the PDQ_CSR_WRITE macro will cause a
38 * flushing of the write buffers.
41 #define PDQ_HWSUPPORT /* for pdq.h */
43 #if defined(__DragonFly__) || defined(__FreeBSD__)
51 #define PDQ_ROUNDUP(n, x) (((n) + ((x) - 1)) & ~((x) - 1)) /* round n up to a multiple of x; x must be a power of two */
52 #define PDQ_CMD_RX_ALIGNMENT 16 /* command/response buffer lengths are rounded up to this so the receive buffer stays aligned */
54 #if (defined(PDQTEST) && !defined(PDQ_NOPRINTF)) || defined(PDQVERBOSE)
55 #define PDQ_PRINTF(x) printf x
57 #define PDQ_PRINTF(x) do { } while (0)
60 static const char * const pdq_halt_codes[] = {
61 "Selftest Timeout", "Host Bus Parity Error", "Host Directed Fault",
62 "Software Fault", "Hardware Fault", "PC Trace Path Test",
63 "DMA Error", "Image CRC Error", "Adapter Processer Error"
66 static const char * const pdq_adapter_states[] = {
67 "Reset", "Upgrade", "DMA Unavailable", "DMA Available",
68 "Link Available", "Link Unavailable", "Halted", "Ring Member"
72 * The following are used in conjunction with
75 static const char * const pdq_entities[] = {
76 "Station", "Link", "Phy Port"
79 static const char * const pdq_station_events[] = {
83 static const char * const pdq_station_arguments[] = {
87 static const char * const pdq_link_events[] = {
90 "Block Check Error (CRC)",
95 "Receive Data Overrun",
98 "Ring Initialization Initiated",
99 "Ring Initialization Received",
100 "Ring Beacon Initiated",
101 "Duplicate Address Failure",
102 "Duplicate Token Detected",
106 "Directed Beacon Received",
109 static const char * const pdq_link_arguments[] = {
116 static const char * const pdq_phy_events[] = {
117 "LEM Error Monitor Reject",
118 "Elasticy Buffer Error",
119 "Link Confidence Test Reject"
122 static const char * const pdq_phy_arguments[] = {
126 static const char * const * const pdq_event_arguments[] = {
127 pdq_station_arguments,
132 static const char * const * const pdq_event_codes[] = {
138 static const char * const pdq_station_types[] = {
139 "SAS", "DAC", "SAC", "NAC", "DAS"
142 static const char * const pdq_smt_versions[] = { "", "V6.2", "V7.2", "V7.3" }; /* indexed by status_chars_get.smt_version_id; entry 0 prints as empty */
144 static const char pdq_phy_types[] = "ABSM"; /* one letter per PHY type code, indexed by status_chars_get.phy_type[] */
146 static const char * const pdq_pmd_types0[] = {
147 "ANSI Multi-Mode", "ANSI Single-Mode Type 1", "ANSI Single-Mode Type 2",
151 static const char * const pdq_pmd_types100[] = {
152 "Low Power", "Thin Wire", "Shielded Twisted Pair",
153 "Unshielded Twisted Pair"
156 static const char * const * const pdq_pmd_types[] = {
157 pdq_pmd_types0, pdq_pmd_types100
160 static const char * const pdq_descriptions[] = {
169 pdq_print_fddi_chars(
171 const pdq_response_status_chars_get_t *rsp)
173 const char hexchars[] = "0123456789abcdef";
176 #if !defined(__bsdi__) && !defined(__NetBSD__)
181 "DEC %s FDDI %s Controller\n",
182 #if !defined(__bsdi__) && !defined(__NetBSD__)
185 pdq_descriptions[pdq->pdq_type],
186 pdq_station_types[rsp->status_chars_get.station_type]);
188 printf(PDQ_OS_PREFIX "FDDI address %c%c:%c%c:%c%c:%c%c:%c%c:%c%c, FW=%c%c%c%c, HW=%c",
190 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[0] >> 4],
191 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[0] & 0x0F],
192 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[1] >> 4],
193 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[1] & 0x0F],
194 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[2] >> 4],
195 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[2] & 0x0F],
196 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[3] >> 4],
197 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[3] & 0x0F],
198 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[4] >> 4],
199 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[4] & 0x0F],
200 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[5] >> 4],
201 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[5] & 0x0F],
202 pdq->pdq_fwrev.fwrev_bytes[0], pdq->pdq_fwrev.fwrev_bytes[1],
203 pdq->pdq_fwrev.fwrev_bytes[2], pdq->pdq_fwrev.fwrev_bytes[3],
204 rsp->status_chars_get.module_rev.fwrev_bytes[0]);
206 if (rsp->status_chars_get.smt_version_id < PDQ_ARRAY_SIZE(pdq_smt_versions)) {
207 printf(", SMT %s\n", pdq_smt_versions[rsp->status_chars_get.smt_version_id]);
210 printf(PDQ_OS_PREFIX "FDDI Port%s = %c (PMD = %s)",
212 rsp->status_chars_get.station_type == PDQ_STATION_TYPE_DAS ? "[A]" : "",
213 pdq_phy_types[rsp->status_chars_get.phy_type[0]],
214 pdq_pmd_types[rsp->status_chars_get.pmd_type[0] / 100][rsp->status_chars_get.pmd_type[0] % 100]);
216 if (rsp->status_chars_get.station_type == PDQ_STATION_TYPE_DAS)
217 printf(", FDDI Port[B] = %c (PMD = %s)",
218 pdq_phy_types[rsp->status_chars_get.phy_type[1]],
219 pdq_pmd_types[rsp->status_chars_get.pmd_type[1] / 100][rsp->status_chars_get.pmd_type[1] % 100]);
228 pdq_bus_memaddr_t csr_base,
232 csrs->csr_base = csr_base;
233 csrs->csr_port_reset = PDQ_CSR_OFFSET(csr_base, 0 * csrsize);
234 csrs->csr_host_data = PDQ_CSR_OFFSET(csr_base, 1 * csrsize);
235 csrs->csr_port_control = PDQ_CSR_OFFSET(csr_base, 2 * csrsize);
236 csrs->csr_port_data_a = PDQ_CSR_OFFSET(csr_base, 3 * csrsize);
237 csrs->csr_port_data_b = PDQ_CSR_OFFSET(csr_base, 4 * csrsize);
238 csrs->csr_port_status = PDQ_CSR_OFFSET(csr_base, 5 * csrsize);
239 csrs->csr_host_int_type_0 = PDQ_CSR_OFFSET(csr_base, 6 * csrsize);
240 csrs->csr_host_int_enable = PDQ_CSR_OFFSET(csr_base, 7 * csrsize);
241 csrs->csr_type_2_producer = PDQ_CSR_OFFSET(csr_base, 8 * csrsize);
242 csrs->csr_cmd_response_producer = PDQ_CSR_OFFSET(csr_base, 10 * csrsize);
243 csrs->csr_cmd_request_producer = PDQ_CSR_OFFSET(csr_base, 11 * csrsize);
244 csrs->csr_host_smt_producer = PDQ_CSR_OFFSET(csr_base, 12 * csrsize);
245 csrs->csr_unsolicited_producer = PDQ_CSR_OFFSET(csr_base, 13 * csrsize);
250 pdq_pci_csrs_t *csrs,
252 pdq_bus_memaddr_t csr_base,
256 csrs->csr_base = csr_base;
257 csrs->csr_pfi_mode_control = PDQ_CSR_OFFSET(csr_base, 16 * csrsize);
258 csrs->csr_pfi_status = PDQ_CSR_OFFSET(csr_base, 17 * csrsize);
259 csrs->csr_fifo_write = PDQ_CSR_OFFSET(csr_base, 18 * csrsize);
260 csrs->csr_fifo_read = PDQ_CSR_OFFSET(csr_base, 19 * csrsize);
264 pdq_flush_databuf_queue(
265 pdq_databuf_queue_t *q)
267 PDQ_OS_DATABUF_T *pdu;
269 PDQ_OS_DATABUF_DEQUEUE(q, pdu);
272 PDQ_OS_DATABUF_FREE(pdu);
278 const pdq_csrs_t * const csrs,
282 PDQ_CSR_WRITE(csrs, csr_host_int_type_0, PDQ_HOST_INT_CSR_CMD_DONE);
283 PDQ_CSR_WRITE(csrs, csr_port_control, PDQ_PCTL_CMD_ERROR | cmd);
284 while ((PDQ_CSR_READ(csrs, csr_host_int_type_0) & PDQ_HOST_INT_CSR_CMD_DONE) == 0 && cnt < 33000000)
286 PDQ_PRINTF(("CSR cmd spun %d times\n", cnt));
287 if (PDQ_CSR_READ(csrs, csr_host_int_type_0) & PDQ_HOST_INT_CSR_CMD_DONE) {
288 PDQ_CSR_WRITE(csrs, csr_host_int_type_0, PDQ_HOST_INT_CSR_CMD_DONE);
289 return (PDQ_CSR_READ(csrs, csr_port_control) & PDQ_PCTL_CMD_ERROR) ? PDQ_FALSE : PDQ_TRUE;
291 /* adapter failure */
298 const pdq_csrs_t * const csrs,
299 pdq_lanaddr_t *hwaddr)
303 PDQ_CSR_WRITE(csrs, csr_port_data_a, 0);
304 pdq_do_port_control(csrs, PDQ_PCTL_MLA_READ);
305 data = PDQ_CSR_READ(csrs, csr_host_data);
307 hwaddr->lanaddr_bytes[0] = (data >> 0) & 0xFF;
308 hwaddr->lanaddr_bytes[1] = (data >> 8) & 0xFF;
309 hwaddr->lanaddr_bytes[2] = (data >> 16) & 0xFF;
310 hwaddr->lanaddr_bytes[3] = (data >> 24) & 0xFF;
312 PDQ_CSR_WRITE(csrs, csr_port_data_a, 1);
313 pdq_do_port_control(csrs, PDQ_PCTL_MLA_READ);
314 data = PDQ_CSR_READ(csrs, csr_host_data);
316 hwaddr->lanaddr_bytes[4] = (data >> 0) & 0xFF;
317 hwaddr->lanaddr_bytes[5] = (data >> 8) & 0xFF;
322 const pdq_csrs_t * const csrs,
327 pdq_do_port_control(csrs, PDQ_PCTL_FW_REV_READ);
328 data = PDQ_CSR_READ(csrs, csr_host_data);
330 fwrev->fwrev_bytes[3] = (data >> 0) & 0xFF;
331 fwrev->fwrev_bytes[2] = (data >> 8) & 0xFF;
332 fwrev->fwrev_bytes[1] = (data >> 16) & 0xFF;
333 fwrev->fwrev_bytes[0] = (data >> 24) & 0xFF;
339 pdq_response_error_log_get_t *log_entry)
341 const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
342 pdq_uint32_t *ptr = (pdq_uint32_t *) log_entry;
344 pdq_do_port_control(csrs, PDQ_PCTL_ERROR_LOG_START);
346 while (pdq_do_port_control(csrs, PDQ_PCTL_FW_REV_READ) == PDQ_TRUE) {
347 *ptr++ = PDQ_CSR_READ(csrs, csr_host_data);
348 if ((pdq_uint8_t *) ptr - (pdq_uint8_t *) log_entry == sizeof(*log_entry))
351 return (ptr == (pdq_uint32_t *) log_entry) ? PDQ_FALSE : PDQ_TRUE;
354 static pdq_chip_rev_t
356 const pdq_csrs_t * const csrs)
360 PDQ_CSR_WRITE(csrs, csr_port_data_a, PDQ_SUB_CMD_PDQ_REV_GET);
361 pdq_do_port_control(csrs, PDQ_PCTL_SUB_CMD);
362 data = PDQ_CSR_READ(csrs, csr_host_data);
364 return (pdq_chip_rev_t) data;
367 static const struct {
370 const char *cmd_name;
372 { sizeof(pdq_cmd_generic_t), /* 0 - PDQC_START */
373 sizeof(pdq_response_generic_t),
376 { sizeof(pdq_cmd_filter_set_t), /* 1 - PDQC_FILTER_SET */
377 sizeof(pdq_response_generic_t),
380 { sizeof(pdq_cmd_generic_t), /* 2 - PDQC_FILTER_GET */
381 sizeof(pdq_response_filter_get_t),
384 { sizeof(pdq_cmd_chars_set_t), /* 3 - PDQC_CHARS_SET */
385 sizeof(pdq_response_generic_t),
388 { sizeof(pdq_cmd_generic_t), /* 4 - PDQC_STATUS_CHARS_GET */
389 sizeof(pdq_response_status_chars_get_t),
393 { sizeof(pdq_cmd_generic_t), /* 5 - PDQC_COUNTERS_GET */
394 sizeof(pdq_response_counters_get_t),
397 { sizeof(pdq_cmd_counters_set_t), /* 6 - PDQC_COUNTERS_SET */
398 sizeof(pdq_response_generic_t),
402 { 0, 0, "Counters Get" },
403 { 0, 0, "Counters Set" },
405 { sizeof(pdq_cmd_addr_filter_set_t), /* 7 - PDQC_ADDR_FILTER_SET */
406 sizeof(pdq_response_generic_t),
409 { sizeof(pdq_cmd_generic_t), /* 8 - PDQC_ADDR_FILTER_GET */
410 sizeof(pdq_response_addr_filter_get_t),
414 { sizeof(pdq_cmd_generic_t), /* 9 - PDQC_ERROR_LOG_CLEAR */
415 sizeof(pdq_response_generic_t),
418 { sizeof(pdq_cmd_generic_t), /* 10 - PDQC_ERROR_LOG_SET */
419 sizeof(pdq_response_generic_t),
422 { sizeof(pdq_cmd_generic_t), /* 11 - PDQC_FDDI_MIB_GET */
423 sizeof(pdq_response_generic_t),
426 { sizeof(pdq_cmd_generic_t), /* 12 - PDQC_DEC_EXT_MIB_GET */
427 sizeof(pdq_response_generic_t),
430 { sizeof(pdq_cmd_generic_t), /* 13 - PDQC_DEC_SPECIFIC_GET */
431 sizeof(pdq_response_generic_t),
434 { sizeof(pdq_cmd_generic_t), /* 14 - PDQC_SNMP_SET */
435 sizeof(pdq_response_generic_t),
439 { sizeof(pdq_cmd_generic_t), /* 16 - PDQC_SMT_MIB_GET */
440 sizeof(pdq_response_generic_t),
443 { sizeof(pdq_cmd_generic_t), /* 17 - PDQC_SMT_MIB_SET */
444 sizeof(pdq_response_generic_t),
454 const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
455 pdq_command_info_t * const ci = &pdq->pdq_command_info;
456 pdq_descriptor_block_t * const dbp = pdq->pdq_dbp;
458 pdq_uint32_t cmdlen, rsplen, mask;
461 * If there are commands or responses active or there aren't
462 * any pending commands, then don't queue any more.
464 if (ci->ci_command_active || ci->ci_pending_commands == 0)
468 * Determine which command needs to be queued.
470 op = PDQC_SMT_MIB_SET;
471 for (mask = 1 << ((int) op); (mask & ci->ci_pending_commands) == 0; mask >>= 1)
472 op = (pdq_cmd_code_t) ((int) op - 1);
474 * Obtain the sizes needed for the command and response.
475 * Round up to PDQ_CMD_RX_ALIGNMENT so the receive buffer is
476 * always properly aligned.
478 cmdlen = PDQ_ROUNDUP(pdq_cmd_info[op].cmd_len, PDQ_CMD_RX_ALIGNMENT);
479 rsplen = PDQ_ROUNDUP(pdq_cmd_info[op].rsp_len, PDQ_CMD_RX_ALIGNMENT);
483 * Since only one command at a time will be queued, there will always
488 * Obtain and fill in the descriptor for the command (descriptor is
491 dbp->pdqdb_command_requests[ci->ci_request_producer].txd_seg_len = cmdlen;
492 PDQ_ADVANCE(ci->ci_request_producer, 1, PDQ_RING_MASK(dbp->pdqdb_command_requests));
495 * Obtain and fill in the descriptor for the response (descriptor is
498 dbp->pdqdb_command_responses[ci->ci_response_producer].rxd_seg_len_hi = cmdlen / 16;
499 PDQ_ADVANCE(ci->ci_response_producer, 1, PDQ_RING_MASK(dbp->pdqdb_command_responses));
502 * Clear the command area, set the opcode, and the command from the pending
506 PDQ_OS_MEMZERO(ci->ci_bufstart, cmdlen);
507 *(pdq_cmd_code_t *) ci->ci_bufstart = op;
508 ci->ci_pending_commands &= ~mask;
511 * Fill in the command area, if needed.
514 case PDQC_FILTER_SET: {
515 pdq_cmd_filter_set_t *filter_set = (pdq_cmd_filter_set_t *) ci->ci_bufstart;
517 filter_set->filter_set_items[idx].item_code = PDQI_IND_GROUP_PROM;
518 filter_set->filter_set_items[idx].filter_state = (pdq->pdq_flags & PDQ_PROMISC ? PDQ_FILTER_PASS : PDQ_FILTER_BLOCK);
520 filter_set->filter_set_items[idx].item_code = PDQI_GROUP_PROM;
521 filter_set->filter_set_items[idx].filter_state = (pdq->pdq_flags & PDQ_ALLMULTI ? PDQ_FILTER_PASS : PDQ_FILTER_BLOCK);
523 filter_set->filter_set_items[idx].item_code = PDQI_SMT_PROM;
524 filter_set->filter_set_items[idx].filter_state = ((pdq->pdq_flags & (PDQ_PROMISC|PDQ_PASS_SMT)) == (PDQ_PROMISC|PDQ_PASS_SMT) ? PDQ_FILTER_PASS : PDQ_FILTER_BLOCK);
526 filter_set->filter_set_items[idx].item_code = PDQI_SMT_USER;
527 filter_set->filter_set_items[idx].filter_state = (pdq->pdq_flags & PDQ_PASS_SMT ? PDQ_FILTER_PASS : PDQ_FILTER_BLOCK);
529 filter_set->filter_set_items[idx].item_code = PDQI_EOL;
532 case PDQC_ADDR_FILTER_SET: {
533 pdq_cmd_addr_filter_set_t *addr_filter_set = (pdq_cmd_addr_filter_set_t *) ci->ci_bufstart;
534 pdq_lanaddr_t *addr = addr_filter_set->addr_filter_set_addresses;
535 addr->lanaddr_bytes[0] = 0xFF;
536 addr->lanaddr_bytes[1] = 0xFF;
537 addr->lanaddr_bytes[2] = 0xFF;
538 addr->lanaddr_bytes[3] = 0xFF;
539 addr->lanaddr_bytes[4] = 0xFF;
540 addr->lanaddr_bytes[5] = 0xFF;
542 pdq_os_addr_fill(pdq, addr, 61);
545 default: { /* to make gcc happy */
550 * At this point the command is done. All that needs to be done is to
551 * produce it to the PDQ.
553 PDQ_PRINTF(("PDQ Queue Command Request: %s queued\n",
554 pdq_cmd_info[op].cmd_name));
556 ci->ci_command_active++;
557 PDQ_CSR_WRITE(csrs, csr_cmd_response_producer, ci->ci_response_producer | (ci->ci_response_completion << 8));
558 PDQ_CSR_WRITE(csrs, csr_cmd_request_producer, ci->ci_request_producer | (ci->ci_request_completion << 8));
562 pdq_process_command_responses(
565 const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
566 pdq_command_info_t * const ci = &pdq->pdq_command_info;
567 volatile const pdq_consumer_block_t * const cbp = pdq->pdq_cbp;
568 pdq_descriptor_block_t * const dbp = pdq->pdq_dbp;
569 const pdq_response_generic_t *rspgen;
572 * We have to process the command and response in tandem so
573 * just wait for the response to be consumed. If it has been
574 * consumed then the command must have been as well.
577 if (cbp->pdqcb_command_response == ci->ci_response_completion)
580 PDQ_ASSERT(cbp->pdqcb_command_request != ci->ci_request_completion);
582 rspgen = (const pdq_response_generic_t *) ci->ci_bufstart;
583 PDQ_ASSERT(rspgen->generic_status == PDQR_SUCCESS);
584 PDQ_PRINTF(("PDQ Process Command Response: %s completed (status=%d)\n",
585 pdq_cmd_info[rspgen->generic_op].cmd_name,
586 rspgen->generic_status));
588 if (rspgen->generic_op == PDQC_STATUS_CHARS_GET && (pdq->pdq_flags & PDQ_PRINTCHARS)) {
589 pdq->pdq_flags &= ~PDQ_PRINTCHARS;
590 pdq_print_fddi_chars(pdq, (const pdq_response_status_chars_get_t *) rspgen);
593 PDQ_ADVANCE(ci->ci_request_completion, 1, PDQ_RING_MASK(dbp->pdqdb_command_requests));
594 PDQ_ADVANCE(ci->ci_response_completion, 1, PDQ_RING_MASK(dbp->pdqdb_command_responses));
595 ci->ci_command_active = 0;
597 if (ci->ci_pending_commands != 0) {
598 pdq_queue_commands(pdq);
600 PDQ_CSR_WRITE(csrs, csr_cmd_response_producer,
601 ci->ci_response_producer | (ci->ci_response_completion << 8));
602 PDQ_CSR_WRITE(csrs, csr_cmd_request_producer,
603 ci->ci_request_producer | (ci->ci_request_completion << 8));
608 * The following routine processes unsolicited events.
609 * In addition, it also fills the unsolicited queue with
610 * event buffers so it can be used to initialize the queue
614 pdq_process_unsolicited_events(
617 const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
618 pdq_unsolicited_info_t *ui = &pdq->pdq_unsolicited_info;
619 volatile const pdq_consumer_block_t *cbp = pdq->pdq_cbp;
620 pdq_descriptor_block_t *dbp = pdq->pdq_dbp;
621 const pdq_unsolicited_event_t *event;
625 * Process each unsolicited event (if any).
628 while (cbp->pdqcb_unsolicited_event != ui->ui_completion) {
629 rxd = &dbp->pdqdb_unsolicited_events[ui->ui_completion];
630 event = &ui->ui_events[ui->ui_completion & (PDQ_NUM_UNSOLICITED_EVENTS-1)];
632 switch (event->event_type) {
633 case PDQ_UNSOLICITED_EVENT: {
634 printf(PDQ_OS_PREFIX "Unsolicited Event: %s: %s",
636 pdq_entities[event->event_entity],
637 pdq_event_codes[event->event_entity][event->event_code.value]);
638 if (event->event_entity == PDQ_ENTITY_PHY_PORT)
639 printf("[%d]", event->event_index);
643 case PDQ_UNSOLICITED_COUNTERS: {
647 PDQ_ADVANCE(ui->ui_completion, 1, PDQ_RING_MASK(dbp->pdqdb_unsolicited_events));
652 * Now give back the event buffers back to the PDQ.
654 PDQ_ADVANCE(ui->ui_producer, ui->ui_free, PDQ_RING_MASK(dbp->pdqdb_unsolicited_events));
657 PDQ_CSR_WRITE(csrs, csr_unsolicited_producer,
658 ui->ui_producer | (ui->ui_completion << 8));
662 pdq_process_received_data(
665 pdq_rxdesc_t *receives,
666 pdq_uint32_t completion_goal,
667 pdq_uint32_t ring_mask)
669 pdq_uint32_t completion = rx->rx_completion;
670 pdq_uint32_t producer = rx->rx_producer;
671 PDQ_OS_DATABUF_T **buffers = (PDQ_OS_DATABUF_T **) rx->rx_buffers;
675 while (completion != completion_goal) {
676 PDQ_OS_DATABUF_T *fpdu, *lpdu, *npdu;
677 pdq_uint8_t *dataptr;
678 pdq_uint32_t fc, datalen, pdulen, segcnt;
679 pdq_rxstatus_t status;
681 fpdu = lpdu = buffers[completion];
682 PDQ_ASSERT(fpdu != NULL);
684 dataptr = PDQ_OS_DATABUF_PTR(fpdu);
685 status = *(pdq_rxstatus_t *) dataptr;
686 if ((status.rxs_status & 0x200000) == 0) {
687 datalen = status.rxs_status & 0x1FFF;
688 fc = dataptr[PDQ_RX_FC_OFFSET];
689 switch (fc & (PDQ_FDDIFC_C|PDQ_FDDIFC_L|PDQ_FDDIFC_F)) {
690 case PDQ_FDDI_LLC_ASYNC:
691 case PDQ_FDDI_LLC_SYNC:
692 case PDQ_FDDI_IMP_ASYNC:
693 case PDQ_FDDI_IMP_SYNC: {
694 if (datalen > PDQ_FDDI_MAX || datalen < PDQ_FDDI_LLC_MIN) {
695 PDQ_PRINTF(("discard: bad length %d\n", datalen));
701 if (datalen > PDQ_FDDI_MAX || datalen < PDQ_FDDI_SMT_MIN)
706 PDQ_PRINTF(("discard: bad fc 0x%x\n", fc));
711 * Update the lengths of the data buffers now that we know
714 pdulen = datalen - 4 /* CRC */;
715 segcnt = (pdulen + PDQ_RX_FC_OFFSET + PDQ_OS_DATABUF_SIZE - 1) / PDQ_OS_DATABUF_SIZE;
716 PDQ_OS_DATABUF_ALLOC(npdu);
718 PDQ_PRINTF(("discard: no databuf #0\n"));
721 buffers[completion] = npdu;
722 for (idx = 1; idx < segcnt; idx++) {
723 PDQ_OS_DATABUF_ALLOC(npdu);
725 PDQ_OS_DATABUF_NEXT_SET(lpdu, NULL);
726 PDQ_OS_DATABUF_FREE(fpdu);
729 PDQ_OS_DATABUF_NEXT_SET(lpdu, buffers[(completion + idx) & ring_mask]);
730 lpdu = PDQ_OS_DATABUF_NEXT(lpdu);
731 buffers[(completion + idx) & ring_mask] = npdu;
733 PDQ_OS_DATABUF_NEXT_SET(lpdu, NULL);
734 for (idx = 0; idx < PDQ_RX_SEGCNT; idx++) {
735 buffers[(producer + idx) & ring_mask] =
736 buffers[(completion + idx) & ring_mask];
737 buffers[(completion + idx) & ring_mask] = NULL;
739 PDQ_OS_DATABUF_ADJ(fpdu, PDQ_RX_FC_OFFSET);
741 PDQ_OS_DATABUF_LEN_SET(fpdu, pdulen);
743 PDQ_OS_DATABUF_LEN_SET(lpdu, pdulen + PDQ_RX_FC_OFFSET - (segcnt - 1) * PDQ_OS_DATABUF_SIZE);
745 pdq_os_receive_pdu(pdq, fpdu, pdulen);
746 rx->rx_free += PDQ_RX_SEGCNT;
747 PDQ_ADVANCE(producer, PDQ_RX_SEGCNT, ring_mask);
748 PDQ_ADVANCE(completion, PDQ_RX_SEGCNT, ring_mask);
751 PDQ_PRINTF(("discard: bad pdu 0x%x(%d.%d.%d.%d.%d)\n", status.rxs_status,
752 status.rxs_rcc_badpdu, status.rxs_rcc_badcrc,
753 status.rxs_rcc_reason, status.rxs_fsc, status.rxs_fsb_e));
754 if (status.rxs_rcc_reason == 7)
756 if (status.rxs_rcc_reason != 0) {
759 if (status.rxs_rcc_badcrc) {
760 printf(PDQ_OS_PREFIX " MAC CRC error (source=%x-%x-%x-%x-%x-%x)\n",
762 dataptr[PDQ_RX_FC_OFFSET+1],
763 dataptr[PDQ_RX_FC_OFFSET+2],
764 dataptr[PDQ_RX_FC_OFFSET+3],
765 dataptr[PDQ_RX_FC_OFFSET+4],
766 dataptr[PDQ_RX_FC_OFFSET+5],
767 dataptr[PDQ_RX_FC_OFFSET+6]);
768 /* rx->rx_badcrc++; */
769 } else if (status.rxs_fsc == 0 || status.rxs_fsb_e == 1) {
770 /* rx->rx_frame_status_errors++; */
777 * Discarded frames go right back on the queue; therefore
778 * ring entries were freed.
780 for (idx = 0; idx < PDQ_RX_SEGCNT; idx++) {
781 buffers[producer] = buffers[completion];
782 buffers[completion] = NULL;
783 rxd = &receives[rx->rx_producer];
785 rxd->rxd_sop = 1; rxd->rxd_seg_cnt = PDQ_RX_SEGCNT - 1;
787 rxd->rxd_sop = 0; rxd->rxd_seg_cnt = 0;
790 rxd->rxd_seg_len_hi = PDQ_OS_DATABUF_SIZE / 16;
791 rxd->rxd_pa_lo = PDQ_OS_VA_TO_PA(pdq, PDQ_OS_DATABUF_PTR(buffers[rx->rx_producer]));
792 PDQ_ADVANCE(rx->rx_producer, 1, ring_mask);
793 PDQ_ADVANCE(producer, 1, ring_mask);
794 PDQ_ADVANCE(completion, 1, ring_mask);
797 rx->rx_completion = completion;
799 while (rx->rx_free > PDQ_RX_SEGCNT && rx->rx_free > rx->rx_target) {
800 PDQ_OS_DATABUF_T *pdu;
802 * Allocate the needed number of data buffers.
803 * Try to obtain them from our free queue before
804 * asking the system for more.
806 for (idx = 0; idx < PDQ_RX_SEGCNT; idx++) {
807 if ((pdu = buffers[(rx->rx_producer + idx) & ring_mask]) == NULL) {
808 PDQ_OS_DATABUF_ALLOC(pdu);
811 buffers[(rx->rx_producer + idx) & ring_mask] = pdu;
813 rxd = &receives[(rx->rx_producer + idx) & ring_mask];
815 rxd->rxd_sop = 1; rxd->rxd_seg_cnt = PDQ_RX_SEGCNT - 1;
817 rxd->rxd_sop = 0; rxd->rxd_seg_cnt = 0;
820 rxd->rxd_seg_len_hi = PDQ_OS_DATABUF_SIZE / 16;
821 rxd->rxd_pa_lo = PDQ_OS_VA_TO_PA(pdq, PDQ_OS_DATABUF_PTR(pdu));
823 if (idx < PDQ_RX_SEGCNT) {
825 * We didn't get all databufs required to complete a new
826 * receive buffer. Keep the ones we got and retry a bit
827 * later for the rest.
831 PDQ_ADVANCE(rx->rx_producer, PDQ_RX_SEGCNT, ring_mask);
832 rx->rx_free -= PDQ_RX_SEGCNT;
837 pdq_queue_transmit_data(
839 PDQ_OS_DATABUF_T *pdu)
841 pdq_tx_info_t *tx = &pdq->pdq_tx_info;
842 pdq_descriptor_block_t *dbp = pdq->pdq_dbp;
843 pdq_uint32_t producer = tx->tx_producer;
844 pdq_txdesc_t *eop = NULL;
845 PDQ_OS_DATABUF_T *pdu0;
846 pdq_uint32_t freecnt;
851 dbp->pdqdb_transmits[producer] = tx->tx_hdrdesc;
852 PDQ_ADVANCE(producer, 1, PDQ_RING_MASK(dbp->pdqdb_transmits));
854 for (freecnt = tx->tx_free - 1, pdu0 = pdu; pdu0 != NULL && freecnt > 0;) {
855 pdq_uint32_t fraglen, datalen = PDQ_OS_DATABUF_LEN(pdu0);
856 const pdq_uint8_t *dataptr = PDQ_OS_DATABUF_PTR(pdu0);
859 * The first segment is limited to the space remaining in
860 * page. All segments after that can be up to a full page
863 fraglen = PDQ_OS_PAGESIZE - ((dataptr - (pdq_uint8_t *) NULL) & (PDQ_OS_PAGESIZE-1));
864 while (datalen > 0 && freecnt > 0) {
865 pdq_uint32_t seglen = (fraglen < datalen ? fraglen : datalen);
868 * Initialize the transmit descriptor
870 eop = &dbp->pdqdb_transmits[producer];
871 eop->txd_seg_len = seglen;
872 eop->txd_pa_lo = PDQ_OS_VA_TO_PA(pdq, dataptr);
873 eop->txd_sop = eop->txd_eop = eop->txd_pa_hi = 0;
877 fraglen = PDQ_OS_PAGESIZE;
879 PDQ_ADVANCE(producer, 1, PDQ_RING_MASK(dbp->pdqdb_transmits));
881 pdu0 = PDQ_OS_DATABUF_NEXT(pdu0);
884 PDQ_ASSERT(freecnt == 0);
886 * If we still have data to process then the ring was too full
887 * to store the PDU. Return FALSE so the caller will requeue
893 * Everything went fine. Finish it up.
895 tx->tx_descriptor_count[tx->tx_producer] = tx->tx_free - freecnt;
897 PDQ_OS_DATABUF_ENQUEUE(&tx->tx_txq, pdu);
898 tx->tx_producer = producer;
899 tx->tx_free = freecnt;
900 PDQ_DO_TYPE2_PRODUCER(pdq);
905 pdq_process_transmitted_data(
908 pdq_tx_info_t *tx = &pdq->pdq_tx_info;
909 volatile const pdq_consumer_block_t *cbp = pdq->pdq_cbp;
910 pdq_descriptor_block_t *dbp = pdq->pdq_dbp;
911 pdq_uint32_t completion = tx->tx_completion;
913 while (completion != cbp->pdqcb_transmits) {
914 PDQ_OS_DATABUF_T *pdu;
915 pdq_uint32_t descriptor_count = tx->tx_descriptor_count[completion];
916 PDQ_ASSERT(dbp->pdqdb_transmits[completion].txd_sop == 1);
917 PDQ_ASSERT(dbp->pdqdb_transmits[(completion + descriptor_count - 1) & PDQ_RING_MASK(dbp->pdqdb_transmits)].txd_eop == 1);
918 PDQ_OS_DATABUF_DEQUEUE(&tx->tx_txq, pdu);
919 pdq_os_transmit_done(pdq, pdu);
920 tx->tx_free += descriptor_count;
922 PDQ_ADVANCE(completion, descriptor_count, PDQ_RING_MASK(dbp->pdqdb_transmits));
924 if (tx->tx_completion != completion) {
925 tx->tx_completion = completion;
926 pdq_os_restart_transmitter(pdq);
928 PDQ_DO_TYPE2_PRODUCER(pdq);
932 pdq_flush_transmitter(
935 volatile pdq_consumer_block_t *cbp = pdq->pdq_cbp;
936 pdq_tx_info_t *tx = &pdq->pdq_tx_info;
939 PDQ_OS_DATABUF_T *pdu;
940 PDQ_OS_DATABUF_DEQUEUE(&tx->tx_txq, pdu);
944 * Don't call transmit done since the packet never made it
947 PDQ_OS_DATABUF_FREE(pdu);
950 tx->tx_free = PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_transmits);
951 tx->tx_completion = cbp->pdqcb_transmits = tx->tx_producer;
953 PDQ_DO_TYPE2_PRODUCER(pdq);
960 const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
964 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
965 if (state == PDQS_DMA_UNAVAILABLE)
967 PDQ_CSR_WRITE(csrs, csr_port_data_a,
968 (state == PDQS_HALTED) ? 0 : PDQ_PRESET_SKIP_SELFTEST);
969 PDQ_CSR_WRITE(csrs, csr_port_reset, 1);
970 PDQ_OS_USEC_DELAY(100);
971 PDQ_CSR_WRITE(csrs, csr_port_reset, 0);
972 for (cnt = 45000;;cnt--) {
973 PDQ_OS_USEC_DELAY(1000);
974 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
975 if (state == PDQS_DMA_UNAVAILABLE || cnt == 0)
978 PDQ_PRINTF(("PDQ Reset spun %d cycles\n", 45000 - cnt));
979 PDQ_OS_USEC_DELAY(10000);
980 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
981 PDQ_ASSERT(state == PDQS_DMA_UNAVAILABLE);
986 * The following routine brings the PDQ from whatever state it is
987 * in to DMA_UNAVAILABLE (ie. like a RESET but without doing a RESET).
994 const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
995 int cnt, pass = 0, idx;
996 PDQ_OS_DATABUF_T **buffers;
999 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1000 if (state != PDQS_DMA_UNAVAILABLE) {
1002 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1003 PDQ_ASSERT(state == PDQS_DMA_UNAVAILABLE);
1007 case PDQS_RING_MEMBER:
1008 case PDQS_LINK_UNAVAILABLE:
1009 case PDQS_LINK_AVAILABLE: {
1010 PDQ_CSR_WRITE(csrs, csr_port_data_a, PDQ_SUB_CMD_LINK_UNINIT);
1011 PDQ_CSR_WRITE(csrs, csr_port_data_b, 0);
1012 pdq_do_port_control(csrs, PDQ_PCTL_SUB_CMD);
1013 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1014 PDQ_ASSERT(state == PDQS_DMA_AVAILABLE);
1017 case PDQS_DMA_AVAILABLE: {
1018 PDQ_CSR_WRITE(csrs, csr_port_data_a, 0);
1019 PDQ_CSR_WRITE(csrs, csr_port_data_b, 0);
1020 pdq_do_port_control(csrs, PDQ_PCTL_DMA_UNINIT);
1021 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1022 PDQ_ASSERT(state == PDQS_DMA_UNAVAILABLE);
1025 case PDQS_DMA_UNAVAILABLE: {
1031 * Now we should be in DMA_UNAVAILABLE. So bring the PDQ into
1036 * Obtain the hardware address and firmware revisions
1037 * (MLA = my long address which is FDDI speak for hardware address)
1039 pdq_read_mla(&pdq->pdq_csrs, &pdq->pdq_hwaddr);
1040 pdq_read_fwrev(&pdq->pdq_csrs, &pdq->pdq_fwrev);
1041 pdq->pdq_chip_rev = pdq_read_chiprev(&pdq->pdq_csrs);
1043 if (pdq->pdq_type == PDQ_DEFPA) {
1045 * Disable interrupts and DMA.
1047 PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_mode_control, 0);
1048 PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_status, 0x10);
1052 * Flush all the databuf queues.
1054 pdq_flush_databuf_queue(&pdq->pdq_tx_info.tx_txq);
1055 pdq->pdq_flags &= ~PDQ_TXOK;
1056 buffers = (PDQ_OS_DATABUF_T **) pdq->pdq_rx_info.rx_buffers;
1057 for (idx = 0; idx < PDQ_RING_SIZE(pdq->pdq_dbp->pdqdb_receives); idx++) {
1058 if (buffers[idx] != NULL) {
1059 PDQ_OS_DATABUF_FREE(buffers[idx]);
1060 buffers[idx] = NULL;
1063 pdq->pdq_rx_info.rx_free = PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_receives);
1064 buffers = (PDQ_OS_DATABUF_T **) pdq->pdq_host_smt_info.rx_buffers;
1065 for (idx = 0; idx < PDQ_RING_SIZE(pdq->pdq_dbp->pdqdb_host_smt); idx++) {
1066 if (buffers[idx] != NULL) {
1067 PDQ_OS_DATABUF_FREE(buffers[idx]);
1068 buffers[idx] = NULL;
1071 pdq->pdq_host_smt_info.rx_free = PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_host_smt);
1074 * Reset the consumer indexes to 0.
1076 pdq->pdq_cbp->pdqcb_receives = 0;
1077 pdq->pdq_cbp->pdqcb_transmits = 0;
1078 pdq->pdq_cbp->pdqcb_host_smt = 0;
1079 pdq->pdq_cbp->pdqcb_unsolicited_event = 0;
1080 pdq->pdq_cbp->pdqcb_command_response = 0;
1081 pdq->pdq_cbp->pdqcb_command_request = 0;
1084 * Reset the producer and completion indexes to 0.
1086 pdq->pdq_command_info.ci_request_producer = 0;
1087 pdq->pdq_command_info.ci_response_producer = 0;
1088 pdq->pdq_command_info.ci_request_completion = 0;
1089 pdq->pdq_command_info.ci_response_completion = 0;
1090 pdq->pdq_unsolicited_info.ui_producer = 0;
1091 pdq->pdq_unsolicited_info.ui_completion = 0;
1092 pdq->pdq_rx_info.rx_producer = 0;
1093 pdq->pdq_rx_info.rx_completion = 0;
1094 pdq->pdq_tx_info.tx_producer = 0;
1095 pdq->pdq_tx_info.tx_completion = 0;
1096 pdq->pdq_host_smt_info.rx_producer = 0;
1097 pdq->pdq_host_smt_info.rx_completion = 0;
1099 pdq->pdq_command_info.ci_command_active = 0;
1100 pdq->pdq_unsolicited_info.ui_free = PDQ_NUM_UNSOLICITED_EVENTS;
1101 pdq->pdq_tx_info.tx_free = PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_transmits);
1104 * Allow the DEFPA to do DMA. Then program the physical
1105 * addresses of the consumer and descriptor blocks.
1107 if (pdq->pdq_type == PDQ_DEFPA) {
1109 PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_mode_control,
1110 PDQ_PFI_MODE_DMA_ENABLE);
1112 PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_mode_control,
1113 PDQ_PFI_MODE_DMA_ENABLE
1114 /*|PDQ_PFI_MODE_PFI_PCI_INTR*/|PDQ_PFI_MODE_PDQ_PCI_INTR);
1119 * Make sure the unsolicited queue has events ...
1121 pdq_process_unsolicited_events(pdq);
1123 if (pdq->pdq_type == PDQ_DEFEA && pdq->pdq_chip_rev == PDQ_CHIP_REV_E)
1124 PDQ_CSR_WRITE(csrs, csr_port_data_b, PDQ_DMA_BURST_16LW);
1126 PDQ_CSR_WRITE(csrs, csr_port_data_b, PDQ_DMA_BURST_8LW);
1127 PDQ_CSR_WRITE(csrs, csr_port_data_a, PDQ_SUB_CMD_DMA_BURST_SIZE_SET);
1128 pdq_do_port_control(csrs, PDQ_PCTL_SUB_CMD);
1130 PDQ_CSR_WRITE(csrs, csr_port_data_b, 0);
1131 PDQ_CSR_WRITE(csrs, csr_port_data_a, PDQ_OS_VA_TO_PA(pdq, pdq->pdq_cbp));
1132 pdq_do_port_control(csrs, PDQ_PCTL_CONSUMER_BLOCK);
1134 PDQ_CSR_WRITE(csrs, csr_port_data_b, 0);
1135 PDQ_CSR_WRITE(csrs, csr_port_data_a,
1136 PDQ_OS_VA_TO_PA(pdq, pdq->pdq_dbp) | PDQ_DMA_INIT_LW_BSWAP_DATA);
1137 pdq_do_port_control(csrs, PDQ_PCTL_DMA_INIT);
1139 for (cnt = 0; cnt < 1000; cnt++) {
1140 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1141 if (state == PDQS_HALTED) {
1147 if (state == PDQS_DMA_AVAILABLE) {
1148 PDQ_PRINTF(("Transition to DMA Available took %d spins\n", cnt));
1151 PDQ_OS_USEC_DELAY(1000);
1153 PDQ_ASSERT(state == PDQS_DMA_AVAILABLE);
1155 PDQ_CSR_WRITE(csrs, csr_host_int_type_0, 0xFF);
1156 PDQ_CSR_WRITE(csrs, csr_host_int_enable, 0) /* PDQ_HOST_INT_STATE_CHANGE
1157 |PDQ_HOST_INT_FATAL_ERROR|PDQ_HOST_INT_CMD_RSP_ENABLE
1158 |PDQ_HOST_INT_UNSOL_ENABLE */;
1161 * Any other command but START should be valid.
1163 pdq->pdq_command_info.ci_pending_commands &= ~(PDQ_BITMASK(PDQC_START));
1164 if (pdq->pdq_flags & PDQ_PRINTCHARS)
1165 pdq->pdq_command_info.ci_pending_commands |= PDQ_BITMASK(PDQC_STATUS_CHARS_GET);
1166 pdq_queue_commands(pdq);
1168 if (pdq->pdq_flags & PDQ_PRINTCHARS) {
1170 * Now wait (up to 100ms) for the command(s) to finish.
1172 for (cnt = 0; cnt < 1000; cnt++) {
1173 pdq_process_command_responses(pdq);
1174 if (pdq->pdq_command_info.ci_response_producer == pdq->pdq_command_info.ci_response_completion)
1176 PDQ_OS_USEC_DELAY(1000);
1178 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
/*
 * Body of the adapter start routine.  Depending on the adapter's
 * current state it re-arms host interrupts, pre-fills the receive and
 * host-SMT rings, and queues the filter-set (and, when already on the
 * ring, only the filter-set) commands to the adapter.
 * NOTE(review): the function header, the switch(state) opener, and the
 * closing braces fall outside this view; comments below describe only
 * the visible statements.
 */
1188 const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
/*
 * These states should never be seen here -- pdq_stop() is asserted
 * elsewhere in this file to leave the adapter in PDQS_DMA_AVAILABLE
 * (see the PDQ_ASSERT after the pdq_stop() call in pdq_initialize).
 */
1191 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1192 PDQ_ASSERT(state != PDQS_DMA_UNAVAILABLE);
1193 PDQ_ASSERT(state != PDQS_RESET);
1194 PDQ_ASSERT(state != PDQS_HALTED);
1195 PDQ_ASSERT(state != PDQS_UPGRADE);
1196 PDQ_ASSERT(state != PDQS_RING_MEMBER);
/* The case labels below belong to a switch (state) opened above this view. */
1198 case PDQS_DMA_AVAILABLE: {
1200 * The PDQ after being reset screws up some of its state.
1201 * So we need to clear all the errors/interrupts so the real
1202 * ones will get through.
/* Ack any stale Type 0 interrupt causes, then enable the full set. */
1204 PDQ_CSR_WRITE(csrs, csr_host_int_type_0, 0xFF);
1205 PDQ_CSR_WRITE(csrs, csr_host_int_enable, PDQ_HOST_INT_STATE_CHANGE|PDQ_HOST_INT_XMT_DATA_FLUSH
1206 |PDQ_HOST_INT_FATAL_ERROR|PDQ_HOST_INT_CMD_RSP_ENABLE|PDQ_HOST_INT_UNSOL_ENABLE
1207 |PDQ_HOST_INT_RX_ENABLE|PDQ_HOST_INT_TX_ENABLE|PDQ_HOST_INT_HOST_SMT_ENABLE);
1209 * Set the MAC and address filters and start up the PDQ.
1211 pdq_process_unsolicited_events(pdq);
/* Pre-fill the receive ring and publish the producer index to the adapter. */
1212 pdq_process_received_data(pdq, &pdq->pdq_rx_info,
1213 pdq->pdq_dbp->pdqdb_receives,
1214 pdq->pdq_cbp->pdqcb_receives,
1215 PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_receives));
1216 PDQ_DO_TYPE2_PRODUCER(pdq);
/* Same for the host SMT ring, but only if SMT frames are passed to the host. */
1217 if (pdq->pdq_flags & PDQ_PASS_SMT) {
1218 pdq_process_received_data(pdq, &pdq->pdq_host_smt_info,
1219 pdq->pdq_dbp->pdqdb_host_smt,
1220 pdq->pdq_cbp->pdqcb_host_smt,
1221 PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_host_smt));
1222 PDQ_CSR_WRITE(csrs, csr_host_smt_producer,
1223 pdq->pdq_host_smt_info.rx_producer
1224 | (pdq->pdq_host_smt_info.rx_completion << 8));
/*
 * Not yet on the ring: queue filter setup plus START to bring the
 * adapter up.
 */
1226 pdq->pdq_command_info.ci_pending_commands = PDQ_BITMASK(PDQC_FILTER_SET)
1227 | PDQ_BITMASK(PDQC_ADDR_FILTER_SET) | PDQ_BITMASK(PDQC_START);
1228 if (pdq->pdq_flags & PDQ_PRINTCHARS)
1229 pdq->pdq_command_info.ci_pending_commands |= PDQ_BITMASK(PDQC_STATUS_CHARS_GET);
1230 pdq_queue_commands(pdq);
1233 case PDQS_LINK_UNAVAILABLE:
/*
 * Adapter is already started: refresh the filters only (no START
 * command in this path).
 */
1234 case PDQS_LINK_AVAILABLE: {
1235 pdq->pdq_command_info.ci_pending_commands = PDQ_BITMASK(PDQC_FILTER_SET)
1236 | PDQ_BITMASK(PDQC_ADDR_FILTER_SET);
1237 if (pdq->pdq_flags & PDQ_PRINTCHARS)
1238 pdq->pdq_command_info.ci_pending_commands |= PDQ_BITMASK(PDQC_STATUS_CHARS_GET);
1239 if (pdq->pdq_flags & PDQ_PASS_SMT) {
1240 pdq_process_received_data(pdq, &pdq->pdq_host_smt_info,
1241 pdq->pdq_dbp->pdqdb_host_smt,
1242 pdq->pdq_cbp->pdqcb_host_smt,
1243 PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_host_smt));
1244 PDQ_CSR_WRITE(csrs, csr_host_smt_producer,
1245 pdq->pdq_host_smt_info.rx_producer
1246 | (pdq->pdq_host_smt_info.rx_completion << 8));
1248 pdq_process_unsolicited_events(pdq);
1249 pdq_queue_commands(pdq);
1252 case PDQS_RING_MEMBER: {
1254 default: { /* to make gcc happy */
/*
 * Body of the interrupt service routine: spins while the port status
 * register reports a pending interrupt, dispatching on each pending
 * cause bit (receive, host SMT, transmit, unsolicited events, command
 * responses, and Type 0 causes such as state changes and fatal errors).
 * NOTE(review): the function header and closing braces fall outside
 * this view.
 */
1264 const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
/*
 * On the PCI adapter, write 0x18 to the PFI status register --
 * presumably acking its interrupt status bits; confirm against the
 * DEFPA PFI register description.
 */
1268 if (pdq->pdq_type == PDQ_DEFPA)
1269 PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_status, 0x18);
/* Loop until the adapter stops asserting an interrupt cause. */
1271 while ((data = PDQ_CSR_READ(csrs, csr_port_status)) & PDQ_PSTS_INTR_PENDING) {
1273 PDQ_PRINTF(("PDQ Interrupt: Status = 0x%08x\n", data));
/* Normal receive traffic: drain the receive ring, then publish producers. */
1274 if (data & PDQ_PSTS_RCV_DATA_PENDING) {
1275 pdq_process_received_data(pdq, &pdq->pdq_rx_info,
1276 pdq->pdq_dbp->pdqdb_receives,
1277 pdq->pdq_cbp->pdqcb_receives,
1278 PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_receives));
1279 PDQ_DO_TYPE2_PRODUCER(pdq);
/* SMT frames destined for the host go through their own ring. */
1281 if (data & PDQ_PSTS_HOST_SMT_PENDING) {
1282 pdq_process_received_data(pdq, &pdq->pdq_host_smt_info,
1283 pdq->pdq_dbp->pdqdb_host_smt,
1284 pdq->pdq_cbp->pdqcb_host_smt,
1285 PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_host_smt));
1286 PDQ_DO_HOST_SMT_PRODUCER(pdq);
/* Reap completed transmits, unsolicited events, and command responses. */
1288 if (data & PDQ_PSTS_XMT_DATA_PENDING)
1289 pdq_process_transmitted_data(pdq);
1290 if (data & PDQ_PSTS_UNSOL_PENDING)
1291 pdq_process_unsolicited_events(pdq);
1292 if (data & PDQ_PSTS_CMD_RSP_PENDING)
1293 pdq_process_command_responses(pdq);
/*
 * Type 0 causes need a second register read to find out which
 * specific event(s) fired.  Note: `data' is reused here, so the
 * original port status value is no longer available below.
 */
1294 if (data & PDQ_PSTS_TYPE_0_PENDING) {
1295 data = PDQ_CSR_READ(csrs, csr_host_int_type_0);
1296 if (data & PDQ_HOST_INT_STATE_CHANGE) {
1297 pdq_state_t state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1298 printf(PDQ_OS_PREFIX "%s", PDQ_OS_PREFIX_ARGS, pdq_adapter_states[state]);
/* Track link availability via PDQ_TXOK and kick the OS transmitter on link-up. */
1299 if (state == PDQS_LINK_UNAVAILABLE) {
1300 pdq->pdq_flags &= ~PDQ_TXOK;
1301 } else if (state == PDQS_LINK_AVAILABLE) {
1302 pdq->pdq_flags |= PDQ_TXOK;
1303 pdq_os_restart_transmitter(pdq);
/* Adapter halted: log the halt code and pull the error log for diagnosis. */
1304 } else if (state == PDQS_HALTED) {
1305 pdq_response_error_log_get_t log_entry;
1306 pdq_halt_code_t halt_code = PDQ_PSTS_HALT_ID(PDQ_CSR_READ(csrs, csr_port_status));
1307 printf(": halt code = %d (%s)\n",
1308 halt_code, pdq_halt_codes[halt_code]);
1309 if (halt_code == PDQH_DMA_ERROR && pdq->pdq_type == PDQ_DEFPA) {
1310 PDQ_PRINTF(("\tPFI status = 0x%x, Host 0 Fatal Interrupt = 0x%x\n",
1311 PDQ_CSR_READ(&pdq->pdq_pci_csrs, csr_pfi_status),
1312 data & PDQ_HOST_INT_FATAL_ERROR));
1314 pdq_read_error_log(pdq, &log_entry);
1316 if (pdq->pdq_flags & PDQ_RUNNING)
/* Ack the state-change cause so it can fire again. */
1321 PDQ_CSR_WRITE(csrs, csr_host_int_type_0, PDQ_HOST_INT_STATE_CHANGE);
1323 if (data & PDQ_HOST_INT_FATAL_ERROR) {
1325 if (pdq->pdq_flags & PDQ_RUNNING)
/*
 * Transmit flush request from the adapter: drop pending transmits,
 * tell the adapter the flush is done, then ack the cause bit.
 */
1329 if (data & PDQ_HOST_INT_XMT_DATA_FLUSH) {
1330 printf(PDQ_OS_PREFIX "Flushing transmit queue\n", PDQ_OS_PREFIX_ARGS);
1331 pdq->pdq_flags &= ~PDQ_TXOK;
1332 pdq_flush_transmitter(pdq);
1333 pdq_do_port_control(csrs, PDQ_PCTL_XMT_DATA_FLUSH_DONE);
1334 PDQ_CSR_WRITE(csrs, csr_host_int_type_0, PDQ_HOST_INT_XMT_DATA_FLUSH);
/* Re-ack the PFI status on the way out (mirrors the entry write above). */
1337 if (pdq->pdq_type == PDQ_DEFPA)
1338 PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_status, 0x18);
1346 pdq_bus_memaddr_t csr_base,
1354 const pdq_uint32_t contig_bytes = (sizeof(pdq_descriptor_block_t) * 2) - PDQ_OS_PAGESIZE;
1358 PDQ_ASSERT(sizeof(pdq_descriptor_block_t) == 8192);
1359 PDQ_ASSERT(sizeof(pdq_consumer_block_t) == 64);
1360 PDQ_ASSERT(sizeof(pdq_response_filter_get_t) == PDQ_SIZE_RESPONSE_FILTER_GET);
1361 PDQ_ASSERT(sizeof(pdq_cmd_addr_filter_set_t) == PDQ_SIZE_CMD_ADDR_FILTER_SET);
1362 PDQ_ASSERT(sizeof(pdq_response_addr_filter_get_t) == PDQ_SIZE_RESPONSE_ADDR_FILTER_GET);
1363 PDQ_ASSERT(sizeof(pdq_response_status_chars_get_t) == PDQ_SIZE_RESPONSE_STATUS_CHARS_GET);
1364 PDQ_ASSERT(sizeof(pdq_response_fddi_mib_get_t) == PDQ_SIZE_RESPONSE_FDDI_MIB_GET);
1365 PDQ_ASSERT(sizeof(pdq_response_dec_ext_mib_get_t) == PDQ_SIZE_RESPONSE_DEC_EXT_MIB_GET);
1366 PDQ_ASSERT(sizeof(pdq_unsolicited_event_t) == 512);
1368 pdq = (pdq_t *) PDQ_OS_MEMALLOC(sizeof(pdq_t));
1370 PDQ_PRINTF(("malloc(%d) failed\n", sizeof(*pdq)));
1373 PDQ_OS_MEMZERO(pdq, sizeof(pdq_t));
1374 pdq->pdq_type = type;
1375 pdq->pdq_unit = unit;
1376 pdq->pdq_os_ctx = (void *) ctx;
1377 pdq->pdq_os_name = name;
1378 pdq->pdq_flags = PDQ_PRINTCHARS;
1380 * Allocate the additional data structures required by
1381 * the PDQ driver. Allocate a contiguous region of memory
1382 * for the descriptor block. We need to allocate enough
1383 * to guarantee that we will get an 8KB block of memory aligned
1384 * on an 8KB boundary. This turns out to require that we allocate
1385 * (N*2 - 1 page) pages of memory. On machines with less than
1386 * an 8KB page size, it means we will allocate more memory than
1387 * we need. The extra will be used for the unsolicited event
1388 * buffers (though on machines with 8KB pages we will have to allocate
1389 * them separately since there will be nothing left over.)
1391 p = (pdq_uint8_t *) PDQ_OS_MEMALLOC_CONTIG(contig_bytes);
1393 pdq_physaddr_t physaddr = PDQ_OS_VA_TO_PA(pdq, p);
1395 * Assert that we really got contiguous memory. This isn't really
1396 * needed on systems that actually have physical contiguous allocation
1397 * routines, but on those systems that don't ...
1399 for (idx = PDQ_OS_PAGESIZE; idx < 0x2000; idx += PDQ_OS_PAGESIZE) {
1400 if (PDQ_OS_VA_TO_PA(pdq, p + idx) - physaddr != idx)
1401 goto cleanup_and_return;
1405 pdq->pdq_unsolicited_info.ui_events = (pdq_unsolicited_event_t *) p;
1406 pdq->pdq_dbp = (pdq_descriptor_block_t *) &p[0x2000 - physaddr];
1408 pdq->pdq_dbp = (pdq_descriptor_block_t *) p;
1409 pdq->pdq_unsolicited_info.ui_events = (pdq_unsolicited_event_t *) &p[0x2000];
1412 if (contig_bytes == sizeof(pdq_descriptor_block_t)) {
1413 pdq->pdq_unsolicited_info.ui_events =
1414 (pdq_unsolicited_event_t *) PDQ_OS_MEMALLOC(
1415 PDQ_NUM_UNSOLICITED_EVENTS * sizeof(pdq_unsolicited_event_t));
1419 * Make sure everything got allocated. If not, free what did
1420 * get allocated and return.
1422 if (pdq->pdq_dbp == NULL || pdq->pdq_unsolicited_info.ui_events == NULL) {
1424 if (p /* pdq->pdq_dbp */ != NULL)
1425 PDQ_OS_MEMFREE_CONTIG(p /* pdq->pdq_dbp */, contig_bytes);
1426 if (contig_bytes == sizeof(pdq_descriptor_block_t) && pdq->pdq_unsolicited_info.ui_events != NULL)
1427 PDQ_OS_MEMFREE(pdq->pdq_unsolicited_info.ui_events,
1428 PDQ_NUM_UNSOLICITED_EVENTS * sizeof(pdq_unsolicited_event_t));
1429 PDQ_OS_MEMFREE(pdq, sizeof(pdq_t));
1433 pdq->pdq_cbp = (volatile pdq_consumer_block_t *) &pdq->pdq_dbp->pdqdb_consumer;
1434 pdq->pdq_command_info.ci_bufstart = (pdq_uint8_t *) pdq->pdq_dbp->pdqdb_command_pool;
1435 pdq->pdq_rx_info.rx_buffers = (void *) pdq->pdq_dbp->pdqdb_receive_buffers;
1437 pdq->pdq_host_smt_info.rx_buffers = (void *) pdq->pdq_dbp->pdqdb_host_smt_buffers;
1439 PDQ_PRINTF(("\nPDQ Descriptor Block = " PDQ_OS_PTR_FMT "\n", pdq->pdq_dbp));
1440 PDQ_PRINTF((" Recieve Queue = " PDQ_OS_PTR_FMT "\n", pdq->pdq_dbp->pdqdb_receives));
1441 PDQ_PRINTF((" Transmit Queue = " PDQ_OS_PTR_FMT "\n", pdq->pdq_dbp->pdqdb_transmits));
1442 PDQ_PRINTF((" Host SMT Queue = " PDQ_OS_PTR_FMT "\n", pdq->pdq_dbp->pdqdb_host_smt));
1443 PDQ_PRINTF((" Command Response Queue = " PDQ_OS_PTR_FMT "\n", pdq->pdq_dbp->pdqdb_command_responses));
1444 PDQ_PRINTF((" Command Request Queue = " PDQ_OS_PTR_FMT "\n", pdq->pdq_dbp->pdqdb_command_requests));
1445 PDQ_PRINTF(("PDQ Consumer Block = " PDQ_OS_PTR_FMT "\n", pdq->pdq_cbp));
1448 * Zero out the descriptor block. Not really required but
1449 * it pays to be neat. This will also zero out the consumer
1450 * block, command pool, and buffer pointers for the receive
1453 PDQ_OS_MEMZERO(pdq->pdq_dbp, sizeof(*pdq->pdq_dbp));
1456 * Initialize the CSR references.
1457 * The DEFAA (FutureBus+) skips a longword between registers
1459 pdq_init_csrs(&pdq->pdq_csrs, bus, csr_base, pdq->pdq_type == PDQ_DEFAA ? 2 : 1);
1460 if (pdq->pdq_type == PDQ_DEFPA)
1461 pdq_init_pci_csrs(&pdq->pdq_pci_csrs, bus, csr_base, 1);
1463 PDQ_PRINTF(("PDQ CSRs: BASE = " PDQ_OS_PTR_FMT "\n", pdq->pdq_csrs.csr_base));
1464 PDQ_PRINTF((" Port Reset = " PDQ_OS_PTR_FMT " [0x%08x]\n",
1465 pdq->pdq_csrs.csr_port_reset, PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_reset)));
1466 PDQ_PRINTF((" Host Data = " PDQ_OS_PTR_FMT " [0x%08x]\n",
1467 pdq->pdq_csrs.csr_host_data, PDQ_CSR_READ(&pdq->pdq_csrs, csr_host_data)));
1468 PDQ_PRINTF((" Port Control = " PDQ_OS_PTR_FMT " [0x%08x]\n",
1469 pdq->pdq_csrs.csr_port_control, PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_control)));
1470 PDQ_PRINTF((" Port Data A = " PDQ_OS_PTR_FMT " [0x%08x]\n",
1471 pdq->pdq_csrs.csr_port_data_a, PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_data_a)));
1472 PDQ_PRINTF((" Port Data B = " PDQ_OS_PTR_FMT " [0x%08x]\n",
1473 pdq->pdq_csrs.csr_port_data_b, PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_data_b)));
1474 PDQ_PRINTF((" Port Status = " PDQ_OS_PTR_FMT " [0x%08x]\n",
1475 pdq->pdq_csrs.csr_port_status, PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_status)));
1476 PDQ_PRINTF((" Host Int Type 0 = " PDQ_OS_PTR_FMT " [0x%08x]\n",
1477 pdq->pdq_csrs.csr_host_int_type_0, PDQ_CSR_READ(&pdq->pdq_csrs, csr_host_int_type_0)));
1478 PDQ_PRINTF((" Host Int Enable = " PDQ_OS_PTR_FMT " [0x%08x]\n",
1479 pdq->pdq_csrs.csr_host_int_enable, PDQ_CSR_READ(&pdq->pdq_csrs, csr_host_int_enable)));
1480 PDQ_PRINTF((" Type 2 Producer = " PDQ_OS_PTR_FMT " [0x%08x]\n",
1481 pdq->pdq_csrs.csr_type_2_producer, PDQ_CSR_READ(&pdq->pdq_csrs, csr_type_2_producer)));
1482 PDQ_PRINTF((" Command Response Producer = " PDQ_OS_PTR_FMT " [0x%08x]\n",
1483 pdq->pdq_csrs.csr_cmd_response_producer, PDQ_CSR_READ(&pdq->pdq_csrs, csr_cmd_response_producer)));
1484 PDQ_PRINTF((" Command Request Producer = " PDQ_OS_PTR_FMT " [0x%08x]\n",
1485 pdq->pdq_csrs.csr_cmd_request_producer, PDQ_CSR_READ(&pdq->pdq_csrs, csr_cmd_request_producer)));
1486 PDQ_PRINTF((" Host SMT Producer = " PDQ_OS_PTR_FMT " [0x%08x]\n",
1487 pdq->pdq_csrs.csr_host_smt_producer, PDQ_CSR_READ(&pdq->pdq_csrs, csr_host_smt_producer)));
1488 PDQ_PRINTF((" Unsolicited Producer = " PDQ_OS_PTR_FMT " [0x%08x]\n",
1489 pdq->pdq_csrs.csr_unsolicited_producer, PDQ_CSR_READ(&pdq->pdq_csrs, csr_unsolicited_producer)));
1492 * Initialize the command information block
1494 pdq->pdq_command_info.ci_pa_bufstart = PDQ_OS_VA_TO_PA(pdq, pdq->pdq_command_info.ci_bufstart);
1495 for (idx = 0; idx < sizeof(pdq->pdq_dbp->pdqdb_command_requests)/sizeof(pdq->pdq_dbp->pdqdb_command_requests[0]); idx++) {
1496 pdq_txdesc_t *txd = &pdq->pdq_dbp->pdqdb_command_requests[idx];
1498 txd->txd_pa_lo = pdq->pdq_command_info.ci_pa_bufstart;
1499 txd->txd_eop = txd->txd_sop = 1;
1502 for (idx = 0; idx < sizeof(pdq->pdq_dbp->pdqdb_command_responses)/sizeof(pdq->pdq_dbp->pdqdb_command_responses[0]); idx++) {
1503 pdq_rxdesc_t *rxd = &pdq->pdq_dbp->pdqdb_command_responses[idx];
1505 rxd->rxd_pa_lo = pdq->pdq_command_info.ci_pa_bufstart;
1507 rxd->rxd_seg_cnt = 0;
1508 rxd->rxd_seg_len_lo = 0;
1512 * Initialize the unsolicited event information block
1514 pdq->pdq_unsolicited_info.ui_free = PDQ_NUM_UNSOLICITED_EVENTS;
1515 pdq->pdq_unsolicited_info.ui_pa_bufstart = PDQ_OS_VA_TO_PA(pdq, pdq->pdq_unsolicited_info.ui_events);
1516 for (idx = 0; idx < sizeof(pdq->pdq_dbp->pdqdb_unsolicited_events)/sizeof(pdq->pdq_dbp->pdqdb_unsolicited_events[0]); idx++) {
1517 pdq_rxdesc_t *rxd = &pdq->pdq_dbp->pdqdb_unsolicited_events[idx];
1518 pdq_unsolicited_event_t *event = &pdq->pdq_unsolicited_info.ui_events[idx & (PDQ_NUM_UNSOLICITED_EVENTS-1)];
1521 rxd->rxd_seg_cnt = 0;
1522 rxd->rxd_seg_len_hi = sizeof(pdq_unsolicited_event_t) / 16;
1523 rxd->rxd_pa_lo = pdq->pdq_unsolicited_info.ui_pa_bufstart + (const pdq_uint8_t *) event
1524 - (const pdq_uint8_t *) pdq->pdq_unsolicited_info.ui_events;
1528 * Initialize the receive information blocks (normal and SMT).
1530 pdq->pdq_rx_info.rx_free = PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_receives);
1531 pdq->pdq_rx_info.rx_target = pdq->pdq_rx_info.rx_free - PDQ_RX_SEGCNT * 8;
1533 pdq->pdq_host_smt_info.rx_free = PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_host_smt);
1534 pdq->pdq_host_smt_info.rx_target = pdq->pdq_host_smt_info.rx_free - PDQ_RX_SEGCNT * 3;
1537 * Initialize the transmit information block.
1539 pdq->pdq_tx_hdr[0] = PDQ_FDDI_PH0;
1540 pdq->pdq_tx_hdr[1] = PDQ_FDDI_PH1;
1541 pdq->pdq_tx_hdr[2] = PDQ_FDDI_PH2;
1542 pdq->pdq_tx_info.tx_free = PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_transmits);
1543 pdq->pdq_tx_info.tx_hdrdesc.txd_seg_len = sizeof(pdq->pdq_tx_hdr);
1544 pdq->pdq_tx_info.tx_hdrdesc.txd_sop = 1;
1545 pdq->pdq_tx_info.tx_hdrdesc.txd_pa_lo = PDQ_OS_VA_TO_PA(pdq, pdq->pdq_tx_hdr);
1547 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_status));
1548 PDQ_PRINTF(("PDQ Adapter State = %s\n", pdq_adapter_states[state]));
1551 * Stop the PDQ if it is running and put it into a known state.
1553 state = pdq_stop(pdq);
1555 PDQ_PRINTF(("PDQ Adapter State = %s\n", pdq_adapter_states[state]));
1556 PDQ_ASSERT(state == PDQS_DMA_AVAILABLE);
1558 * If the adapter is not in the state we expect, then the initialization
1559 * failed. Cleanup and exit.
1561 #if defined(PDQVERBOSE)
1562 if (state == PDQS_HALTED) {
1563 pdq_halt_code_t halt_code = PDQ_PSTS_HALT_ID(PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_status));
1564 printf("Halt code = %d (%s)\n", halt_code, pdq_halt_codes[halt_code]);
1565 if (halt_code == PDQH_DMA_ERROR && pdq->pdq_type == PDQ_DEFPA)
1566 PDQ_PRINTF(("PFI status = 0x%x, Host 0 Fatal Interrupt = 0x%x\n",
1567 PDQ_CSR_READ(&pdq->pdq_pci_csrs, csr_pfi_status),
1568 PDQ_CSR_READ(&pdq->pdq_csrs, csr_host_int_type_0) & PDQ_HOST_INT_FATAL_ERROR));
1571 if (state == PDQS_RESET || state == PDQS_HALTED || state == PDQS_UPGRADE)
1572 goto cleanup_and_return;
1574 PDQ_PRINTF(("PDQ Hardware Address = %02x-%02x-%02x-%02x-%02x-%02x\n",
1575 pdq->pdq_hwaddr.lanaddr_bytes[0], pdq->pdq_hwaddr.lanaddr_bytes[1],
1576 pdq->pdq_hwaddr.lanaddr_bytes[2], pdq->pdq_hwaddr.lanaddr_bytes[3],
1577 pdq->pdq_hwaddr.lanaddr_bytes[4], pdq->pdq_hwaddr.lanaddr_bytes[5]));
1578 PDQ_PRINTF(("PDQ Firmware Revision = %c%c%c%c\n",
1579 pdq->pdq_fwrev.fwrev_bytes[0], pdq->pdq_fwrev.fwrev_bytes[1],
1580 pdq->pdq_fwrev.fwrev_bytes[2], pdq->pdq_fwrev.fwrev_bytes[3]));
1581 PDQ_PRINTF(("PDQ Chip Revision = "));
1582 switch (pdq->pdq_chip_rev) {
1583 case PDQ_CHIP_REV_A_B_OR_C: PDQ_PRINTF(("Rev C or below")); break;
1584 case PDQ_CHIP_REV_D: PDQ_PRINTF(("Rev D")); break;
1585 case PDQ_CHIP_REV_E: PDQ_PRINTF(("Rev E")); break;
1586 default: PDQ_PRINTF(("Unknown Rev %d", (int) pdq->pdq_chip_rev));