2 * Copyright (c) 2009 Yahoo! Inc.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * $FreeBSD: src/sys/dev/mps/mps.c,v 1.9 2010/12/10 21:45:10 ken Exp $
29 /* Communications core for LSI MPT2 */
31 #include <sys/types.h>
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/kernel.h>
36 #include <sys/mutex.h>
37 #include <sys/module.h>
41 #include <sys/malloc.h>
43 #include <sys/sysctl.h>
44 #include <sys/endian.h>
48 #include <bus/cam/scsi/scsi_all.h>
50 #include <dev/disk/mps/mpi/mpi2_type.h>
51 #include <dev/disk/mps/mpi/mpi2.h>
52 #include <dev/disk/mps/mpi/mpi2_ioc.h>
53 #include <dev/disk/mps/mpi/mpi2_cnfg.h>
54 #include <dev/disk/mps/mpsvar.h>
55 #include <dev/disk/mps/mps_table.h>
/* Forward declarations for this file's static helpers. */
57 static void mps_startup(void *arg);
58 static void mps_startup_complete(struct mps_softc *sc, struct mps_command *cm);
59 static int mps_send_iocinit(struct mps_softc *sc);
60 static int mps_attach_log(struct mps_softc *sc);
61 static void mps_dispatch_event(struct mps_softc *sc, uintptr_t data, MPI2_EVENT_NOTIFICATION_REPLY *reply);
62 static void mps_config_complete(struct mps_softc *sc, struct mps_command *cm);
63 static void mps_periodic(void *);
/* Root of the hw.mps sysctl tree and the driver's private malloc type. */
65 SYSCTL_NODE(_hw, OID_AUTO, mps, CTLFLAG_RD, 0, "MPS Driver Parameters");
67 MALLOC_DEFINE(M_MPT2, "mps", "mpt2 driver memory");
70 * Do a "Diagnostic Reset" aka a hard reset. This should get the chip out of
71 * any state and back to its initialization state machine.
/* Key sequence written to MPI2_WRITE_SEQUENCE_OFFSET to unlock the
 * host diagnostic register before a hard reset (see mps_hard_reset). */
73 static char mpt2_reset_magic[] = { 0x00, 0x0f, 0x04, 0x0b, 0x02, 0x07, 0x0d };
/*
 * Hard (diagnostic) reset: clear pending interrupts, push the magic
 * write sequence until DIAG_WRITE_ENABLE latches (up to 20 tries),
 * assert RESET_ADAPTER, then poll the doorbell until the IOC leaves
 * the RESET state, and finally re-write the sequence register with 0.
 * NOTE(review): interior lines (declarations of 'reg', braces, DELAYs,
 * returns) are elided in this extract - consult the full source.
 */
76 mps_hard_reset(struct mps_softc *sc)
79 int i, error, tries = 0;
81 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
83 /* Clear any pending interrupts */
84 mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
86 /* Push the magic sequence */
88 while (tries++ < 20) {
89 for (i = 0; i < sizeof(mpt2_reset_magic); i++)
90 mps_regwrite(sc, MPI2_WRITE_SEQUENCE_OFFSET,
95 reg = mps_regread(sc, MPI2_HOST_DIAGNOSTIC_OFFSET);
96 if (reg & MPI2_DIAG_DIAG_WRITE_ENABLE) {
104 /* Send the actual reset. XXX need to refresh the reg? */
105 mps_regwrite(sc, MPI2_HOST_DIAGNOSTIC_OFFSET,
106 reg | MPI2_DIAG_RESET_ADAPTER);
108 /* Wait up to 300 seconds in 50ms intervals */
110 for (i = 0; i < 60000; i++) {
112 reg = mps_regread(sc, MPI2_DOORBELL_OFFSET);
113 if ((reg & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_RESET) {
/* Writing 0 restarts the write sequence (presumably re-locks the
 * diagnostic register per the MPI2 spec - confirm). */
121 mps_regwrite(sc, MPI2_WRITE_SEQUENCE_OFFSET, 0x0);
/*
 * Soft reset: request a Message Unit Reset by writing the function
 * code into the doorbell register. Does not use the diagnostic
 * register path that mps_hard_reset() uses.
 */
127 mps_soft_reset(struct mps_softc *sc)
130 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
132 mps_regwrite(sc, MPI2_DOORBELL_OFFSET,
133 MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET <<
134 MPI2_DOORBELL_FUNCTION_SHIFT);
/*
 * Drive the IOC into the READY state, retrying up to 5 times.
 * Reads the doorbell, then dispatches on the IOC state bits:
 * READY -> done; FAULT/OPERATIONAL -> reset and retry (reset call
 * elided in this extract); RESET -> wait for the transition to
 * settle; anything else is logged as unknown.
 */
141 mps_transition_ready(struct mps_softc *sc)
144 int error, tries = 0;
146 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
149 while (tries++ < 5) {
150 reg = mps_regread(sc, MPI2_DOORBELL_OFFSET);
151 mps_dprint(sc, MPS_INFO, "Doorbell= 0x%x\n", reg);
154 * Ensure the IOC is ready to talk. If it's not, try
157 if (reg & MPI2_DOORBELL_USED) {
163 /* Is the adapter owned by another peer? */
164 if ((reg & MPI2_DOORBELL_WHO_INIT_MASK) ==
165 (MPI2_WHOINIT_PCI_PEER << MPI2_DOORBELL_WHO_INIT_SHIFT)) {
166 device_printf(sc->mps_dev, "IOC is under the control "
167 "of another peer host, aborting initialization.\n");
171 state = reg & MPI2_IOC_STATE_MASK;
172 if (state == MPI2_IOC_STATE_READY) {
176 } else if (state == MPI2_IOC_STATE_FAULT) {
177 mps_dprint(sc, MPS_INFO, "IOC in fault state 0x%x\n",
178 state & MPI2_DOORBELL_FAULT_CODE_MASK);
180 } else if (state == MPI2_IOC_STATE_OPERATIONAL) {
181 /* Need to take ownership */
183 } else if (state == MPI2_IOC_STATE_RESET) {
184 /* Wait a bit, IOC might be in transition */
185 mps_dprint(sc, MPS_FAULT,
186 "IOC in unexpected reset state\n");
188 mps_dprint(sc, MPS_FAULT,
189 "IOC in unknown state 0x%x\n", state);
194 /* Wait 50ms for things to settle down. */
199 device_printf(sc->mps_dev, "Cannot transition IOC to ready\n");
/*
 * Bring the IOC to the OPERATIONAL state: first ensure it is READY
 * (transitioning it if necessary), then send the IOCInit request.
 */
205 mps_transition_operational(struct mps_softc *sc)
210 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
213 reg = mps_regread(sc, MPI2_DOORBELL_OFFSET);
214 mps_dprint(sc, MPS_INFO, "Doorbell= 0x%x\n", reg);
216 state = reg & MPI2_IOC_STATE_MASK;
217 if (state != MPI2_IOC_STATE_READY) {
218 if ((error = mps_transition_ready(sc)) != 0)
222 error = mps_send_iocinit(sc);
226 /* Wait for the chip to ACK a word that we've put into its FIFO */
/* Polls up to MPS_DB_MAX_WAIT iterations for SYS2IOC_DB_STATUS to
 * clear, i.e. for the IOC to consume the last doorbell word. */
228 mps_wait_db_ack(struct mps_softc *sc)
232 for (retry = 0; retry < MPS_DB_MAX_WAIT; retry++) {
233 if ((mps_regread(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET) &
234 MPI2_HIS_SYS2IOC_DB_STATUS) == 0)
241 /* Wait for the chip to signal that the next word in its FIFO can be fetched */
/* Polls up to MPS_DB_MAX_WAIT iterations for IOC2SYS_DB_STATUS to
 * assert, i.e. for the IOC to post the next doorbell word. */
243 mps_wait_db_int(struct mps_softc *sc)
247 for (retry = 0; retry < MPS_DB_MAX_WAIT; retry++) {
248 if ((mps_regread(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET) &
249 MPI2_HIS_IOC2SYS_DB_STATUS) != 0)
256 /* Step through the synchronous command state machine, i.e. "Doorbell mode" */
/*
 * Synchronous (handshake) request path used before the normal queues
 * exist: clock 'req' out through the doorbell one 32-bit word at a
 * time, then clock the reply back in 16-bit words, discarding any
 * residual words that exceed 'reply_sz'.
 * NOTE(review): this extract elides the error returns, DELAY calls
 * and some assignments between the visible lines - the 'timeout'
 * parameter's use is not visible here.
 */
258 mps_request_sync(struct mps_softc *sc, void *req, MPI2_DEFAULT_REPLY *reply,
259 int req_sz, int reply_sz, int timeout)
263 int i, count, ioc_sz, residual;
266 mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
/* Bail out if someone else already owns the doorbell. */
269 if (mps_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_USED)
273 * Announce that a message is coming through the doorbell. Messages
274 * are pushed at 32bit words, so round up if needed.
276 count = (req_sz + 3) / 4;
277 mps_regwrite(sc, MPI2_DOORBELL_OFFSET,
278 (MPI2_FUNCTION_HANDSHAKE << MPI2_DOORBELL_FUNCTION_SHIFT) |
279 (count << MPI2_DOORBELL_ADD_DWORDS_SHIFT));
282 if (mps_wait_db_int(sc) ||
283 (mps_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_USED) == 0) {
284 mps_dprint(sc, MPS_FAULT, "Doorbell failed to activate\n");
287 mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
288 if (mps_wait_db_ack(sc) != 0) {
289 mps_dprint(sc, MPS_FAULT, "Doorbell handshake failed\n");
294 /* Clock out the message data synchronously in 32-bit dwords*/
295 data32 = (uint32_t *)req;
296 for (i = 0; i < count; i++) {
297 mps_regwrite(sc, MPI2_DOORBELL_OFFSET, data32[i]);
298 if (mps_wait_db_ack(sc) != 0) {
299 mps_dprint(sc, MPS_FAULT,
300 "Timeout while writing doorbell\n");
306 /* Clock in the reply in 16-bit words. The total length of the
307 * message is always in the 4th byte, so clock out the first 2 words
308 * manually, then loop the rest.
310 data16 = (uint16_t *)reply;
311 if (mps_wait_db_int(sc) != 0) {
312 mps_dprint(sc, MPS_FAULT, "Timeout reading doorbell 0\n");
316 mps_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_DATA_MASK;
317 mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
318 if (mps_wait_db_int(sc) != 0) {
319 mps_dprint(sc, MPS_FAULT, "Timeout reading doorbell 1\n");
323 mps_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_DATA_MASK;
324 mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
326 /* Number of 32bit words in the message */
327 ioc_sz = reply->MsgLength;
330 * Figure out how many 16bit words to clock in without overrunning.
331 * The precision loss with dividing reply_sz can safely be
332 * ignored because the messages can only be multiples of 32bits.
335 count = MIN((reply_sz / 4), ioc_sz) * 2;
336 if (count < ioc_sz * 2) {
337 residual = ioc_sz * 2 - count;
338 mps_dprint(sc, MPS_FAULT, "Driver error, throwing away %d "
339 "residual message words\n", residual);
/* First two 16-bit words were read above, so start at i = 2. */
342 for (i = 2; i < count; i++) {
343 if (mps_wait_db_int(sc) != 0) {
344 mps_dprint(sc, MPS_FAULT,
345 "Timeout reading doorbell %d\n", i);
348 data16[i] = mps_regread(sc, MPI2_DOORBELL_OFFSET) &
349 MPI2_DOORBELL_DATA_MASK;
350 mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
354 * Pull out residual words that won't fit into the provided buffer.
355 * This keeps the chip from hanging due to a driver programming
359 if (mps_wait_db_int(sc) != 0) {
360 mps_dprint(sc, MPS_FAULT,
361 "Timeout reading doorbell\n");
364 (void)mps_regread(sc, MPI2_DOORBELL_OFFSET);
365 mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
369 if (mps_wait_db_int(sc) != 0) {
370 mps_dprint(sc, MPS_FAULT, "Timeout waiting to exit doorbell\n");
373 if (mps_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_USED)
374 mps_dprint(sc, MPS_FAULT, "Warning, doorbell still active\n");
375 mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
/*
 * Post a command to the IOC by writing its 64-bit request descriptor
 * into the request-descriptor-post register pair (low word first).
 */
381 mps_enqueue_request(struct mps_softc *sc, struct mps_command *cm)
384 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
386 mps_regwrite(sc, MPI2_REQUEST_DESCRIPTOR_POST_LOW_OFFSET,
387 cm->cm_desc.Words.Low);
388 mps_regwrite(sc, MPI2_REQUEST_DESCRIPTOR_POST_HIGH_OFFSET,
389 cm->cm_desc.Words.High);
/*
 * Submit a command and busy-wait for completion instead of using the
 * interrupt/callback path. Marks the command POLLED so the interrupt
 * handler sets MPS_CM_FLAGS_COMPLETE rather than invoking a callback.
 * Gives up after ~1000 iterations (interval elided in this extract).
 */
393 mps_request_polled(struct mps_softc *sc, struct mps_command *cm)
395 int error, timeout = 0;
399 cm->cm_flags |= MPS_CM_FLAGS_POLLED;
400 cm->cm_complete = NULL;
401 mps_map_command(sc, cm);
403 while ((cm->cm_flags & MPS_CM_FLAGS_COMPLETE) == 0) {
406 if (timeout++ > 1000) {
407 mps_dprint(sc, MPS_FAULT, "polling failed\n");
417 * Just the FACTS, ma'am.
/*
 * Fetch the IOCFacts page over the synchronous doorbell path,
 * depositing the reply directly into caller-supplied 'facts'.
 * Used before the normal request queues are set up.
 */
420 mps_get_iocfacts(struct mps_softc *sc, MPI2_IOC_FACTS_REPLY *facts)
422 MPI2_DEFAULT_REPLY *reply;
423 MPI2_IOC_FACTS_REQUEST request;
424 int error, req_sz, reply_sz;
426 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
428 req_sz = sizeof(MPI2_IOC_FACTS_REQUEST);
429 reply_sz = sizeof(MPI2_IOC_FACTS_REPLY);
430 reply = (MPI2_DEFAULT_REPLY *)facts;
432 bzero(&request, req_sz);
433 request.Function = MPI2_FUNCTION_IOC_FACTS;
434 error = mps_request_sync(sc, &request, reply, req_sz, reply_sz, 5);
/*
 * Fetch PortFacts for one port using a polled command (queues must
 * already be operational, unlike mps_get_iocfacts). On success the
 * reply is copied into caller-supplied 'facts' and the command freed.
 * NOTE(review): the error-path handling between the visible lines is
 * elided in this extract.
 */
440 mps_get_portfacts(struct mps_softc *sc, MPI2_PORT_FACTS_REPLY *facts, int port)
442 MPI2_PORT_FACTS_REQUEST *request;
443 MPI2_PORT_FACTS_REPLY *reply;
444 struct mps_command *cm;
447 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
449 if ((cm = mps_alloc_command(sc)) == NULL)
451 request = (MPI2_PORT_FACTS_REQUEST *)cm->cm_req;
452 request->Function = MPI2_FUNCTION_PORT_FACTS;
453 request->PortNumber = port;
454 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
456 error = mps_request_polled(sc, cm);
457 reply = (MPI2_PORT_FACTS_REPLY *)cm->cm_reply;
458 if ((reply->IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS)
460 bcopy(reply, facts, sizeof(MPI2_PORT_FACTS_REPLY));
461 mps_free_command(sc, cm);
/*
 * Build and send the IOCInit request over the doorbell handshake.
 * Tells the firmware the host's queue depths and the physical base
 * addresses of the request frames, reply-descriptor post queue and
 * reply free queue. All High address words are deliberately 0 (the
 * DMA allocations below 4GB - see the comment in the body).
 */
467 mps_send_iocinit(struct mps_softc *sc)
469 MPI2_IOC_INIT_REQUEST init;
470 MPI2_DEFAULT_REPLY reply;
471 int req_sz, reply_sz, error;
473 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
475 req_sz = sizeof(MPI2_IOC_INIT_REQUEST);
476 reply_sz = sizeof(MPI2_IOC_INIT_REPLY);
477 bzero(&init, req_sz);
478 bzero(&reply, reply_sz);
481 * Fill in the init block. Note that most addresses are
482 * deliberately in the lower 32bits of memory. This is a micro-
483 * optimzation for PCI/PCIX, though it's not clear if it helps PCIe.
485 init.Function = MPI2_FUNCTION_IOC_INIT;
486 init.WhoInit = MPI2_WHOINIT_HOST_DRIVER;
487 init.MsgVersion = MPI2_VERSION;
488 init.HeaderVersion = MPI2_HEADER_VERSION;
489 init.SystemRequestFrameSize = sc->facts->IOCRequestFrameSize;
490 init.ReplyDescriptorPostQueueDepth = sc->pqdepth;
491 init.ReplyFreeQueueDepth = sc->fqdepth;
492 init.SenseBufferAddressHigh = 0;
493 init.SystemReplyAddressHigh = 0;
494 init.SystemRequestFrameBaseAddress.High = 0;
495 init.SystemRequestFrameBaseAddress.Low = (uint32_t)sc->req_busaddr;
496 init.ReplyDescriptorPostQueueAddress.High = 0;
497 init.ReplyDescriptorPostQueueAddress.Low = (uint32_t)sc->post_busaddr;
498 init.ReplyFreeQueueAddress.High = 0;
499 init.ReplyFreeQueueAddress.Low = (uint32_t)sc->free_busaddr;
500 init.TimeStamp.High = 0;
501 init.TimeStamp.Low = (uint32_t)time_second;
503 error = mps_request_sync(sc, &init, &reply, req_sz, reply_sz, 5);
504 if ((reply.IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS)
507 mps_dprint(sc, MPS_INFO, "IOCInit status= 0x%x\n", reply.IOCStatus);
/*
 * Queue an asynchronous PortEnable request; its completion is routed
 * to mps_startup_complete(), which releases the boot-time config
 * intrhook. Returns early if no command can be allocated.
 */
512 mps_send_portenable(struct mps_softc *sc)
514 MPI2_PORT_ENABLE_REQUEST *request;
515 struct mps_command *cm;
517 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
519 if ((cm = mps_alloc_command(sc)) == NULL)
521 request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req;
522 request->Function = MPI2_FUNCTION_PORT_ENABLE;
523 request->MsgFlags = 0;
525 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
526 cm->cm_complete = mps_startup_complete;
528 mps_enqueue_request(sc, cm);
/* Message Unit Reset request; body entirely elided in this extract.
 * Called from mps_free() to return the IOC to the READY state. */
533 mps_send_mur(struct mps_softc *sc)
/* bus_dmamap_load() callback: stores the single segment's bus address
 * into the bus_addr_t pointed to by 'arg'. Used for all single-
 * segment queue/frame allocations in this file. */
541 mps_memaddr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
546 *addr = segs[0].ds_addr;
/*
 * Allocate the reply free queue (4-byte entries) and the reply
 * descriptor post queue (8-byte entries) in one 16-byte-aligned DMA
 * allocation below 4GB; the free queue occupies the first fqsize
 * bytes, the post queue follows. Both depths are rounded up to a
 * multiple of 16 per the layout comment below.
 */
550 mps_alloc_queues(struct mps_softc *sc)
552 bus_addr_t queues_busaddr;
554 int qsize, fqsize, pqsize;
557 * The reply free queue contains 4 byte entries in multiples of 16 and
558 * aligned on a 16 byte boundary. There must always be an unused entry.
559 * This queue supplies fresh reply frames for the firmware to use.
561 * The reply descriptor post queue contains 8 byte entries in
562 * multiples of 16 and aligned on a 16 byte boundary. This queue
563 * contains filled-in reply frames sent from the firmware to the host.
565 * These two queues are allocated together for simplicity.
567 sc->fqdepth = roundup2((sc->num_replies + 1), 16);
568 sc->pqdepth = roundup2((sc->num_replies + 1), 16);
569 fqsize= sc->fqdepth * 4;
570 pqsize = sc->pqdepth * 8;
571 qsize = fqsize + pqsize;
573 if (bus_dma_tag_create( sc->mps_parent_dmat, /* parent */
574 16, 0, /* algnmnt, boundary */
575 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
576 BUS_SPACE_MAXADDR, /* highaddr */
577 NULL, NULL, /* filter, filterarg */
580 qsize, /* maxsegsize */
583 device_printf(sc->mps_dev, "Cannot allocate queues DMA tag\n");
586 if (bus_dmamem_alloc(sc->queues_dmat, (void **)&queues, BUS_DMA_NOWAIT,
588 device_printf(sc->mps_dev, "Cannot allocate queues memory\n");
591 bzero(queues, qsize);
592 bus_dmamap_load(sc->queues_dmat, sc->queues_map, queues, qsize,
593 mps_memaddr_cb, &queues_busaddr, 0);
/* Carve the single allocation: free queue first, post queue after. */
595 sc->free_queue = (uint32_t *)queues;
596 sc->free_busaddr = queues_busaddr;
597 sc->post_queue = (MPI2_REPLY_DESCRIPTORS_UNION *)(queues + fqsize);
598 sc->post_busaddr = queues_busaddr + fqsize;
/*
 * Allocate DMA memory for the reply frames themselves (distinct from
 * the descriptor queues). Space is sized for max(fqdepth, num_replies)
 * frames of ReplyFrameSize dwords each, 4-byte aligned, below 4GB.
 */
604 mps_alloc_replies(struct mps_softc *sc)
606 int rsize, num_replies;
609 * sc->num_replies should be one less than sc->fqdepth. We need to
610 * allocate space for sc->fqdepth replies, but only sc->num_replies
611 * replies can be used at once.
613 num_replies = max(sc->fqdepth, sc->num_replies);
/* ReplyFrameSize is in 32-bit words, hence the * 4. */
615 rsize = sc->facts->ReplyFrameSize * num_replies * 4;
616 if (bus_dma_tag_create( sc->mps_parent_dmat, /* parent */
617 4, 0, /* algnmnt, boundary */
618 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
619 BUS_SPACE_MAXADDR, /* highaddr */
620 NULL, NULL, /* filter, filterarg */
623 rsize, /* maxsegsize */
626 device_printf(sc->mps_dev, "Cannot allocate replies DMA tag\n");
629 if (bus_dmamem_alloc(sc->reply_dmat, (void **)&sc->reply_frames,
630 BUS_DMA_NOWAIT, &sc->reply_map)) {
631 device_printf(sc->mps_dev, "Cannot allocate replies memory\n");
634 bzero(sc->reply_frames, rsize);
635 bus_dmamap_load(sc->reply_dmat, sc->reply_map, sc->reply_frames, rsize,
636 mps_memaddr_cb, &sc->reply_busaddr, 0);
/*
 * Allocate the remaining per-command DMA resources:
 *   - request frames (num_reqs frames, 16-byte aligned, below 4GB)
 *   - chain frames (MPS_CHAIN_FRAMES, same geometry as requests)
 *   - sense buffers (MPS_SENSE_LEN per command, byte aligned)
 *   - the data-buffer DMA tag used for mapping I/O payloads
 * then carve the chain and command arrays out of those regions.
 * NOTE(review): error returns between the visible lines are elided
 * in this extract.
 */
642 mps_alloc_requests(struct mps_softc *sc)
644 struct mps_command *cm;
645 struct mps_chain *chain;
648 rsize = sc->facts->IOCRequestFrameSize * sc->num_reqs * 4;
649 if (bus_dma_tag_create( sc->mps_parent_dmat, /* parent */
650 16, 0, /* algnmnt, boundary */
651 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
652 BUS_SPACE_MAXADDR, /* highaddr */
653 NULL, NULL, /* filter, filterarg */
656 rsize, /* maxsegsize */
659 device_printf(sc->mps_dev, "Cannot allocate request DMA tag\n");
662 if (bus_dmamem_alloc(sc->req_dmat, (void **)&sc->req_frames,
663 BUS_DMA_NOWAIT, &sc->req_map)) {
664 device_printf(sc->mps_dev, "Cannot allocate request memory\n");
667 bzero(sc->req_frames, rsize);
668 bus_dmamap_load(sc->req_dmat, sc->req_map, sc->req_frames, rsize,
669 mps_memaddr_cb, &sc->req_busaddr, 0);
/* Chain frames reuse the request-frame size. */
671 rsize = sc->facts->IOCRequestFrameSize * MPS_CHAIN_FRAMES * 4;
672 if (bus_dma_tag_create( sc->mps_parent_dmat, /* parent */
673 16, 0, /* algnmnt, boundary */
674 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
675 BUS_SPACE_MAXADDR, /* highaddr */
676 NULL, NULL, /* filter, filterarg */
679 rsize, /* maxsegsize */
682 device_printf(sc->mps_dev, "Cannot allocate chain DMA tag\n");
685 if (bus_dmamem_alloc(sc->chain_dmat, (void **)&sc->chain_frames,
686 BUS_DMA_NOWAIT, &sc->chain_map)) {
687 device_printf(sc->mps_dev, "Cannot allocate chain memory\n");
690 bzero(sc->chain_frames, rsize);
691 bus_dmamap_load(sc->chain_dmat, sc->chain_map, sc->chain_frames, rsize,
692 mps_memaddr_cb, &sc->chain_busaddr, 0);
694 rsize = MPS_SENSE_LEN * sc->num_reqs;
695 if (bus_dma_tag_create( sc->mps_parent_dmat, /* parent */
696 1, 0, /* algnmnt, boundary */
697 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
698 BUS_SPACE_MAXADDR, /* highaddr */
699 NULL, NULL, /* filter, filterarg */
702 rsize, /* maxsegsize */
705 device_printf(sc->mps_dev, "Cannot allocate sense DMA tag\n");
708 if (bus_dmamem_alloc(sc->sense_dmat, (void **)&sc->sense_frames,
709 BUS_DMA_NOWAIT, &sc->sense_map)) {
710 device_printf(sc->mps_dev, "Cannot allocate sense memory\n");
713 bzero(sc->sense_frames, rsize);
714 bus_dmamap_load(sc->sense_dmat, sc->sense_map, sc->sense_frames, rsize,
715 mps_memaddr_cb, &sc->sense_busaddr, 0);
/* Chain bookkeeping structs, each pointing into the chain frames. */
717 sc->chains = kmalloc(sizeof(struct mps_chain) * MPS_CHAIN_FRAMES,
718 M_MPT2, M_WAITOK | M_ZERO);
719 for (i = 0; i < MPS_CHAIN_FRAMES; i++) {
720 chain = &sc->chains[i];
721 chain->chain = (MPI2_SGE_IO_UNION *)(sc->chain_frames +
722 i * sc->facts->IOCRequestFrameSize * 4);
723 chain->chain_busaddr = sc->chain_busaddr +
724 i * sc->facts->IOCRequestFrameSize * 4;
725 mps_free_chain(sc, chain);
728 /* XXX Need to pick a more precise value */
729 nsegs = (MAXPHYS / PAGE_SIZE) + 1;
730 if (bus_dma_tag_create( sc->mps_parent_dmat, /* parent */
731 1, 0, /* algnmnt, boundary */
732 BUS_SPACE_MAXADDR, /* lowaddr */
733 BUS_SPACE_MAXADDR, /* highaddr */
734 NULL, NULL, /* filter, filterarg */
735 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
736 nsegs, /* nsegments */
737 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
738 BUS_DMA_ALLOCNOW, /* flags */
740 device_printf(sc->mps_dev, "Cannot allocate sense DMA tag\n");
745 * SMID 0 cannot be used as a free command per the firmware spec.
746 * Just drop that command instead of risking accounting bugs.
748 sc->commands = kmalloc(sizeof(struct mps_command) * sc->num_reqs,
749 M_MPT2, M_WAITOK | M_ZERO);
/* Note the loop starts at 1: SMID 0 is reserved (see above). */
750 for (i = 1; i < sc->num_reqs; i++) {
751 cm = &sc->commands[i];
752 cm->cm_req = sc->req_frames +
753 i * sc->facts->IOCRequestFrameSize * 4;
754 cm->cm_req_busaddr = sc->req_busaddr +
755 i * sc->facts->IOCRequestFrameSize * 4;
756 cm->cm_sense = &sc->sense_frames[i];
757 cm->cm_sense_busaddr = sc->sense_busaddr + i * MPS_SENSE_LEN;
758 cm->cm_desc.Default.SMID = i;
760 TAILQ_INIT(&cm->cm_chain_list);
761 callout_init(&cm->cm_callout);
763 /* XXX Is a failure here a critical problem? */
764 if (bus_dmamap_create(sc->buffer_dmat, 0, &cm->cm_dmamap) == 0)
765 mps_free_command(sc, cm);
/*
 * Fill the reply queues with their initial contents: post queue
 * entries are set to the 0xff "unused" pattern, and each free-queue
 * slot gets the bus address of the corresponding reply frame.
 * replyfreeindex starts at num_replies (one less than fqdepth).
 */
776 mps_init_queues(struct mps_softc *sc)
780 memset((uint8_t *)sc->post_queue, 0xff, sc->pqdepth * 8);
783 * According to the spec, we need to use one less reply than we
784 * have space for on the queue. So sc->num_replies (the number we
785 * use) should be less than sc->fqdepth (allocated size).
787 if (sc->num_replies >= sc->fqdepth)
791 * Initialize all of the free queue entries.
793 for (i = 0; i < sc->fqdepth; i++)
794 sc->free_queue[i] = sc->reply_busaddr + (i * sc->facts->ReplyFrameSize * 4);
795 sc->replyfreeindex = sc->num_replies;
/*
 * Main attach path. In order: read tunables, set up the lock/callout/
 * event list and sysctl nodes, transition the IOC to READY, fetch
 * IOCFacts, hard-reset if event replay is unsupported, size and
 * allocate the queues/frames, go OPERATIONAL, program the host queue
 * indices, fetch per-port facts, attach the log/SAS/user subsystems,
 * hook up interrupts, and finally establish the config intrhook that
 * runs mps_startup() (which sends PortEnable).
 * NOTE(review): error-handling/cleanup lines between the visible
 * statements are elided in this extract.
 */
801 mps_attach(struct mps_softc *sc)
804 char tmpstr[80], tmpstr2[80];
807 * Grab any tunable-set debug level so that tracing works as early
810 ksnprintf(tmpstr, sizeof(tmpstr), "hw.mps.%d.debug_level",
811 device_get_unit(sc->mps_dev));
812 TUNABLE_INT_FETCH(tmpstr, &sc->mps_debug);
813 ksnprintf(tmpstr, sizeof(tmpstr), "hw.mps.%d.allow_multiple_tm_cmds",
814 device_get_unit(sc->mps_dev));
815 TUNABLE_INT_FETCH(tmpstr, &sc->allow_multiple_tm_cmds);
817 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
819 lockinit(&sc->mps_lock, "MPT2SAS lock", 0, LK_CANRECURSE);
820 callout_init(&sc->periodic);
821 TAILQ_INIT(&sc->event_list);
824 * Setup the sysctl variable so the user can change the debug level
827 ksnprintf(tmpstr, sizeof(tmpstr), "MPS controller %d",
828 device_get_unit(sc->mps_dev));
829 ksnprintf(tmpstr2, sizeof(tmpstr2), "%d", device_get_unit(sc->mps_dev));
831 sysctl_ctx_init(&sc->sysctl_ctx);
832 sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
833 SYSCTL_STATIC_CHILDREN(_hw_mps), OID_AUTO, tmpstr2, CTLFLAG_RD,
835 if (sc->sysctl_tree == NULL)
838 SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
839 OID_AUTO, "debug_level", CTLFLAG_RW, &sc->mps_debug, 0,
842 SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
843 OID_AUTO, "allow_multiple_tm_cmds", CTLFLAG_RW,
844 &sc->allow_multiple_tm_cmds, 0,
845 "allow multiple simultaneous task management cmds");
847 if ((error = mps_transition_ready(sc)) != 0)
850 sc->facts = kmalloc(sizeof(MPI2_IOC_FACTS_REPLY), M_MPT2,
852 if ((error = mps_get_iocfacts(sc, sc->facts)) != 0)
855 mps_print_iocfacts(sc, sc->facts);
857 mps_printf(sc, "Firmware: %02d.%02d.%02d.%02d\n",
858 sc->facts->FWVersion.Struct.Major,
859 sc->facts->FWVersion.Struct.Minor,
860 sc->facts->FWVersion.Struct.Unit,
861 sc->facts->FWVersion.Struct.Dev);
862 mps_printf(sc, "IOCCapabilities: %b\n",
863 (u_int)(sc->facts->IOCCapabilities),
864 "\20" "\3ScsiTaskFull" "\4DiagTrace" "\5SnapBuf" "\6ExtBuf"
865 "\7EEDP" "\10BiDirTarg" "\11Multicast" "\14TransRetry" "\15IR"
866 "\16EventReplay" "\17RaidAccel" "\20MSIXIndex" "\21HostDisc");
869 * If the chip doesn't support event replay then a hard reset will be
870 * required to trigger a full discovery. Do the reset here then
871 * retransition to Ready. A hard reset might have already been done,
872 * but it doesn't hurt to do it again.
874 if ((sc->facts->IOCCapabilities &
875 MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY) == 0) {
877 if ((error = mps_transition_ready(sc)) != 0)
882 * Size the queues. Since the reply queues always need one free entry,
883 * we'll just deduct one reply message here.
885 sc->num_reqs = MIN(MPS_REQ_FRAMES, sc->facts->RequestCredit);
886 sc->num_replies = MIN(MPS_REPLY_FRAMES + MPS_EVT_REPLY_FRAMES,
887 sc->facts->MaxReplyDescriptorPostQueueDepth) - 1;
888 TAILQ_INIT(&sc->req_list);
889 TAILQ_INIT(&sc->chain_list);
890 TAILQ_INIT(&sc->tm_list);
892 if (((error = mps_alloc_queues(sc)) != 0) ||
893 ((error = mps_alloc_replies(sc)) != 0) ||
894 ((error = mps_alloc_requests(sc)) != 0)) {
899 if (((error = mps_init_queues(sc)) != 0) ||
900 ((error = mps_transition_operational(sc)) != 0)) {
906 * Finish the queue initialization.
907 * These are set here instead of in mps_init_queues() because the
908 * IOC resets these values during the state transition in
909 * mps_transition_operational(). The free index is set to 1
910 * because the corresponding index in the IOC is set to 0, and the
911 * IOC treats the queues as full if both are set to the same value.
912 * Hence the reason that the queue can't hold all of the possible
915 sc->replypostindex = 0;
916 mps_regwrite(sc, MPI2_REPLY_FREE_HOST_INDEX_OFFSET, sc->replyfreeindex);
917 mps_regwrite(sc, MPI2_REPLY_POST_HOST_INDEX_OFFSET, 0);
919 sc->pfacts = kmalloc(sizeof(MPI2_PORT_FACTS_REPLY) *
920 sc->facts->NumberOfPorts, M_MPT2, M_ZERO|M_WAITOK);
921 for (i = 0; i < sc->facts->NumberOfPorts; i++) {
922 if ((error = mps_get_portfacts(sc, &sc->pfacts[i], i)) != 0) {
926 mps_print_portfacts(sc, &sc->pfacts[i]);
929 /* Attach the subsystems so they can prepare their event masks. */
930 /* XXX Should be dynamic so that IM/IR and user modules can attach */
931 if (((error = mps_attach_log(sc)) != 0) ||
932 ((error = mps_attach_sas(sc)) != 0) ||
933 ((error = mps_attach_user(sc)) != 0)) {
934 mps_printf(sc, "%s failed to attach all subsystems: error %d\n",
940 if ((error = mps_pci_setup_interrupts(sc)) != 0) {
945 /* Start the periodic watchdog check on the IOC Doorbell */
949 * The portenable will kick off discovery events that will drive the
950 * rest of the initialization process. The CAM/SAS module will
951 * hold up the boot sequence until discovery is complete.
953 sc->mps_ich.ich_func = mps_startup;
954 sc->mps_ich.ich_arg = sc;
955 if (config_intrhook_establish(&sc->mps_ich) != 0) {
956 mps_dprint(sc, MPS_FAULT, "Cannot establish MPS config hook\n");
/* Config-intrhook callback (established in mps_attach): kicks off
 * discovery by sending the PortEnable request. */
964 mps_startup(void *arg)
966 struct mps_softc *sc;
968 sc = (struct mps_softc *)arg;
972 mps_send_portenable(sc);
976 /* Periodic watchdog. Is called with the driver lock already held. */
/*
 * Watchdog callout: stops once MPS_FLAGS_SHUTDOWN is set. Reads the
 * doorbell and, on an IOC FAULT, resets (reset call elided in this
 * extract) and re-checks; then re-arms itself every
 * MPS_PERIODIC_DELAY seconds.
 */
978 mps_periodic(void *arg)
980 struct mps_softc *sc;
983 sc = (struct mps_softc *)arg;
984 if (sc->mps_flags & MPS_FLAGS_SHUTDOWN)
987 db = mps_regread(sc, MPI2_DOORBELL_OFFSET);
988 if ((db & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
989 device_printf(sc->mps_dev, "IOC Fault 0x%08x, Resetting\n", db);
990 /* XXX Need to broaden this to re-initialize the chip */
992 db = mps_regread(sc, MPI2_DOORBELL_OFFSET);
993 if ((db & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
994 device_printf(sc->mps_dev, "Second IOC Fault 0x%08x, "
1000 callout_reset(&sc->periodic, MPS_PERIODIC_DELAY * hz, mps_periodic, sc);
/*
 * Completion callback for the PortEnable command sent by
 * mps_send_portenable(): logs a failure status, frees the command,
 * and releases the config intrhook so boot can proceed.
 */
1004 mps_startup_complete(struct mps_softc *sc, struct mps_command *cm)
1006 MPI2_PORT_ENABLE_REPLY *reply;
1008 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
1010 reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply;
1011 if ((reply->IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS)
1012 mps_dprint(sc, MPS_FAULT, "Portenable failed\n");
1014 mps_free_command(sc, cm);
1015 config_intrhook_disestablish(&sc->mps_ich);
/*
 * Event callback registered by mps_attach_log(): prints the event,
 * hexdumps LOG_DATA payloads and logs LOG_ENTRY_ADDED entries.
 */
1020 mps_log_evt_handler(struct mps_softc *sc, uintptr_t data,
1021 MPI2_EVENT_NOTIFICATION_REPLY *event)
1023 MPI2_EVENT_DATA_LOG_ENTRY_ADDED *entry;
1025 mps_print_event(sc, event);
1027 switch (event->Event) {
1028 case MPI2_EVENT_LOG_DATA:
1029 device_printf(sc->mps_dev, "MPI2_EVENT_LOG_DATA:\n");
1030 hexdump(event->EventData, event->EventDataLength, NULL, 0);
1032 case MPI2_EVENT_LOG_ENTRY_ADDED:
1033 entry = (MPI2_EVENT_DATA_LOG_ENTRY_ADDED *)event->EventData;
1034 mps_dprint(sc, MPS_INFO, "MPI2_EVENT_LOG_ENTRY_ADDED event "
1035 "0x%x Sequence %d:\n", entry->LogEntryQualifier,
1036 entry->LogSequence);
/* Register mps_log_evt_handler for the LOG_DATA and LOG_ENTRY_ADDED
 * events (event-mask setup lines elided in this extract). */
1045 mps_attach_log(struct mps_softc *sc)
1050 setbit(events, MPI2_EVENT_LOG_DATA);
1051 setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
1053 mps_register_events(sc, events, mps_log_evt_handler, NULL,
/* Deregister the log event handler if it was ever registered. */
1060 mps_detach_log(struct mps_softc *sc)
1063 if (sc->mps_log_eh != NULL)
1064 mps_deregister_events(sc, sc->mps_log_eh);
1069 * Free all of the driver resources and detach submodules. Should be called
1070 * without the lock held.
/*
 * Teardown order: stop the watchdog, detach submodules, MUR the IOC
 * back to READY, then unwind every allocation from mps_attach() /
 * mps_alloc_*(): facts, pfacts, the four DMA regions (queues, chain,
 * sense, reply, request - each unload/free/destroy guarded by a
 * non-NULL/non-zero check), the chain and command arrays, the buffer
 * DMA tag, the sysctl context and finally the lock.
 */
1073 mps_free(struct mps_softc *sc)
1075 struct mps_command *cm;
1078 /* Turn off the watchdog */
1080 sc->mps_flags |= MPS_FLAGS_SHUTDOWN;
1082 #if 0 /* XXX swildner */
1083 /* Lock must not be held for this */
1084 callout_drain(&sc->periodic);
1087 if (((error = mps_detach_log(sc)) != 0) ||
1088 ((error = mps_detach_sas(sc)) != 0))
1091 /* Put the IOC back in the READY state. */
1093 if ((error = mps_send_mur(sc)) != 0) {
1099 if (sc->facts != NULL)
1100 kfree(sc->facts, M_MPT2);
1102 if (sc->pfacts != NULL)
1103 kfree(sc->pfacts, M_MPT2);
1105 if (sc->post_busaddr != 0)
1106 bus_dmamap_unload(sc->queues_dmat, sc->queues_map);
1107 if (sc->post_queue != NULL)
1108 bus_dmamem_free(sc->queues_dmat, sc->post_queue,
1110 if (sc->queues_dmat != NULL)
1111 bus_dma_tag_destroy(sc->queues_dmat);
1113 if (sc->chain_busaddr != 0)
1114 bus_dmamap_unload(sc->chain_dmat, sc->chain_map);
1115 if (sc->chain_frames != NULL)
1116 bus_dmamem_free(sc->chain_dmat, sc->chain_frames,sc->chain_map);
1117 if (sc->chain_dmat != NULL)
1118 bus_dma_tag_destroy(sc->chain_dmat);
1120 if (sc->sense_busaddr != 0)
1121 bus_dmamap_unload(sc->sense_dmat, sc->sense_map);
1122 if (sc->sense_frames != NULL)
1123 bus_dmamem_free(sc->sense_dmat, sc->sense_frames,sc->sense_map);
1124 if (sc->sense_dmat != NULL)
1125 bus_dma_tag_destroy(sc->sense_dmat);
1127 if (sc->reply_busaddr != 0)
1128 bus_dmamap_unload(sc->reply_dmat, sc->reply_map);
1129 if (sc->reply_frames != NULL)
1130 bus_dmamem_free(sc->reply_dmat, sc->reply_frames,sc->reply_map);
1131 if (sc->reply_dmat != NULL)
1132 bus_dma_tag_destroy(sc->reply_dmat);
1134 if (sc->req_busaddr != 0)
1135 bus_dmamap_unload(sc->req_dmat, sc->req_map);
1136 if (sc->req_frames != NULL)
1137 bus_dmamem_free(sc->req_dmat, sc->req_frames, sc->req_map);
1138 if (sc->req_dmat != NULL)
1139 bus_dma_tag_destroy(sc->req_dmat);
1141 if (sc->chains != NULL)
1142 kfree(sc->chains, M_MPT2);
1143 if (sc->commands != NULL) {
/* Commands start at 1 because SMID 0 was never allocated. */
1144 for (i = 1; i < sc->num_reqs; i++) {
1145 cm = &sc->commands[i];
1146 bus_dmamap_destroy(sc->buffer_dmat, cm->cm_dmamap);
1148 kfree(sc->commands, M_MPT2);
1150 if (sc->buffer_dmat != NULL)
1151 bus_dma_tag_destroy(sc->buffer_dmat);
1153 if (sc->sysctl_tree != NULL)
1154 sysctl_ctx_free(&sc->sysctl_ctx);
1156 lockuninit(&sc->mps_lock);
/*
 * INTx interrupt handler: read the interrupt status register (also
 * flushes posted writes), return early if the reply-descriptor bit
 * is not set, otherwise run the locked completion path.
 */
1162 mps_intr(void *data)
1164 struct mps_softc *sc;
1167 sc = (struct mps_softc *)data;
1168 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
1171 * Check interrupt status register to flush the bus. This is
1172 * needed for both INTx interrupts and driver-driven polling
1174 status = mps_regread(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET);
1175 if ((status & MPI2_HIS_REPLY_DESCRIPTOR_INTERRUPT) == 0)
1179 mps_intr_locked(data);
1185 * In theory, MSI/MSIX interrupts shouldn't need to read any registers on the
1186 * chip. Hopefully this theory is correct.
/* MSI/MSI-X handler: no status-register read, straight to the locked
 * completion path (lock acquisition elided in this extract). */
1189 mps_intr_msi(void *data)
1191 struct mps_softc *sc;
1193 sc = (struct mps_softc *)data;
1195 mps_intr_locked(data);
1201 * The locking is overly broad and simplistic, but easy to deal with for now.
/*
 * Core completion loop, called with the driver lock held. Walks the
 * reply-descriptor post queue from sc->replypostindex until it hits
 * an UNUSED/0xffffffff entry; for each descriptor it resolves the
 * owning command (SCSI_IO_SUCCESS by SMID, ADDRESS_REPLY by reply-
 * frame bus address, SMID 0 meaning an async event), completes or
 * wakes the command, poisons the consumed descriptor with 0xff, and
 * finally writes the new post index back to the chip.
 */
1204 mps_intr_locked(void *data)
1206 MPI2_REPLY_DESCRIPTORS_UNION *desc;
1207 struct mps_softc *sc;
1208 struct mps_command *cm = NULL;
1212 sc = (struct mps_softc *)data;
1214 pq = sc->replypostindex;
1218 desc = &sc->post_queue[pq];
1219 flags = desc->Default.ReplyFlags &
1220 MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
1221 if ((flags == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
1222 || (desc->Words.High == 0xffffffff))
1226 case MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS:
1227 cm = &sc->commands[desc->SCSIIOSuccess.SMID];
1228 cm->cm_reply = NULL;
1230 case MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY:
1236 * Re-compose the reply address from the address
1237 * sent back from the chip. The ReplyFrameAddress
1238 * is the lower 32 bits of the physical address of
1239 * particular reply frame. Convert that address to
1240 * host format, and then use that to provide the
1241 * offset against the virtual address base
1242 * (sc->reply_frames).
1244 baddr = le32toh(desc->AddressReply.ReplyFrameAddress);
1245 reply = sc->reply_frames +
1246 (baddr - ((uint32_t)sc->reply_busaddr));
1248 * Make sure the reply we got back is in a valid
1249 * range. If not, go ahead and panic here, since
1250 * we'll probably panic as soon as we deference the
1251 * reply pointer anyway.
1253 if ((reply < sc->reply_frames)
1254 || (reply > (sc->reply_frames +
1255 (sc->fqdepth * sc->facts->ReplyFrameSize * 4)))) {
1256 kprintf("%s: WARNING: reply %p out of range!\n",
1258 kprintf("%s: reply_frames %p, fqdepth %d, "
1259 "frame size %d\n", __func__,
1260 sc->reply_frames, sc->fqdepth,
1261 sc->facts->ReplyFrameSize * 4);
1262 kprintf("%s: baddr %#x,\n", __func__, baddr);
1263 panic("Reply address out of range");
/* SMID 0 carries no command: it is an async event notification. */
1265 if (desc->AddressReply.SMID == 0) {
1266 mps_dispatch_event(sc, baddr,
1267 (MPI2_EVENT_NOTIFICATION_REPLY *) reply);
1269 cm = &sc->commands[desc->AddressReply.SMID];
1270 cm->cm_reply = reply;
1272 desc->AddressReply.ReplyFrameAddress;
1276 case MPI2_RPY_DESCRIPT_FLAGS_TARGETASSIST_SUCCESS:
1277 case MPI2_RPY_DESCRIPT_FLAGS_TARGET_COMMAND_BUFFER:
1278 case MPI2_RPY_DESCRIPT_FLAGS_RAID_ACCELERATOR_SUCCESS:
1281 device_printf(sc->mps_dev, "Unhandled reply 0x%x\n",
1282 desc->Default.ReplyFlags);
1288 if (cm->cm_flags & MPS_CM_FLAGS_POLLED)
1289 cm->cm_flags |= MPS_CM_FLAGS_COMPLETE;
1291 if (cm->cm_complete != NULL)
1292 cm->cm_complete(sc, cm);
1294 if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP)
/* Poison the consumed descriptor so it reads as UNUSED next pass. */
1298 desc->Words.Low = 0xffffffff;
1299 desc->Words.High = 0xffffffff;
1300 if (++pq >= sc->pqdepth)
1304 if (pq != sc->replypostindex) {
1305 mps_dprint(sc, MPS_INFO, "writing postindex %d\n", pq);
1306 mps_regwrite(sc, MPI2_REPLY_POST_HOST_INDEX_OFFSET, pq);
1307 sc->replypostindex = pq;
/*
 * Deliver an unsolicited event notification (SMID 0 address reply) to
 * every registered handler whose mask includes the event code.
 * 'data' is the bus address of the reply frame, passed through so a
 * handler can release the frame.  Reports events nobody claimed.
 * NOTE(review): the 'handled' bookkeeping lines are elided in this
 * excerpt -- confirm against the full source.
 */
1314 mps_dispatch_event(struct mps_softc *sc, uintptr_t data,
1315 MPI2_EVENT_NOTIFICATION_REPLY *reply)
1317 struct mps_event_handle *eh;
1318 int event, handled = 0;
1320 event = reply->Event;
1321 TAILQ_FOREACH(eh, &sc->event_list, eh_list) {
1322 if (isset(eh->mask, event)) {
1323 eh->callback(sc, data, reply);
1329 device_printf(sc->mps_dev, "Unhandled event 0x%x\n", event);
1333 * For both register_events and update_events, the caller supplies a bitmap
1334 * of events that it _wants_. These functions then turn that into a bitmask
1335 * suitable for the controller.
/*
 * Allocate and queue a new event handler with the given callback and
 * wanted-event bitmap, then push the recomputed controller event mask
 * via mps_update_events().  M_WAITOK: may sleep, never returns NULL.
 * NOTE(review): the lines storing cb/data/mask into 'eh' and returning
 * the handle/error are elided in this excerpt.
 */
1338 mps_register_events(struct mps_softc *sc, uint8_t *mask,
1339 mps_evt_callback_t *cb, void *data, struct mps_event_handle **handle)
1341 struct mps_event_handle *eh;
1344 eh = kmalloc(sizeof(struct mps_event_handle), M_MPT2, M_WAITOK|M_ZERO);
1347 TAILQ_INSERT_TAIL(&sc->event_list, eh, eh_list);
1349 error = mps_update_events(sc, eh, mask);
/*
 * Recompute the controller-wide event mask from all registered
 * handlers and send an EVENT_NOTIFICATION request to the IOC.
 * Optionally installs 'mask' (16 bytes) into 'handle' first.  The
 * controller mask is inverted relative to the handlers' wanted bits:
 * 0xff = masked off, cleared bits = wanted.  Issued as a polled
 * command; prints the reply event on IOC error status.
 */
1356 mps_update_events(struct mps_softc *sc, struct mps_event_handle *handle,
1359 MPI2_EVENT_NOTIFICATION_REQUEST *evtreq;
1360 MPI2_EVENT_NOTIFICATION_REPLY *reply;
1361 struct mps_command *cm;
1362 struct mps_event_handle *eh;
1365 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
/* Install the caller's wanted-event bitmap into the handle, when both
 * are supplied (deregister passes NULL/NULL). */
1367 if ((mask != NULL) && (handle != NULL))
1368 bcopy(mask, &handle->mask[0], 16);
/* Start from "everything masked" and clear the bits any handler
 * wants. */
1369 memset(sc->event_mask, 0xff, 16);
1371 TAILQ_FOREACH(eh, &sc->event_list, eh_list) {
1372 for (i = 0; i < 16; i++)
1373 sc->event_mask[i] &= ~eh->mask[i];
1376 if ((cm = mps_alloc_command(sc)) == NULL)
1378 evtreq = (MPI2_EVENT_NOTIFICATION_REQUEST *)cm->cm_req;
1379 evtreq->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
1380 evtreq->MsgFlags = 0;
1381 evtreq->SASBroadcastPrimitiveMasks = 0;
/* Debug build option: unmask every event (0x00 = wanted). */
1382 #ifdef MPS_DEBUG_ALL_EVENTS
1384 u_char fullmask[16];
1385 memset(fullmask, 0x00, 16);
1386 bcopy(fullmask, (uint8_t *)&evtreq->EventMasks, 16);
1389 bcopy(sc->event_mask, (uint8_t *)&evtreq->EventMasks, 16);
1391 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
1394 error = mps_request_polled(sc, cm);
1395 reply = (MPI2_EVENT_NOTIFICATION_REPLY *)cm->cm_reply;
1396 if ((reply->IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS)
1398 mps_print_event(sc, reply);
1400 mps_free_command(sc, cm);
/*
 * Remove and free a previously registered event handler, then
 * recompute and push the controller event mask without it
 * (mps_update_events with NULL handle/mask only rebuilds the mask
 * from the remaining handlers on event_list).
 */
1405 mps_deregister_events(struct mps_softc *sc, struct mps_event_handle *handle)
1408 TAILQ_REMOVE(&sc->event_list, handle, eh_list);
1409 kfree(handle, M_MPT2);
1410 return (mps_update_events(sc, NULL, NULL));
1414 * Add a chain element as the next SGE for the specified command.
1415 * Reset cm_sge and cm_sgesize to indicate all the available space.
/*
 * The chain frame is one IOC request frame (IOCRequestFrameSize is in
 * 16-bit-word units, hence *4 for bytes).  The 32-bit chain SGE is
 * written where cm_sge currently points, then cm_sge/cm_sglsize are
 * redirected into the freshly allocated chain frame.
 * NOTE(review): the NULL check after mps_alloc_chain() is elided in
 * this excerpt -- confirm failure handling against the full source.
 */
1418 mps_add_chain(struct mps_command *cm)
1420 MPI2_SGE_CHAIN32 *sgc;
1421 struct mps_chain *chain;
1424 if (cm->cm_sglsize < MPS_SGC_SIZE)
1425 panic("MPS: Need SGE Error Code\n");
1427 chain = mps_alloc_chain(cm->cm_sc);
1431 space = (int)cm->cm_sc->facts->IOCRequestFrameSize * 4;
1434 * Note: a double-linked list is used to make it easier to
1435 * walk for debugging.
1437 TAILQ_INSERT_TAIL(&cm->cm_chain_list, chain, chain_link);
1439 sgc = (MPI2_SGE_CHAIN32 *)&cm->cm_sge->MpiChain;
1440 sgc->Length = space;
1441 sgc->NextChainOffset = 0;
1442 sgc->Flags = MPI2_SGE_FLAGS_CHAIN_ELEMENT;
1443 sgc->Address = chain->chain_busaddr;
/* Subsequent SGEs go into the chain frame. */
1445 cm->cm_sge = (MPI2_SGE_IO_UNION *)&chain->chain->MpiSimple;
1446 cm->cm_sglsize = space;
1451 * Add one scatter-gather element (chain, simple, transaction context)
1452 * to the scatter-gather list for a command. Maintain cm_sglsize and
1453 * cm_sge as the remaining size and pointer to the next SGE to fill
/*
 * 'sgep'/'len' describe the pre-built SGE to copy in; 'segsleft' is
 * the number of segments still to be added (including this one).
 * The function validates the SGE against its declared type, inserts
 * chain elements when the current frame is nearly full, and sets the
 * LAST/END_OF_BUFFER/END_OF_LIST flags on the final simple element.
 */
1457 mps_push_sge(struct mps_command *cm, void *sgep, size_t len, int segsleft)
1459 MPI2_SGE_TRANSACTION_UNION *tc = sgep;
1460 MPI2_SGE_SIMPLE64 *sge = sgep;
1463 type = (tc->Flags & MPI2_SGE_FLAGS_ELEMENT_MASK);
/* Sanity-check that 'len' matches what the element's Flags claim it
 * is; any mismatch is a driver bug, hence panic. */
1467 case MPI2_SGE_FLAGS_TRANSACTION_ELEMENT: {
1468 if (len != tc->DetailsLength + 4)
1469 panic("TC %p length %u or %zu?", tc,
1470 tc->DetailsLength + 4, len);
1473 case MPI2_SGE_FLAGS_CHAIN_ELEMENT:
1474 /* Driver only uses 32-bit chain elements */
1475 if (len != MPS_SGC_SIZE)
1476 panic("CHAIN %p length %u or %zu?", sgep,
1479 case MPI2_SGE_FLAGS_SIMPLE_ELEMENT:
1480 /* Driver only uses 64-bit SGE simple elements */
1482 if (len != MPS_SGE64_SIZE)
1483 panic("SGE simple %p length %u or %zu?", sge,
1484 MPS_SGE64_SIZE, len);
1485 if (((sge->FlagsLength >> MPI2_SGE_FLAGS_SHIFT) &
1486 MPI2_SGE_FLAGS_ADDRESS_SIZE) == 0)
1487 panic("SGE simple %p flags %02x not marked 64-bit?",
1489 (u_int)(sge->FlagsLength >> MPI2_SGE_FLAGS_SHIFT));
1493 panic("Unexpected SGE %p, flags %02x", tc, tc->Flags);
1498 * case 1: 1 more segment, enough room for it
1499 * case 2: 2 more segments, enough room for both
1500 * case 3: >=2 more segments, only enough room for 1 and a chain
1501 * case 4: >=1 more segment, enough room for only a chain
1502 * case 5: >=1 more segment, no room for anything (error)
1506 * There should be room for at least a chain element, or this
1507 * code is buggy. Case (5).
1509 if (cm->cm_sglsize < MPS_SGC_SIZE)
1510 panic("MPS: Need SGE Error Code\n");
1512 if (segsleft >= 2 &&
1513 cm->cm_sglsize < len + MPS_SGC_SIZE + MPS_SGE64_SIZE) {
1515 * There are 2 or more segments left to add, and only
1516 * enough room for 1 and a chain. Case (3).
1518 * Mark as last element in this chain if necessary.
1520 if (type == MPI2_SGE_FLAGS_SIMPLE_ELEMENT) {
1522 (MPI2_SGE_FLAGS_LAST_ELEMENT << MPI2_SGE_FLAGS_SHIFT);
1526 * Add the item then a chain. Do the chain now,
1527 * rather than on the next iteration, to simplify
1528 * understanding the code.
1530 cm->cm_sglsize -= len;
1531 bcopy(sgep, cm->cm_sge, len);
1532 cm->cm_sge = (MPI2_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + len);
1533 return (mps_add_chain(cm));
1536 if (segsleft >= 1 && cm->cm_sglsize < len + MPS_SGC_SIZE) {
1538 * 1 or more segment, enough room for only a chain.
1539 * Hope the previous element wasn't a Simple entry
1540 * that needed to be marked with
1541 * MPI2_SGE_FLAGS_LAST_ELEMENT. Case (4).
1543 if ((error = mps_add_chain(cm)) != 0)
1548 /* Case 1: 1 more segment, enough room for it. */
1549 if (segsleft == 1 && cm->cm_sglsize < len)
1550 panic("1 seg left and no room? %u versus %zu",
1551 cm->cm_sglsize, len);
1553 /* Case 2: 2 more segments, enough room for both */
1554 if (segsleft == 2 && cm->cm_sglsize < len + MPS_SGE64_SIZE)
1555 panic("2 segs left and no room? %u versus %zu",
1556 cm->cm_sglsize, len);
1559 if (segsleft == 1 && type == MPI2_SGE_FLAGS_SIMPLE_ELEMENT) {
1561 * Last element of the last segment of the entire
/* Final simple SGE: mark end of the whole S/G list. */
1564 sge->FlagsLength |= ((MPI2_SGE_FLAGS_LAST_ELEMENT |
1565 MPI2_SGE_FLAGS_END_OF_BUFFER |
1566 MPI2_SGE_FLAGS_END_OF_LIST) << MPI2_SGE_FLAGS_SHIFT);
/* Ordinary case: copy the SGE in place and advance the cursor. */
1569 cm->cm_sglsize -= len;
1570 bcopy(sgep, cm->cm_sge, len);
1571 cm->cm_sge = (MPI2_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + len);
1576 * Add one dma segment to the scatter-gather list for a command.
/*
 * Builds a 64-bit simple SGE for the physical segment [pa, pa+len)
 * with the caller's flags OR'ed in, and pushes it via mps_push_sge().
 * 'flags' are SGE flag bits (pre-shift); 'segsleft' counts remaining
 * segments including this one.
 */
1579 mps_add_dmaseg(struct mps_command *cm, vm_paddr_t pa, size_t len, u_int flags,
1582 MPI2_SGE_SIMPLE64 sge;
1585 * This driver always uses 64-bit address elements for
1588 flags |= MPI2_SGE_FLAGS_SIMPLE_ELEMENT | MPI2_SGE_FLAGS_ADDRESS_SIZE;
1589 sge.FlagsLength = len | (flags << MPI2_SGE_FLAGS_SHIFT);
1590 mps_from_u64(pa, &sge.Address);
1592 return (mps_push_sge(cm, &sge, sizeof sge, segsleft));
/*
 * busdma load callback: translate the bus_dma segment list into SGEs
 * on the command, sync the buffer for DMA, and enqueue the request to
 * the controller.  'arg' is the struct mps_command being mapped.
 * NOTE(review): error-path lines (early return on 'error', rollback
 * after chain-frame exhaustion) are elided in this excerpt.
 */
1596 mps_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1598 struct mps_softc *sc;
1599 struct mps_command *cm;
1600 u_int i, dir, sflags;
1602 cm = (struct mps_command *)arg;
1606 * In this case, just print out a warning and let the chip tell the
1607 * user they did the wrong thing.
1609 if ((cm->cm_max_segs != 0) && (nsegs > cm->cm_max_segs)) {
1610 mps_printf(sc, "%s: warning: busdma returned %d segments, "
1611 "more than the %d allowed\n", __func__, nsegs,
1616 * Set up DMA direction flags. Note that we don't support
1617 * bi-directional transfers, with the exception of SMP passthrough.
1620 if (cm->cm_flags & MPS_CM_FLAGS_SMP_PASS) {
1622 * We have to add a special case for SMP passthrough, there
1623 * is no easy way to generically handle it. The first
1624 * S/G element is used for the command (therefore the
1625 * direction bit needs to be set). The second one is used
1626 * for the reply. We'll leave it to the caller to make
1627 * sure we only have two buffers.
1630 * Even though the busdma man page says it doesn't make
1631 * sense to have both direction flags, it does in this case.
1632 * We have one s/g element being accessed in each direction.
1634 dir = BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD;
1637 * Set the direction flag on the first buffer in the SMP
1638 * passthrough request. We'll clear it for the second one.
1640 sflags |= MPI2_SGE_FLAGS_DIRECTION |
1641 MPI2_SGE_FLAGS_END_OF_BUFFER;
1642 } else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT) {
/* DATAOUT: host-to-device; DIRECTION bit set, sync for write. */
1643 sflags |= MPI2_SGE_FLAGS_DIRECTION;
1644 dir = BUS_DMASYNC_PREWRITE;
1646 dir = BUS_DMASYNC_PREREAD;
1648 for (i = 0; i < nsegs; i++) {
/* SMP passthrough: only the first segment carries the DIRECTION
 * bit; clear it for the reply segment. */
1649 if ((cm->cm_flags & MPS_CM_FLAGS_SMP_PASS)
1651 sflags &= ~MPI2_SGE_FLAGS_DIRECTION;
1653 error = mps_add_dmaseg(cm, segs[i].ds_addr, segs[i].ds_len,
1656 /* Resource shortage, roll back! */
1657 mps_printf(sc, "out of chain frames\n");
1662 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
1663 mps_enqueue_request(sc, cm);
/*
 * bus_dmamap_load_uio() callback variant: identical to mps_data_cb()
 * but with the extra 'mapsize' argument that the uio loader supplies;
 * mapsize is ignored and the call is forwarded.
 */
1669 mps_data_cb2(void *arg, bus_dma_segment_t *segs, int nsegs, bus_size_t mapsize,
1672 mps_data_cb(arg, segs, nsegs, error);
1676 * Note that the only error path here is from bus_dmamap_load(), which can
1677 * return EINPROGRESS if it is waiting for resources.
/*
 * Map a command's data buffer for DMA and submit it.  Three cases:
 * uio-backed buffers, flat data buffers (both complete asynchronously
 * via the mps_data_cb* callbacks), and no data at all, in which case a
 * zero-length simple SGE is written (if the request has an SGE slot)
 * and the command is enqueued directly.
 */
1680 mps_map_command(struct mps_softc *sc, struct mps_command *cm)
1682 MPI2_SGE_SIMPLE32 *sge;
1685 if (cm->cm_flags & MPS_CM_FLAGS_USE_UIO) {
1686 error = bus_dmamap_load_uio(sc->buffer_dmat, cm->cm_dmamap,
1687 &cm->cm_uio, mps_data_cb2, cm, 0);
1688 } else if ((cm->cm_data != NULL) && (cm->cm_length != 0)) {
1689 error = bus_dmamap_load(sc->buffer_dmat, cm->cm_dmamap,
1690 cm->cm_data, cm->cm_length, mps_data_cb, cm, 0);
1692 /* Add a zero-length element as needed */
1693 if (cm->cm_sge != NULL) {
1694 sge = (MPI2_SGE_SIMPLE32 *)cm->cm_sge;
1695 sge->FlagsLength = (MPI2_SGE_FLAGS_LAST_ELEMENT |
1696 MPI2_SGE_FLAGS_END_OF_BUFFER |
1697 MPI2_SGE_FLAGS_END_OF_LIST |
1698 MPI2_SGE_FLAGS_SIMPLE_ELEMENT) <<
1699 MPI2_SGE_FLAGS_SHIFT;
1702 mps_enqueue_request(sc, cm);
1709 * The MPT driver had a verbose interface for config pages. In this driver,
1710 * reduce it to much simplier terms, similar to the Linux driver.
/*
 * Read a controller configuration page described by 'params'.  Fills
 * in an MPI2 CONFIG request (handling both standard and extended page
 * headers), attaches the caller's buffer as a DATAIN SGE, and either
 * completes asynchronously through params->callback or sleeps until
 * the reply arrives and completes synchronously.
 * NOTE(review): the BUSY-flag error return and the command-allocation
 * failure path are elided in this excerpt.
 * FIX: "&params" had been corrupted to the mojibake "¶ms" (HTML
 * entity for the pilcrow) in two places below; restored so the file
 * compiles.
 */
1713 mps_read_config_page(struct mps_softc *sc, struct mps_config_params *params)
1715 MPI2_CONFIG_REQUEST *req;
1716 struct mps_command *cm;
1719 if (sc->mps_flags & MPS_FLAGS_BUSY) {
1723 cm = mps_alloc_command(sc);
1728 req = (MPI2_CONFIG_REQUEST *)cm->cm_req;
1729 req->Function = MPI2_FUNCTION_CONFIG;
1730 req->Action = params->action;
1732 req->ChainOffset = 0;
1733 req->PageAddress = params->page_address;
/* Extended config pages carry their type/length in the Ext header and
 * force PageType to EXTENDED with a zero PageLength. */
1734 if (params->hdr.Ext.ExtPageType != 0) {
1735 MPI2_CONFIG_EXTENDED_PAGE_HEADER *hdr;
1737 hdr = &params->hdr.Ext;
1738 req->ExtPageType = hdr->ExtPageType;
1739 req->ExtPageLength = hdr->ExtPageLength;
1740 req->Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
1741 req->Header.PageLength = 0; /* Must be set to zero */
1742 req->Header.PageNumber = hdr->PageNumber;
1743 req->Header.PageVersion = hdr->PageVersion;
1745 MPI2_CONFIG_PAGE_HEADER *hdr;
1747 hdr = &params->hdr.Struct;
1748 req->Header.PageType = hdr->PageType;
1749 req->Header.PageNumber = hdr->PageNumber;
1750 req->Header.PageLength = hdr->PageLength;
1751 req->Header.PageVersion = hdr->PageVersion;
/* Attach the caller's buffer as the DATAIN scatter-gather target. */
1754 cm->cm_data = params->buffer;
1755 cm->cm_length = params->length;
1756 cm->cm_sge = &req->PageBufferSGE;
1757 cm->cm_sglsize = sizeof(MPI2_SGE_IO_UNION);
1758 cm->cm_flags = MPS_CM_FLAGS_SGE_SIMPLE | MPS_CM_FLAGS_DATAIN;
1759 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
1761 cm->cm_complete_data = params;
/* Asynchronous path: completion runs mps_config_complete() from the
 * interrupt side. */
1762 if (params->callback != NULL) {
1763 cm->cm_complete = mps_config_complete;
1764 return (mps_map_command(sc, cm));
/* Synchronous path: sleep on the command until the interrupt handler
 * wakes us (MPS_CM_FLAGS_WAKEUP), then complete inline. */
1766 cm->cm_complete = NULL;
1767 cm->cm_flags |= MPS_CM_FLAGS_WAKEUP;
1768 if ((error = mps_map_command(sc, cm)) != 0)
1770 lksleep(cm, &sc->mps_lock, 0, "mpswait", 0);
1771 mps_config_complete(sc, cm);
1778 mps_write_config_page(struct mps_softc *sc, struct mps_config_params *params)
1784 mps_config_complete(struct mps_softc *sc, struct mps_command *cm)
1786 MPI2_CONFIG_REPLY *reply;
1787 struct mps_config_params *params;
1789 params = cm->cm_complete_data;
1791 if (cm->cm_data != NULL) {
1792 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
1793 BUS_DMASYNC_POSTREAD);
1794 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
1797 reply = (MPI2_CONFIG_REPLY *)cm->cm_reply;
1798 params->status = reply->IOCStatus;
1799 if (params->hdr.Ext.ExtPageType != 0) {
1800 params->hdr.Ext.ExtPageType = reply->ExtPageType;
1801 params->hdr.Ext.ExtPageLength = reply->ExtPageLength;
1803 params->hdr.Struct.PageType = reply->Header.PageType;
1804 params->hdr.Struct.PageNumber = reply->Header.PageNumber;
1805 params->hdr.Struct.PageLength = reply->Header.PageLength;
1806 params->hdr.Struct.PageVersion = reply->Header.PageVersion;
1809 mps_free_command(sc, cm);
1810 if (params->callback != NULL)
1811 params->callback(sc, params);