From: Matthew Dillon
Date: Sun, 16 Aug 2009 01:57:23 +0000 (-0700)
Subject: MPI Driver update - additional merge work.
X-Git-Tag: v2.4.0~241
X-Git-Url: https://gitweb.dragonflybsd.org/dragonfly.git/commitdiff_plain/2545bca05842b5052adad30fb29fbdfea60b66c5

MPI Driver update - additional merge work.

Ported-from: FreeBSD
Submitted-by: Alexander Polakov
---

diff --git a/sys/dev/disk/mpt/Makefile b/sys/dev/disk/mpt/Makefile
new file mode 100644
index 0000000000..9531782863
--- /dev/null
+++ b/sys/dev/disk/mpt/Makefile
@@ -0,0 +1,10 @@
+# $FreeBSD: src/sys/modules/mpt/Makefile,v 1.3 2008/05/06 20:49:53 jhb Exp $
+
+.PATH: ${.CURDIR}
+
+KMOD=	mpt
+SRCS=	bus_if.h device_if.h pci_if.h \
+	opt_cam.h opt_ddb.h \
+	mpt.c mpt_cam.c mpt_debug.c mpt_pci.c mpt_raid.c mpt_user.c
+
+.include <bsd.kmod.mk>
diff --git a/sys/dev/disk/mpt/mpilib/mpi_inb.h b/sys/dev/disk/mpt/mpilib/mpi_inb.h
new file mode 100644
index 0000000000..4d7e7b2ecb
--- /dev/null
+++ b/sys/dev/disk/mpt/mpilib/mpi_inb.h
@@ -0,0 +1,250 @@
+/* $FreeBSD: src/sys/dev/mpt/mpilib/mpi_inb.h,v 1.1 2006/01/21 00:29:51 mjacob Exp $ */
+/*-
+ * Copyright (c) 2000-2005, LSI Logic Corporation and its contributors.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon including
+ *    a substantially similar Disclaimer requirement for further binary
+ *    redistribution.
+ * 3. Neither the name of the LSI Logic Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
+ * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+/*
+ * Copyright (c) 2003-2004 LSI Logic Corporation.
+ *
+ *
+ *           Name:  mpi_inb.h
+ *          Title:  MPI Inband structures and definitions
+ *  Creation Date:  September 30, 2003
+ *
+ *    mpi_inb.h Version:  01.05.01
+ *
+ *  Version History
+ *  ---------------
+ *
+ *  Date      Version   Description
+ *  --------  --------  ------------------------------------------------------
+ *  05-11-04  01.03.01  Original release.
+ *  08-19-04  01.05.01  Original release for MPI v1.5.
+ * -------------------------------------------------------------------------- + */ + +#ifndef MPI_INB_H +#define MPI_INB_H + +/****************************************************************************** +* +* I n b a n d M e s s a g e s +* +*******************************************************************************/ + + +/****************************************************************************/ +/* Inband Buffer Post Request */ +/****************************************************************************/ + +typedef struct _MSG_INBAND_BUFFER_POST_REQUEST +{ + U8 Reserved1; /* 00h */ + U8 BufferCount; /* 01h */ + U8 ChainOffset; /* 02h */ + U8 Function; /* 03h */ + U16 Reserved2; /* 04h */ + U8 Reserved3; /* 06h */ + U8 MsgFlags; /* 07h */ + U32 MsgContext; /* 08h */ + U32 Reserved4; /* 0Ch */ + SGE_TRANS_SIMPLE_UNION SGL; /* 10h */ +} MSG_INBAND_BUFFER_POST_REQUEST, MPI_POINTER PTR_MSG_INBAND_BUFFER_POST_REQUEST, + MpiInbandBufferPostRequest_t , MPI_POINTER pMpiInbandBufferPostRequest_t; + + +typedef struct _WWN_FC_FORMAT +{ + U64 NodeName; /* 00h */ + U64 PortName; /* 08h */ +} WWN_FC_FORMAT, MPI_POINTER PTR_WWN_FC_FORMAT, + WwnFcFormat_t, MPI_POINTER pWwnFcFormat_t; + +typedef struct _WWN_SAS_FORMAT +{ + U64 WorldWideID; /* 00h */ + U32 Reserved1; /* 08h */ + U32 Reserved2; /* 0Ch */ +} WWN_SAS_FORMAT, MPI_POINTER PTR_WWN_SAS_FORMAT, + WwnSasFormat_t, MPI_POINTER pWwnSasFormat_t; + +typedef union _WWN_INBAND_FORMAT +{ + WWN_FC_FORMAT Fc; + WWN_SAS_FORMAT Sas; +} WWN_INBAND_FORMAT, MPI_POINTER PTR_WWN_INBAND_FORMAT, + WwnInbandFormat, MPI_POINTER pWwnInbandFormat; + + +/* Inband Buffer Post reply message */ + +typedef struct _MSG_INBAND_BUFFER_POST_REPLY +{ + U16 Reserved1; /* 00h */ + U8 MsgLength; /* 02h */ + U8 Function; /* 03h */ + U16 Reserved2; /* 04h */ + U8 Reserved3; /* 06h */ + U8 MsgFlags; /* 07h */ + U32 MsgContext; /* 08h */ + U16 Reserved4; /* 0Ch */ + U16 IOCStatus; /* 0Eh */ + U32 IOCLogInfo; /* 10h */ + U32 TransferLength; /* 14h */ + U32 TransactionContext; /* 18h */ + WWN_INBAND_FORMAT Wwn; /* 1Ch */ + U32 IOCIdentifier[4]; /* 2Ch */ +} MSG_INBAND_BUFFER_POST_REPLY, MPI_POINTER PTR_MSG_INBAND_BUFFER_POST_REPLY, + MpiInbandBufferPostReply_t, MPI_POINTER pMpiInbandBufferPostReply_t; + + +/****************************************************************************/ +/* Inband Send Request */ +/****************************************************************************/ + +typedef struct _MSG_INBAND_SEND_REQUEST +{ + U16 Reserved1; /* 00h */ + U8 ChainOffset; /* 02h */ + U8 Function; /* 03h */ + U16 Reserved2; /* 04h */ + U8 Reserved3; /* 06h */ + U8 MsgFlags; /* 07h */ + U32 MsgContext; /* 08h */ + U32 Reserved4; /* 0Ch */ + WWN_INBAND_FORMAT Wwn; /* 10h */ + U32 Reserved5; /* 20h */ + SGE_IO_UNION SGL; /* 24h */ +} MSG_INBAND_SEND_REQUEST, MPI_POINTER PTR_MSG_INBAND_SEND_REQUEST, + MpiInbandSendRequest_t , MPI_POINTER pMpiInbandSendRequest_t; + + +/* Inband Send reply message */ + +typedef struct _MSG_INBAND_SEND_REPLY +{ + U16 Reserved1; /* 00h */ + U8 MsgLength; /* 02h */ + U8 Function; /* 03h */ + U16 Reserved2; /* 04h */ + U8 Reserved3; /* 06h */ + U8 MsgFlags; /* 07h */ + U32 MsgContext; /* 08h */ + U16 Reserved4; /* 0Ch */ + U16 IOCStatus; /* 0Eh */ + U32 IOCLogInfo; /* 10h */ + U32 ResponseLength; /* 14h */ +} MSG_INBAND_SEND_REPLY, MPI_POINTER PTR_MSG_INBAND_SEND_REPLY, + MpiInbandSendReply_t, MPI_POINTER pMpiInbandSendReply_t; + + +/****************************************************************************/ +/* Inband Response Request */ 
+/****************************************************************************/ + +typedef struct _MSG_INBAND_RSP_REQUEST +{ + U16 Reserved1; /* 00h */ + U8 ChainOffset; /* 02h */ + U8 Function; /* 03h */ + U16 Reserved2; /* 04h */ + U8 Reserved3; /* 06h */ + U8 MsgFlags; /* 07h */ + U32 MsgContext; /* 08h */ + U32 Reserved4; /* 0Ch */ + WWN_INBAND_FORMAT Wwn; /* 10h */ + U32 IOCIdentifier[4]; /* 20h */ + U32 ResponseLength; /* 30h */ + SGE_IO_UNION SGL; /* 34h */ +} MSG_INBAND_RSP_REQUEST, MPI_POINTER PTR_MSG_INBAND_RSP_REQUEST, + MpiInbandRspRequest_t , MPI_POINTER pMpiInbandRspRequest_t; + + +/* Inband Response reply message */ + +typedef struct _MSG_INBAND_RSP_REPLY +{ + U16 Reserved1; /* 00h */ + U8 MsgLength; /* 02h */ + U8 Function; /* 03h */ + U16 Reserved2; /* 04h */ + U8 Reserved3; /* 06h */ + U8 MsgFlags; /* 07h */ + U32 MsgContext; /* 08h */ + U16 Reserved4; /* 0Ch */ + U16 IOCStatus; /* 0Eh */ + U32 IOCLogInfo; /* 10h */ +} MSG_INBAND_RSP_REPLY, MPI_POINTER PTR_MSG_INBAND_RSP_REPLY, + MpiInbandRspReply_t, MPI_POINTER pMpiInbandRspReply_t; + + +/****************************************************************************/ +/* Inband Abort Request */ +/****************************************************************************/ + +typedef struct _MSG_INBAND_ABORT_REQUEST +{ + U8 Reserved1; /* 00h */ + U8 AbortType; /* 01h */ + U8 ChainOffset; /* 02h */ + U8 Function; /* 03h */ + U16 Reserved2; /* 04h */ + U8 Reserved3; /* 06h */ + U8 MsgFlags; /* 07h */ + U32 MsgContext; /* 08h */ + U32 Reserved4; /* 0Ch */ + U32 ContextToAbort; /* 10h */ +} MSG_INBAND_ABORT_REQUEST, MPI_POINTER PTR_MSG_INBAND_ABORT_REQUEST, + MpiInbandAbortRequest_t , MPI_POINTER pMpiInbandAbortRequest_t; + +#define MPI_INBAND_ABORT_TYPE_ALL_BUFFERS (0x00) +#define MPI_INBAND_ABORT_TYPE_EXACT_BUFFER (0x01) +#define MPI_INBAND_ABORT_TYPE_SEND_REQUEST (0x02) +#define MPI_INBAND_ABORT_TYPE_RESPONSE_REQUEST (0x03) + + +/* Inband Abort reply message */ + +typedef struct _MSG_INBAND_ABORT_REPLY +{ + U8 Reserved1; /* 00h */ + U8 AbortType; /* 01h */ + U8 MsgLength; /* 02h */ + U8 Function; /* 03h */ + U16 Reserved2; /* 04h */ + U8 Reserved3; /* 06h */ + U8 MsgFlags; /* 07h */ + U32 MsgContext; /* 08h */ + U16 Reserved4; /* 0Ch */ + U16 IOCStatus; /* 0Eh */ + U32 IOCLogInfo; /* 10h */ +} MSG_INBAND_ABORT_REPLY, MPI_POINTER PTR_MSG_INBAND_ABORT_REPLY, + MpiInbandAbortReply_t, MPI_POINTER pMpiInbandAbortReply_t; + + +#endif diff --git a/sys/dev/disk/mpt/mpilib/mpi_sas.h b/sys/dev/disk/mpt/mpilib/mpi_sas.h new file mode 100644 index 0000000000..75c5e426f6 --- /dev/null +++ b/sys/dev/disk/mpt/mpilib/mpi_sas.h @@ -0,0 +1,295 @@ +/* $FreeBSD: src/sys/dev/mpt/mpilib/mpi_sas.h,v 1.3 2007/06/03 22:58:27 scottl Exp $ */ +/*- + * Copyright (c) 2000-2005, LSI Logic Corporation and its contributors. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon including + * a substantially similar Disclaimer requirement for further binary + * redistribution. + * 3. 
Neither the name of the LSI Logic Corporation nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT + * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Name: mpi_sas.h + * Title: MPI Serial Attached SCSI structures and definitions + * Creation Date: August 19, 2004 + * + * mpi_sas.h Version: 01.05.04 + * + * Version History + * --------------- + * + * Date Version Description + * -------- -------- ------------------------------------------------------ + * 08-19-04 01.05.01 Original release. + * 08-30-05 01.05.02 Added DeviceInfo bit for SEP. + * Added PrimFlags and Primitive field to SAS IO Unit + * Control request, and added a new operation code. + * 03-27-06 01.05.03 Added Force Full Discovery, Transmit Port Select Signal, + * and Remove Device operations to SAS IO Unit Control. + * Added DevHandle field to SAS IO Unit Control request and + * reply. + * 10-11-06 01.05.04 Fixed the name of a define for Operation field of SAS IO + * Unit Control request. + * -------------------------------------------------------------------------- + */ + +#ifndef MPI_SAS_H +#define MPI_SAS_H + + +/* + * Values for SASStatus. + */ +#define MPI_SASSTATUS_SUCCESS (0x00) +#define MPI_SASSTATUS_UNKNOWN_ERROR (0x01) +#define MPI_SASSTATUS_INVALID_FRAME (0x02) +#define MPI_SASSTATUS_UTC_BAD_DEST (0x03) +#define MPI_SASSTATUS_UTC_BREAK_RECEIVED (0x04) +#define MPI_SASSTATUS_UTC_CONNECT_RATE_NOT_SUPPORTED (0x05) +#define MPI_SASSTATUS_UTC_PORT_LAYER_REQUEST (0x06) +#define MPI_SASSTATUS_UTC_PROTOCOL_NOT_SUPPORTED (0x07) +#define MPI_SASSTATUS_UTC_STP_RESOURCES_BUSY (0x08) +#define MPI_SASSTATUS_UTC_WRONG_DESTINATION (0x09) +#define MPI_SASSTATUS_SHORT_INFORMATION_UNIT (0x0A) +#define MPI_SASSTATUS_LONG_INFORMATION_UNIT (0x0B) +#define MPI_SASSTATUS_XFER_RDY_INCORRECT_WRITE_DATA (0x0C) +#define MPI_SASSTATUS_XFER_RDY_REQUEST_OFFSET_ERROR (0x0D) +#define MPI_SASSTATUS_XFER_RDY_NOT_EXPECTED (0x0E) +#define MPI_SASSTATUS_DATA_INCORRECT_DATA_LENGTH (0x0F) +#define MPI_SASSTATUS_DATA_TOO_MUCH_READ_DATA (0x10) +#define MPI_SASSTATUS_DATA_OFFSET_ERROR (0x11) +#define MPI_SASSTATUS_SDSF_NAK_RECEIVED (0x12) +#define MPI_SASSTATUS_SDSF_CONNECTION_FAILED (0x13) +#define MPI_SASSTATUS_INITIATOR_RESPONSE_TIMEOUT (0x14) + + +/* + * Values for the SAS DeviceInfo field used in SAS Device Status Change Event + * data and SAS IO Unit Configuration pages. 
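 *
 * For illustration, a consumer might classify an attached device with a
 * test such as the following (a sketch only; "info" is a hypothetical
 * variable holding this field, the masks are the ones defined below):
 *
 *	(info & MPI_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
 *	     MPI_SAS_DEVICE_INFO_END_DEVICE &&
 *	(info & MPI_SAS_DEVICE_INFO_SSP_TARGET) != 0
 *
 * identifies a directly addressable SSP (serial SCSI) end device, while
 * the MPI_SAS_DEVICE_INFO_SATA_DEVICE bit marks a SATA device reached
 * through the SAS domain.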
+ */ +#define MPI_SAS_DEVICE_INFO_SEP (0x00004000) +#define MPI_SAS_DEVICE_INFO_ATAPI_DEVICE (0x00002000) +#define MPI_SAS_DEVICE_INFO_LSI_DEVICE (0x00001000) +#define MPI_SAS_DEVICE_INFO_DIRECT_ATTACH (0x00000800) +#define MPI_SAS_DEVICE_INFO_SSP_TARGET (0x00000400) +#define MPI_SAS_DEVICE_INFO_STP_TARGET (0x00000200) +#define MPI_SAS_DEVICE_INFO_SMP_TARGET (0x00000100) +#define MPI_SAS_DEVICE_INFO_SATA_DEVICE (0x00000080) +#define MPI_SAS_DEVICE_INFO_SSP_INITIATOR (0x00000040) +#define MPI_SAS_DEVICE_INFO_STP_INITIATOR (0x00000020) +#define MPI_SAS_DEVICE_INFO_SMP_INITIATOR (0x00000010) +#define MPI_SAS_DEVICE_INFO_SATA_HOST (0x00000008) + +#define MPI_SAS_DEVICE_INFO_MASK_DEVICE_TYPE (0x00000007) +#define MPI_SAS_DEVICE_INFO_NO_DEVICE (0x00000000) +#define MPI_SAS_DEVICE_INFO_END_DEVICE (0x00000001) +#define MPI_SAS_DEVICE_INFO_EDGE_EXPANDER (0x00000002) +#define MPI_SAS_DEVICE_INFO_FANOUT_EXPANDER (0x00000003) + + + +/***************************************************************************** +* +* S e r i a l A t t a c h e d S C S I M e s s a g e s +* +*****************************************************************************/ + +/****************************************************************************/ +/* Serial Management Protocol Passthrough Request */ +/****************************************************************************/ + +typedef struct _MSG_SMP_PASSTHROUGH_REQUEST +{ + U8 PassthroughFlags; /* 00h */ + U8 PhysicalPort; /* 01h */ + U8 ChainOffset; /* 02h */ + U8 Function; /* 03h */ + U16 RequestDataLength; /* 04h */ + U8 ConnectionRate; /* 06h */ + U8 MsgFlags; /* 07h */ + U32 MsgContext; /* 08h */ + U32 Reserved1; /* 0Ch */ + U64 SASAddress; /* 10h */ + U32 Reserved2; /* 18h */ + U32 Reserved3; /* 1Ch */ + SGE_SIMPLE_UNION SGL; /* 20h */ +} MSG_SMP_PASSTHROUGH_REQUEST, MPI_POINTER PTR_MSG_SMP_PASSTHROUGH_REQUEST, + SmpPassthroughRequest_t, MPI_POINTER pSmpPassthroughRequest_t; + +/* values for PassthroughFlags field */ +#define MPI_SMP_PT_REQ_PT_FLAGS_IMMEDIATE (0x80) + +/* values for ConnectionRate field */ +#define MPI_SMP_PT_REQ_CONNECT_RATE_NEGOTIATED (0x00) +#define MPI_SMP_PT_REQ_CONNECT_RATE_1_5 (0x08) +#define MPI_SMP_PT_REQ_CONNECT_RATE_3_0 (0x09) + + +/* Serial Management Protocol Passthrough Reply */ +typedef struct _MSG_SMP_PASSTHROUGH_REPLY +{ + U8 PassthroughFlags; /* 00h */ + U8 PhysicalPort; /* 01h */ + U8 MsgLength; /* 02h */ + U8 Function; /* 03h */ + U16 ResponseDataLength; /* 04h */ + U8 Reserved1; /* 06h */ + U8 MsgFlags; /* 07h */ + U32 MsgContext; /* 08h */ + U8 Reserved2; /* 0Ch */ + U8 SASStatus; /* 0Dh */ + U16 IOCStatus; /* 0Eh */ + U32 IOCLogInfo; /* 10h */ + U32 Reserved3; /* 14h */ + U8 ResponseData[4]; /* 18h */ +} MSG_SMP_PASSTHROUGH_REPLY, MPI_POINTER PTR_MSG_SMP_PASSTHROUGH_REPLY, + SmpPassthroughReply_t, MPI_POINTER pSmpPassthroughReply_t; + +#define MPI_SMP_PT_REPLY_PT_FLAGS_IMMEDIATE (0x80) + + +/****************************************************************************/ +/* SATA Passthrough Request */ +/****************************************************************************/ + +typedef struct _MSG_SATA_PASSTHROUGH_REQUEST +{ + U8 TargetID; /* 00h */ + U8 Bus; /* 01h */ + U8 ChainOffset; /* 02h */ + U8 Function; /* 03h */ + U16 PassthroughFlags; /* 04h */ + U8 ConnectionRate; /* 06h */ + U8 MsgFlags; /* 07h */ + U32 MsgContext; /* 08h */ + U32 Reserved1; /* 0Ch */ + U32 Reserved2; /* 10h */ + U32 Reserved3; /* 14h */ + U32 DataLength; /* 18h */ + U8 CommandFIS[20]; /* 1Ch */ + SGE_SIMPLE_UNION SGL; /* 30h */ +} 
MSG_SATA_PASSTHROUGH_REQUEST, MPI_POINTER PTR_MSG_SATA_PASSTHROUGH_REQUEST, + SataPassthroughRequest_t, MPI_POINTER pSataPassthroughRequest_t; + +/* values for PassthroughFlags field */ +#define MPI_SATA_PT_REQ_PT_FLAGS_RESET_DEVICE (0x0200) +#define MPI_SATA_PT_REQ_PT_FLAGS_EXECUTE_DIAG (0x0100) +#define MPI_SATA_PT_REQ_PT_FLAGS_DMA_QUEUED (0x0080) +#define MPI_SATA_PT_REQ_PT_FLAGS_PACKET_COMMAND (0x0040) +#define MPI_SATA_PT_REQ_PT_FLAGS_DMA (0x0020) +#define MPI_SATA_PT_REQ_PT_FLAGS_PIO (0x0010) +#define MPI_SATA_PT_REQ_PT_FLAGS_UNSPECIFIED_VU (0x0004) +#define MPI_SATA_PT_REQ_PT_FLAGS_WRITE (0x0002) +#define MPI_SATA_PT_REQ_PT_FLAGS_READ (0x0001) + +/* values for ConnectionRate field */ +#define MPI_SATA_PT_REQ_CONNECT_RATE_NEGOTIATED (0x00) +#define MPI_SATA_PT_REQ_CONNECT_RATE_1_5 (0x08) +#define MPI_SATA_PT_REQ_CONNECT_RATE_3_0 (0x09) + + +/* SATA Passthrough Reply */ +typedef struct _MSG_SATA_PASSTHROUGH_REPLY +{ + U8 TargetID; /* 00h */ + U8 Bus; /* 01h */ + U8 MsgLength; /* 02h */ + U8 Function; /* 03h */ + U16 PassthroughFlags; /* 04h */ + U8 Reserved1; /* 06h */ + U8 MsgFlags; /* 07h */ + U32 MsgContext; /* 08h */ + U8 Reserved2; /* 0Ch */ + U8 SASStatus; /* 0Dh */ + U16 IOCStatus; /* 0Eh */ + U32 IOCLogInfo; /* 10h */ + U8 StatusFIS[20]; /* 14h */ + U32 StatusControlRegisters; /* 28h */ + U32 TransferCount; /* 2Ch */ +} MSG_SATA_PASSTHROUGH_REPLY, MPI_POINTER PTR_MSG_SATA_PASSTHROUGH_REPLY, + SataPassthroughReply_t, MPI_POINTER pSataPassthroughReply_t; + + + + +/****************************************************************************/ +/* SAS IO Unit Control Request */ +/****************************************************************************/ + +typedef struct _MSG_SAS_IOUNIT_CONTROL_REQUEST +{ + U8 Operation; /* 00h */ + U8 Reserved1; /* 01h */ + U8 ChainOffset; /* 02h */ + U8 Function; /* 03h */ + U16 DevHandle; /* 04h */ + U8 Reserved3; /* 06h */ + U8 MsgFlags; /* 07h */ + U32 MsgContext; /* 08h */ + U8 TargetID; /* 0Ch */ + U8 Bus; /* 0Dh */ + U8 PhyNum; /* 0Eh */ + U8 PrimFlags; /* 0Fh */ + U32 Primitive; /* 10h */ + U64 SASAddress; /* 14h */ + U32 Reserved4; /* 1Ch */ +} MSG_SAS_IOUNIT_CONTROL_REQUEST, MPI_POINTER PTR_MSG_SAS_IOUNIT_CONTROL_REQUEST, + SasIoUnitControlRequest_t, MPI_POINTER pSasIoUnitControlRequest_t; + +/* values for the Operation field */ +#define MPI_SAS_OP_CLEAR_NOT_PRESENT (0x01) +#define MPI_SAS_OP_CLEAR_ALL_PERSISTENT (0x02) +#define MPI_SAS_OP_PHY_LINK_RESET (0x06) +#define MPI_SAS_OP_PHY_HARD_RESET (0x07) +#define MPI_SAS_OP_PHY_CLEAR_ERROR_LOG (0x08) +#define MPI_SAS_OP_MAP_CURRENT (0x09) +#define MPI_SAS_OP_SEND_PRIMITIVE (0x0A) +#define MPI_SAS_OP_FORCE_FULL_DISCOVERY (0x0B) +#define MPI_SAS_OP_TRANSMIT_PORT_SELECT_SIGNAL (0x0C) +#define MPI_SAS_OP_TRANSMIT_REMOVE_DEVICE (0x0D) /* obsolete name */ +#define MPI_SAS_OP_REMOVE_DEVICE (0x0D) + +/* values for the PrimFlags field */ +#define MPI_SAS_PRIMFLAGS_SINGLE (0x08) +#define MPI_SAS_PRIMFLAGS_TRIPLE (0x02) +#define MPI_SAS_PRIMFLAGS_REDUNDANT (0x01) + + +/* SAS IO Unit Control Reply */ +typedef struct _MSG_SAS_IOUNIT_CONTROL_REPLY +{ + U8 Operation; /* 00h */ + U8 Reserved1; /* 01h */ + U8 MsgLength; /* 02h */ + U8 Function; /* 03h */ + U16 DevHandle; /* 04h */ + U8 Reserved3; /* 06h */ + U8 MsgFlags; /* 07h */ + U32 MsgContext; /* 08h */ + U16 Reserved4; /* 0Ch */ + U16 IOCStatus; /* 0Eh */ + U32 IOCLogInfo; /* 10h */ +} MSG_SAS_IOUNIT_CONTROL_REPLY, MPI_POINTER PTR_MSG_SAS_IOUNIT_CONTROL_REPLY, + SasIoUnitControlReply_t, MPI_POINTER pSasIoUnitControlReply_t; + +#endif diff --git 
a/sys/dev/disk/mpt/mpilib/mpi_tool.h b/sys/dev/disk/mpt/mpilib/mpi_tool.h
new file mode 100644
index 0000000000..09517b7f22
--- /dev/null
+++ b/sys/dev/disk/mpt/mpilib/mpi_tool.h
@@ -0,0 +1,380 @@
+/* $FreeBSD: src/sys/dev/mpt/mpilib/mpi_tool.h,v 1.1 2006/01/21 00:29:51 mjacob Exp $ */
+/*-
+ * Copyright (c) 2000-2005, LSI Logic Corporation and its contributors.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon including
+ *    a substantially similar Disclaimer requirement for further binary
+ *    redistribution.
+ * 3. Neither the name of the LSI Logic Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
+ * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ *           Name:  mpi_tool.h
+ *          Title:  MPI Toolbox structures and definitions
+ *  Creation Date:  July 30, 2001
+ *
+ *    mpi_tool.h Version:  01.05.03
+ *
+ *  Version History
+ *  ---------------
+ *
+ *  Date      Version   Description
+ *  --------  --------  ------------------------------------------------------
+ *  08-08-01  01.02.01  Original release.
+ *  08-29-01  01.02.02  Added DIAG_DATA_UPLOAD_HEADER and related defines.
+ *  01-16-04  01.02.03  Added defines and structures for new tools
+ *                      MPI_TOOLBOX_ISTWI_READ_WRITE_TOOL and
+ *                      MPI_TOOLBOX_FC_MANAGEMENT_TOOL.
+ *  04-29-04  01.02.04  Added message structures for Diagnostic Buffer Post and
+ *                      Diagnostic Release requests and replies.
+ *  05-11-04  01.03.01  Original release for MPI v1.3.
+ *  08-19-04  01.05.01  Original release for MPI v1.5.
+ *  10-06-04  01.05.02  Added define for MPI_DIAG_BUF_TYPE_COUNT.
+ *  02-09-05  01.05.03  Added frame size option to FC management tool.
+ *                      Added Beacon tool to the Toolbox.
+ * -------------------------------------------------------------------------- + */ + +#ifndef MPI_TOOL_H +#define MPI_TOOL_H + +#define MPI_TOOLBOX_CLEAN_TOOL (0x00) +#define MPI_TOOLBOX_MEMORY_MOVE_TOOL (0x01) +#define MPI_TOOLBOX_DIAG_DATA_UPLOAD_TOOL (0x02) +#define MPI_TOOLBOX_ISTWI_READ_WRITE_TOOL (0x03) +#define MPI_TOOLBOX_FC_MANAGEMENT_TOOL (0x04) +#define MPI_TOOLBOX_BEACON_TOOL (0x05) + + +/****************************************************************************/ +/* Toolbox reply */ +/****************************************************************************/ + +typedef struct _MSG_TOOLBOX_REPLY +{ + U8 Tool; /* 00h */ + U8 Reserved; /* 01h */ + U8 MsgLength; /* 02h */ + U8 Function; /* 03h */ + U16 Reserved1; /* 04h */ + U8 Reserved2; /* 06h */ + U8 MsgFlags; /* 07h */ + U32 MsgContext; /* 08h */ + U16 Reserved3; /* 0Ch */ + U16 IOCStatus; /* 0Eh */ + U32 IOCLogInfo; /* 10h */ +} MSG_TOOLBOX_REPLY, MPI_POINTER PTR_MSG_TOOLBOX_REPLY, + ToolboxReply_t, MPI_POINTER pToolboxReply_t; + + +/****************************************************************************/ +/* Toolbox Clean Tool request */ +/****************************************************************************/ + +typedef struct _MSG_TOOLBOX_CLEAN_REQUEST +{ + U8 Tool; /* 00h */ + U8 Reserved; /* 01h */ + U8 ChainOffset; /* 02h */ + U8 Function; /* 03h */ + U16 Reserved1; /* 04h */ + U8 Reserved2; /* 06h */ + U8 MsgFlags; /* 07h */ + U32 MsgContext; /* 08h */ + U32 Flags; /* 0Ch */ +} MSG_TOOLBOX_CLEAN_REQUEST, MPI_POINTER PTR_MSG_TOOLBOX_CLEAN_REQUEST, + ToolboxCleanRequest_t, MPI_POINTER pToolboxCleanRequest_t; + +#define MPI_TOOLBOX_CLEAN_NVSRAM (0x00000001) +#define MPI_TOOLBOX_CLEAN_SEEPROM (0x00000002) +#define MPI_TOOLBOX_CLEAN_FLASH (0x00000004) +#define MPI_TOOLBOX_CLEAN_BOOTLOADER (0x04000000) +#define MPI_TOOLBOX_CLEAN_FW_BACKUP (0x08000000) +#define MPI_TOOLBOX_CLEAN_FW_CURRENT (0x10000000) +#define MPI_TOOLBOX_CLEAN_OTHER_PERSIST_PAGES (0x20000000) +#define MPI_TOOLBOX_CLEAN_PERSIST_MANUFACT_PAGES (0x40000000) +#define MPI_TOOLBOX_CLEAN_BOOT_SERVICES (0x80000000) + + +/****************************************************************************/ +/* Toolbox Memory Move request */ +/****************************************************************************/ + +typedef struct _MSG_TOOLBOX_MEM_MOVE_REQUEST +{ + U8 Tool; /* 00h */ + U8 Reserved; /* 01h */ + U8 ChainOffset; /* 02h */ + U8 Function; /* 03h */ + U16 Reserved1; /* 04h */ + U8 Reserved2; /* 06h */ + U8 MsgFlags; /* 07h */ + U32 MsgContext; /* 08h */ + SGE_SIMPLE_UNION SGL; /* 0Ch */ +} MSG_TOOLBOX_MEM_MOVE_REQUEST, MPI_POINTER PTR_MSG_TOOLBOX_MEM_MOVE_REQUEST, + ToolboxMemMoveRequest_t, MPI_POINTER pToolboxMemMoveRequest_t; + + +/****************************************************************************/ +/* Toolbox Diagnostic Data Upload request */ +/****************************************************************************/ + +typedef struct _MSG_TOOLBOX_DIAG_DATA_UPLOAD_REQUEST +{ + U8 Tool; /* 00h */ + U8 Reserved; /* 01h */ + U8 ChainOffset; /* 02h */ + U8 Function; /* 03h */ + U16 Reserved1; /* 04h */ + U8 Reserved2; /* 06h */ + U8 MsgFlags; /* 07h */ + U32 MsgContext; /* 08h */ + U32 Flags; /* 0Ch */ + U32 Reserved3; /* 10h */ + SGE_SIMPLE_UNION SGL; /* 14h */ +} MSG_TOOLBOX_DIAG_DATA_UPLOAD_REQUEST, MPI_POINTER PTR_MSG_TOOLBOX_DIAG_DATA_UPLOAD_REQUEST, + ToolboxDiagDataUploadRequest_t, MPI_POINTER pToolboxDiagDataUploadRequest_t; + +typedef struct _DIAG_DATA_UPLOAD_HEADER +{ + U32 DiagDataLength; /* 00h */ + U8 FormatCode; 
/* 04h */ + U8 Reserved; /* 05h */ + U16 Reserved1; /* 06h */ +} DIAG_DATA_UPLOAD_HEADER, MPI_POINTER PTR_DIAG_DATA_UPLOAD_HEADER, + DiagDataUploadHeader_t, MPI_POINTER pDiagDataUploadHeader_t; + +#define MPI_TB_DIAG_FORMAT_SCSI_PRINTF_1 (0x01) +#define MPI_TB_DIAG_FORMAT_SCSI_2 (0x02) +#define MPI_TB_DIAG_FORMAT_SCSI_3 (0x03) +#define MPI_TB_DIAG_FORMAT_FC_TRACE_1 (0x04) + + +/****************************************************************************/ +/* Toolbox ISTWI Read Write request */ +/****************************************************************************/ + +typedef struct _MSG_TOOLBOX_ISTWI_READ_WRITE_REQUEST +{ + U8 Tool; /* 00h */ + U8 Reserved; /* 01h */ + U8 ChainOffset; /* 02h */ + U8 Function; /* 03h */ + U16 Reserved1; /* 04h */ + U8 Reserved2; /* 06h */ + U8 MsgFlags; /* 07h */ + U32 MsgContext; /* 08h */ + U8 Flags; /* 0Ch */ + U8 BusNum; /* 0Dh */ + U16 Reserved3; /* 0Eh */ + U8 NumAddressBytes; /* 10h */ + U8 Reserved4; /* 11h */ + U16 DataLength; /* 12h */ + U8 DeviceAddr; /* 14h */ + U8 Addr1; /* 15h */ + U8 Addr2; /* 16h */ + U8 Addr3; /* 17h */ + U32 Reserved5; /* 18h */ + SGE_SIMPLE_UNION SGL; /* 1Ch */ +} MSG_TOOLBOX_ISTWI_READ_WRITE_REQUEST, MPI_POINTER PTR_MSG_TOOLBOX_ISTWI_READ_WRITE_REQUEST, + ToolboxIstwiReadWriteRequest_t, MPI_POINTER pToolboxIstwiReadWriteRequest_t; + +#define MPI_TB_ISTWI_FLAGS_WRITE (0x00) +#define MPI_TB_ISTWI_FLAGS_READ (0x01) + + +/****************************************************************************/ +/* Toolbox FC Management request */ +/****************************************************************************/ + +/* ActionInfo for Bus and TargetId */ +typedef struct _MPI_TB_FC_MANAGE_BUS_TID_AI +{ + U16 Reserved; /* 00h */ + U8 Bus; /* 02h */ + U8 TargetId; /* 03h */ +} MPI_TB_FC_MANAGE_BUS_TID_AI, MPI_POINTER PTR_MPI_TB_FC_MANAGE_BUS_TID_AI, + MpiTbFcManageBusTidAi_t, MPI_POINTER pMpiTbFcManageBusTidAi_t; + +/* ActionInfo for port identifier */ +typedef struct _MPI_TB_FC_MANAGE_PID_AI +{ + U32 PortIdentifier; /* 00h */ +} MPI_TB_FC_MANAGE_PID_AI, MPI_POINTER PTR_MPI_TB_FC_MANAGE_PID_AI, + MpiTbFcManagePidAi_t, MPI_POINTER pMpiTbFcManagePidAi_t; + +/* ActionInfo for set max frame size */ +typedef struct _MPI_TB_FC_MANAGE_FRAME_SIZE_AI +{ + U16 FrameSize; /* 00h */ + U8 PortNum; /* 02h */ + U8 Reserved1; /* 03h */ +} MPI_TB_FC_MANAGE_FRAME_SIZE_AI, MPI_POINTER PTR_MPI_TB_FC_MANAGE_FRAME_SIZE_AI, + MpiTbFcManageFrameSizeAi_t, MPI_POINTER pMpiTbFcManageFrameSizeAi_t; + +/* union of ActionInfo */ +typedef union _MPI_TB_FC_MANAGE_AI_UNION +{ + MPI_TB_FC_MANAGE_BUS_TID_AI BusTid; + MPI_TB_FC_MANAGE_PID_AI Port; + MPI_TB_FC_MANAGE_FRAME_SIZE_AI FrameSize; +} MPI_TB_FC_MANAGE_AI_UNION, MPI_POINTER PTR_MPI_TB_FC_MANAGE_AI_UNION, + MpiTbFcManageAiUnion_t, MPI_POINTER pMpiTbFcManageAiUnion_t; + +typedef struct _MSG_TOOLBOX_FC_MANAGE_REQUEST +{ + U8 Tool; /* 00h */ + U8 Reserved; /* 01h */ + U8 ChainOffset; /* 02h */ + U8 Function; /* 03h */ + U16 Reserved1; /* 04h */ + U8 Reserved2; /* 06h */ + U8 MsgFlags; /* 07h */ + U32 MsgContext; /* 08h */ + U8 Action; /* 0Ch */ + U8 Reserved3; /* 0Dh */ + U16 Reserved4; /* 0Eh */ + MPI_TB_FC_MANAGE_AI_UNION ActionInfo; /* 10h */ +} MSG_TOOLBOX_FC_MANAGE_REQUEST, MPI_POINTER PTR_MSG_TOOLBOX_FC_MANAGE_REQUEST, + ToolboxFcManageRequest_t, MPI_POINTER pToolboxFcManageRequest_t; + +/* defines for the Action field */ +#define MPI_TB_FC_MANAGE_ACTION_DISC_ALL (0x00) +#define MPI_TB_FC_MANAGE_ACTION_DISC_PID (0x01) +#define MPI_TB_FC_MANAGE_ACTION_DISC_BUS_TID (0x02) +#define 
MPI_TB_FC_MANAGE_ACTION_SET_MAX_FRAME_SIZE (0x03) + + +/****************************************************************************/ +/* Toolbox Beacon Tool request */ +/****************************************************************************/ + +typedef struct _MSG_TOOLBOX_BEACON_REQUEST +{ + U8 Tool; /* 00h */ + U8 Reserved; /* 01h */ + U8 ChainOffset; /* 02h */ + U8 Function; /* 03h */ + U16 Reserved1; /* 04h */ + U8 Reserved2; /* 06h */ + U8 MsgFlags; /* 07h */ + U32 MsgContext; /* 08h */ + U8 ConnectNum; /* 0Ch */ + U8 PortNum; /* 0Dh */ + U8 Reserved3; /* 0Eh */ + U8 Flags; /* 0Fh */ +} MSG_TOOLBOX_BEACON_REQUEST, MPI_POINTER PTR_MSG_TOOLBOX_BEACON_REQUEST, + ToolboxBeaconRequest_t, MPI_POINTER pToolboxBeaconRequest_t; + +#define MPI_TOOLBOX_FLAGS_BEACON_MODE_OFF (0x00) +#define MPI_TOOLBOX_FLAGS_BEACON_MODE_ON (0x01) + + +/****************************************************************************/ +/* Diagnostic Buffer Post request */ +/****************************************************************************/ + +typedef struct _MSG_DIAG_BUFFER_POST_REQUEST +{ + U8 TraceLevel; /* 00h */ + U8 BufferType; /* 01h */ + U8 ChainOffset; /* 02h */ + U8 Function; /* 03h */ + U16 Reserved1; /* 04h */ + U8 Reserved2; /* 06h */ + U8 MsgFlags; /* 07h */ + U32 MsgContext; /* 08h */ + U32 ExtendedType; /* 0Ch */ + U32 BufferLength; /* 10h */ + U32 ProductSpecific[4]; /* 14h */ + U32 Reserved3; /* 24h */ + U64 BufferAddress; /* 28h */ +} MSG_DIAG_BUFFER_POST_REQUEST, MPI_POINTER PTR_MSG_DIAG_BUFFER_POST_REQUEST, + DiagBufferPostRequest_t, MPI_POINTER pDiagBufferPostRequest_t; + +#define MPI_DIAG_BUF_TYPE_TRACE (0x00) +#define MPI_DIAG_BUF_TYPE_SNAPSHOT (0x01) +#define MPI_DIAG_BUF_TYPE_EXTENDED (0x02) +/* count of the number of buffer types */ +#define MPI_DIAG_BUF_TYPE_COUNT (0x03) + +#define MPI_DIAG_EXTENDED_QTAG (0x00000001) + + +/* Diagnostic Buffer Post reply */ +typedef struct _MSG_DIAG_BUFFER_POST_REPLY +{ + U8 Reserved1; /* 00h */ + U8 BufferType; /* 01h */ + U8 MsgLength; /* 02h */ + U8 Function; /* 03h */ + U16 Reserved2; /* 04h */ + U8 Reserved3; /* 06h */ + U8 MsgFlags; /* 07h */ + U32 MsgContext; /* 08h */ + U16 Reserved4; /* 0Ch */ + U16 IOCStatus; /* 0Eh */ + U32 IOCLogInfo; /* 10h */ + U32 TransferLength; /* 14h */ +} MSG_DIAG_BUFFER_POST_REPLY, MPI_POINTER PTR_MSG_DIAG_BUFFER_POST_REPLY, + DiagBufferPostReply_t, MPI_POINTER pDiagBufferPostReply_t; + + +/****************************************************************************/ +/* Diagnostic Release request */ +/****************************************************************************/ + +typedef struct _MSG_DIAG_RELEASE_REQUEST +{ + U8 Reserved1; /* 00h */ + U8 BufferType; /* 01h */ + U8 ChainOffset; /* 02h */ + U8 Function; /* 03h */ + U16 Reserved2; /* 04h */ + U8 Reserved3; /* 06h */ + U8 MsgFlags; /* 07h */ + U32 MsgContext; /* 08h */ +} MSG_DIAG_RELEASE_REQUEST, MPI_POINTER PTR_MSG_DIAG_RELEASE_REQUEST, + DiagReleaseRequest_t, MPI_POINTER pDiagReleaseRequest_t; + + +/* Diagnostic Release reply */ +typedef struct _MSG_DIAG_RELEASE_REPLY +{ + U8 Reserved1; /* 00h */ + U8 BufferType; /* 01h */ + U8 MsgLength; /* 02h */ + U8 Function; /* 03h */ + U16 Reserved2; /* 04h */ + U8 Reserved3; /* 06h */ + U8 MsgFlags; /* 07h */ + U32 MsgContext; /* 08h */ + U16 Reserved4; /* 0Ch */ + U16 IOCStatus; /* 0Eh */ + U32 IOCLogInfo; /* 10h */ +} MSG_DIAG_RELEASE_REPLY, MPI_POINTER PTR_MSG_DIAG_RELEASE_REPLY, + DiagReleaseReply_t, MPI_POINTER pDiagReleaseReply_t; + + +#endif diff --git a/sys/dev/disk/mpt/mpt.c 
b/sys/dev/disk/mpt/mpt.c index 7abff635b2..bd453d784d 100644 --- a/sys/dev/disk/mpt/mpt.c +++ b/sys/dev/disk/mpt/mpt.c @@ -1896,7 +1896,7 @@ mpt_read_config_info_ioc(struct mpt_softc *mpt) hdr.PageNumber, hdr.PageType); len = hdr.PageLength * sizeof(uint32_t); - mpt->ioc_page2 = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); + mpt->ioc_page2 = kmalloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); if (mpt->ioc_page2 == NULL) { mpt_prt(mpt, "unable to allocate memory for IOC page 2\n"); mpt_raid_free_mem(mpt); @@ -1961,7 +1961,7 @@ mpt_read_config_info_ioc(struct mpt_softc *mpt) } len = mpt->ioc_page2->MaxVolumes * sizeof(struct mpt_raid_volume); - mpt->raid_volumes = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); + mpt->raid_volumes = kmalloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); if (mpt->raid_volumes == NULL) { mpt_prt(mpt, "Could not allocate RAID volume data\n"); mpt_raid_free_mem(mpt); @@ -1980,7 +1980,7 @@ mpt_read_config_info_ioc(struct mpt_softc *mpt) for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) { mpt_raid = &mpt->raid_volumes[i]; mpt_raid->config_page = - malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); + kmalloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); if (mpt_raid->config_page == NULL) { mpt_prt(mpt, "Could not allocate RAID page data\n"); mpt_raid_free_mem(mpt); @@ -1990,7 +1990,7 @@ mpt_read_config_info_ioc(struct mpt_softc *mpt) mpt->raid_page0_len = len; len = mpt->ioc_page2->MaxPhysDisks * sizeof(struct mpt_raid_disk); - mpt->raid_disks = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); + mpt->raid_disks = kmalloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); if (mpt->raid_disks == NULL) { mpt_prt(mpt, "Could not allocate RAID disk data\n"); mpt_raid_free_mem(mpt); @@ -2012,7 +2012,7 @@ mpt_read_config_info_ioc(struct mpt_softc *mpt) hdr.PageVersion, hdr.PageLength, hdr.PageNumber, hdr.PageType); len = hdr.PageLength * sizeof(uint32_t); - mpt->ioc_page3 = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); + mpt->ioc_page3 = kmalloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); if (mpt->ioc_page3 == NULL) { mpt_prt(mpt, "unable to allocate memory for IOC page 3\n"); mpt_raid_free_mem(mpt); @@ -2564,7 +2564,7 @@ mpt_configure_ioc(struct mpt_softc *mpt, int tn, int needreset) mpt->ioc_facts.FWImageSize, mpt->ioc_facts.Flags); len = mpt->ioc_facts.NumberOfPorts * sizeof (MSG_PORT_FACTS_REPLY); - mpt->port_facts = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); + mpt->port_facts = kmalloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); if (mpt->port_facts == NULL) { mpt_prt(mpt, "unable to allocate memory for port facts\n"); return (ENOMEM); @@ -2625,7 +2625,7 @@ mpt_configure_ioc(struct mpt_softc *mpt, int tn, int needreset) if (error != MPT_OK) { mpt_prt(mpt, "mpt_get_portfacts on port %d failed\n", port); - free(mpt->port_facts, M_DEVBUF); + kfree(mpt->port_facts, M_DEVBUF); mpt->port_facts = NULL; return (mpt_configure_ioc(mpt, tn++, 1)); } diff --git a/sys/dev/disk/mpt/mpt.h b/sys/dev/disk/mpt/mpt.h index 45c2323aef..375ea09f87 100644 --- a/sys/dev/disk/mpt/mpt.h +++ b/sys/dev/disk/mpt/mpt.h @@ -110,6 +110,7 @@ #include #include #include +#include #else #include #include @@ -254,12 +255,17 @@ struct mpt_map_info { void mpt_map_rquest(void *, bus_dma_segment_t *, int, int); /* **************************** NewBUS interrupt Crock ************************/ +#ifdef __DragonFly__ +#define mpt_setup_intr(d, i, f, U, if, ifa, hp) \ + bus_setup_intr(d, i, f, if, ifa, hp, NULL) +#else #if __FreeBSD_version < 700031 #define mpt_setup_intr(d, i, f, U, if, ifa, hp) \ bus_setup_intr(d, i, f, if, ifa, hp) #else #define mpt_setup_intr bus_setup_intr #endif +#endif /* 
**************************** NewBUS CAM Support ****************************/ #if __FreeBSD_version < 700049 @@ -284,7 +290,7 @@ void mpt_map_rquest(void *, bus_dma_segment_t *, int, int); #define mpt_kthread_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg) \ kthread_create(func, farg, proc_ptr, fmtstr, arg) #define mpt_kthread_exit(status) \ - kthread_exit(status) + kthread_exit() #endif /****************************** Timer Facilities ******************************/ @@ -696,7 +702,7 @@ struct mpt_softc { u_int raid_mwce_setting; u_int raid_queue_depth; u_int raid_nonopt_volumes; - struct proc *raid_thread; + thread_t raid_thread; struct callout raid_timer; /* @@ -754,7 +760,7 @@ struct mpt_softc { struct cam_sim *phydisk_sim; struct cam_path *phydisk_path; - struct proc *recovery_thread; + thread_t recovery_thread; request_t *tmf_req; /* @@ -819,13 +825,29 @@ mpt_assign_serno(struct mpt_softc *mpt, request_t *req) /***************************** Locking Primitives *****************************/ #ifdef __DragonFly__ +#define PUSER 0 #define MPT_IFLAGS 0 #define MPT_LOCK(mpt) crit_enter() #define MPT_UNLOCK(mpt) crit_exit() #define MPT_LOCK_SETUP #define MPT_LOCK_DESTROY #define MPT_LOCK_ASSERT +#define MPTLOCK_2_CAMLOCK MPT_UNLOCK +#define CAMLOCK_2_MPTLOCK MPT_LOCK +#define splx(s) +#define splsoftvm() 0 +static __inline int +mpt_sleep(struct mpt_softc *mpt, void *ident, int priority, + const char *wmesg, int timo) { + int error; + error = tsleep(ident, 0, wmesg, timo); + return(error); +} #endif +#define mpt_req_timeout(req, ticks, func, arg) \ + callout_reset(&(req)->callout, (ticks), (func), (arg)); +#define mpt_req_untimeout(req, func, arg) \ + callout_stop(&(req)->callout) #if 0 #if __FreeBSD_version < 500000 #define MPT_IFLAGS INTR_TYPE_CAM diff --git a/sys/dev/disk/mpt/mpt_cam.c b/sys/dev/disk/mpt/mpt_cam.c new file mode 100644 index 0000000000..56dae250f7 --- /dev/null +++ b/sys/dev/disk/mpt/mpt_cam.c @@ -0,0 +1,5547 @@ +/*- + * FreeBSD/CAM specific routines for LSI '909 FC adapters. + * FreeBSD Version. + * + * Copyright (c) 2000, 2001 by Greg Ansley + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice immediately at the beginning of the file, without modification, + * this list of conditions, and the following disclaimer. + * 2. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR + * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ +/*- + * Copyright (c) 2002, 2006 by Matthew Jacob + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon including + * a substantially similar Disclaimer requirement for further binary + * redistribution. + * 3. Neither the names of the above listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT + * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Support from Chris Ellsworth in order to make SAS adapters work + * is gratefully acknowledged. + * + * Support from LSI-Logic has also gone a great deal toward making this a + * workable subsystem and is gratefully acknowledged. + */ +/*- + * Copyright (c) 2004, Avid Technology, Inc. and its contributors. + * Copyright (c) 2005, WHEEL Sp. z o.o. + * Copyright (c) 2004, 2005 Justin T. Gibbs + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon including + * a substantially similar Disclaimer requirement for further binary + * redistribution. + * 3. Neither the names of the above listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
+ * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * $FreeBSD: src/sys/dev/mpt/mpt_cam.c,v 1.68 2009/07/02 00:43:10 delphij Exp $
+ */
+#include
+
+#include <dev/disk/mpt/mpt.h>
+#include <dev/disk/mpt/mpt_cam.h>
+#include <dev/disk/mpt/mpt_raid.h>
+
+#include "dev/disk/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
+#include "dev/disk/mpt/mpilib/mpi_init.h"
+#include "dev/disk/mpt/mpilib/mpi_targ.h"
+#include "dev/disk/mpt/mpilib/mpi_fc.h"
+#include "dev/disk/mpt/mpilib/mpi_sas.h"
+#if __FreeBSD_version >= 500000
+#include <sys/sysctl.h>
+#endif
+#include <sys/callout.h>
+#include <sys/kthread.h>
+
+#if __FreeBSD_version >= 700025 || defined(__DragonFly__)
+#ifndef CAM_NEW_TRAN_CODE
+#define CAM_NEW_TRAN_CODE 1
+#endif
+#endif
+
+static void mpt_poll(struct cam_sim *);
+static timeout_t mpt_timeout;
+static void mpt_action(struct cam_sim *, union ccb *);
+static int
+mpt_get_spi_settings(struct mpt_softc *, struct ccb_trans_settings *);
+static void mpt_setwidth(struct mpt_softc *, int, int);
+static void mpt_setsync(struct mpt_softc *, int, int, int);
+static int mpt_update_spi_config(struct mpt_softc *, int);
+static void mpt_calc_geometry(struct ccb_calc_geometry *ccg, int extended);
+
+static mpt_reply_handler_t mpt_scsi_reply_handler;
+static mpt_reply_handler_t mpt_scsi_tmf_reply_handler;
+static mpt_reply_handler_t mpt_fc_els_reply_handler;
+static int mpt_scsi_reply_frame_handler(struct mpt_softc *, request_t *,
+					MSG_DEFAULT_REPLY *);
+static int mpt_bus_reset(struct mpt_softc *, target_id_t, lun_id_t, int);
+static int mpt_fc_reset_link(struct mpt_softc *, int);
+
+static int mpt_spawn_recovery_thread(struct mpt_softc *mpt);
+static void mpt_terminate_recovery_thread(struct mpt_softc *mpt);
+static void mpt_recovery_thread(void *arg);
+static void mpt_recover_commands(struct mpt_softc *mpt);
+
+static int mpt_scsi_send_tmf(struct mpt_softc *, u_int, u_int, u_int,
+    u_int, u_int, u_int, int);
+
+static void mpt_fc_post_els(struct mpt_softc *mpt, request_t *, int);
+static void mpt_post_target_command(struct mpt_softc *, request_t *, int);
+static int mpt_add_els_buffers(struct mpt_softc *mpt);
+static int mpt_add_target_commands(struct mpt_softc *mpt);
+static int mpt_enable_lun(struct mpt_softc *, target_id_t, lun_id_t);
+static int mpt_disable_lun(struct mpt_softc *, target_id_t, lun_id_t);
+static void mpt_target_start_io(struct mpt_softc *, union ccb *);
+static cam_status mpt_abort_target_ccb(struct mpt_softc *, union ccb *);
+static int mpt_abort_target_cmd(struct mpt_softc *, request_t *);
+static void mpt_scsi_tgt_status(struct mpt_softc *, union ccb *, request_t *,
+    uint8_t, uint8_t const *);
+static void
+mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *, request_t *, mpt_task_mgmt_t,
+    tgt_resource_t *, int);
+static void mpt_tgt_dump_tgt_state(struct mpt_softc *, request_t *);
+static void mpt_tgt_dump_req_state(struct mpt_softc *, request_t *);
+static mpt_reply_handler_t mpt_scsi_tgt_reply_handler;
+static mpt_reply_handler_t mpt_sata_pass_reply_handler;
+
+static uint32_t scsi_io_handler_id = MPT_HANDLER_ID_NONE;
+static uint32_t scsi_tmf_handler_id =
MPT_HANDLER_ID_NONE; +static uint32_t fc_els_handler_id = MPT_HANDLER_ID_NONE; +static uint32_t sata_pass_handler_id = MPT_HANDLER_ID_NONE; + +static mpt_probe_handler_t mpt_cam_probe; +static mpt_attach_handler_t mpt_cam_attach; +static mpt_enable_handler_t mpt_cam_enable; +static mpt_ready_handler_t mpt_cam_ready; +static mpt_event_handler_t mpt_cam_event; +static mpt_reset_handler_t mpt_cam_ioc_reset; +static mpt_detach_handler_t mpt_cam_detach; + +static struct mpt_personality mpt_cam_personality = +{ + .name = "mpt_cam", + .probe = mpt_cam_probe, + .attach = mpt_cam_attach, + .enable = mpt_cam_enable, + .ready = mpt_cam_ready, + .event = mpt_cam_event, + .reset = mpt_cam_ioc_reset, + .detach = mpt_cam_detach, +}; + +DECLARE_MPT_PERSONALITY(mpt_cam, SI_ORDER_SECOND); +MODULE_DEPEND(mpt_cam, cam, 1, 1, 1); + +int mpt_enable_sata_wc = -1; +TUNABLE_INT("hw.mpt.enable_sata_wc", &mpt_enable_sata_wc); + +int +mpt_cam_probe(struct mpt_softc *mpt) +{ + int role; + + /* + * Only attach to nodes that support the initiator or target role + * (or want to) or have RAID physical devices that need CAM pass-thru + * support. + */ + if (mpt->do_cfg_role) { + role = mpt->cfg_role; + } else { + role = mpt->role; + } + if ((role & (MPT_ROLE_TARGET|MPT_ROLE_INITIATOR)) != 0 || + (mpt->ioc_page2 != NULL && mpt->ioc_page2->MaxPhysDisks != 0)) { + return (0); + } + return (ENODEV); +} + +int +mpt_cam_attach(struct mpt_softc *mpt) +{ + struct cam_devq *devq; + mpt_handler_t handler; + int maxq; + int error; + + MPT_LOCK(mpt); + TAILQ_INIT(&mpt->request_timeout_list); + maxq = (mpt->ioc_facts.GlobalCredits < MPT_MAX_REQUESTS(mpt))? + mpt->ioc_facts.GlobalCredits : MPT_MAX_REQUESTS(mpt); + + handler.reply_handler = mpt_scsi_reply_handler; + error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler, + &scsi_io_handler_id); + if (error != 0) { + MPT_UNLOCK(mpt); + goto cleanup; + } + + handler.reply_handler = mpt_scsi_tmf_reply_handler; + error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler, + &scsi_tmf_handler_id); + if (error != 0) { + MPT_UNLOCK(mpt); + goto cleanup; + } + + /* + * If we're fibre channel and could support target mode, we register + * an ELS reply handler and give it resources. + */ + if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) { + handler.reply_handler = mpt_fc_els_reply_handler; + error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler, + &fc_els_handler_id); + if (error != 0) { + MPT_UNLOCK(mpt); + goto cleanup; + } + if (mpt_add_els_buffers(mpt) == FALSE) { + error = ENOMEM; + MPT_UNLOCK(mpt); + goto cleanup; + } + maxq -= mpt->els_cmds_allocated; + } + + /* + * If we support target mode, we register a reply handler for it, + * but don't add command resources until we actually enable target + * mode. + */ + if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) { + handler.reply_handler = mpt_scsi_tgt_reply_handler; + error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler, + &mpt->scsi_tgt_handler_id); + if (error != 0) { + MPT_UNLOCK(mpt); + goto cleanup; + } + } + + if (mpt->is_sas) { + handler.reply_handler = mpt_sata_pass_reply_handler; + error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler, + &sata_pass_handler_id); + if (error != 0) { + MPT_UNLOCK(mpt); + goto cleanup; + } + } + + /* + * We keep one request reserved for timeout TMF requests. 
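+	 * As a sketch of the convention (not additional code): the
+	 * recovery path claims mpt->tmf_req directly, issues the task
+	 * management frame via mpt_scsi_send_tmf(), waits for
+	 * REQ_STATE_DONE, and then puts the request back to
+	 * REQ_STATE_FREE by hand rather than through the free list.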
+ */ + mpt->tmf_req = mpt_get_request(mpt, FALSE); + if (mpt->tmf_req == NULL) { + mpt_prt(mpt, "Unable to allocate dedicated TMF request!\n"); + error = ENOMEM; + MPT_UNLOCK(mpt); + goto cleanup; + } + + /* + * Mark the request as free even though not on the free list. + * There is only one TMF request allowed to be outstanding at + * a time and the TMF routines perform their own allocation + * tracking using the standard state flags. + */ + mpt->tmf_req->state = REQ_STATE_FREE; + maxq--; + + /* + * The rest of this is CAM foo, for which we need to drop our lock + */ + MPT_UNLOCK(mpt); + + if (mpt_spawn_recovery_thread(mpt) != 0) { + mpt_prt(mpt, "Unable to spawn recovery thread!\n"); + error = ENOMEM; + goto cleanup; + } + + /* + * Create the device queue for our SIM(s). + */ + devq = cam_simq_alloc(maxq); + if (devq == NULL) { + mpt_prt(mpt, "Unable to allocate CAM SIMQ!\n"); + error = ENOMEM; + goto cleanup; + } + + /* + * Construct our SIM entry. + */ + mpt->sim = + mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq); + if (mpt->sim == NULL) { + mpt_prt(mpt, "Unable to allocate CAM SIM!\n"); + cam_devq_release(devq); + error = ENOMEM; + goto cleanup; + } + + /* + * Register exactly this bus. + */ + MPT_LOCK(mpt); + if (mpt_xpt_bus_register(mpt->sim, mpt->dev, 0) != CAM_SUCCESS) { + mpt_prt(mpt, "Bus registration Failed!\n"); + error = ENOMEM; + MPT_UNLOCK(mpt); + goto cleanup; + } + + if (xpt_create_path(&mpt->path, NULL, cam_sim_path(mpt->sim), + CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { + mpt_prt(mpt, "Unable to allocate Path!\n"); + error = ENOMEM; + MPT_UNLOCK(mpt); + goto cleanup; + } + MPT_UNLOCK(mpt); + + /* + * Only register a second bus for RAID physical + * devices if the controller supports RAID. + */ + if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) { + return (0); + } + + /* + * Create a "bus" to export all hidden disks to CAM. + */ + mpt->phydisk_sim = + mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq); + if (mpt->phydisk_sim == NULL) { + mpt_prt(mpt, "Unable to allocate Physical Disk CAM SIM!\n"); + error = ENOMEM; + goto cleanup; + } + + /* + * Register this bus. 
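+	 * (bus 1; the primary SIM was registered as bus 0 above, and
+	 * this second SIM carries only the RAID member disks that are
+	 * hidden from that first bus).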
+ */ + MPT_LOCK(mpt); + if (mpt_xpt_bus_register(mpt->phydisk_sim, mpt->dev, 1) != + CAM_SUCCESS) { + mpt_prt(mpt, "Physical Disk Bus registration Failed!\n"); + error = ENOMEM; + MPT_UNLOCK(mpt); + goto cleanup; + } + + if (xpt_create_path(&mpt->phydisk_path, NULL, + cam_sim_path(mpt->phydisk_sim), + CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { + mpt_prt(mpt, "Unable to allocate Physical Disk Path!\n"); + error = ENOMEM; + MPT_UNLOCK(mpt); + goto cleanup; + } + MPT_UNLOCK(mpt); + mpt_lprt(mpt, MPT_PRT_DEBUG, "attached cam\n"); + return (0); + +cleanup: + mpt_cam_detach(mpt); + return (error); +} + +/* + * Read FC configuration information + */ +static int +mpt_read_config_info_fc(struct mpt_softc *mpt) +{ + char *topology = NULL; + int rv; + + rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 0, + 0, &mpt->mpt_fcport_page0.Header, FALSE, 5000); + if (rv) { + return (-1); + } + mpt_lprt(mpt, MPT_PRT_DEBUG, "FC Port Page 0 Header: %x %x %x %x\n", + mpt->mpt_fcport_page0.Header.PageVersion, + mpt->mpt_fcport_page0.Header.PageLength, + mpt->mpt_fcport_page0.Header.PageNumber, + mpt->mpt_fcport_page0.Header.PageType); + + + rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_fcport_page0.Header, + sizeof(mpt->mpt_fcport_page0), FALSE, 5000); + if (rv) { + mpt_prt(mpt, "failed to read FC Port Page 0\n"); + return (-1); + } + mpt2host_config_page_fc_port_0(&mpt->mpt_fcport_page0); + + mpt->mpt_fcport_speed = mpt->mpt_fcport_page0.CurrentSpeed; + + switch (mpt->mpt_fcport_page0.Flags & + MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK) { + case MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT: + mpt->mpt_fcport_speed = 0; + topology = ""; + break; + case MPI_FCPORTPAGE0_FLAGS_ATTACH_POINT_TO_POINT: + topology = "N-Port"; + break; + case MPI_FCPORTPAGE0_FLAGS_ATTACH_PRIVATE_LOOP: + topology = "NL-Port"; + break; + case MPI_FCPORTPAGE0_FLAGS_ATTACH_FABRIC_DIRECT: + topology = "F-Port"; + break; + case MPI_FCPORTPAGE0_FLAGS_ATTACH_PUBLIC_LOOP: + topology = "FL-Port"; + break; + default: + mpt->mpt_fcport_speed = 0; + topology = "?"; + break; + } + + mpt_lprt(mpt, MPT_PRT_INFO, + "FC Port Page 0: Topology <%s> WWNN 0x%08x%08x WWPN 0x%08x%08x " + "Speed %u-Gbit\n", topology, + mpt->mpt_fcport_page0.WWNN.High, + mpt->mpt_fcport_page0.WWNN.Low, + mpt->mpt_fcport_page0.WWPN.High, + mpt->mpt_fcport_page0.WWPN.Low, + mpt->mpt_fcport_speed); +#if __FreeBSD_version >= 500000 + MPT_UNLOCK(mpt); + { + struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev); + struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev); + + snprintf(mpt->scinfo.fc.wwnn, + sizeof (mpt->scinfo.fc.wwnn), "0x%08x%08x", + mpt->mpt_fcport_page0.WWNN.High, + mpt->mpt_fcport_page0.WWNN.Low); + + snprintf(mpt->scinfo.fc.wwpn, + sizeof (mpt->scinfo.fc.wwpn), "0x%08x%08x", + mpt->mpt_fcport_page0.WWPN.High, + mpt->mpt_fcport_page0.WWPN.Low); + + SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, + "wwnn", CTLFLAG_RD, mpt->scinfo.fc.wwnn, 0, + "World Wide Node Name"); + + SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, + "wwpn", CTLFLAG_RD, mpt->scinfo.fc.wwpn, 0, + "World Wide Port Name"); + + } + MPT_LOCK(mpt); +#endif + return (0); +} + +/* + * Set FC configuration information. 
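+ *
+ * In outline (a summary of the code below, not a specification): read
+ * FC Port Page 1 from NVRAM and translate its protocol flags into
+ * driver roles, e.g.
+ *
+ *	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT)
+ *		role |= MPT_ROLE_INITIATOR;
+ *	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG)
+ *		role |= MPT_ROLE_TARGET;
+ *
+ * then write the page back only when the configured role (or the OXID
+ * target-mode option) differs; the NVRAM change takes effect at the
+ * next reboot or IOC reset.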
+ */ +static int +mpt_set_initial_config_fc(struct mpt_softc *mpt) +{ + + CONFIG_PAGE_FC_PORT_1 fc; + U32 fl; + int r, doit = 0; + int role; + + r = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 1, 0, + &fc.Header, FALSE, 5000); + if (r) { + mpt_prt(mpt, "failed to read FC page 1 header\n"); + return (mpt_fc_reset_link(mpt, 1)); + } + + r = mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_NVRAM, 0, + &fc.Header, sizeof (fc), FALSE, 5000); + if (r) { + mpt_prt(mpt, "failed to read FC page 1\n"); + return (mpt_fc_reset_link(mpt, 1)); + } + mpt2host_config_page_fc_port_1(&fc); + + /* + * Check our flags to make sure we support the role we want. + */ + doit = 0; + role = 0; + fl = fc.Flags; + + if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT) { + role |= MPT_ROLE_INITIATOR; + } + if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) { + role |= MPT_ROLE_TARGET; + } + + fl &= ~MPI_FCPORTPAGE1_FLAGS_PROT_MASK; + + if (mpt->do_cfg_role == 0) { + role = mpt->cfg_role; + } else { + mpt->do_cfg_role = 0; + } + + if (role != mpt->cfg_role) { + if (mpt->cfg_role & MPT_ROLE_INITIATOR) { + if ((role & MPT_ROLE_INITIATOR) == 0) { + mpt_prt(mpt, "adding initiator role\n"); + fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT; + doit++; + } else { + mpt_prt(mpt, "keeping initiator role\n"); + } + } else if (role & MPT_ROLE_INITIATOR) { + mpt_prt(mpt, "removing initiator role\n"); + doit++; + } + if (mpt->cfg_role & MPT_ROLE_TARGET) { + if ((role & MPT_ROLE_TARGET) == 0) { + mpt_prt(mpt, "adding target role\n"); + fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG; + doit++; + } else { + mpt_prt(mpt, "keeping target role\n"); + } + } else if (role & MPT_ROLE_TARGET) { + mpt_prt(mpt, "removing target role\n"); + doit++; + } + mpt->role = mpt->cfg_role; + } + + if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) { + if ((fl & MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID) == 0) { + mpt_prt(mpt, "adding OXID option\n"); + fl |= MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID; + doit++; + } + } + + if (doit) { + fc.Flags = fl; + host2mpt_config_page_fc_port_1(&fc); + r = mpt_write_cfg_page(mpt, + MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM, 0, &fc.Header, + sizeof(fc), FALSE, 5000); + if (r != 0) { + mpt_prt(mpt, "failed to update NVRAM with changes\n"); + return (0); + } + mpt_prt(mpt, "NOTE: NVRAM changes will not take " + "effect until next reboot or IOC reset\n"); + } + return (0); +} + +static int +mptsas_sas_io_unit_pg0(struct mpt_softc *mpt, struct mptsas_portinfo *portinfo) +{ + ConfigExtendedPageHeader_t hdr; + struct mptsas_phyinfo *phyinfo; + SasIOUnitPage0_t *buffer; + int error, len, i; + + error = mpt_read_extcfg_header(mpt, MPI_SASIOUNITPAGE0_PAGEVERSION, + 0, 0, MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT, + &hdr, 0, 10000); + if (error) + goto out; + if (hdr.ExtPageLength == 0) { + error = ENXIO; + goto out; + } + + len = hdr.ExtPageLength * 4; + buffer = kmalloc(len, M_DEVBUF, M_NOWAIT|M_ZERO); + if (buffer == NULL) { + error = ENOMEM; + goto out; + } + + error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT, + 0, &hdr, buffer, len, 0, 10000); + if (error) { + kfree(buffer, M_DEVBUF); + goto out; + } + + portinfo->num_phys = buffer->NumPhys; + portinfo->phy_info = kmalloc(sizeof(*portinfo->phy_info) * + portinfo->num_phys, M_DEVBUF, M_NOWAIT|M_ZERO); + if (portinfo->phy_info == NULL) { + kfree(buffer, M_DEVBUF); + error = ENOMEM; + goto out; + } + + for (i = 0; i < portinfo->num_phys; i++) { + phyinfo = &portinfo->phy_info[i]; + phyinfo->phy_num = i; + phyinfo->port_id = buffer->PhyData[i].Port; + phyinfo->negotiated_link_rate = + 
buffer->PhyData[i].NegotiatedLinkRate; + phyinfo->handle = + le16toh(buffer->PhyData[i].ControllerDevHandle); + } + + kfree(buffer, M_DEVBUF); +out: + return (error); +} + +static int +mptsas_sas_phy_pg0(struct mpt_softc *mpt, struct mptsas_phyinfo *phy_info, + uint32_t form, uint32_t form_specific) +{ + ConfigExtendedPageHeader_t hdr; + SasPhyPage0_t *buffer; + int error; + + error = mpt_read_extcfg_header(mpt, MPI_SASPHY0_PAGEVERSION, 0, 0, + MPI_CONFIG_EXTPAGETYPE_SAS_PHY, &hdr, + 0, 10000); + if (error) + goto out; + if (hdr.ExtPageLength == 0) { + error = ENXIO; + goto out; + } + + buffer = kmalloc(sizeof(SasPhyPage0_t), M_DEVBUF, M_NOWAIT|M_ZERO); + if (buffer == NULL) { + error = ENOMEM; + goto out; + } + + error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT, + form + form_specific, &hdr, buffer, + sizeof(SasPhyPage0_t), 0, 10000); + if (error) { + kfree(buffer, M_DEVBUF); + goto out; + } + + phy_info->hw_link_rate = buffer->HwLinkRate; + phy_info->programmed_link_rate = buffer->ProgrammedLinkRate; + phy_info->identify.dev_handle = le16toh(buffer->OwnerDevHandle); + phy_info->attached.dev_handle = le16toh(buffer->AttachedDevHandle); + + kfree(buffer, M_DEVBUF); +out: + return (error); +} + +static int +mptsas_sas_device_pg0(struct mpt_softc *mpt, struct mptsas_devinfo *device_info, + uint32_t form, uint32_t form_specific) +{ + ConfigExtendedPageHeader_t hdr; + SasDevicePage0_t *buffer; + uint64_t sas_address; + int error = 0; + + bzero(device_info, sizeof(*device_info)); + error = mpt_read_extcfg_header(mpt, MPI_SASDEVICE0_PAGEVERSION, 0, 0, + MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE, + &hdr, 0, 10000); + if (error) + goto out; + if (hdr.ExtPageLength == 0) { + error = ENXIO; + goto out; + } + + buffer = kmalloc(sizeof(SasDevicePage0_t), M_DEVBUF, M_NOWAIT|M_ZERO); + if (buffer == NULL) { + error = ENOMEM; + goto out; + } + + error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT, + form + form_specific, &hdr, buffer, + sizeof(SasDevicePage0_t), 0, 10000); + if (error) { + kfree(buffer, M_DEVBUF); + goto out; + } + + device_info->dev_handle = le16toh(buffer->DevHandle); + device_info->parent_dev_handle = le16toh(buffer->ParentDevHandle); + device_info->enclosure_handle = le16toh(buffer->EnclosureHandle); + device_info->slot = le16toh(buffer->Slot); + device_info->phy_num = buffer->PhyNum; + device_info->physical_port = buffer->PhysicalPort; + device_info->target_id = buffer->TargetID; + device_info->bus = buffer->Bus; + bcopy(&buffer->SASAddress, &sas_address, sizeof(uint64_t)); + device_info->sas_address = le64toh(sas_address); + device_info->device_info = le32toh(buffer->DeviceInfo); + + kfree(buffer, M_DEVBUF); +out: + return (error); +} + +/* + * Read SAS configuration information. Nothing to do yet. 
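+ *
+ * The SAS pages fetched below are extended configuration pages
+ * whose PageAddress packs a "form" selector together with a
+ * form-specific value. A sketch of the composition used by the
+ * device-page lookups that follow (dev_handle is a placeholder):
+ *
+ *    page_address = (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
+ *        MPI_SAS_DEVICE_PGAD_FORM_SHIFT) | dev_handle;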
+ */ +static int +mpt_read_config_info_sas(struct mpt_softc *mpt) +{ + struct mptsas_portinfo *portinfo; + struct mptsas_phyinfo *phyinfo; + int error, i; + + portinfo = kmalloc(sizeof(*portinfo), M_DEVBUF, M_NOWAIT|M_ZERO); + if (portinfo == NULL) + return (ENOMEM); + + error = mptsas_sas_io_unit_pg0(mpt, portinfo); + if (error) { + kfree(portinfo, M_DEVBUF); + return (0); + } + + for (i = 0; i < portinfo->num_phys; i++) { + phyinfo = &portinfo->phy_info[i]; + error = mptsas_sas_phy_pg0(mpt, phyinfo, + (MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER << + MPI_SAS_PHY_PGAD_FORM_SHIFT), i); + if (error) + break; + error = mptsas_sas_device_pg0(mpt, &phyinfo->identify, + (MPI_SAS_DEVICE_PGAD_FORM_HANDLE << + MPI_SAS_DEVICE_PGAD_FORM_SHIFT), + phyinfo->handle); + if (error) + break; + phyinfo->identify.phy_num = phyinfo->phy_num = i; + if (phyinfo->attached.dev_handle) + error = mptsas_sas_device_pg0(mpt, + &phyinfo->attached, + (MPI_SAS_DEVICE_PGAD_FORM_HANDLE << + MPI_SAS_DEVICE_PGAD_FORM_SHIFT), + phyinfo->attached.dev_handle); + if (error) + break; + } + mpt->sas_portinfo = portinfo; + return (0); +} + +static void +mptsas_set_sata_wc(struct mpt_softc *mpt, struct mptsas_devinfo *devinfo, + int enabled) +{ + SataPassthroughRequest_t *pass; + request_t *req; + int error, status; + + req = mpt_get_request(mpt, 0); + if (req == NULL) + return; + + pass = req->req_vbuf; + bzero(pass, sizeof(SataPassthroughRequest_t)); + pass->Function = MPI_FUNCTION_SATA_PASSTHROUGH; + pass->TargetID = devinfo->target_id; + pass->Bus = devinfo->bus; + pass->PassthroughFlags = 0; + pass->ConnectionRate = MPI_SATA_PT_REQ_CONNECT_RATE_NEGOTIATED; + pass->DataLength = 0; + pass->MsgContext = htole32(req->index | sata_pass_handler_id); + pass->CommandFIS[0] = 0x27; + pass->CommandFIS[1] = 0x80; + pass->CommandFIS[2] = 0xef; + pass->CommandFIS[3] = (enabled) ? 0x02 : 0x82; + pass->CommandFIS[7] = 0x40; + pass->CommandFIS[15] = 0x08; + + mpt_check_doorbell(mpt); + mpt_send_cmd(mpt, req); + error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, 0, + 10 * 1000); + if (error) { + mpt_free_request(mpt, req); + kprintf("error %d sending passthrough\n", error); + return; + } + + status = le16toh(req->IOCStatus); + if (status != MPI_IOCSTATUS_SUCCESS) { + mpt_free_request(mpt, req); + kprintf("IOCSTATUS %d\n", status); + return; + } + + mpt_free_request(mpt, req); +} + +/* + * Set SAS configuration information. Nothing to do yet. + */ +static int +mpt_set_initial_config_sas(struct mpt_softc *mpt) +{ + struct mptsas_phyinfo *phyinfo; + int i; + + if ((mpt_enable_sata_wc != -1) && (mpt->sas_portinfo != NULL)) { + for (i = 0; i < mpt->sas_portinfo->num_phys; i++) { + phyinfo = &mpt->sas_portinfo->phy_info[i]; + if (phyinfo->attached.dev_handle == 0) + continue; + if ((phyinfo->attached.device_info & + MPI_SAS_DEVICE_INFO_SATA_DEVICE) == 0) + continue; + if (bootverbose) + device_printf(mpt->dev, + "%sabling SATA WC on phy %d\n", + (mpt_enable_sata_wc) ? 
"En" : "Dis", i); + mptsas_set_sata_wc(mpt, &phyinfo->attached, + mpt_enable_sata_wc); + } + } + + return (0); +} + +static int +mpt_sata_pass_reply_handler(struct mpt_softc *mpt, request_t *req, + uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) +{ + if (req != NULL) { + + if (reply_frame != NULL) { + req->IOCStatus = le16toh(reply_frame->IOCStatus); + } + req->state &= ~REQ_STATE_QUEUED; + req->state |= REQ_STATE_DONE; + TAILQ_REMOVE(&mpt->request_pending_list, req, links); + if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) { + wakeup(req); + } else if ((req->state & REQ_STATE_TIMEDOUT) != 0) { + /* + * Whew- we can free this request (late completion) + */ + mpt_free_request(mpt, req); + } + } + + return (TRUE); +} + +/* + * Read SCSI configuration information + */ +static int +mpt_read_config_info_spi(struct mpt_softc *mpt) +{ + int rv, i; + + rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 0, 0, + &mpt->mpt_port_page0.Header, FALSE, 5000); + if (rv) { + return (-1); + } + mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 0 Header: %x %x %x %x\n", + mpt->mpt_port_page0.Header.PageVersion, + mpt->mpt_port_page0.Header.PageLength, + mpt->mpt_port_page0.Header.PageNumber, + mpt->mpt_port_page0.Header.PageType); + + rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 1, 0, + &mpt->mpt_port_page1.Header, FALSE, 5000); + if (rv) { + return (-1); + } + mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 1 Header: %x %x %x %x\n", + mpt->mpt_port_page1.Header.PageVersion, + mpt->mpt_port_page1.Header.PageLength, + mpt->mpt_port_page1.Header.PageNumber, + mpt->mpt_port_page1.Header.PageType); + + rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 2, 0, + &mpt->mpt_port_page2.Header, FALSE, 5000); + if (rv) { + return (-1); + } + mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 2 Header: %x %x %x %x\n", + mpt->mpt_port_page2.Header.PageVersion, + mpt->mpt_port_page2.Header.PageLength, + mpt->mpt_port_page2.Header.PageNumber, + mpt->mpt_port_page2.Header.PageType); + + for (i = 0; i < 16; i++) { + rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE, + 0, i, &mpt->mpt_dev_page0[i].Header, FALSE, 5000); + if (rv) { + return (-1); + } + mpt_lprt(mpt, MPT_PRT_DEBUG, + "SPI Target %d Device Page 0 Header: %x %x %x %x\n", i, + mpt->mpt_dev_page0[i].Header.PageVersion, + mpt->mpt_dev_page0[i].Header.PageLength, + mpt->mpt_dev_page0[i].Header.PageNumber, + mpt->mpt_dev_page0[i].Header.PageType); + + rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE, + 1, i, &mpt->mpt_dev_page1[i].Header, FALSE, 5000); + if (rv) { + return (-1); + } + mpt_lprt(mpt, MPT_PRT_DEBUG, + "SPI Target %d Device Page 1 Header: %x %x %x %x\n", i, + mpt->mpt_dev_page1[i].Header.PageVersion, + mpt->mpt_dev_page1[i].Header.PageLength, + mpt->mpt_dev_page1[i].Header.PageNumber, + mpt->mpt_dev_page1[i].Header.PageType); + } + + /* + * At this point, we don't *have* to fail. As long as we have + * valid config header information, we can (barely) lurch + * along. 
+ */ + + rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page0.Header, + sizeof(mpt->mpt_port_page0), FALSE, 5000); + if (rv) { + mpt_prt(mpt, "failed to read SPI Port Page 0\n"); + } else { + mpt2host_config_page_scsi_port_0(&mpt->mpt_port_page0); + mpt_lprt(mpt, MPT_PRT_NEGOTIATION, + "SPI Port Page 0: Capabilities %x PhysicalInterface %x\n", + mpt->mpt_port_page0.Capabilities, + mpt->mpt_port_page0.PhysicalInterface); + } + + rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page1.Header, + sizeof(mpt->mpt_port_page1), FALSE, 5000); + if (rv) { + mpt_prt(mpt, "failed to read SPI Port Page 1\n"); + } else { + mpt2host_config_page_scsi_port_1(&mpt->mpt_port_page1); + mpt_lprt(mpt, MPT_PRT_DEBUG, + "SPI Port Page 1: Configuration %x OnBusTimerValue %x\n", + mpt->mpt_port_page1.Configuration, + mpt->mpt_port_page1.OnBusTimerValue); + } + + rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page2.Header, + sizeof(mpt->mpt_port_page2), FALSE, 5000); + if (rv) { + mpt_prt(mpt, "failed to read SPI Port Page 2\n"); + } else { + mpt_lprt(mpt, MPT_PRT_NEGOTIATION, + "Port Page 2: Flags %x Settings %x\n", + mpt->mpt_port_page2.PortFlags, + mpt->mpt_port_page2.PortSettings); + mpt2host_config_page_scsi_port_2(&mpt->mpt_port_page2); + for (i = 0; i < 16; i++) { + mpt_lprt(mpt, MPT_PRT_NEGOTIATION, + " Port Page 2 Tgt %d: timo %x SF %x Flags %x\n", + i, mpt->mpt_port_page2.DeviceSettings[i].Timeout, + mpt->mpt_port_page2.DeviceSettings[i].SyncFactor, + mpt->mpt_port_page2.DeviceSettings[i].DeviceFlags); + } + } + + for (i = 0; i < 16; i++) { + rv = mpt_read_cur_cfg_page(mpt, i, + &mpt->mpt_dev_page0[i].Header, sizeof(*mpt->mpt_dev_page0), + FALSE, 5000); + if (rv) { + mpt_prt(mpt, + "cannot read SPI Target %d Device Page 0\n", i); + continue; + } + mpt2host_config_page_scsi_device_0(&mpt->mpt_dev_page0[i]); + mpt_lprt(mpt, MPT_PRT_NEGOTIATION, + "target %d page 0: Negotiated Params %x Information %x\n", + i, mpt->mpt_dev_page0[i].NegotiatedParameters, + mpt->mpt_dev_page0[i].Information); + + rv = mpt_read_cur_cfg_page(mpt, i, + &mpt->mpt_dev_page1[i].Header, sizeof(*mpt->mpt_dev_page1), + FALSE, 5000); + if (rv) { + mpt_prt(mpt, + "cannot read SPI Target %d Device Page 1\n", i); + continue; + } + mpt2host_config_page_scsi_device_1(&mpt->mpt_dev_page1[i]); + mpt_lprt(mpt, MPT_PRT_NEGOTIATION, + "target %d page 1: Requested Params %x Configuration %x\n", + i, mpt->mpt_dev_page1[i].RequestedParameters, + mpt->mpt_dev_page1[i].Configuration); + } + return (0); +} + +/* + * Validate SPI configuration information. + * + * In particular, validate SPI Port Page 1. 
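+ *
+ * The Configuration word checked below encodes our initiator ID
+ * twice: the ID itself in the low byte and a one-hot ID bitmask in
+ * the upper sixteen bits. A worked example, assuming mpt_ini_id is 7:
+ *
+ *    pp1val = ((1 << 7) << 16) | 7;    == 0x00800007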
+ */ +static int +mpt_set_initial_config_spi(struct mpt_softc *mpt) +{ + int i, pp1val = ((1 << mpt->mpt_ini_id) << 16) | mpt->mpt_ini_id; + int error; + + mpt->mpt_disc_enable = 0xff; + mpt->mpt_tag_enable = 0; + + if (mpt->mpt_port_page1.Configuration != pp1val) { + CONFIG_PAGE_SCSI_PORT_1 tmp; + + mpt_prt(mpt, "SPI Port Page 1 Config value bad (%x)- should " + "be %x\n", mpt->mpt_port_page1.Configuration, pp1val); + tmp = mpt->mpt_port_page1; + tmp.Configuration = pp1val; + host2mpt_config_page_scsi_port_1(&tmp); + error = mpt_write_cur_cfg_page(mpt, 0, + &tmp.Header, sizeof(tmp), FALSE, 5000); + if (error) { + return (-1); + } + error = mpt_read_cur_cfg_page(mpt, 0, + &tmp.Header, sizeof(tmp), FALSE, 5000); + if (error) { + return (-1); + } + mpt2host_config_page_scsi_port_1(&tmp); + if (tmp.Configuration != pp1val) { + mpt_prt(mpt, + "failed to reset SPI Port Page 1 Config value\n"); + return (-1); + } + mpt->mpt_port_page1 = tmp; + } + + /* + * The purpose of this exercise is to get + * all targets back to async/narrow. + * + * We skip this step if the BIOS has already negotiated + * speeds with the targets. + */ + i = mpt->mpt_port_page2.PortSettings & + MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS; + if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS) { + mpt_lprt(mpt, MPT_PRT_NEGOTIATION, + "honoring BIOS transfer negotiations\n"); + } else { + for (i = 0; i < 16; i++) { + mpt->mpt_dev_page1[i].RequestedParameters = 0; + mpt->mpt_dev_page1[i].Configuration = 0; + (void) mpt_update_spi_config(mpt, i); + } + } + return (0); +} + +int +mpt_cam_enable(struct mpt_softc *mpt) +{ + int error; + + MPT_LOCK(mpt); + + error = EIO; + if (mpt->is_fc) { + if (mpt_read_config_info_fc(mpt)) { + goto out; + } + if (mpt_set_initial_config_fc(mpt)) { + goto out; + } + } else if (mpt->is_sas) { + if (mpt_read_config_info_sas(mpt)) { + goto out; + } + if (mpt_set_initial_config_sas(mpt)) { + goto out; + } + } else if (mpt->is_spi) { + if (mpt_read_config_info_spi(mpt)) { + goto out; + } + if (mpt_set_initial_config_spi(mpt)) { + goto out; + } + } + error = 0; + +out: + MPT_UNLOCK(mpt); + return (error); +} + +void +mpt_cam_ready(struct mpt_softc *mpt) +{ + /* + * If we're in target mode, hang out resources now + * so we don't cause the world to hang talking to us. 
+ */
+ if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
+ /*
+ * Try to add some target command resources
+ */
+ MPT_LOCK(mpt);
+ if (mpt_add_target_commands(mpt) == FALSE) {
+ mpt_prt(mpt, "failed to add target commands\n");
+ }
+ MPT_UNLOCK(mpt);
+ }
+ mpt->ready = 1;
+}
+
+void
+mpt_cam_detach(struct mpt_softc *mpt)
+{
+ mpt_handler_t handler;
+
+ MPT_LOCK(mpt);
+ mpt->ready = 0;
+ mpt_terminate_recovery_thread(mpt);
+
+ handler.reply_handler = mpt_scsi_reply_handler;
+ mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
+ scsi_io_handler_id);
+ handler.reply_handler = mpt_scsi_tmf_reply_handler;
+ mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
+ scsi_tmf_handler_id);
+ handler.reply_handler = mpt_fc_els_reply_handler;
+ mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
+ fc_els_handler_id);
+ handler.reply_handler = mpt_scsi_tgt_reply_handler;
+ mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
+ mpt->scsi_tgt_handler_id);
+ handler.reply_handler = mpt_sata_pass_reply_handler;
+ mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
+ sata_pass_handler_id);
+
+ if (mpt->tmf_req != NULL) {
+ mpt->tmf_req->state = REQ_STATE_ALLOCATED;
+ mpt_free_request(mpt, mpt->tmf_req);
+ mpt->tmf_req = NULL;
+ }
+ if (mpt->sas_portinfo != NULL) {
+ kfree(mpt->sas_portinfo, M_DEVBUF);
+ mpt->sas_portinfo = NULL;
+ }
+ MPT_UNLOCK(mpt);
+
+ if (mpt->sim != NULL) {
+ xpt_free_path(mpt->path);
+ xpt_bus_deregister(cam_sim_path(mpt->sim));
+ cam_sim_free(mpt->sim);
+ mpt->sim = NULL;
+ }
+
+ if (mpt->phydisk_sim != NULL) {
+ xpt_free_path(mpt->phydisk_path);
+ xpt_bus_deregister(cam_sim_path(mpt->phydisk_sim));
+ cam_sim_free(mpt->phydisk_sim);
+ mpt->phydisk_sim = NULL;
+ }
+}
+
+/* This routine is used after a system crash to dump core onto the swap device.
+ */
+static void
+mpt_poll(struct cam_sim *sim)
+{
+ struct mpt_softc *mpt;
+
+ mpt = (struct mpt_softc *)cam_sim_softc(sim);
+ mpt_intr(mpt);
+}
+
+/*
+ * Watchdog timeout routine for SCSI requests.
+ */
+static void
+mpt_timeout(void *arg)
+{
+ union ccb *ccb;
+ struct mpt_softc *mpt;
+ request_t *req;
+
+ ccb = (union ccb *)arg;
+ mpt = ccb->ccb_h.ccb_mpt_ptr;
+
+ MPT_LOCK(mpt);
+ req = ccb->ccb_h.ccb_req_ptr;
+ mpt_prt(mpt, "request %p:%u timed out for ccb %p (req->ccb %p)\n", req,
+ req->serno, ccb, req->ccb);
+/* XXX: WHAT ARE WE TRYING TO DO HERE? */
+ if ((req->state & REQ_STATE_QUEUED) == REQ_STATE_QUEUED) {
+ TAILQ_REMOVE(&mpt->request_pending_list, req, links);
+ TAILQ_INSERT_TAIL(&mpt->request_timeout_list, req, links);
+ req->state |= REQ_STATE_TIMEDOUT;
+ mpt_wakeup_recovery_thread(mpt);
+ }
+ MPT_UNLOCK(mpt);
+}
+
+/*
+ * Callback routine from "bus_dmamap_load" or, in simple cases, called directly.
+ *
+ * Takes a list of physical segments and builds the SGL for the SCSI IO
+ * command, then forwards the command to the IOC after one last check that
+ * CAM has not aborted the transaction. 
+ */ +static void +mpt_execute_req_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) +{ + request_t *req, *trq; + char *mpt_off; + union ccb *ccb; + struct mpt_softc *mpt; + int seg, first_lim; + uint32_t flags, nxt_off; + void *sglp = NULL; + MSG_REQUEST_HEADER *hdrp; + SGE_SIMPLE64 *se; + SGE_CHAIN64 *ce; + int istgt = 0; + + req = (request_t *)arg; + ccb = req->ccb; + + mpt = ccb->ccb_h.ccb_mpt_ptr; + req = ccb->ccb_h.ccb_req_ptr; + + hdrp = req->req_vbuf; + mpt_off = req->req_vbuf; + + if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) { + error = EFBIG; + } + + if (error == 0) { + switch (hdrp->Function) { + case MPI_FUNCTION_SCSI_IO_REQUEST: + case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH: + istgt = 0; + sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL; + break; + case MPI_FUNCTION_TARGET_ASSIST: + istgt = 1; + sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL; + break; + default: + mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req_a64\n", + hdrp->Function); + error = EINVAL; + break; + } + } + + if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) { + error = EFBIG; + mpt_prt(mpt, "segment count %d too large (max %u)\n", + nseg, mpt->max_seg_cnt); + } + +bad: + if (error != 0) { + if (error != EFBIG && error != ENOMEM) { + mpt_prt(mpt, "mpt_execute_req_a64: err %d\n", error); + } + if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) { + cam_status status; + mpt_freeze_ccb(ccb); + if (error == EFBIG) { + status = CAM_REQ_TOO_BIG; + } else if (error == ENOMEM) { + if (mpt->outofbeer == 0) { + mpt->outofbeer = 1; + xpt_freeze_simq(mpt->sim, 1); + mpt_lprt(mpt, MPT_PRT_DEBUG, + "FREEZEQ\n"); + } + status = CAM_REQUEUE_REQ; + } else { + status = CAM_REQ_CMP_ERR; + } + mpt_set_ccb_status(ccb, status); + } + if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { + request_t *cmd_req = + MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); + MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM; + MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL; + MPT_TGT_STATE(mpt, cmd_req)->req = NULL; + } + ccb->ccb_h.status &= ~CAM_SIM_QUEUED; + KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__)); + xpt_done(ccb); + CAMLOCK_2_MPTLOCK(mpt); + mpt_free_request(mpt, req); + MPTLOCK_2_CAMLOCK(mpt); + return; + } + + /* + * No data to transfer? + * Just make a single simple SGL with zero length. 
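+ *
+ * For reference: a simple SGE's 32-bit FlagsLength word carries the
+ * flags in the top byte and the byte count in the low 24 bits. The
+ * MPI_pSGE_* macros used below hide that packing, which is roughly
+ * (sketch):
+ *
+ *    fl = (flags << MPI_SGE_FLAGS_SHIFT) | (len & 0x00ffffff);
+ *    se->FlagsLength = htole32(fl);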
+ */ + + if (mpt->verbose >= MPT_PRT_DEBUG) { + int tidx = ((char *)sglp) - mpt_off; + memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx); + } + + if (nseg == 0) { + SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp; + MPI_pSGE_SET_FLAGS(se1, + (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER | + MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST)); + se1->FlagsLength = htole32(se1->FlagsLength); + goto out; + } + + + flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_64_BIT_ADDRESSING; + if (istgt == 0) { + if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { + flags |= MPI_SGE_FLAGS_HOST_TO_IOC; + } + } else { + if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { + flags |= MPI_SGE_FLAGS_HOST_TO_IOC; + } + } + + if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) { + bus_dmasync_op_t op; + if (istgt == 0) { + if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { + op = BUS_DMASYNC_PREREAD; + } else { + op = BUS_DMASYNC_PREWRITE; + } + } else { + if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { + op = BUS_DMASYNC_PREWRITE; + } else { + op = BUS_DMASYNC_PREREAD; + } + } + bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op); + } + + /* + * Okay, fill in what we can at the end of the command frame. + * If we have up to MPT_NSGL_FIRST, we can fit them all into + * the command frame. + * + * Otherwise, we fill up through MPT_NSGL_FIRST less one + * SIMPLE64 pointers and start doing CHAIN64 entries after + * that. + */ + + if (nseg < MPT_NSGL_FIRST(mpt)) { + first_lim = nseg; + } else { + /* + * Leave room for CHAIN element + */ + first_lim = MPT_NSGL_FIRST(mpt) - 1; + } + + se = (SGE_SIMPLE64 *) sglp; + for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) { + uint32_t tf; + + memset(se, 0, sizeof (*se)); + se->Address.Low = htole32(dm_segs->ds_addr & 0xffffffff); + if (sizeof(bus_addr_t) > 4) { + se->Address.High = + htole32(((uint64_t)dm_segs->ds_addr) >> 32); + } + MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len); + tf = flags; + if (seg == first_lim - 1) { + tf |= MPI_SGE_FLAGS_LAST_ELEMENT; + } + if (seg == nseg - 1) { + tf |= MPI_SGE_FLAGS_END_OF_LIST | + MPI_SGE_FLAGS_END_OF_BUFFER; + } + MPI_pSGE_SET_FLAGS(se, tf); + se->FlagsLength = htole32(se->FlagsLength); + } + + if (seg == nseg) { + goto out; + } + + /* + * Tell the IOC where to find the first chain element. + */ + hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2; + nxt_off = MPT_RQSL(mpt); + trq = req; + + /* + * Make up the rest of the data segments out of a chain element + * (contiained in the current request frame) which points to + * SIMPLE64 elements in the next request frame, possibly ending + * with *another* chain element (if there's more). + */ + while (seg < nseg) { + int this_seg_lim; + uint32_t tf, cur_off; + bus_addr_t chain_list_addr; + + /* + * Point to the chain descriptor. Note that the chain + * descriptor is at the end of the *previous* list (whether + * chain or simple). + */ + ce = (SGE_CHAIN64 *) se; + + /* + * Before we change our current pointer, make sure we won't + * overflow the request area with this frame. Note that we + * test against 'greater than' here as it's okay in this case + * to have next offset be just outside the request area. + */ + if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) { + nxt_off = MPT_REQUEST_AREA; + goto next_chain; + } + + /* + * Set our SGE element pointer to the beginning of the chain + * list and update our next chain list offset. 
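+ *
+ * For orientation: the chain element's Length field is in bytes
+ * while NextChainOffset is in 32-bit words, so the code below
+ * derives one from the other. E.g. (sketch) with n simple elements
+ * ahead of a trailing chain element:
+ *
+ *    ce->Length = n * sizeof (SGE_SIMPLE64);
+ *    ce->NextChainOffset = ce->Length >> 2;
+ *    ce->Length += sizeof (SGE_CHAIN64);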
+ */ + se = (SGE_SIMPLE64 *) &mpt_off[nxt_off]; + cur_off = nxt_off; + nxt_off += MPT_RQSL(mpt); + + /* + * Now initialized the chain descriptor. + */ + memset(ce, 0, sizeof (*ce)); + + /* + * Get the physical address of the chain list. + */ + chain_list_addr = trq->req_pbuf; + chain_list_addr += cur_off; + if (sizeof (bus_addr_t) > 4) { + ce->Address.High = + htole32(((uint64_t)chain_list_addr) >> 32); + } + ce->Address.Low = htole32(chain_list_addr & 0xffffffff); + ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT | + MPI_SGE_FLAGS_64_BIT_ADDRESSING; + + /* + * If we have more than a frame's worth of segments left, + * set up the chain list to have the last element be another + * chain descriptor. + */ + if ((nseg - seg) > MPT_NSGL(mpt)) { + this_seg_lim = seg + MPT_NSGL(mpt) - 1; + /* + * The length of the chain is the length in bytes of the + * number of segments plus the next chain element. + * + * The next chain descriptor offset is the length, + * in words, of the number of segments. + */ + ce->Length = (this_seg_lim - seg) * + sizeof (SGE_SIMPLE64); + ce->NextChainOffset = ce->Length >> 2; + ce->Length += sizeof (SGE_CHAIN64); + } else { + this_seg_lim = nseg; + ce->Length = (this_seg_lim - seg) * + sizeof (SGE_SIMPLE64); + } + ce->Length = htole16(ce->Length); + + /* + * Fill in the chain list SGE elements with our segment data. + * + * If we're the last element in this chain list, set the last + * element flag. If we're the completely last element period, + * set the end of list and end of buffer flags. + */ + while (seg < this_seg_lim) { + memset(se, 0, sizeof (*se)); + se->Address.Low = htole32(dm_segs->ds_addr & + 0xffffffff); + if (sizeof (bus_addr_t) > 4) { + se->Address.High = + htole32(((uint64_t)dm_segs->ds_addr) >> 32); + } + MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len); + tf = flags; + if (seg == this_seg_lim - 1) { + tf |= MPI_SGE_FLAGS_LAST_ELEMENT; + } + if (seg == nseg - 1) { + tf |= MPI_SGE_FLAGS_END_OF_LIST | + MPI_SGE_FLAGS_END_OF_BUFFER; + } + MPI_pSGE_SET_FLAGS(se, tf); + se->FlagsLength = htole32(se->FlagsLength); + se++; + seg++; + dm_segs++; + } + + next_chain: + /* + * If we have more segments to do and we've used up all of + * the space in a request area, go allocate another one + * and chain to that. + */ + if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) { + request_t *nrq; + + CAMLOCK_2_MPTLOCK(mpt); + nrq = mpt_get_request(mpt, FALSE); + MPTLOCK_2_CAMLOCK(mpt); + + if (nrq == NULL) { + error = ENOMEM; + goto bad; + } + + /* + * Append the new request area on the tail of our list. + */ + if ((trq = req->chain) == NULL) { + req->chain = nrq; + } else { + while (trq->chain != NULL) { + trq = trq->chain; + } + trq->chain = nrq; + } + trq = nrq; + mpt_off = trq->req_vbuf; + if (mpt->verbose >= MPT_PRT_DEBUG) { + memset(mpt_off, 0xff, MPT_REQUEST_AREA); + } + nxt_off = 0; + } + } +out: + + /* + * Last time we need to check if this CCB needs to be aborted. 
+ */ + if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { + if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { + request_t *cmd_req = + MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); + MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM; + MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL; + MPT_TGT_STATE(mpt, cmd_req)->req = NULL; + } + mpt_prt(mpt, + "mpt_execute_req_a64: I/O cancelled (status 0x%x)\n", + ccb->ccb_h.status & CAM_STATUS_MASK); + if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) { + bus_dmamap_unload(mpt->buffer_dmat, req->dmap); + } + ccb->ccb_h.status &= ~CAM_SIM_QUEUED; + KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__)); + xpt_done(ccb); + CAMLOCK_2_MPTLOCK(mpt); + mpt_free_request(mpt, req); + MPTLOCK_2_CAMLOCK(mpt); + return; + } + + ccb->ccb_h.status |= CAM_SIM_QUEUED; + if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) { + mpt_req_timeout(req, (ccb->ccb_h.timeout * hz) / 1000, + mpt_timeout, ccb); + } + if (mpt->verbose > MPT_PRT_DEBUG) { + int nc = 0; + mpt_print_request(req->req_vbuf); + for (trq = req->chain; trq; trq = trq->chain) { + kprintf(" Additional Chain Area %d\n", nc++); + mpt_dump_sgl(trq->req_vbuf, 0); + } + } + + if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { + request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); + mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req); +#ifdef WE_TRUST_AUTO_GOOD_STATUS + if ((ccb->ccb_h.flags & CAM_SEND_STATUS) && + csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) { + tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS; + } else { + tgt->state = TGT_STATE_MOVING_DATA; + } +#else + tgt->state = TGT_STATE_MOVING_DATA; +#endif + } + CAMLOCK_2_MPTLOCK(mpt); + mpt_send_cmd(mpt, req); + MPTLOCK_2_CAMLOCK(mpt); +} + +static void +mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) +{ + request_t *req, *trq; + char *mpt_off; + union ccb *ccb; + struct mpt_softc *mpt; + int seg, first_lim; + uint32_t flags, nxt_off; + void *sglp = NULL; + MSG_REQUEST_HEADER *hdrp; + SGE_SIMPLE32 *se; + SGE_CHAIN32 *ce; + int istgt = 0; + + req = (request_t *)arg; + ccb = req->ccb; + + mpt = ccb->ccb_h.ccb_mpt_ptr; + req = ccb->ccb_h.ccb_req_ptr; + + hdrp = req->req_vbuf; + mpt_off = req->req_vbuf; + + + if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) { + error = EFBIG; + } + + if (error == 0) { + switch (hdrp->Function) { + case MPI_FUNCTION_SCSI_IO_REQUEST: + case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH: + sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL; + break; + case MPI_FUNCTION_TARGET_ASSIST: + istgt = 1; + sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL; + break; + default: + mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req\n", + hdrp->Function); + error = EINVAL; + break; + } + } + + if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) { + error = EFBIG; + mpt_prt(mpt, "segment count %d too large (max %u)\n", + nseg, mpt->max_seg_cnt); + } + +bad: + if (error != 0) { + if (error != EFBIG && error != ENOMEM) { + mpt_prt(mpt, "mpt_execute_req: err %d\n", error); + } + if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) { + cam_status status; + mpt_freeze_ccb(ccb); + if (error == EFBIG) { + status = CAM_REQ_TOO_BIG; + } else if (error == ENOMEM) { + if (mpt->outofbeer == 0) { + mpt->outofbeer = 1; + xpt_freeze_simq(mpt->sim, 1); + mpt_lprt(mpt, MPT_PRT_DEBUG, + "FREEZEQ\n"); + } + status = CAM_REQUEUE_REQ; + } else { + status = CAM_REQ_CMP_ERR; + } + mpt_set_ccb_status(ccb, status); + } + if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { + request_t *cmd_req = + 
MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); + MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM; + MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL; + MPT_TGT_STATE(mpt, cmd_req)->req = NULL; + } + ccb->ccb_h.status &= ~CAM_SIM_QUEUED; + KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__)); + xpt_done(ccb); + CAMLOCK_2_MPTLOCK(mpt); + mpt_free_request(mpt, req); + MPTLOCK_2_CAMLOCK(mpt); + return; + } + + /* + * No data to transfer? + * Just make a single simple SGL with zero length. + */ + + if (mpt->verbose >= MPT_PRT_DEBUG) { + int tidx = ((char *)sglp) - mpt_off; + memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx); + } + + if (nseg == 0) { + SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp; + MPI_pSGE_SET_FLAGS(se1, + (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER | + MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST)); + se1->FlagsLength = htole32(se1->FlagsLength); + goto out; + } + + + flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT; + if (istgt == 0) { + if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { + flags |= MPI_SGE_FLAGS_HOST_TO_IOC; + } + } else { + if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { + flags |= MPI_SGE_FLAGS_HOST_TO_IOC; + } + } + + if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) { + bus_dmasync_op_t op; + if (istgt) { + if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { + op = BUS_DMASYNC_PREREAD; + } else { + op = BUS_DMASYNC_PREWRITE; + } + } else { + if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { + op = BUS_DMASYNC_PREWRITE; + } else { + op = BUS_DMASYNC_PREREAD; + } + } + bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op); + } + + /* + * Okay, fill in what we can at the end of the command frame. + * If we have up to MPT_NSGL_FIRST, we can fit them all into + * the command frame. + * + * Otherwise, we fill up through MPT_NSGL_FIRST less one + * SIMPLE32 pointers and start doing CHAIN32 entries after + * that. + */ + + if (nseg < MPT_NSGL_FIRST(mpt)) { + first_lim = nseg; + } else { + /* + * Leave room for CHAIN element + */ + first_lim = MPT_NSGL_FIRST(mpt) - 1; + } + + se = (SGE_SIMPLE32 *) sglp; + for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) { + uint32_t tf; + + memset(se, 0,sizeof (*se)); + se->Address = htole32(dm_segs->ds_addr); + + + + MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len); + tf = flags; + if (seg == first_lim - 1) { + tf |= MPI_SGE_FLAGS_LAST_ELEMENT; + } + if (seg == nseg - 1) { + tf |= MPI_SGE_FLAGS_END_OF_LIST | + MPI_SGE_FLAGS_END_OF_BUFFER; + } + MPI_pSGE_SET_FLAGS(se, tf); + se->FlagsLength = htole32(se->FlagsLength); + } + + if (seg == nseg) { + goto out; + } + + /* + * Tell the IOC where to find the first chain element. + */ + hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2; + nxt_off = MPT_RQSL(mpt); + trq = req; + + /* + * Make up the rest of the data segments out of a chain element + * (contiained in the current request frame) which points to + * SIMPLE32 elements in the next request frame, possibly ending + * with *another* chain element (if there's more). + */ + while (seg < nseg) { + int this_seg_lim; + uint32_t tf, cur_off; + bus_addr_t chain_list_addr; + + /* + * Point to the chain descriptor. Note that the chain + * descriptor is at the end of the *previous* list (whether + * chain or simple). + */ + ce = (SGE_CHAIN32 *) se; + + /* + * Before we change our current pointer, make sure we won't + * overflow the request area with this frame. Note that we + * test against 'greater than' here as it's okay in this case + * to have next offset be just outside the request area. 
+ */ + if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) { + nxt_off = MPT_REQUEST_AREA; + goto next_chain; + } + + /* + * Set our SGE element pointer to the beginning of the chain + * list and update our next chain list offset. + */ + se = (SGE_SIMPLE32 *) &mpt_off[nxt_off]; + cur_off = nxt_off; + nxt_off += MPT_RQSL(mpt); + + /* + * Now initialized the chain descriptor. + */ + memset(ce, 0, sizeof (*ce)); + + /* + * Get the physical address of the chain list. + */ + chain_list_addr = trq->req_pbuf; + chain_list_addr += cur_off; + + + + ce->Address = htole32(chain_list_addr); + ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT; + + + /* + * If we have more than a frame's worth of segments left, + * set up the chain list to have the last element be another + * chain descriptor. + */ + if ((nseg - seg) > MPT_NSGL(mpt)) { + this_seg_lim = seg + MPT_NSGL(mpt) - 1; + /* + * The length of the chain is the length in bytes of the + * number of segments plus the next chain element. + * + * The next chain descriptor offset is the length, + * in words, of the number of segments. + */ + ce->Length = (this_seg_lim - seg) * + sizeof (SGE_SIMPLE32); + ce->NextChainOffset = ce->Length >> 2; + ce->Length += sizeof (SGE_CHAIN32); + } else { + this_seg_lim = nseg; + ce->Length = (this_seg_lim - seg) * + sizeof (SGE_SIMPLE32); + } + ce->Length = htole16(ce->Length); + + /* + * Fill in the chain list SGE elements with our segment data. + * + * If we're the last element in this chain list, set the last + * element flag. If we're the completely last element period, + * set the end of list and end of buffer flags. + */ + while (seg < this_seg_lim) { + memset(se, 0, sizeof (*se)); + se->Address = htole32(dm_segs->ds_addr); + + + + + MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len); + tf = flags; + if (seg == this_seg_lim - 1) { + tf |= MPI_SGE_FLAGS_LAST_ELEMENT; + } + if (seg == nseg - 1) { + tf |= MPI_SGE_FLAGS_END_OF_LIST | + MPI_SGE_FLAGS_END_OF_BUFFER; + } + MPI_pSGE_SET_FLAGS(se, tf); + se->FlagsLength = htole32(se->FlagsLength); + se++; + seg++; + dm_segs++; + } + + next_chain: + /* + * If we have more segments to do and we've used up all of + * the space in a request area, go allocate another one + * and chain to that. + */ + if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) { + request_t *nrq; + + CAMLOCK_2_MPTLOCK(mpt); + nrq = mpt_get_request(mpt, FALSE); + MPTLOCK_2_CAMLOCK(mpt); + + if (nrq == NULL) { + error = ENOMEM; + goto bad; + } + + /* + * Append the new request area on the tail of our list. + */ + if ((trq = req->chain) == NULL) { + req->chain = nrq; + } else { + while (trq->chain != NULL) { + trq = trq->chain; + } + trq->chain = nrq; + } + trq = nrq; + mpt_off = trq->req_vbuf; + if (mpt->verbose >= MPT_PRT_DEBUG) { + memset(mpt_off, 0xff, MPT_REQUEST_AREA); + } + nxt_off = 0; + } + } +out: + + /* + * Last time we need to check if this CCB needs to be aborted. 
+ */
+ if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
+ if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
+ request_t *cmd_req =
+ MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
+ MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
+ MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
+ MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
+ }
+ mpt_prt(mpt,
+ "mpt_execute_req: I/O cancelled (status 0x%x)\n",
+ ccb->ccb_h.status & CAM_STATUS_MASK);
+ if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
+ bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
+ }
+ ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
+ KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
+ xpt_done(ccb);
+ CAMLOCK_2_MPTLOCK(mpt);
+ mpt_free_request(mpt, req);
+ MPTLOCK_2_CAMLOCK(mpt);
+ return;
+ }
+
+ ccb->ccb_h.status |= CAM_SIM_QUEUED;
+ if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
+ mpt_req_timeout(req, (ccb->ccb_h.timeout * hz) / 1000,
+ mpt_timeout, ccb);
+ }
+ if (mpt->verbose > MPT_PRT_DEBUG) {
+ int nc = 0;
+ mpt_print_request(req->req_vbuf);
+ for (trq = req->chain; trq; trq = trq->chain) {
+ kprintf(" Additional Chain Area %d\n", nc++);
+ mpt_dump_sgl(trq->req_vbuf, 0);
+ }
+ }
+
+ if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
+ request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
+ mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
+#ifdef WE_TRUST_AUTO_GOOD_STATUS
+ if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
+ csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
+ tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
+ } else {
+ tgt->state = TGT_STATE_MOVING_DATA;
+ }
+#else
+ tgt->state = TGT_STATE_MOVING_DATA;
+#endif
+ }
+ CAMLOCK_2_MPTLOCK(mpt);
+ mpt_send_cmd(mpt, req);
+ MPTLOCK_2_CAMLOCK(mpt);
+}
+
+static void
+mpt_start(struct cam_sim *sim, union ccb *ccb)
+{
+ request_t *req;
+ struct mpt_softc *mpt;
+ MSG_SCSI_IO_REQUEST *mpt_req;
+ struct ccb_scsiio *csio = &ccb->csio;
+ struct ccb_hdr *ccbh = &ccb->ccb_h;
+ bus_dmamap_callback_t *cb;
+ target_id_t tgt;
+ int raid_passthru;
+
+ /* Get the pointer for the physical adapter */
+ mpt = ccb->ccb_h.ccb_mpt_ptr;
+ raid_passthru = (sim == mpt->phydisk_sim);
+
+ CAMLOCK_2_MPTLOCK(mpt);
+ if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
+ if (mpt->outofbeer == 0) {
+ mpt->outofbeer = 1;
+ xpt_freeze_simq(mpt->sim, 1);
+ mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
+ }
+ ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
+ mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
+ MPTLOCK_2_CAMLOCK(mpt);
+ xpt_done(ccb);
+ return;
+ }
+#ifdef INVARIANTS
+ mpt_req_not_spcl(mpt, req, "mpt_start", __LINE__);
+#endif
+ MPTLOCK_2_CAMLOCK(mpt);
+
+ if (sizeof (bus_addr_t) > 4) {
+ cb = mpt_execute_req_a64;
+ } else {
+ cb = mpt_execute_req;
+ }
+
+ /*
+ * Link the ccb and the request structure so we can find
+ * the other knowing either the request or the ccb
+ */
+ req->ccb = ccb;
+ ccb->ccb_h.ccb_req_ptr = req;
+
+ /* Now we build the command for the IOC */
+ mpt_req = req->req_vbuf;
+ memset(mpt_req, 0, sizeof (MSG_SCSI_IO_REQUEST));
+
+ mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
+ if (raid_passthru) {
+ mpt_req->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
+ CAMLOCK_2_MPTLOCK(mpt);
+ if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
+ MPTLOCK_2_CAMLOCK(mpt);
+ ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
+ mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
+ xpt_done(ccb);
+ return;
+ }
+ MPTLOCK_2_CAMLOCK(mpt);
+ mpt_req->Bus = 0; /* we never set bus here */
+ } else {
+ tgt = ccb->ccb_h.target_id;
+ mpt_req->Bus = 0; /* XXX */
+
+ }
+ mpt_req->SenseBufferLength =
+ 
(csio->sense_len < MPT_SENSE_SIZE) ? + csio->sense_len : MPT_SENSE_SIZE; + + /* + * We use the message context to find the request structure when we + * Get the command completion interrupt from the IOC. + */ + mpt_req->MsgContext = htole32(req->index | scsi_io_handler_id); + + /* Which physical device to do the I/O on */ + mpt_req->TargetID = tgt; + + /* We assume a single level LUN type */ + if (ccb->ccb_h.target_lun >= MPT_MAX_LUNS) { + mpt_req->LUN[0] = 0x40 | ((ccb->ccb_h.target_lun >> 8) & 0x3f); + mpt_req->LUN[1] = ccb->ccb_h.target_lun & 0xff; + } else { + mpt_req->LUN[1] = ccb->ccb_h.target_lun; + } + + /* Set the direction of the transfer */ + if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { + mpt_req->Control = MPI_SCSIIO_CONTROL_READ; + } else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { + mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE; + } else { + mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER; + } + + if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) { + switch(ccb->csio.tag_action) { + case MSG_HEAD_OF_Q_TAG: + mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ; + break; + case MSG_ACA_TASK: + mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ; + break; + case MSG_ORDERED_Q_TAG: + mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ; + break; + case MSG_SIMPLE_Q_TAG: + default: + mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ; + break; + } + } else { + if (mpt->is_fc || mpt->is_sas) { + mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ; + } else { + /* XXX No such thing for a target doing packetized. */ + mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED; + } + } + + if (mpt->is_spi) { + if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) { + mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT; + } + } + mpt_req->Control = htole32(mpt_req->Control); + + /* Copy the scsi command block into place */ + if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { + bcopy(csio->cdb_io.cdb_ptr, mpt_req->CDB, csio->cdb_len); + } else { + bcopy(csio->cdb_io.cdb_bytes, mpt_req->CDB, csio->cdb_len); + } + + mpt_req->CDBLength = csio->cdb_len; + mpt_req->DataLength = htole32(csio->dxfer_len); + mpt_req->SenseBufferLowAddr = htole32(req->sense_pbuf); + + /* + * Do a *short* print here if we're set to MPT_PRT_DEBUG + */ + if (mpt->verbose == MPT_PRT_DEBUG) { + U32 df; + mpt_prt(mpt, "mpt_start: %s op 0x%x ", + (mpt_req->Function == MPI_FUNCTION_SCSI_IO_REQUEST)? + "SCSI_IO_REQUEST" : "SCSI_IO_PASSTHRU", mpt_req->CDB[0]); + df = mpt_req->Control & MPI_SCSIIO_CONTROL_DATADIRECTION_MASK; + if (df != MPI_SCSIIO_CONTROL_NODATATRANSFER) { + mpt_prtc(mpt, "(%s %u byte%s ", + (df == MPI_SCSIIO_CONTROL_READ)? + "read" : "write", csio->dxfer_len, + (csio->dxfer_len == 1)? ")" : "s)"); + } + mpt_prtc(mpt, "tgt %u lun %u req %p:%u\n", tgt, + ccb->ccb_h.target_lun, req, req->serno); + } + + /* + * If we have any data to send with this command map it into bus space. + */ + if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) { + if ((ccbh->flags & CAM_SCATTER_VALID) == 0) { + /* + * We've been given a pointer to a single buffer. + */ + if ((ccbh->flags & CAM_DATA_PHYS) == 0) { + /* + * Virtual address that needs to translated into + * one or more physical address ranges. + */ + int error; + int s = splsoftvm(); + error = bus_dmamap_load(mpt->buffer_dmat, + req->dmap, csio->data_ptr, csio->dxfer_len, + cb, req, 0); + splx(s); + if (error == EINPROGRESS) { + /* + * So as to maintain ordering, + * freeze the controller queue + * until our mapping is + * returned. 
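+ *
+ * (This relies on the busdma deferred-callback
+ * contract: a bus_dmamap_load() that returns
+ * EINPROGRESS will invoke its callback later,
+ * from the busdma code, once mapping resources
+ * free up. In sketch:
+ *
+ *    error = bus_dmamap_load(tag, map,
+ *        buf, len, callback, arg, 0);
+ *    if (error == EINPROGRESS)
+ *        hold new I/O until callback fires
+ * )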
+ */ + xpt_freeze_simq(mpt->sim, 1); + ccbh->status |= CAM_RELEASE_SIMQ; + } + } else { + /* + * We have been given a pointer to single + * physical buffer. + */ + struct bus_dma_segment seg; + seg.ds_addr = + (bus_addr_t)(vm_offset_t)csio->data_ptr; + seg.ds_len = csio->dxfer_len; + (*cb)(req, &seg, 1, 0); + } + } else { + /* + * We have been given a list of addresses. + * This case could be easily supported but they are not + * currently generated by the CAM subsystem so there + * is no point in wasting the time right now. + */ + struct bus_dma_segment *segs; + if ((ccbh->flags & CAM_SG_LIST_PHYS) == 0) { + (*cb)(req, NULL, 0, EFAULT); + } else { + /* Just use the segments provided */ + segs = (struct bus_dma_segment *)csio->data_ptr; + (*cb)(req, segs, csio->sglist_cnt, 0); + } + } + } else { + (*cb)(req, NULL, 0, 0); + } +} + +static int +mpt_bus_reset(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun, + int sleep_ok) +{ + int error; + uint16_t status; + uint8_t response; + + error = mpt_scsi_send_tmf(mpt, + (tgt != CAM_TARGET_WILDCARD || lun != CAM_LUN_WILDCARD) ? + MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET : + MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS, + mpt->is_fc ? MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION : 0, + 0, /* XXX How do I get the channel ID? */ + tgt != CAM_TARGET_WILDCARD ? tgt : 0, + lun != CAM_LUN_WILDCARD ? lun : 0, + 0, sleep_ok); + + if (error != 0) { + /* + * mpt_scsi_send_tmf hard resets on failure, so no + * need to do so here. + */ + mpt_prt(mpt, + "mpt_bus_reset: mpt_scsi_send_tmf returned %d\n", error); + return (EIO); + } + + /* Wait for bus reset to be processed by the IOC. */ + error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE, + REQ_STATE_DONE, sleep_ok, 5000); + + status = le16toh(mpt->tmf_req->IOCStatus); + response = mpt->tmf_req->ResponseCode; + mpt->tmf_req->state = REQ_STATE_FREE; + + if (error) { + mpt_prt(mpt, "mpt_bus_reset: Reset timed-out. " + "Resetting controller.\n"); + mpt_reset(mpt, TRUE); + return (ETIMEDOUT); + } + + if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) { + mpt_prt(mpt, "mpt_bus_reset: TMF IOC Status 0x%x. " + "Resetting controller.\n", status); + mpt_reset(mpt, TRUE); + return (EIO); + } + + if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED && + response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) { + mpt_prt(mpt, "mpt_bus_reset: TMF Response 0x%x. 
" + "Resetting controller.\n", response); + mpt_reset(mpt, TRUE); + return (EIO); + } + return (0); +} + +static int +mpt_fc_reset_link(struct mpt_softc *mpt, int dowait) +{ + int r = 0; + request_t *req; + PTR_MSG_FC_PRIMITIVE_SEND_REQUEST fc; + + req = mpt_get_request(mpt, FALSE); + if (req == NULL) { + return (ENOMEM); + } + fc = req->req_vbuf; + memset(fc, 0, sizeof(*fc)); + fc->SendFlags = MPI_FC_PRIM_SEND_FLAGS_RESET_LINK; + fc->Function = MPI_FUNCTION_FC_PRIMITIVE_SEND; + fc->MsgContext = htole32(req->index | fc_els_handler_id); + mpt_send_cmd(mpt, req); + if (dowait) { + r = mpt_wait_req(mpt, req, REQ_STATE_DONE, + REQ_STATE_DONE, FALSE, 60 * 1000); + if (r == 0) { + mpt_free_request(mpt, req); + } + } + return (r); +} + +static int +mpt_cam_event(struct mpt_softc *mpt, request_t *req, + MSG_EVENT_NOTIFY_REPLY *msg) +{ + uint32_t data0, data1; + + data0 = le32toh(msg->Data[0]); + data1 = le32toh(msg->Data[1]); + switch(msg->Event & 0xFF) { + case MPI_EVENT_UNIT_ATTENTION: + mpt_prt(mpt, "UNIT ATTENTION: Bus: 0x%02x TargetID: 0x%02x\n", + (data0 >> 8) & 0xff, data0 & 0xff); + break; + + case MPI_EVENT_IOC_BUS_RESET: + /* We generated a bus reset */ + mpt_prt(mpt, "IOC Generated Bus Reset Port: %d\n", + (data0 >> 8) & 0xff); + xpt_async(AC_BUS_RESET, mpt->path, NULL); + break; + + case MPI_EVENT_EXT_BUS_RESET: + /* Someone else generated a bus reset */ + mpt_prt(mpt, "External Bus Reset Detected\n"); + /* + * These replies don't return EventData like the MPI + * spec says they do + */ + xpt_async(AC_BUS_RESET, mpt->path, NULL); + break; + + case MPI_EVENT_RESCAN: +#if __FreeBSD_version >= 600000 + { + union ccb *ccb; + uint32_t pathid; + /* + * In general this means a device has been added to the loop. + */ + mpt_prt(mpt, "Rescan Port: %d\n", (data0 >> 8) & 0xff); + if (mpt->ready == 0) { + break; + } + if (mpt->phydisk_sim) { + pathid = cam_sim_path(mpt->phydisk_sim); + } else { + pathid = cam_sim_path(mpt->sim); + } + MPTLOCK_2_CAMLOCK(mpt); + /* + * Allocate a CCB, create a wildcard path for this bus, + * and schedule a rescan. + */ + ccb = xpt_alloc_ccb_nowait(); + if (ccb == NULL) { + mpt_prt(mpt, "unable to alloc CCB for rescan\n"); + CAMLOCK_2_MPTLOCK(mpt); + break; + } + + if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, pathid, + CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { + CAMLOCK_2_MPTLOCK(mpt); + mpt_prt(mpt, "unable to create path for rescan\n"); + xpt_free_ccb(ccb); + break; + } + xpt_rescan(ccb); + CAMLOCK_2_MPTLOCK(mpt); + break; + } +#else + mpt_prt(mpt, "Rescan Port: %d\n", (data0 >> 8) & 0xff); + break; +#endif + case MPI_EVENT_LINK_STATUS_CHANGE: + mpt_prt(mpt, "Port %d: LinkState: %s\n", + (data1 >> 8) & 0xff, + ((data0 & 0xff) == 0)? 
"Failed" : "Active"); + break; + + case MPI_EVENT_LOOP_STATE_CHANGE: + switch ((data0 >> 16) & 0xff) { + case 0x01: + mpt_prt(mpt, + "Port 0x%x: FC LinkEvent: LIP(%02x,%02x) " + "(Loop Initialization)\n", + (data1 >> 8) & 0xff, + (data0 >> 8) & 0xff, + (data0 ) & 0xff); + switch ((data0 >> 8) & 0xff) { + case 0xF7: + if ((data0 & 0xff) == 0xF7) { + mpt_prt(mpt, "Device needs AL_PA\n"); + } else { + mpt_prt(mpt, "Device %02x doesn't like " + "FC performance\n", + data0 & 0xFF); + } + break; + case 0xF8: + if ((data0 & 0xff) == 0xF7) { + mpt_prt(mpt, "Device had loop failure " + "at its receiver prior to acquiring" + " AL_PA\n"); + } else { + mpt_prt(mpt, "Device %02x detected loop" + " failure at its receiver\n", + data0 & 0xFF); + } + break; + default: + mpt_prt(mpt, "Device %02x requests that device " + "%02x reset itself\n", + data0 & 0xFF, + (data0 >> 8) & 0xFF); + break; + } + break; + case 0x02: + mpt_prt(mpt, "Port 0x%x: FC LinkEvent: " + "LPE(%02x,%02x) (Loop Port Enable)\n", + (data1 >> 8) & 0xff, /* Port */ + (data0 >> 8) & 0xff, /* Character 3 */ + (data0 ) & 0xff /* Character 4 */); + break; + case 0x03: + mpt_prt(mpt, "Port 0x%x: FC LinkEvent: " + "LPB(%02x,%02x) (Loop Port Bypass)\n", + (data1 >> 8) & 0xff, /* Port */ + (data0 >> 8) & 0xff, /* Character 3 */ + (data0 ) & 0xff /* Character 4 */); + break; + default: + mpt_prt(mpt, "Port 0x%x: FC LinkEvent: Unknown " + "FC event (%02x %02x %02x)\n", + (data1 >> 8) & 0xff, /* Port */ + (data0 >> 16) & 0xff, /* Event */ + (data0 >> 8) & 0xff, /* Character 3 */ + (data0 ) & 0xff /* Character 4 */); + } + break; + + case MPI_EVENT_LOGOUT: + mpt_prt(mpt, "FC Logout Port: %d N_PortID: %02x\n", + (data1 >> 8) & 0xff, data0); + break; + case MPI_EVENT_QUEUE_FULL: + { + struct cam_sim *sim; + struct cam_path *tmppath; + struct ccb_relsim crs; + PTR_EVENT_DATA_QUEUE_FULL pqf; + lun_id_t lun_id; + + pqf = (PTR_EVENT_DATA_QUEUE_FULL)msg->Data; + pqf->CurrentDepth = le16toh(pqf->CurrentDepth); + mpt_prt(mpt, "QUEUE FULL EVENT: Bus 0x%02x Target 0x%02x Depth " + "%d\n", pqf->Bus, pqf->TargetID, pqf->CurrentDepth); + if (mpt->phydisk_sim) { + sim = mpt->phydisk_sim; + } else { + sim = mpt->sim; + } + MPTLOCK_2_CAMLOCK(mpt); + for (lun_id = 0; lun_id < MPT_MAX_LUNS; lun_id++) { + if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim), + pqf->TargetID, lun_id) != CAM_REQ_CMP) { + mpt_prt(mpt, "unable to create a path to send " + "XPT_REL_SIMQ"); + CAMLOCK_2_MPTLOCK(mpt); + break; + } + xpt_setup_ccb(&crs.ccb_h, tmppath, 5); + crs.ccb_h.func_code = XPT_REL_SIMQ; + crs.release_flags = RELSIM_ADJUST_OPENINGS; + crs.openings = pqf->CurrentDepth - 1; + xpt_action((union ccb *)&crs); + if (crs.ccb_h.status != CAM_REQ_CMP) { + mpt_prt(mpt, "XPT_REL_SIMQ failed\n"); + } + xpt_free_path(tmppath); + } + CAMLOCK_2_MPTLOCK(mpt); + break; + } + case MPI_EVENT_EVENT_CHANGE: + case MPI_EVENT_INTEGRATED_RAID: + case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE: + case MPI_EVENT_SAS_SES: + break; + default: + mpt_lprt(mpt, MPT_PRT_WARN, "mpt_cam_event: 0x%x\n", + msg->Event & 0xFF); + return (0); + } + return (1); +} + +/* + * Reply path for all SCSI I/O requests, called from our + * interrupt handler by extracting our handler index from + * the MsgContext field of the reply from the IOC. + * + * This routine is optimized for the common case of a + * completion without error. All exception handling is + * offloaded to non-inlined helper routines to minimize + * cache footprint. 
+ */ +static int +mpt_scsi_reply_handler(struct mpt_softc *mpt, request_t *req, + uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) +{ + MSG_SCSI_IO_REQUEST *scsi_req; + union ccb *ccb; + + if (req->state == REQ_STATE_FREE) { + mpt_prt(mpt, "mpt_scsi_reply_handler: req already free\n"); + return (TRUE); + } + + scsi_req = (MSG_SCSI_IO_REQUEST *)req->req_vbuf; + ccb = req->ccb; + if (ccb == NULL) { + mpt_prt(mpt, "mpt_scsi_reply_handler: req %p:%u with no ccb\n", + req, req->serno); + return (TRUE); + } + + mpt_req_untimeout(req, mpt_timeout, ccb); + ccb->ccb_h.status &= ~CAM_SIM_QUEUED; + + if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { + bus_dmasync_op_t op; + + if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) + op = BUS_DMASYNC_POSTREAD; + else + op = BUS_DMASYNC_POSTWRITE; + bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op); + bus_dmamap_unload(mpt->buffer_dmat, req->dmap); + } + + if (reply_frame == NULL) { + /* + * Context only reply, completion without error status. + */ + ccb->csio.resid = 0; + mpt_set_ccb_status(ccb, CAM_REQ_CMP); + ccb->csio.scsi_status = SCSI_STATUS_OK; + } else { + mpt_scsi_reply_frame_handler(mpt, req, reply_frame); + } + + if (mpt->outofbeer) { + ccb->ccb_h.status |= CAM_RELEASE_SIMQ; + mpt->outofbeer = 0; + mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n"); + } + if (scsi_req->CDB[0] == INQUIRY && (scsi_req->CDB[1] & SI_EVPD) == 0) { + struct scsi_inquiry_data *iq = + (struct scsi_inquiry_data *)ccb->csio.data_ptr; + if (scsi_req->Function == + MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) { + /* + * Fake out the device type so that only the + * pass-thru device will attach. + */ + iq->device &= ~0x1F; + iq->device |= T_NODEVICE; + } + } + if (mpt->verbose == MPT_PRT_DEBUG) { + mpt_prt(mpt, "mpt_scsi_reply_handler: %p:%u complete\n", + req, req->serno); + } + KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__)); + MPTLOCK_2_CAMLOCK(mpt); + xpt_done(ccb); + CAMLOCK_2_MPTLOCK(mpt); + if ((req->state & REQ_STATE_TIMEDOUT) == 0) { + TAILQ_REMOVE(&mpt->request_pending_list, req, links); + } else { + mpt_prt(mpt, "completing timedout/aborted req %p:%u\n", + req, req->serno); + TAILQ_REMOVE(&mpt->request_timeout_list, req, links); + } + KASSERT((req->state & REQ_STATE_NEED_WAKEUP) == 0, + ("CCB req needed wakeup")); +#ifdef INVARIANTS + mpt_req_not_spcl(mpt, req, "mpt_scsi_reply_handler", __LINE__); +#endif + mpt_free_request(mpt, req); + return (TRUE); +} + +static int +mpt_scsi_tmf_reply_handler(struct mpt_softc *mpt, request_t *req, + uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) +{ + MSG_SCSI_TASK_MGMT_REPLY *tmf_reply; + + KASSERT(req == mpt->tmf_req, ("TMF Reply not using mpt->tmf_req")); +#ifdef INVARIANTS + mpt_req_not_spcl(mpt, req, "mpt_scsi_tmf_reply_handler", __LINE__); +#endif + tmf_reply = (MSG_SCSI_TASK_MGMT_REPLY *)reply_frame; + /* Record IOC Status and Response Code of TMF for any waiters. 
*/ + req->IOCStatus = le16toh(tmf_reply->IOCStatus); + req->ResponseCode = tmf_reply->ResponseCode; + + mpt_lprt(mpt, MPT_PRT_DEBUG, "TMF complete: req %p:%u status 0x%x\n", + req, req->serno, le16toh(tmf_reply->IOCStatus)); + TAILQ_REMOVE(&mpt->request_pending_list, req, links); + if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) { + req->state |= REQ_STATE_DONE; + wakeup(req); + } else { + mpt->tmf_req->state = REQ_STATE_FREE; + } + return (TRUE); +} + +/* + * XXX: Move to definitions file + */ +#define ELS 0x22 +#define FC4LS 0x32 +#define ABTS 0x81 +#define BA_ACC 0x84 + +#define LS_RJT 0x01 +#define LS_ACC 0x02 +#define PLOGI 0x03 +#define LOGO 0x05 +#define SRR 0x14 +#define PRLI 0x20 +#define PRLO 0x21 +#define ADISC 0x52 +#define RSCN 0x61 + +static void +mpt_fc_els_send_response(struct mpt_softc *mpt, request_t *req, + PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp, U8 length) +{ + uint32_t fl; + MSG_LINK_SERVICE_RSP_REQUEST tmp; + PTR_MSG_LINK_SERVICE_RSP_REQUEST rsp; + + /* + * We are going to reuse the ELS request to send this response back. + */ + rsp = &tmp; + memset(rsp, 0, sizeof(*rsp)); + +#ifdef USE_IMMEDIATE_LINK_DATA + /* + * Apparently the IMMEDIATE stuff doesn't seem to work. + */ + rsp->RspFlags = LINK_SERVICE_RSP_FLAGS_IMMEDIATE; +#endif + rsp->RspLength = length; + rsp->Function = MPI_FUNCTION_FC_LINK_SRVC_RSP; + rsp->MsgContext = htole32(req->index | fc_els_handler_id); + + /* + * Copy over information from the original reply frame to + * it's correct place in the response. + */ + memcpy((U8 *)rsp + 0x0c, (U8 *)rp + 0x1c, 24); + + /* + * And now copy back the temporary area to the original frame. + */ + memcpy(req->req_vbuf, rsp, sizeof (MSG_LINK_SERVICE_RSP_REQUEST)); + rsp = req->req_vbuf; + +#ifdef USE_IMMEDIATE_LINK_DATA + memcpy((U8 *)&rsp->SGL, &((U8 *)req->req_vbuf)[MPT_RQSL(mpt)], length); +#else +{ + PTR_SGE_SIMPLE32 se = (PTR_SGE_SIMPLE32) &rsp->SGL; + bus_addr_t paddr = req->req_pbuf; + paddr += MPT_RQSL(mpt); + + fl = + MPI_SGE_FLAGS_HOST_TO_IOC | + MPI_SGE_FLAGS_SIMPLE_ELEMENT | + MPI_SGE_FLAGS_LAST_ELEMENT | + MPI_SGE_FLAGS_END_OF_LIST | + MPI_SGE_FLAGS_END_OF_BUFFER; + fl <<= MPI_SGE_FLAGS_SHIFT; + fl |= (length); + se->FlagsLength = htole32(fl); + se->Address = htole32((uint32_t) paddr); +} +#endif + + /* + * Send it on... 
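+ *
+ * For reference, the FlagsLength composed above packs the SGE flags
+ * into the top byte. Assuming the standard mpi.h flag values
+ * (SIMPLE 0x10, LAST_ELEMENT 0x80, END_OF_BUFFER 0x40,
+ * END_OF_LIST 0x01, HOST_TO_IOC 0x04), a 20-byte response works
+ * out to:
+ *
+ *    fl = (0xd5 << MPI_SGE_FLAGS_SHIFT) | 20;    == 0xd5000014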
+ */
+ mpt_send_cmd(mpt, req);
+}
+
+static int
+mpt_fc_els_reply_handler(struct mpt_softc *mpt, request_t *req,
+ uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
+{
+ PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp =
+ (PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY) reply_frame;
+ U8 rctl;
+ U8 type;
+ U8 cmd;
+ U16 status = le16toh(reply_frame->IOCStatus);
+ U32 *elsbuf;
+ int ioindex;
+ int do_refresh = TRUE;
+
+#ifdef INVARIANTS
+ KASSERT(mpt_req_on_free_list(mpt, req) == 0,
+ ("fc_els_reply_handler: req %p:%u for function %x on freelist!",
+ req, req->serno, rp->Function));
+ if (rp->Function != MPI_FUNCTION_FC_PRIMITIVE_SEND) {
+ mpt_req_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
+ } else {
+ mpt_req_not_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
+ }
+#endif
+ mpt_lprt(mpt, MPT_PRT_DEBUG,
+ "FC_ELS Complete: req %p:%u, reply %p function %x\n",
+ req, req->serno, reply_frame, reply_frame->Function);
+
+ if (status != MPI_IOCSTATUS_SUCCESS) {
+ mpt_prt(mpt, "ELS REPLY STATUS 0x%x for Function %x\n",
+ status, reply_frame->Function);
+ if (status == MPI_IOCSTATUS_INVALID_STATE) {
+ /*
+ * XXX: to get around shutdown issue
+ */
+ mpt->disabled = 1;
+ return (TRUE);
+ }
+ return (TRUE);
+ }
+
+ /*
+ * If the function is a link service response, we recycle the
+ * request as a buffer post to refresh it for a new link service
+ * request.
+ *
+ * The request pointer is bogus in this case and we have to fetch
+ * it based upon the TransactionContext.
+ */
+ if (rp->Function == MPI_FUNCTION_FC_LINK_SRVC_RSP) {
+ /* Freddie Uncle Charlie Katie */
+ /* We don't get the IOINDEX as part of the Link Svc Rsp */
+ for (ioindex = 0; ioindex < mpt->els_cmds_allocated; ioindex++)
+ if (mpt->els_cmd_ptrs[ioindex] == req) {
+ break;
+ }
+
+ KASSERT(ioindex < mpt->els_cmds_allocated,
+ ("can't find my mommie!"));
+
+ /* remove from active list as we're going to re-post it */
+ TAILQ_REMOVE(&mpt->request_pending_list, req, links);
+ req->state &= ~REQ_STATE_QUEUED;
+ req->state |= REQ_STATE_DONE;
+ mpt_fc_post_els(mpt, req, ioindex);
+ return (TRUE);
+ }
+
+ if (rp->Function == MPI_FUNCTION_FC_PRIMITIVE_SEND) {
+ /* remove from active list as we're done */
+ TAILQ_REMOVE(&mpt->request_pending_list, req, links);
+ req->state &= ~REQ_STATE_QUEUED;
+ req->state |= REQ_STATE_DONE;
+ if (req->state & REQ_STATE_TIMEDOUT) {
+ mpt_lprt(mpt, MPT_PRT_DEBUG,
+ "Sync Primitive Send Completed After Timeout\n");
+ mpt_free_request(mpt, req);
+ } else if ((req->state & REQ_STATE_NEED_WAKEUP) == 0) {
+ mpt_lprt(mpt, MPT_PRT_DEBUG,
+ "Async Primitive Send Complete\n");
+ mpt_free_request(mpt, req);
+ } else {
+ mpt_lprt(mpt, MPT_PRT_DEBUG,
+ "Sync Primitive Send Complete- Waking Waiter\n");
+ wakeup(req);
+ }
+ return (TRUE);
+ }
+
+ if (rp->Function != MPI_FUNCTION_FC_LINK_SRVC_BUF_POST) {
+ mpt_prt(mpt, "unexpected ELS_REPLY: Function 0x%x Flags %x "
+ "Length %d Message Flags %x\n", rp->Function, rp->Flags,
+ rp->MsgLength, rp->MsgFlags);
+ return (TRUE);
+ }
+
+ if (rp->MsgLength <= 5) {
+ /*
+ * This is just an ack of an original ELS buffer post
+ */
+ mpt_lprt(mpt, MPT_PRT_DEBUG,
+ "RECV'd ACK of FC_ELS buf post %p:%u\n", req, req->serno);
+ return (TRUE);
+ }
+
+
+ rctl = (le32toh(rp->Rctl_Did) & MPI_FC_RCTL_MASK) >> MPI_FC_RCTL_SHIFT;
+ type = (le32toh(rp->Type_Fctl) & MPI_FC_TYPE_MASK) >> MPI_FC_TYPE_SHIFT;
+
+ elsbuf = &((U32 *)req->req_vbuf)[MPT_RQSL(mpt)/sizeof (U32)];
+ cmd = be32toh(elsbuf[0]) >> 24;
+
+ if (rp->Flags & MPI_LS_BUF_POST_REPLY_FLAG_NO_RSP_NEEDED) {
+ mpt_lprt(mpt, MPT_PRT_ALWAYS, "ELS_REPLY: response 
+
+    ioindex = le32toh(rp->TransactionContext);
+    req = mpt->els_cmd_ptrs[ioindex];
+
+    if (rctl == ELS && type == 1) {
+        switch (cmd) {
+        case PRLI:
+            /*
+             * Send back a PRLI ACC
+             */
+            mpt_prt(mpt, "PRLI from 0x%08x%08x\n",
+                le32toh(rp->Wwn.PortNameHigh),
+                le32toh(rp->Wwn.PortNameLow));
+            elsbuf[0] = htobe32(0x02100014);
+            elsbuf[1] |= htobe32(0x00000100);
+            elsbuf[4] = htobe32(0x00000002);
+            if (mpt->role & MPT_ROLE_TARGET)
+                elsbuf[4] |= htobe32(0x00000010);
+            if (mpt->role & MPT_ROLE_INITIATOR)
+                elsbuf[4] |= htobe32(0x00000020);
+            /* remove from active list as we're done */
+            TAILQ_REMOVE(&mpt->request_pending_list, req, links);
+            req->state &= ~REQ_STATE_QUEUED;
+            req->state |= REQ_STATE_DONE;
+            mpt_fc_els_send_response(mpt, req, rp, 20);
+            do_refresh = FALSE;
+            break;
+        case PRLO:
+            memset(elsbuf, 0, 5 * (sizeof (U32)));
+            elsbuf[0] = htobe32(0x02100014);
+            elsbuf[1] = htobe32(0x08000100);
+            mpt_prt(mpt, "PRLO from 0x%08x%08x\n",
+                le32toh(rp->Wwn.PortNameHigh),
+                le32toh(rp->Wwn.PortNameLow));
+            /* remove from active list as we're done */
+            TAILQ_REMOVE(&mpt->request_pending_list, req, links);
+            req->state &= ~REQ_STATE_QUEUED;
+            req->state |= REQ_STATE_DONE;
+            mpt_fc_els_send_response(mpt, req, rp, 20);
+            do_refresh = FALSE;
+            break;
+        default:
+            mpt_prt(mpt, "ELS TYPE 1 COMMAND: %x\n", cmd);
+            break;
+        }
+    } else if (rctl == ABTS && type == 0) {
+        uint16_t rx_id = le16toh(rp->Rxid);
+        uint16_t ox_id = le16toh(rp->Oxid);
+        request_t *tgt_req = NULL;
+
+        mpt_prt(mpt,
+            "ELS: ABTS OX_ID 0x%x RX_ID 0x%x from 0x%08x%08x\n",
+            ox_id, rx_id, le32toh(rp->Wwn.PortNameHigh),
+            le32toh(rp->Wwn.PortNameLow));
+        if (rx_id >= mpt->mpt_max_tgtcmds) {
+            mpt_prt(mpt, "Bad RX_ID 0x%x\n", rx_id);
+        } else if (mpt->tgt_cmd_ptrs == NULL) {
+            mpt_prt(mpt, "No TGT CMD PTRS\n");
+        } else {
+            tgt_req = mpt->tgt_cmd_ptrs[rx_id];
+        }
+        if (tgt_req) {
+            mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, tgt_req);
+            union ccb *ccb = tgt->ccb;
+            uint32_t ct_id;
+
+            /*
+             * Check to make sure we have the correct command.
+             * The reply descriptor in the target state should
+             * contain an IoIndex that should match the RX_ID.
+             *
+             * It'd be nice to have OX_ID to crosscheck with
+             * as well.
+             */
+            ct_id = GET_IO_INDEX(tgt->reply_desc);
+
+            if (ct_id != rx_id) {
+                mpt_lprt(mpt, MPT_PRT_ERROR, "ABORT Mismatch: "
+                    "RX_ID received=0x%x; RX_ID in cmd=0x%x\n",
+                    rx_id, ct_id);
+                goto skip;
+            }
+
+            ccb = tgt->ccb;
+            if (ccb) {
+                mpt_prt(mpt,
+                    "CCB (%p): lun %u flags %x status %x\n",
+                    ccb, ccb->ccb_h.target_lun,
+                    ccb->ccb_h.flags, ccb->ccb_h.status);
+            }
+            mpt_prt(mpt, "target state 0x%x resid %u xfrd %u rpwrd "
+                "%x nxfers %x\n", tgt->state,
+                tgt->resid, tgt->bytes_xfered, tgt->reply_desc,
+                tgt->nxfers);
+    skip:
+            if (mpt_abort_target_cmd(mpt, tgt_req)) {
+                mpt_prt(mpt, "unable to start TargetAbort\n");
+            }
+        } else {
+            mpt_prt(mpt, "no back pointer for RX_ID 0x%x\n", rx_id);
+        }
+        memset(elsbuf, 0, 5 * (sizeof (U32)));
+        elsbuf[0] = htobe32(0);
+        elsbuf[1] = htobe32((ox_id << 16) | rx_id);
+        elsbuf[2] = htobe32(0x000ffff);
+        /*
+         * Dork with the reply frame so that the response to it
+         * will be correct.
+         */
+        rp->Rctl_Did += ((BA_ACC - ABTS) << MPI_FC_RCTL_SHIFT);
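The in-place adjustment above works because BA_ACC and ABTS differ only in the R_CTL field of that header word, so adding the delta rewrites just that field. A stand-alone sketch of the same rewrite done with an explicit mask instead of addition; the shift/mask values are illustrative stand-ins for the MPI_FC_RCTL_* definitions:

    #include <stdint.h>
    #include <assert.h>

    #define ABTS   0x81
    #define BA_ACC 0x84

    /* Illustrative stand-ins: R_CTL occupies the top byte of the
     * Rctl_Did word in the FC frame header. */
    #define FC_RCTL_SHIFT 24u
    #define FC_RCTL_MASK  (0xffu << FC_RCTL_SHIFT)

    /* Replace the R_CTL field outright; equivalent to adding
     * (BA_ACC - ABTS) << FC_RCTL_SHIFT when the field held ABTS. */
    static uint32_t
    fc_set_rctl(uint32_t rctl_did, uint32_t rctl)
    {
        return (rctl_did & ~FC_RCTL_MASK) | (rctl << FC_RCTL_SHIFT);
    }

    int
    main(void)
    {
        uint32_t hdr = ((uint32_t)ABTS << FC_RCTL_SHIFT) | 0x123456;

        hdr = fc_set_rctl(hdr, BA_ACC);
        assert((hdr >> FC_RCTL_SHIFT) == BA_ACC);
        assert((hdr & 0x00ffffffu) == 0x123456);  /* D_ID untouched */
        return (0);
    }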
+        /* remove from active list as we're done */
+        TAILQ_REMOVE(&mpt->request_pending_list, req, links);
+        req->state &= ~REQ_STATE_QUEUED;
+        req->state |= REQ_STATE_DONE;
+        mpt_fc_els_send_response(mpt, req, rp, 12);
+        do_refresh = FALSE;
+    } else {
+        mpt_prt(mpt, "ELS: RCTL %x TYPE %x CMD %x\n", rctl, type, cmd);
+    }
+    if (do_refresh == TRUE) {
+        /* remove from active list as we're done */
+        TAILQ_REMOVE(&mpt->request_pending_list, req, links);
+        req->state &= ~REQ_STATE_QUEUED;
+        req->state |= REQ_STATE_DONE;
+        mpt_fc_post_els(mpt, req, ioindex);
+    }
+    return (TRUE);
+}
+
+/*
+ * Clean up all SCSI Initiator personality state in response
+ * to a controller reset.
+ */
+static void
+mpt_cam_ioc_reset(struct mpt_softc *mpt, int type)
+{
+    /*
+     * The pending list is already run down by
+     * the generic handler.  Perform the same
+     * operation on the timed out request list.
+     */
+    mpt_complete_request_chain(mpt, &mpt->request_timeout_list,
+        MPI_IOCSTATUS_INVALID_STATE);
+
+    /*
+     * XXX: We need to repost ELS and Target Command Buffers?
+     */
+
+    /*
+     * Inform the XPT that a bus reset has occurred.
+     */
+    xpt_async(AC_BUS_RESET, mpt->path, NULL);
+}
+
+/*
+ * Parse additional completion information in the reply
+ * frame for SCSI I/O requests.
+ */
+static int
+mpt_scsi_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
+    MSG_DEFAULT_REPLY *reply_frame)
+{
+    union ccb *ccb;
+    MSG_SCSI_IO_REPLY *scsi_io_reply;
+    u_int ioc_status;
+    u_int sstate;
+
+    MPT_DUMP_REPLY_FRAME(mpt, reply_frame);
+    KASSERT(reply_frame->Function == MPI_FUNCTION_SCSI_IO_REQUEST
+        || reply_frame->Function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH,
+        ("MPT SCSI I/O Handler called with incorrect reply type"));
+    KASSERT((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0,
+        ("MPT SCSI I/O Handler called with continuation reply"));
+
+    scsi_io_reply = (MSG_SCSI_IO_REPLY *)reply_frame;
+    ioc_status = le16toh(scsi_io_reply->IOCStatus);
+    ioc_status &= MPI_IOCSTATUS_MASK;
+    sstate = scsi_io_reply->SCSIState;
+
+    ccb = req->ccb;
+    ccb->csio.resid =
+        ccb->csio.dxfer_len - le32toh(scsi_io_reply->TransferCount);
+
+    if ((sstate & MPI_SCSI_STATE_AUTOSENSE_VALID) != 0
+     && (ccb->ccb_h.flags & (CAM_SENSE_PHYS | CAM_SENSE_PTR)) == 0) {
+        ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
+        ccb->csio.sense_resid =
+            ccb->csio.sense_len - le32toh(scsi_io_reply->SenseCount);
+        bcopy(req->sense_vbuf, &ccb->csio.sense_data,
+            min(ccb->csio.sense_len,
+            le32toh(scsi_io_reply->SenseCount)));
+    }
+
+    if ((sstate & MPI_SCSI_STATE_QUEUE_TAG_REJECTED) != 0) {
+        /*
+         * Tag messages rejected, but non-tagged retry
+         * was successful.
+XXXX
+        mpt_set_tags(mpt, devinfo, MPT_QUEUE_NONE);
+        */
+    }
+
+    switch(ioc_status) {
+    case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
+        /*
+         * XXX
+         * Linux driver indicates that a zero
+         * transfer length with this error code
+         * indicates a CRC error.
+         *
+         * No need to swap the bytes for checking
+         * against zero.
+         */
+        if (scsi_io_reply->TransferCount == 0) {
+            mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
+            break;
+        }
+        /* FALLTHROUGH */
+    case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
+    case MPI_IOCSTATUS_SUCCESS:
+    case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
+        if ((sstate & MPI_SCSI_STATE_NO_SCSI_STATUS) != 0) {
+            /*
+             * Status was never returned for this transaction.
+             */
+            mpt_set_ccb_status(ccb, CAM_UNEXP_BUSFREE);
+        } else if (scsi_io_reply->SCSIStatus != SCSI_STATUS_OK) {
+            ccb->csio.scsi_status = scsi_io_reply->SCSIStatus;
+            mpt_set_ccb_status(ccb, CAM_SCSI_STATUS_ERROR);
+            if ((sstate & MPI_SCSI_STATE_AUTOSENSE_FAILED) != 0)
+                mpt_set_ccb_status(ccb, CAM_AUTOSENSE_FAIL);
+        } else if ((sstate & MPI_SCSI_STATE_RESPONSE_INFO_VALID) != 0) {
+
+            /* XXX Handle SPI-Packet and FCP-2 response info. */
+            mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
+        } else
+            mpt_set_ccb_status(ccb, CAM_REQ_CMP);
+        break;
+    case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
+        mpt_set_ccb_status(ccb, CAM_DATA_RUN_ERR);
+        break;
+    case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR:
+        mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
+        break;
+    case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
+        /*
+         * Since selection timeouts and "device really not
+         * there" are grouped into this error code, report
+         * selection timeout.  Selection timeouts are
+         * typically retried before giving up on the device
+         * whereas "device not there" errors are considered
+         * unretryable.
+         */
+        mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
+        break;
+    case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:
+        mpt_set_ccb_status(ccb, CAM_SEQUENCE_FAIL);
+        break;
+    case MPI_IOCSTATUS_SCSI_INVALID_BUS:
+        mpt_set_ccb_status(ccb, CAM_PATH_INVALID);
+        break;
+    case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
+        mpt_set_ccb_status(ccb, CAM_TID_INVALID);
+        break;
+    case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
+        ccb->ccb_h.status = CAM_UA_TERMIO;
+        break;
+    case MPI_IOCSTATUS_INVALID_STATE:
+        /*
+         * The IOC has been reset.  Emulate a bus reset.
+         */
+        /* FALLTHROUGH */
+    case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
+        ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
+        break;
+    case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
+    case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
+        /*
+         * Don't clobber any timeout status that has
+         * already been set for this transaction.  We
+         * want the SCSI layer to be able to differentiate
+         * between the command we aborted due to timeout
+         * and any innocent bystanders.
+         */
+        if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG)
+            break;
+        mpt_set_ccb_status(ccb, CAM_REQ_TERMIO);
+        break;
+
+    case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
+        mpt_set_ccb_status(ccb, CAM_RESRC_UNAVAIL);
+        break;
+    case MPI_IOCSTATUS_BUSY:
+        mpt_set_ccb_status(ccb, CAM_BUSY);
+        break;
+    case MPI_IOCSTATUS_INVALID_FUNCTION:
+    case MPI_IOCSTATUS_INVALID_SGL:
+    case MPI_IOCSTATUS_INTERNAL_ERROR:
+    case MPI_IOCSTATUS_INVALID_FIELD:
+    default:
+        /* XXX
+         * Some of the above may need to kick
+         * off a recovery action!!!!
+         */
+        ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
+        break;
+    }
+
+    if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
+        mpt_freeze_ccb(ccb);
+    }
+
+    return (TRUE);
+}
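Where the IOCStatus-to-CAM translation in the switch above is context-free (no inspection of SCSIState or the CCB), it can also be expressed as a lookup table. A stand-alone sketch under that assumption; the enum values are illustrative placeholders, not the real MPI_IOCSTATUS_* or CAM_* constants:

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Placeholder code values standing in for MPI_IOCSTATUS_* / CAM_*. */
    enum { IOC_SUCCESS = 0x0000, IOC_BUSY = 0x0002,
           IOC_DEVICE_NOT_THERE = 0x0043, IOC_DATA_OVERRUN = 0x0044 };
    enum { CAM_OK_ = 0x00, CAM_BUSY_ = 0x05, CAM_SEL_TIMEOUT_ = 0x0a,
           CAM_DATA_RUN_ERR_ = 0x14, CAM_UNREC_HBA_ERR_ = 0x11 };

    static const struct {
        uint16_t ioc;
        uint8_t cam;
    } ioc2cam[] = {
        { IOC_SUCCESS,          CAM_OK_ },
        { IOC_BUSY,             CAM_BUSY_ },
        { IOC_DEVICE_NOT_THERE, CAM_SEL_TIMEOUT_ },
        { IOC_DATA_OVERRUN,     CAM_DATA_RUN_ERR_ },
    };

    static uint8_t
    ioc_to_cam(uint16_t ioc)
    {
        size_t i;

        for (i = 0; i < sizeof(ioc2cam) / sizeof(ioc2cam[0]); i++)
            if (ioc2cam[i].ioc == ioc)
                return (ioc2cam[i].cam);
        /* default, as in the switch: unrecoverable HBA error */
        return (CAM_UNREC_HBA_ERR_);
    }

    int
    main(void)
    {
        printf("0x%02x\n", ioc_to_cam(IOC_DEVICE_NOT_THERE));
        return (0);
    }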
+
+static void
+mpt_action(struct cam_sim *sim, union ccb *ccb)
+{
+    struct mpt_softc *mpt;
+    struct ccb_trans_settings *cts;
+    target_id_t tgt;
+    lun_id_t lun;
+    int raid_passthru;
+
+    CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mpt_action\n"));
+
+    mpt = (struct mpt_softc *)cam_sim_softc(sim);
+    raid_passthru = (sim == mpt->phydisk_sim);
+    MPT_LOCK_ASSERT(mpt);
+
+    tgt = ccb->ccb_h.target_id;
+    lun = ccb->ccb_h.target_lun;
+    if (raid_passthru &&
+        ccb->ccb_h.func_code != XPT_PATH_INQ &&
+        ccb->ccb_h.func_code != XPT_RESET_BUS &&
+        ccb->ccb_h.func_code != XPT_RESET_DEV) {
+        CAMLOCK_2_MPTLOCK(mpt);
+        if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
+            MPTLOCK_2_CAMLOCK(mpt);
+            ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
+            mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
+            xpt_done(ccb);
+            return;
+        }
+        MPTLOCK_2_CAMLOCK(mpt);
+    }
+    ccb->ccb_h.ccb_mpt_ptr = mpt;
+
+    switch (ccb->ccb_h.func_code) {
+    case XPT_SCSI_IO:    /* Execute the requested I/O operation */
+        /*
+         * Do a couple of preliminary checks...
+         */
+        if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
+            if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
+                ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
+                mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
+                break;
+            }
+        }
+        /* Max supported CDB length is 16 bytes */
+        /* XXX Unless we implement the new 32byte message type */
+        if (ccb->csio.cdb_len >
+            sizeof (((PTR_MSG_SCSI_IO_REQUEST)0)->CDB)) {
+            ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
+            mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
+            break;
+        }
+#ifdef MPT_TEST_MULTIPATH
+        if (mpt->failure_id == ccb->ccb_h.target_id) {
+            ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
+            mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
+            break;
+        }
+#endif
+        ccb->csio.scsi_status = SCSI_STATUS_OK;
+        mpt_start(sim, ccb);
+        return;
+
+    case XPT_RESET_BUS:
+        if (raid_passthru) {
+            ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
+            mpt_set_ccb_status(ccb, CAM_REQ_CMP);
+            break;
+        }
+        /* FALLTHROUGH */
+    case XPT_RESET_DEV:
+        if (ccb->ccb_h.func_code == XPT_RESET_BUS) {
+            if (bootverbose) {
+                xpt_print(ccb->ccb_h.path, "reset bus\n");
+            }
+        } else {
+            xpt_print(ccb->ccb_h.path, "reset device\n");
+        }
+        CAMLOCK_2_MPTLOCK(mpt);
+        (void) mpt_bus_reset(mpt, tgt, lun, FALSE);
+        MPTLOCK_2_CAMLOCK(mpt);
+
+        /*
+         * mpt_bus_reset is always successful in that it
+         * will fall back to a hard reset should a bus
+         * reset attempt fail.
+         */
+        ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
+        mpt_set_ccb_status(ccb, CAM_REQ_CMP);
+        break;
+
+    case XPT_ABORT:
+    {
+        union ccb *accb = ccb->cab.abort_ccb;
+        CAMLOCK_2_MPTLOCK(mpt);
+        switch (accb->ccb_h.func_code) {
+        case XPT_ACCEPT_TARGET_IO:
+        case XPT_IMMED_NOTIFY:
+            ccb->ccb_h.status = mpt_abort_target_ccb(mpt, ccb);
+            break;
+        case XPT_CONT_TARGET_IO:
+            mpt_prt(mpt, "cannot abort active CTIOs yet\n");
+            ccb->ccb_h.status = CAM_UA_ABORT;
+            break;
+        case XPT_SCSI_IO:
+            ccb->ccb_h.status = CAM_UA_ABORT;
+            break;
+        default:
+            ccb->ccb_h.status = CAM_REQ_INVALID;
+            break;
+        }
+        MPTLOCK_2_CAMLOCK(mpt);
+        break;
+    }
+
+#ifdef CAM_NEW_TRAN_CODE
+#define IS_CURRENT_SETTINGS(c)  ((c)->type == CTS_TYPE_CURRENT_SETTINGS)
+#else
+#define IS_CURRENT_SETTINGS(c)  ((c)->flags & CCB_TRANS_CURRENT_SETTINGS)
+#endif
+#define DP_DISC_ENABLE  0x1
+#define DP_DISC_DISABL  0x2
+#define DP_DISC         (DP_DISC_ENABLE|DP_DISC_DISABL)
+
+#define DP_TQING_ENABLE 0x4
+#define DP_TQING_DISABL 0x8
+#define DP_TQING        (DP_TQING_ENABLE|DP_TQING_DISABL)
+
+#define DP_WIDE         0x10
+#define DP_NARROW       0x20
+#define DP_WIDTH        (DP_WIDE|DP_NARROW)
+
+#define DP_SYNC         0x40
+
+    case XPT_SET_TRAN_SETTINGS:    /* Nexus Settings */
+    {
+#ifdef CAM_NEW_TRAN_CODE
+        struct ccb_trans_settings_scsi *scsi;
+        struct ccb_trans_settings_spi *spi;
+#endif
+        uint8_t dval;
+        u_int period;
+        u_int offset;
+        int i, j;
+
+        cts = &ccb->cts;
+
+        if (mpt->is_fc || mpt->is_sas) {
+            mpt_set_ccb_status(ccb, CAM_REQ_CMP);
+            break;
+        }
+
+#ifdef CAM_NEW_TRAN_CODE
+        scsi = &cts->proto_specific.scsi;
+        spi = &cts->xport_specific.spi;
+
+        /*
+         * We can be called just to validate transport and protocol versions
+         */
+        if (scsi->valid == 0 && spi->valid == 0) {
+            mpt_set_ccb_status(ccb, CAM_REQ_CMP);
+            break;
+        }
+#endif
+
+        /*
+         * Skip attempting settings on RAID volume disks.
+         * Other devices on the bus get the normal treatment.
+         */
+        if (mpt->phydisk_sim && raid_passthru == 0 &&
+            mpt_is_raid_volume(mpt, tgt) != 0) {
+            mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
+                "no transfer settings for RAID vols\n");
+            mpt_set_ccb_status(ccb, CAM_REQ_CMP);
+            break;
+        }
+
+        i = mpt->mpt_port_page2.PortSettings &
+            MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
+        j = mpt->mpt_port_page2.PortFlags &
+            MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK;
+        if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS &&
+            j == MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV) {
+            mpt_lprt(mpt, MPT_PRT_ALWAYS,
+                "honoring BIOS transfer negotiations\n");
+            mpt_set_ccb_status(ccb, CAM_REQ_CMP);
+            break;
+        }
+
+        dval = 0;
+        period = 0;
+        offset = 0;
+
+#ifndef CAM_NEW_TRAN_CODE
+        if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
+            dval |= (cts->flags & CCB_TRANS_DISC_ENB) ?
+                DP_DISC_ENABLE : DP_DISC_DISABL;
+        }
+
+        if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
+            dval |= (cts->flags & CCB_TRANS_TAG_ENB) ?
+                DP_TQING_ENABLE : DP_TQING_DISABL;
+        }
+
+        if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) {
+            dval |= cts->bus_width ? DP_WIDE : DP_NARROW;
+        }
+
+        if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) &&
+            (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID)) {
+            dval |= DP_SYNC;
+            period = cts->sync_period;
+            offset = cts->sync_offset;
+        }
+#else
+        if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
+            dval |= ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) ?
+                DP_DISC_ENABLE : DP_DISC_DISABL;
+        }
+
+        if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
+            dval |= ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) ?
+                DP_TQING_ENABLE : DP_TQING_DISABL;
+        }
+
+        if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
+            dval |= (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) ?
+                DP_WIDE : DP_NARROW;
+        }
+
+        if (spi->valid & CTS_SPI_VALID_SYNC_OFFSET) {
+            dval |= DP_SYNC;
+            offset = spi->sync_offset;
+        } else {
+            PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr =
+                &mpt->mpt_dev_page1[tgt];
+            offset = ptr->RequestedParameters;
+            offset &= MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
+            offset >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET;
+        }
+        if (spi->valid & CTS_SPI_VALID_SYNC_RATE) {
+            dval |= DP_SYNC;
+            period = spi->sync_period;
+        } else {
+            PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr =
+                &mpt->mpt_dev_page1[tgt];
+            period = ptr->RequestedParameters;
+            period &= MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
+            period >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD;
+        }
+#endif
+        CAMLOCK_2_MPTLOCK(mpt);
+        if (dval & DP_DISC_ENABLE) {
+            mpt->mpt_disc_enable |= (1 << tgt);
+        } else if (dval & DP_DISC_DISABL) {
+            mpt->mpt_disc_enable &= ~(1 << tgt);
+        }
+        if (dval & DP_TQING_ENABLE) {
+            mpt->mpt_tag_enable |= (1 << tgt);
+        } else if (dval & DP_TQING_DISABL) {
+            mpt->mpt_tag_enable &= ~(1 << tgt);
+        }
+        if (dval & DP_WIDTH) {
+            mpt_setwidth(mpt, tgt, 1);
+        }
+        if (dval & DP_SYNC) {
+            mpt_setsync(mpt, tgt, period, offset);
+        }
+        if (dval == 0) {
+            MPTLOCK_2_CAMLOCK(mpt);
+            mpt_set_ccb_status(ccb, CAM_REQ_CMP);
+            break;
+        }
+        mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
+            "set [%d]: 0x%x period 0x%x offset %d\n",
+            tgt, dval, period, offset);
+        if (mpt_update_spi_config(mpt, tgt)) {
+            mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
+        } else {
+            mpt_set_ccb_status(ccb, CAM_REQ_CMP);
+        }
+        MPTLOCK_2_CAMLOCK(mpt);
+        break;
+    }
+    case XPT_GET_TRAN_SETTINGS:
+    {
+#ifdef CAM_NEW_TRAN_CODE
+        struct ccb_trans_settings_scsi *scsi;
+        cts = &ccb->cts;
+        cts->protocol = PROTO_SCSI;
+        if (mpt->is_fc) {
+            struct ccb_trans_settings_fc *fc =
+                &cts->xport_specific.fc;
+            cts->protocol_version = SCSI_REV_SPC;
+            cts->transport = XPORT_FC;
+            cts->transport_version = 0;
+            fc->valid = CTS_FC_VALID_SPEED;
+            fc->bitrate = 100000;
+        } else if (mpt->is_sas) {
+            struct ccb_trans_settings_sas *sas =
+                &cts->xport_specific.sas;
+            cts->protocol_version = SCSI_REV_SPC2;
+            cts->transport = XPORT_SAS;
+            cts->transport_version = 0;
+            sas->valid = CTS_SAS_VALID_SPEED;
+            sas->bitrate = 300000;
+        } else {
+            cts->protocol_version = SCSI_REV_2;
+            cts->transport = XPORT_SPI;
+            cts->transport_version = 2;
+            if (mpt_get_spi_settings(mpt, cts) != 0) {
+                mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
+                break;
+            }
+        }
+        scsi = &cts->proto_specific.scsi;
+        scsi->valid = CTS_SCSI_VALID_TQ;
+        scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
+#else
+        cts = &ccb->cts;
+        if (mpt->is_fc) {
+            cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
+            cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
+            cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
+        } else if (mpt->is_sas) {
+            cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
+            cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
+            cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
+        } else if (mpt_get_spi_settings(mpt, cts) != 0) {
+            mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
+            break;
+        }
+#endif
+        mpt_set_ccb_status(ccb, CAM_REQ_CMP);
+        break;
+    }
+    case XPT_CALC_GEOMETRY:
+    {
+        struct ccb_calc_geometry *ccg;
+
+        ccg = &ccb->ccg;
+        if (ccg->block_size == 0) {
+            ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
+            mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
+            break;
+        }
+        mpt_calc_geometry(ccg, /*extended*/1);
+        KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
+        break;
+    }
+    case XPT_PATH_INQ:        /* Path routing inquiry */
+    {
+        struct ccb_pathinq *cpi = &ccb->cpi;
+
+        cpi->version_num = 1;
+        cpi->target_sprt = 0;
+        cpi->hba_eng_cnt = 0;
+        cpi->max_target = mpt->port_facts[0].MaxDevices - 1;
+        /*
+         * FC cards report MAX_DEVICES of 512, but
+         * the MSG_SCSI_IO_REQUEST target id field
+         * is only 8 bits.  Until we fix the driver
+         * to support 'channels' for bus overflow,
+         * just limit it.
+         */
+        if (cpi->max_target > 255) {
+            cpi->max_target = 255;
+        }
+
+        /*
+         * VMware ESX reports > 16 devices and then dies when we probe.
+         */
+        if (mpt->is_spi && cpi->max_target > 15) {
+            cpi->max_target = 15;
+        }
+        if (mpt->is_spi)
+            cpi->max_lun = 7;
+        else
+            cpi->max_lun = MPT_MAX_LUNS;
+        cpi->initiator_id = mpt->mpt_ini_id;
+        cpi->bus_id = cam_sim_bus(sim);
+
+        /*
+         * The base speed is the speed of the underlying connection.
+         */
+#ifdef CAM_NEW_TRAN_CODE
+        cpi->protocol = PROTO_SCSI;
+        if (mpt->is_fc) {
+            cpi->hba_misc = PIM_NOBUSRESET;
+            cpi->base_transfer_speed = 100000;
+            cpi->hba_inquiry = PI_TAG_ABLE;
+            cpi->transport = XPORT_FC;
+            cpi->transport_version = 0;
+            cpi->protocol_version = SCSI_REV_SPC;
+        } else if (mpt->is_sas) {
+            cpi->hba_misc = PIM_NOBUSRESET;
+            cpi->base_transfer_speed = 300000;
+            cpi->hba_inquiry = PI_TAG_ABLE;
+            cpi->transport = XPORT_SAS;
+            cpi->transport_version = 0;
+            cpi->protocol_version = SCSI_REV_SPC2;
+        } else {
+            cpi->hba_misc = PIM_SEQSCAN;
+            cpi->base_transfer_speed = 3300;
+            cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
+            cpi->transport = XPORT_SPI;
+            cpi->transport_version = 2;
+            cpi->protocol_version = SCSI_REV_2;
+        }
+#else
+        if (mpt->is_fc) {
+            cpi->hba_misc = PIM_NOBUSRESET;
+            cpi->base_transfer_speed = 100000;
+            cpi->hba_inquiry = PI_TAG_ABLE;
+        } else if (mpt->is_sas) {
+            cpi->hba_misc = PIM_NOBUSRESET;
+            cpi->base_transfer_speed = 300000;
+            cpi->hba_inquiry = PI_TAG_ABLE;
+        } else {
+            cpi->hba_misc = PIM_SEQSCAN;
+            cpi->base_transfer_speed = 3300;
+            cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
+        }
+#endif
+
+        /*
+         * We give our fake RAID passthru bus a width that is MaxVolumes
+         * wide and restrict it to one lun.
+         */
+        if (raid_passthru) {
+            cpi->max_target = mpt->ioc_page2->MaxPhysDisks - 1;
+            cpi->initiator_id = cpi->max_target + 1;
+            cpi->max_lun = 0;
+        }
+
+        if ((mpt->role & MPT_ROLE_INITIATOR) == 0) {
+            cpi->hba_misc |= PIM_NOINITIATOR;
+        }
+        if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
+            cpi->target_sprt =
+                PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
+        } else {
+            cpi->target_sprt = 0;
+        }
+        strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
+        strncpy(cpi->hba_vid, "LSI", HBA_IDLEN);
+        strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
+        cpi->unit_number = cam_sim_unit(sim);
+        cpi->ccb_h.status = CAM_REQ_CMP;
+        break;
+    }
+    case XPT_EN_LUN:        /* Enable LUN as a target */
+    {
+        int result;
+
+        CAMLOCK_2_MPTLOCK(mpt);
+        if (ccb->cel.enable)
+            result = mpt_enable_lun(mpt,
+                ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
+        else
+            result = mpt_disable_lun(mpt,
+                ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
+        MPTLOCK_2_CAMLOCK(mpt);
+        if (result == 0) {
+            mpt_set_ccb_status(ccb, CAM_REQ_CMP);
+        } else {
+            mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
+        }
+        break;
+    }
+    case XPT_NOTIFY_ACK:        /* recycle notify ack */
+    case XPT_IMMED_NOTIFY:      /* Add Immediate Notify Resource */
+    case XPT_ACCEPT_TARGET_IO:  /* Add Accept Target IO Resource */
+    {
+        tgt_resource_t *trtp;
+        lun_id_t lun = ccb->ccb_h.target_lun;
+        ccb->ccb_h.sim_priv.entries[0].field = 0;
+        ccb->ccb_h.sim_priv.entries[1].ptr = mpt;
+        ccb->ccb_h.flags = 0;
+
+        if (lun == CAM_LUN_WILDCARD) {
+            if (ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
+                mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
+                break;
+            }
+            trtp = &mpt->trt_wildcard;
+        } else if (lun >= MPT_MAX_LUNS) {
+            mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
+            break;
+        } else {
+            trtp = &mpt->trt[lun];
+        }
+        CAMLOCK_2_MPTLOCK(mpt);
+        if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
+            mpt_lprt(mpt, MPT_PRT_DEBUG1,
+                "Put FREE ATIO %p lun %d\n", ccb, lun);
+            STAILQ_INSERT_TAIL(&trtp->atios, &ccb->ccb_h,
+                sim_links.stqe);
+        } else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
+            mpt_lprt(mpt, MPT_PRT_DEBUG1,
+                "Put FREE INOT lun %d\n", lun);
+            STAILQ_INSERT_TAIL(&trtp->inots, &ccb->ccb_h,
+                sim_links.stqe);
+        } else {
+            mpt_lprt(mpt, MPT_PRT_ALWAYS, "Got Notify ACK\n");
+        }
+        mpt_set_ccb_status(ccb, CAM_REQ_INPROG);
+        MPTLOCK_2_CAMLOCK(mpt);
+        return;
+    }
+    case XPT_CONT_TARGET_IO:
+        CAMLOCK_2_MPTLOCK(mpt);
+        mpt_target_start_io(mpt, ccb);
+        MPTLOCK_2_CAMLOCK(mpt);
+        return;
+
+    default:
+        ccb->ccb_h.status = CAM_REQ_INVALID;
+        break;
+    }
+    xpt_done(ccb);
+}
+
+static int
+mpt_get_spi_settings(struct mpt_softc *mpt, struct ccb_trans_settings *cts)
+{
+#ifdef CAM_NEW_TRAN_CODE
+    struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi;
+    struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi;
+#endif
+    target_id_t tgt;
+    uint32_t dval, pval, oval;
+    int rv;
+
+    if (IS_CURRENT_SETTINGS(cts) == 0) {
+        tgt = cts->ccb_h.target_id;
+    } else if (xpt_path_sim(cts->ccb_h.path) == mpt->phydisk_sim) {
+        if (mpt_map_physdisk(mpt, (union ccb *)cts, &tgt)) {
+            return (-1);
+        }
+    } else {
+        tgt = cts->ccb_h.target_id;
+    }
+
+    /*
+     * We aren't looking at Port Page 2 BIOS settings here-
+     * sometimes these have been known to be bogus XXX.
+     *
+     * For user settings, we pick the max from port page 0
+     *
+     * For current settings we read the current settings out from
+     * device page 0 for that target.
+     */
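Both branches below recover the synchronous offset and period with the same mask-then-shift idiom. A self-contained sketch of that packing and unpacking; the field positions here are illustrative assumptions, the real ones are the MPI_SCSIDEVPAGE0_NP_* masks in the mpilib headers:

    #include <stdint.h>
    #include <assert.h>

    /* Illustrative field positions for a negotiated-parameters word. */
    #define NP_OFFSET_SHIFT 16u
    #define NP_OFFSET_MASK  (0xffu << NP_OFFSET_SHIFT)
    #define NP_PERIOD_SHIFT 8u
    #define NP_PERIOD_MASK  (0xffu << NP_PERIOD_SHIFT)

    static uint32_t
    np_pack(uint32_t period, uint32_t offset)
    {
        return ((offset << NP_OFFSET_SHIFT) & NP_OFFSET_MASK) |
               ((period << NP_PERIOD_SHIFT) & NP_PERIOD_MASK);
    }

    static void
    np_unpack(uint32_t np, uint32_t *period, uint32_t *offset)
    {
        /* Mask the field, then shift it down, as the driver does. */
        *offset = (np & NP_OFFSET_MASK) >> NP_OFFSET_SHIFT;
        *period = (np & NP_PERIOD_MASK) >> NP_PERIOD_SHIFT;
    }

    int
    main(void)
    {
        uint32_t p, o;

        np_unpack(np_pack(0x0a, 0x3f), &p, &o);
        assert(p == 0x0a && o == 0x3f);
        return (0);
    }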
+    if (IS_CURRENT_SETTINGS(cts)) {
+        CONFIG_PAGE_SCSI_DEVICE_0 tmp;
+        dval = 0;
+
+        CAMLOCK_2_MPTLOCK(mpt);
+        tmp = mpt->mpt_dev_page0[tgt];
+        rv = mpt_read_cur_cfg_page(mpt, tgt, &tmp.Header,
+            sizeof(tmp), FALSE, 5000);
+        if (rv) {
+            MPTLOCK_2_CAMLOCK(mpt);
+            mpt_prt(mpt, "can't get tgt %d config page 0\n", tgt);
+            return (rv);
+        }
+        mpt2host_config_page_scsi_device_0(&tmp);
+
+        MPTLOCK_2_CAMLOCK(mpt);
+        mpt_lprt(mpt, MPT_PRT_DEBUG,
+            "mpt_get_spi_settings[%d]: current NP %x Info %x\n", tgt,
+            tmp.NegotiatedParameters, tmp.Information);
+        dval |= (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE) ?
+            DP_WIDE : DP_NARROW;
+        dval |= (mpt->mpt_disc_enable & (1 << tgt)) ?
+            DP_DISC_ENABLE : DP_DISC_DISABL;
+        dval |= (mpt->mpt_tag_enable & (1 << tgt)) ?
+            DP_TQING_ENABLE : DP_TQING_DISABL;
+        oval = tmp.NegotiatedParameters;
+        oval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK;
+        oval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_OFFSET;
+        pval = tmp.NegotiatedParameters;
+        pval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK;
+        pval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_PERIOD;
+        mpt->mpt_dev_page0[tgt] = tmp;
+    } else {
+        dval = DP_WIDE|DP_DISC_ENABLE|DP_TQING_ENABLE|DP_SYNC;
+        oval = mpt->mpt_port_page0.Capabilities;
+        oval = MPI_SCSIPORTPAGE0_CAP_GET_MAX_SYNC_OFFSET(oval);
+        pval = mpt->mpt_port_page0.Capabilities;
+        pval = MPI_SCSIPORTPAGE0_CAP_GET_MIN_SYNC_PERIOD(pval);
+    }
+
+#ifndef CAM_NEW_TRAN_CODE
+    cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
+    cts->valid = 0;
+    cts->sync_period = pval;
+    cts->sync_offset = oval;
+    cts->valid |= CCB_TRANS_SYNC_RATE_VALID;
+    cts->valid |= CCB_TRANS_SYNC_OFFSET_VALID;
+    cts->valid |= CCB_TRANS_BUS_WIDTH_VALID;
+    if (dval & DP_WIDE) {
+        cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
+    } else {
+        cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
+    }
+    if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
+        cts->valid |= CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
+        if (dval & DP_DISC_ENABLE) {
+            cts->flags |= CCB_TRANS_DISC_ENB;
+        }
+        if (dval & DP_TQING_ENABLE) {
+            cts->flags |= CCB_TRANS_TAG_ENB;
+        }
+    }
+#else
+    spi->valid = 0;
+    scsi->valid = 0;
+    spi->flags = 0;
+    scsi->flags = 0;
+    spi->sync_offset = oval;
+    spi->sync_period = pval;
+    spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
+    spi->valid |= CTS_SPI_VALID_SYNC_RATE;
+    spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
+    if (dval & DP_WIDE) {
+        spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
+    } else {
+        spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
+    }
+    if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
+        scsi->valid = CTS_SCSI_VALID_TQ;
+        if (dval & DP_TQING_ENABLE) {
+            scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
+        }
+        spi->valid |= CTS_SPI_VALID_DISC;
+        if (dval & DP_DISC_ENABLE) {
+            spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
+        }
+    }
+#endif
+    mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
+        "mpt_get_spi_settings[%d]: %s flags 0x%x per 0x%x off=%d\n", tgt,
+        IS_CURRENT_SETTINGS(cts)? "ACTIVE" : "NVRAM ", dval, pval, oval);
+    return (0);
+}
+
+static void
+mpt_setwidth(struct mpt_softc *mpt, int tgt, int onoff)
+{
+    PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;
+
+    ptr = &mpt->mpt_dev_page1[tgt];
+    if (onoff) {
+        ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
+    } else {
+        ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;
+    }
+}
+
+static void
+mpt_setsync(struct mpt_softc *mpt, int tgt, int period, int offset)
+{
+    PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;
+
+    ptr = &mpt->mpt_dev_page1[tgt];
+    ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
+    ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
+    ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_DT;
+    ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_QAS;
+    ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_IU;
+    if (period == 0) {
+        return;
+    }
+    ptr->RequestedParameters |=
+        period << MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD;
+    ptr->RequestedParameters |=
+        offset << MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET;
+    if (period < 0xa) {
+        ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_DT;
+    }
+    if (period < 0x9) {
+        ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_QAS;
+        ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_IU;
+    }
+}
+
+static int
+mpt_update_spi_config(struct mpt_softc *mpt, int tgt)
+{
+    CONFIG_PAGE_SCSI_DEVICE_1 tmp;
+    int rv;
+
+    mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
+        "mpt_update_spi_config[%d].page1: Requested Params 0x%08x\n",
+        tgt, mpt->mpt_dev_page1[tgt].RequestedParameters);
+    tmp = mpt->mpt_dev_page1[tgt];
+    host2mpt_config_page_scsi_device_1(&tmp);
+    rv = mpt_write_cur_cfg_page(mpt, tgt,
+        &tmp.Header, sizeof(tmp), FALSE, 5000);
+    if (rv) {
+        mpt_prt(mpt, "mpt_update_spi_config: write cur page failed\n");
+        return (-1);
+    }
+    return (0);
+}
+
+static void
+mpt_calc_geometry(struct ccb_calc_geometry *ccg, int extended)
+{
+#if __FreeBSD_version >= 500000
+    cam_calc_geometry(ccg, extended);
+#else
+    uint32_t size_mb;
+    uint32_t secs_per_cylinder;
+
+    if (ccg->block_size == 0) {
+        ccg->ccb_h.status = CAM_REQ_INVALID;
+        return;
+    }
+    size_mb = ccg->volume_size / ((1024L * 1024L) / ccg->block_size);
+    if (size_mb > 1024 && extended) {
+        ccg->heads = 255;
+        ccg->secs_per_track = 63;
+    } else {
+        ccg->heads = 64;
+        ccg->secs_per_track = 32;
+    }
+    secs_per_cylinder = ccg->heads * ccg->secs_per_track;
+    ccg->cylinders = ccg->volume_size / secs_per_cylinder;
+    ccg->ccb_h.status = CAM_REQ_CMP;
+#endif
+}
+
+/****************************** Timeout Recovery ******************************/
+static int
+mpt_spawn_recovery_thread(struct mpt_softc *mpt)
+{
+    int error;
+
+    error = mpt_kthread_create(mpt_recovery_thread, mpt,
+        &mpt->recovery_thread, /*flags*/0,
+        /*altstack*/0, "mpt_recovery%d", mpt->unit);
+    return (error);
+}
+
+static void
+mpt_terminate_recovery_thread(struct mpt_softc *mpt)
+{
+    if (mpt->recovery_thread == NULL) {
+        return;
+    }
+    mpt->shutdwn_recovery = 1;
+    wakeup(mpt);
+    /*
+     * Sleep on a slightly different location
+     * for this interlock just for added safety.
+     */
+    mpt_sleep(mpt, &mpt->recovery_thread, PUSER, "thtrm", 0);
+}
+
+static void
+mpt_recovery_thread(void *arg)
+{
+    struct mpt_softc *mpt;
+
+    mpt = (struct mpt_softc *)arg;
+    MPT_LOCK(mpt);
+    for (;;) {
+        if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
+            if (mpt->shutdwn_recovery == 0) {
+                mpt_sleep(mpt, mpt, PUSER, "idle", 0);
+            }
+        }
+        if (mpt->shutdwn_recovery != 0) {
+            break;
+        }
+        mpt_recover_commands(mpt);
+    }
+    mpt->recovery_thread = NULL;
+    wakeup(&mpt->recovery_thread);
+    MPT_UNLOCK(mpt);
+    mpt_kthread_exit(0);
+}
+
+static int
+mpt_scsi_send_tmf(struct mpt_softc *mpt, u_int type, u_int flags,
+    u_int channel, u_int target, u_int lun, u_int abort_ctx, int sleep_ok)
+{
+    MSG_SCSI_TASK_MGMT *tmf_req;
+    int error;
+
+    /*
+     * Wait for any current TMF request to complete.
+     * We're only allowed to issue one TMF at a time.
+     */
+    error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_FREE, REQ_STATE_FREE,
+        sleep_ok, MPT_TMF_MAX_TIMEOUT);
+    if (error != 0) {
+        mpt_reset(mpt, TRUE);
+        return (ETIMEDOUT);
+    }
+
+    mpt_assign_serno(mpt, mpt->tmf_req);
+    mpt->tmf_req->state = REQ_STATE_ALLOCATED|REQ_STATE_QUEUED;
+
+    tmf_req = (MSG_SCSI_TASK_MGMT *)mpt->tmf_req->req_vbuf;
+    memset(tmf_req, 0, sizeof(*tmf_req));
+    tmf_req->TargetID = target;
+    tmf_req->Bus = channel;
+    tmf_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
+    tmf_req->TaskType = type;
+    tmf_req->MsgFlags = flags;
+    tmf_req->MsgContext =
+        htole32(mpt->tmf_req->index | scsi_tmf_handler_id);
+    if (lun > MPT_MAX_LUNS) {
+        tmf_req->LUN[0] = 0x40 | ((lun >> 8) & 0x3f);
+        tmf_req->LUN[1] = lun & 0xff;
+    } else {
+        tmf_req->LUN[1] = lun;
+    }
+    tmf_req->TaskMsgContext = abort_ctx;
+
+    mpt_lprt(mpt, MPT_PRT_DEBUG,
+        "Issuing TMF %p:%u with MsgContext of 0x%x\n", mpt->tmf_req,
+        mpt->tmf_req->serno, tmf_req->MsgContext);
+    if (mpt->verbose > MPT_PRT_DEBUG) {
+        mpt_print_request(tmf_req);
+    }
+
+    KASSERT(mpt_req_on_pending_list(mpt, mpt->tmf_req) == 0,
+        ("mpt_scsi_send_tmf: tmf_req already on pending list"));
+    TAILQ_INSERT_HEAD(&mpt->request_pending_list, mpt->tmf_req, links);
+    error = mpt_send_handshake_cmd(mpt, sizeof(*tmf_req), tmf_req);
+    if (error != MPT_OK) {
+        TAILQ_REMOVE(&mpt->request_pending_list, mpt->tmf_req, links);
+        mpt->tmf_req->state = REQ_STATE_FREE;
+        mpt_reset(mpt, TRUE);
+    }
+    return (error);
+}
+
+/*
+ * When a command times out, it is placed on the request_timeout_list
+ * and we wake our recovery thread.  The MPT-Fusion architecture supports
+ * only a single TMF operation at a time, so we serially abort/bdr, etc,
+ * the timedout transactions.  The next TMF is issued either by the
+ * completion handler of the current TMF waking our recovery thread,
+ * or the TMF timeout handler causing a hard reset sequence.
+ */
+static void
+mpt_recover_commands(struct mpt_softc *mpt)
+{
+    request_t *req;
+    union ccb *ccb;
+    int error;
+
+    if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
+        /*
+         * No work to do- leave.
+         */
+        mpt_prt(mpt, "mpt_recover_commands: no requests.\n");
+        return;
+    }
+
+    /*
+     * Flush any commands whose completion coincides with their timeout.
+     */
+    mpt_intr(mpt);
+
+    if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
+        /*
+         * The timedout commands have already
+         * completed.  This typically means
+         * that either the timeout value was on
+         * the hairy edge of what the device
+         * requires or - more likely - interrupts
+         * are not happening.
+         */
+        mpt_prt(mpt, "Timedout requests already complete. "
+            "Interrupts may not be functioning.\n");
+        mpt_enable_ints(mpt);
+        return;
+    }
+
+    /*
+     * We have no visibility into the current state of the
+     * controller, so attempt to abort the commands in the
+     * order they timed-out.  For initiator commands, we
+     * depend on the reply handler pulling requests off
+     * the timeout list.
+     */
+    while ((req = TAILQ_FIRST(&mpt->request_timeout_list)) != NULL) {
+        uint16_t status;
+        uint8_t response;
+        MSG_REQUEST_HEADER *hdrp = req->req_vbuf;
+
+        mpt_prt(mpt, "attempting to abort req %p:%u function %x\n",
+            req, req->serno, hdrp->Function);
+        ccb = req->ccb;
+        if (ccb == NULL) {
+            mpt_prt(mpt, "null ccb in timed out request. "
+                "Resetting Controller.\n");
+            mpt_reset(mpt, TRUE);
+            continue;
+        }
+        mpt_set_ccb_status(ccb, CAM_CMD_TIMEOUT);
+
+        /*
+         * Check to see if this is not an initiator command and
+         * deal with it differently if it is.
+         */
+        switch (hdrp->Function) {
+        case MPI_FUNCTION_SCSI_IO_REQUEST:
+        case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
+            break;
+        default:
+            /*
+             * XXX: FIX ME: need to abort target assists...
+             */
+            mpt_prt(mpt, "just putting it back on the pend q\n");
+            TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
+            TAILQ_INSERT_HEAD(&mpt->request_pending_list, req,
+                links);
+            continue;
+        }
+
+        error = mpt_scsi_send_tmf(mpt,
+            MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
+            0, 0, ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
+            htole32(req->index | scsi_io_handler_id), TRUE);
+
+        if (error != 0) {
+            /*
+             * mpt_scsi_send_tmf hard resets on failure, so no
+             * need to do so here.  Our queue should be emptied
+             * by the hard reset.
+             */
+            continue;
+        }
+
+        error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
+            REQ_STATE_DONE, TRUE, 500);
+
+        status = le16toh(mpt->tmf_req->IOCStatus);
+        response = mpt->tmf_req->ResponseCode;
+        mpt->tmf_req->state = REQ_STATE_FREE;
+
+        if (error != 0) {
+            /*
+             * If we've errored out, reset the controller.
+             */
+            mpt_prt(mpt, "mpt_recover_commands: abort timed-out. "
+                "Resetting controller\n");
+            mpt_reset(mpt, TRUE);
+            continue;
+        }
+
+        if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
+            mpt_prt(mpt, "mpt_recover_commands: IOC Status 0x%x. "
+                "Resetting controller.\n", status);
+            mpt_reset(mpt, TRUE);
+            continue;
+        }
+
+        if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
+            response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
+            mpt_prt(mpt, "mpt_recover_commands: TMF Response 0x%x. "
+                "Resetting controller.\n", response);
+            mpt_reset(mpt, TRUE);
+            continue;
+        }
+        mpt_prt(mpt, "abort of req %p:%u completed\n", req, req->serno);
+    }
+}
+
+/************************ Target Mode Support ****************************/
+static void
+mpt_fc_post_els(struct mpt_softc *mpt, request_t *req, int ioindex)
+{
+    MSG_LINK_SERVICE_BUFFER_POST_REQUEST *fc;
+    PTR_SGE_TRANSACTION32 tep;
+    PTR_SGE_SIMPLE32 se;
+    bus_addr_t paddr;
+    uint32_t fl;
+
+    paddr = req->req_pbuf;
+    paddr += MPT_RQSL(mpt);
+
+    fc = req->req_vbuf;
+    memset(fc, 0, MPT_REQUEST_AREA);
+    fc->BufferCount = 1;
+    fc->Function = MPI_FUNCTION_FC_LINK_SRVC_BUF_POST;
+    fc->MsgContext = htole32(req->index | fc_els_handler_id);
+
+    /*
+     * Okay, set up ELS buffer pointers.  ELS buffer pointers
+     * consist of a TE SGL element (with details length of zero)
+     * followed by a SIMPLE SGL element which holds the address
+     * of the buffer.
+     */
+
+    tep = (PTR_SGE_TRANSACTION32) &fc->SGL;
+
+    tep->ContextSize = 4;
+    tep->Flags = 0;
+    tep->TransactionContext[0] = htole32(ioindex);
+
+    se = (PTR_SGE_SIMPLE32) &tep->TransactionDetails[0];
+    fl =
+        MPI_SGE_FLAGS_HOST_TO_IOC |
+        MPI_SGE_FLAGS_SIMPLE_ELEMENT |
+        MPI_SGE_FLAGS_LAST_ELEMENT |
+        MPI_SGE_FLAGS_END_OF_LIST |
+        MPI_SGE_FLAGS_END_OF_BUFFER;
+    fl <<= MPI_SGE_FLAGS_SHIFT;
+    fl |= (MPT_NRFM(mpt) - MPT_RQSL(mpt));
+    se->FlagsLength = htole32(fl);
+    se->Address = htole32((uint32_t) paddr);
+    mpt_lprt(mpt, MPT_PRT_DEBUG,
+        "add ELS index %d ioindex %d for %p:%u\n",
+        req->index, ioindex, req, req->serno);
+    KASSERT(((req->state & REQ_STATE_LOCKED) != 0),
+        ("mpt_fc_post_els: request not locked"));
+    mpt_send_cmd(mpt, req);
+}
+
+static void
+mpt_post_target_command(struct mpt_softc *mpt, request_t *req, int ioindex)
+{
+    PTR_MSG_TARGET_CMD_BUFFER_POST_REQUEST fc;
+    PTR_CMD_BUFFER_DESCRIPTOR cb;
+    bus_addr_t paddr;
+
+    paddr = req->req_pbuf;
+    paddr += MPT_RQSL(mpt);
+    memset(req->req_vbuf, 0, MPT_REQUEST_AREA);
+    MPT_TGT_STATE(mpt, req)->state = TGT_STATE_LOADING;
+
+    fc = req->req_vbuf;
+    fc->BufferCount = 1;
+    fc->Function = MPI_FUNCTION_TARGET_CMD_BUFFER_POST;
+    fc->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
+
+    cb = &fc->Buffer[0];
+    cb->IoIndex = htole16(ioindex);
+    cb->u.PhysicalAddress32 = htole32((U32) paddr);
+
+    mpt_check_doorbell(mpt);
+    mpt_send_cmd(mpt, req);
+}
+
+static int
+mpt_add_els_buffers(struct mpt_softc *mpt)
+{
+    int i;
+
+    if (mpt->is_fc == 0) {
+        return (TRUE);
+    }
+
+    if (mpt->els_cmds_allocated) {
+        return (TRUE);
+    }
+
+    mpt->els_cmd_ptrs = kmalloc(MPT_MAX_ELS * sizeof (request_t *),
+        M_DEVBUF, M_NOWAIT | M_ZERO);
+
+    if (mpt->els_cmd_ptrs == NULL) {
+        return (FALSE);
+    }
+
+    /*
+     * Feed the chip some ELS buffer resources
+     */
+    for (i = 0; i < MPT_MAX_ELS; i++) {
+        request_t *req = mpt_get_request(mpt, FALSE);
+        if (req == NULL) {
+            break;
+        }
+        req->state |= REQ_STATE_LOCKED;
+        mpt->els_cmd_ptrs[i] = req;
+        mpt_fc_post_els(mpt, req, i);
+    }
+
+    if (i == 0) {
+        mpt_prt(mpt, "unable to add ELS buffer resources\n");
+        kfree(mpt->els_cmd_ptrs, M_DEVBUF);
+        mpt->els_cmd_ptrs = NULL;
+        return (FALSE);
+    }
+    if (i != MPT_MAX_ELS) {
+        mpt_lprt(mpt, MPT_PRT_INFO,
+            "only added %d of %d ELS buffers\n", i, MPT_MAX_ELS);
+    }
+    mpt->els_cmds_allocated = i;
+    return(TRUE);
+}
+
+static int
+mpt_add_target_commands(struct mpt_softc *mpt)
+{
+    int i, max;
+
+    if (mpt->tgt_cmd_ptrs) {
+        return (TRUE);
+    }
+
+    max = MPT_MAX_REQUESTS(mpt) >> 1;
+    if (max > mpt->mpt_max_tgtcmds) {
+        max = mpt->mpt_max_tgtcmds;
+    }
+    mpt->tgt_cmd_ptrs =
+        kmalloc(max * sizeof (request_t *), M_DEVBUF, M_NOWAIT | M_ZERO);
+    if (mpt->tgt_cmd_ptrs == NULL) {
+        mpt_prt(mpt,
+            "mpt_add_target_commands: could not allocate cmd ptrs\n");
+        return (FALSE);
+    }
+
+    for (i = 0; i < max; i++) {
+        request_t *req;
+
+        req = mpt_get_request(mpt, FALSE);
+        if (req == NULL) {
+            break;
+        }
+        req->state |= REQ_STATE_LOCKED;
+        mpt->tgt_cmd_ptrs[i] = req;
+        mpt_post_target_command(mpt, req, i);
+    }
+
+    if (i == 0) {
+        mpt_lprt(mpt, MPT_PRT_ERROR, "could not add any target bufs\n");
+        kfree(mpt->tgt_cmd_ptrs, M_DEVBUF);
+        mpt->tgt_cmd_ptrs = NULL;
+        return (FALSE);
+    }
+
+    mpt->tgt_cmds_allocated = i;
+
+    if (i < max) {
+        mpt_lprt(mpt, MPT_PRT_INFO,
+            "added %d of %d target bufs\n", i, max);
+    }
+    return (i);
+}
+
+static int
+mpt_enable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
+{
+    if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
+        mpt->twildcard = 1;
+    } else if (lun >= MPT_MAX_LUNS) {
+        return (EINVAL);
+    } else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
+        return (EINVAL);
+    }
+    if (mpt->tenabled == 0) {
+        if (mpt->is_fc) {
+            (void) mpt_fc_reset_link(mpt, 0);
+        }
+        mpt->tenabled = 1;
+    }
+    if (lun == CAM_LUN_WILDCARD) {
+        mpt->trt_wildcard.enabled = 1;
+    } else {
+        mpt->trt[lun].enabled = 1;
+    }
+    return (0);
+}
+
+static int
+mpt_disable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
+{
+    int i;
+    if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
+        mpt->twildcard = 0;
+    } else if (lun >= MPT_MAX_LUNS) {
+        return (EINVAL);
+    } else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
+        return (EINVAL);
+    }
+    if (lun == CAM_LUN_WILDCARD) {
+        mpt->trt_wildcard.enabled = 0;
+    } else {
+        mpt->trt[lun].enabled = 0;
+    }
+    for (i = 0; i < MPT_MAX_LUNS; i++) {
+        if (mpt->trt[i].enabled) {
+            break;
+        }
+    }
+    if (i == MPT_MAX_LUNS && mpt->twildcard == 0) {
+        if (mpt->is_fc) {
+            (void) mpt_fc_reset_link(mpt, 0);
+        }
+        mpt->tenabled = 0;
+    }
+    return (0);
+}
+
+/*
+ * Called with MPT lock held
+ */
+static void
+mpt_target_start_io(struct mpt_softc *mpt, union ccb *ccb)
+{
+    struct ccb_scsiio *csio = &ccb->csio;
+    request_t *cmd_req = MPT_TAG_2_REQ(mpt, csio->tag_id);
+    mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
+
+    switch (tgt->state) {
+    case TGT_STATE_IN_CAM:
+        break;
+    case TGT_STATE_MOVING_DATA:
+        mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
+        xpt_freeze_simq(mpt->sim, 1);
+        ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
+        tgt->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
+        MPTLOCK_2_CAMLOCK(mpt);
+        xpt_done(ccb);
+        CAMLOCK_2_MPTLOCK(mpt);
+        return;
+    default:
+        mpt_prt(mpt, "ccb %p flags 0x%x tag 0x%08x had bad request "
+            "starting I/O\n", ccb, csio->ccb_h.flags, csio->tag_id);
+        mpt_tgt_dump_req_state(mpt, cmd_req);
+        mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
+        MPTLOCK_2_CAMLOCK(mpt);
+        xpt_done(ccb);
+        CAMLOCK_2_MPTLOCK(mpt);
+        return;
+    }
+
+    if (csio->dxfer_len) {
+        bus_dmamap_callback_t *cb;
+        PTR_MSG_TARGET_ASSIST_REQUEST ta;
+        request_t *req;
+
+        KASSERT((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE,
+            ("dxfer_len %u but direction is NONE\n", csio->dxfer_len));
+
+        if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
+            if (mpt->outofbeer == 0) {
+                mpt->outofbeer = 1;
+                xpt_freeze_simq(mpt->sim, 1);
+                mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
+            }
+            ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
+            mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
+            MPTLOCK_2_CAMLOCK(mpt);
+            xpt_done(ccb);
+            CAMLOCK_2_MPTLOCK(mpt);
+            return;
+        }
+        ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
+        if (sizeof (bus_addr_t) > 4) {
+            cb = mpt_execute_req_a64;
+        } else {
+            cb = mpt_execute_req;
+        }
+
+        req->ccb = ccb;
+        ccb->ccb_h.ccb_req_ptr = req;
+
+        /*
+         * Record the currently active ccb and the
+         * request for it in our target state area.
+         */
+        tgt->ccb = ccb;
+        tgt->req = req;
+
+        memset(req->req_vbuf, 0, MPT_RQSL(mpt));
+        ta = req->req_vbuf;
+
+        if (mpt->is_sas) {
+            PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
+                cmd_req->req_vbuf;
+            ta->QueueTag = ssp->InitiatorTag;
+        } else if (mpt->is_spi) {
+            PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
+                cmd_req->req_vbuf;
+            ta->QueueTag = sp->Tag;
+        }
+        ta->Function = MPI_FUNCTION_TARGET_ASSIST;
+        ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
+        ta->ReplyWord = htole32(tgt->reply_desc);
+        if (csio->ccb_h.target_lun > MPT_MAX_LUNS) {
+            ta->LUN[0] =
+                0x40 | ((csio->ccb_h.target_lun >> 8) & 0x3f);
+            ta->LUN[1] = csio->ccb_h.target_lun & 0xff;
+        } else {
+            ta->LUN[1] = csio->ccb_h.target_lun;
+        }
+
+        ta->RelativeOffset = tgt->bytes_xfered;
+        ta->DataLength = ccb->csio.dxfer_len;
+        if (ta->DataLength > tgt->resid) {
+            ta->DataLength = tgt->resid;
+        }
+
+        /*
+         * XXX Should be done after data transfer completes?
+         */
+        tgt->resid -= csio->dxfer_len;
+        tgt->bytes_xfered += csio->dxfer_len;
+
+        if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
+            ta->TargetAssistFlags |=
+                TARGET_ASSIST_FLAGS_DATA_DIRECTION;
+        }
+
+#ifdef WE_TRUST_AUTO_GOOD_STATUS
+        if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
+            csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
+            ta->TargetAssistFlags |=
+                TARGET_ASSIST_FLAGS_AUTO_STATUS;
+        }
+#endif
+        tgt->state = TGT_STATE_SETTING_UP_FOR_DATA;
+
+        mpt_lprt(mpt, MPT_PRT_DEBUG,
+            "DATA_CCB %p tag %x %u bytes %u resid flg %x req %p:%u "
+            "nxtstate=%d\n", csio, csio->tag_id, csio->dxfer_len,
+            tgt->resid, ccb->ccb_h.flags, req, req->serno, tgt->state);
+
+        MPTLOCK_2_CAMLOCK(mpt);
+        if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
+            if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
+                int error;
+                int s = splsoftvm();
+                error = bus_dmamap_load(mpt->buffer_dmat,
+                    req->dmap, csio->data_ptr, csio->dxfer_len,
+                    cb, req, 0);
+                splx(s);
+                if (error == EINPROGRESS) {
+                    xpt_freeze_simq(mpt->sim, 1);
+                    ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
+                }
+            } else {
+                /*
+                 * We have been given a pointer to a single
+                 * physical buffer.
+                 */
+                struct bus_dma_segment seg;
+                seg.ds_addr = (bus_addr_t)
+                    (vm_offset_t)csio->data_ptr;
+                seg.ds_len = csio->dxfer_len;
+                (*cb)(req, &seg, 1, 0);
+            }
+        } else {
+            /*
+             * We have been given a list of addresses.
+             * This case could be easily supported but they are not
+             * currently generated by the CAM subsystem so there
+             * is no point in wasting the time right now.
+             */
+            struct bus_dma_segment *sgs;
+            if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
+                (*cb)(req, NULL, 0, EFAULT);
+            } else {
+                /* Just use the segments provided */
+                sgs = (struct bus_dma_segment *)csio->data_ptr;
+                (*cb)(req, sgs, csio->sglist_cnt, 0);
+            }
+        }
+        CAMLOCK_2_MPTLOCK(mpt);
+    } else {
+        uint8_t *sp = NULL, sense[MPT_SENSE_SIZE];
+
+        /*
+         * XXX: I don't know why this seems to happen, but
+         * XXX: completing the CCB seems to make things happy.
+         * XXX: This seems to happen if the initiator requests
+         * XXX: enough data that we have to do multiple CTIOs.
+         */
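The bus_dmamap_load() call above may complete asynchronously: an EINPROGRESS return means the SGL-building callback fires later, so the driver freezes its queue in the meantime. A stripped-down user-space analogue of that deferred-callback contract; the names here are hypothetical, the real interface is bus_dmamap_load(9) and bus_dmamap_callback_t:

    #include <stdio.h>
    #include <errno.h>

    struct dma_segment {
        unsigned long addr;
        unsigned long len;
    };

    typedef void (*load_callback_t)(void *arg, struct dma_segment *segs,
        int nseg, int error);

    /* Pretend loader: immediate completion invokes the callback before
     * returning; a resource-starved loader would instead return
     * EINPROGRESS and invoke the callback later, which is why the caller
     * must be prepared to freeze its queue. */
    static int
    dma_load(void *buf, unsigned long len, load_callback_t cb, void *arg)
    {
        struct dma_segment seg = { (unsigned long)buf, len };

        cb(arg, &seg, 1, 0);
        return (0);    /* or EINPROGRESS when the mapping is deferred */
    }

    static void
    on_loaded(void *arg, struct dma_segment *segs, int nseg, int error)
    {
        (void)arg;
        if (error == 0 && nseg == 1)
            printf("mapped %lu bytes at 0x%lx\n",
                segs[0].len, segs[0].addr);
    }

    int
    main(void)
    {
        char buf[512];

        if (dma_load(buf, sizeof(buf), on_loaded, NULL) == EINPROGRESS)
            printf("deferred: callback will fire later\n");
        return (0);
    }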
+        if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
+            mpt_lprt(mpt, MPT_PRT_DEBUG,
+                "Meaningless STATUS CCB (%p): flags %x status %x "
+                "resid %d bytes_xfered %u\n", ccb, ccb->ccb_h.flags,
+                ccb->ccb_h.status, tgt->resid, tgt->bytes_xfered);
+            mpt_set_ccb_status(ccb, CAM_REQ_CMP);
+            ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
+            MPTLOCK_2_CAMLOCK(mpt);
+            xpt_done(ccb);
+            CAMLOCK_2_MPTLOCK(mpt);
+            return;
+        }
+        if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
+            sp = sense;
+            memcpy(sp, &csio->sense_data,
+                min(csio->sense_len, MPT_SENSE_SIZE));
+        }
+        mpt_scsi_tgt_status(mpt, ccb, cmd_req, csio->scsi_status, sp);
+    }
+}
+
+static void
+mpt_scsi_tgt_local(struct mpt_softc *mpt, request_t *cmd_req,
+    uint32_t lun, int send, uint8_t *data, size_t length)
+{
+    mpt_tgt_state_t *tgt;
+    PTR_MSG_TARGET_ASSIST_REQUEST ta;
+    SGE_SIMPLE32 *se;
+    uint32_t flags;
+    uint8_t *dptr;
+    bus_addr_t pptr;
+    request_t *req;
+
+    /*
+     * We enter with resid set to the data load for the command.
+     */
+    tgt = MPT_TGT_STATE(mpt, cmd_req);
+    if (length == 0 || tgt->resid == 0) {
+        tgt->resid = 0;
+        mpt_scsi_tgt_status(mpt, NULL, cmd_req, 0, NULL);
+        return;
+    }
+
+    if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
+        mpt_prt(mpt, "out of resources- dropping local response\n");
+        return;
+    }
+    tgt->is_local = 1;
+
+    memset(req->req_vbuf, 0, MPT_RQSL(mpt));
+    ta = req->req_vbuf;
+
+    if (mpt->is_sas) {
+        PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = cmd_req->req_vbuf;
+        ta->QueueTag = ssp->InitiatorTag;
+    } else if (mpt->is_spi) {
+        PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = cmd_req->req_vbuf;
+        ta->QueueTag = sp->Tag;
+    }
+    ta->Function = MPI_FUNCTION_TARGET_ASSIST;
+    ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
+    ta->ReplyWord = htole32(tgt->reply_desc);
+    if (lun > MPT_MAX_LUNS) {
+        ta->LUN[0] = 0x40 | ((lun >> 8) & 0x3f);
+        ta->LUN[1] = lun & 0xff;
+    } else {
+        ta->LUN[1] = lun;
+    }
+    ta->RelativeOffset = 0;
+    ta->DataLength = length;
+
+    dptr = req->req_vbuf;
+    dptr += MPT_RQSL(mpt);
+    pptr = req->req_pbuf;
+    pptr += MPT_RQSL(mpt);
+    memcpy(dptr, data, min(length, MPT_RQSL(mpt)));
+
+    se = (SGE_SIMPLE32 *) &ta->SGL[0];
+    memset(se, 0, sizeof (*se));
+
+    flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
+    if (send) {
+        ta->TargetAssistFlags |= TARGET_ASSIST_FLAGS_DATA_DIRECTION;
+        flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
+    }
+    se->Address = pptr;
+    MPI_pSGE_SET_LENGTH(se, length);
+    flags |= MPI_SGE_FLAGS_LAST_ELEMENT;
+    flags |= MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER;
+    MPI_pSGE_SET_FLAGS(se, flags);
+
+    tgt->ccb = NULL;
+    tgt->req = req;
+    tgt->resid -= length;
+    tgt->bytes_xfered = length;
+#ifdef WE_TRUST_AUTO_GOOD_STATUS
+    tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
+#else
+    tgt->state = TGT_STATE_MOVING_DATA;
+#endif
+    mpt_send_cmd(mpt, req);
+}
+
+/*
+ * Abort queued up CCBs
+ */
+static cam_status
+mpt_abort_target_ccb(struct mpt_softc *mpt, union ccb *ccb)
+{
+    struct mpt_hdr_stailq *lp;
+    struct ccb_hdr *srch;
+    int found = 0;
+    union ccb *accb = ccb->cab.abort_ccb;
+    tgt_resource_t *trtp;
+
+    mpt_lprt(mpt, MPT_PRT_DEBUG, "aborting ccb %p\n", accb);
+
+    if (ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) {
+        trtp = &mpt->trt_wildcard;
+    } else {
+        trtp = &mpt->trt[ccb->ccb_h.target_lun];
+    }
+
+    if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
+        lp = &trtp->atios;
+    } else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
+        lp = &trtp->inots;
+    } else {
+        return (CAM_REQ_INVALID);
+    }
+
+    STAILQ_FOREACH(srch, lp, sim_links.stqe) {
+        if (srch == &accb->ccb_h) {
+            found = 1;
+            STAILQ_REMOVE(lp, srch, ccb_hdr,
+                sim_links.stqe);
+            break;
+        }
+    }
+    if (found) {
+        accb->ccb_h.status = CAM_REQ_ABORTED;
+        xpt_done(accb);
+        return (CAM_REQ_CMP);
+    }
+    mpt_prt(mpt, "mpt_abort_tgt_ccb: CCB %p not found\n", ccb);
+    return (CAM_PATH_INVALID);
+}
+
+/*
+ * Ask the MPT to abort the current target command
+ */
+static int
+mpt_abort_target_cmd(struct mpt_softc *mpt, request_t *cmd_req)
+{
+    int error;
+    request_t *req;
+    PTR_MSG_TARGET_MODE_ABORT abtp;
+
+    req = mpt_get_request(mpt, FALSE);
+    if (req == NULL) {
+        return (-1);
+    }
+    abtp = req->req_vbuf;
+    memset(abtp, 0, sizeof (*abtp));
+
+    abtp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
+    abtp->AbortType = TARGET_MODE_ABORT_TYPE_EXACT_IO;
+    abtp->Function = MPI_FUNCTION_TARGET_MODE_ABORT;
+    abtp->ReplyWord = htole32(MPT_TGT_STATE(mpt, cmd_req)->reply_desc);
+    error = 0;
+    if (mpt->is_fc || mpt->is_sas) {
+        mpt_send_cmd(mpt, req);
+    } else {
+        error = mpt_send_handshake_cmd(mpt, sizeof(*req), req);
+    }
+    return (error);
+}
+
+/*
+ * WE_TRUST_AUTO_GOOD_STATUS- I've found that setting
+ * TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS leads the
+ * FC929 to set bogus FC_RSP fields (nonzero residuals
+ * but w/o RESID fields set).  This causes QLogic initiators
+ * to think maybe that a frame was lost.
+ *
+ * WE_CAN_USE_AUTO_REPOST- we can't use AUTO_REPOST because
+ * we use allocated requests to do TARGET_ASSIST and we
+ * need to know when to release them.
+ */
+
+static void
+mpt_scsi_tgt_status(struct mpt_softc *mpt, union ccb *ccb, request_t *cmd_req,
+    uint8_t status, uint8_t const *sense_data)
+{
+    uint8_t *cmd_vbuf;
+    mpt_tgt_state_t *tgt;
+    PTR_MSG_TARGET_STATUS_SEND_REQUEST tp;
+    request_t *req;
+    bus_addr_t paddr;
+    int resplen = 0;
+    uint32_t fl;
+
+    cmd_vbuf = cmd_req->req_vbuf;
+    cmd_vbuf += MPT_RQSL(mpt);
+    tgt = MPT_TGT_STATE(mpt, cmd_req);
+
+    if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
+        if (mpt->outofbeer == 0) {
+            mpt->outofbeer = 1;
+            xpt_freeze_simq(mpt->sim, 1);
+            mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
+        }
+        if (ccb) {
+            ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
+            mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
+            MPTLOCK_2_CAMLOCK(mpt);
+            xpt_done(ccb);
+            CAMLOCK_2_MPTLOCK(mpt);
+        } else {
+            mpt_prt(mpt,
+                "could not allocate status request- dropping\n");
+        }
+        return;
+    }
+    req->ccb = ccb;
+    if (ccb) {
+        ccb->ccb_h.ccb_mpt_ptr = mpt;
+        ccb->ccb_h.ccb_req_ptr = req;
+    }
+
+    /*
+     * Record the currently active ccb, if any, and the
+     * request for it in our target state area.
+     */
+    tgt->ccb = ccb;
+    tgt->req = req;
+    tgt->state = TGT_STATE_SENDING_STATUS;
+
+    tp = req->req_vbuf;
+    paddr = req->req_pbuf;
+    paddr += MPT_RQSL(mpt);
+
+    memset(tp, 0, sizeof (*tp));
+    tp->Function = MPI_FUNCTION_TARGET_STATUS_SEND;
+    if (mpt->is_fc) {
+        PTR_MPI_TARGET_FCP_CMD_BUFFER fc =
+            (PTR_MPI_TARGET_FCP_CMD_BUFFER) cmd_vbuf;
+        uint8_t *sts_vbuf;
+        uint32_t *rsp;
+
+        sts_vbuf = req->req_vbuf;
+        sts_vbuf += MPT_RQSL(mpt);
+        rsp = (uint32_t *) sts_vbuf;
+        memcpy(tp->LUN, fc->FcpLun, sizeof (tp->LUN));
+
+        /*
+         * The MPI_TARGET_FCP_RSP_BUFFER define is unfortunate.
+         * It has to be big-endian in memory and is organized
+         * in 32 bit words, which are much easier to deal with
+         * as words which are swizzled as needed.
+         *
+         * All we're filling here is the FC_RSP payload.
+         * We may just have the chip synthesize it if
+         * we have no residual and an OK status.
+         */
+        memset(rsp, 0, sizeof (MPI_TARGET_FCP_RSP_BUFFER));
+
+        rsp[2] = status;
+        if (tgt->resid) {
+            rsp[2] |= 0x800;    /* XXXX NEED MNEMONIC!!!! */
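The bare 0x800 here (and the 0x200 just below) set bits in the FCP_RSP flags byte, which sits one byte above the SCSI status in this pre-swap word. The comment asks for mnemonics; a hedged sketch of what those defines could look like, with suggested (not authoritative) names matching the FCP_RSP flag layout:

    #include <stdint.h>
    #include <assert.h>

    /* Suggested mnemonics for the bare constants: FCP flag 0x08
     * (residual under-run) lands at 0x800 and flag 0x02 (sense length
     * valid) at 0x200 before the final byte swap. */
    #define FCP_RSP_FLAGS_SHIFT 8u
    #define FCP_RESID_UNDER     (0x08u << FCP_RSP_FLAGS_SHIFT) /* 0x800 */
    #define FCP_SNS_LEN_VALID   (0x02u << FCP_RSP_FLAGS_SHIFT) /* 0x200 */

    int
    main(void)
    {
        uint32_t word = 0x02;    /* SCSI status: CHECK CONDITION */

        word |= FCP_RESID_UNDER | FCP_SNS_LEN_VALID;
        assert(word == 0xa02);
        return (0);
    }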
+            rsp[3] = htobe32(tgt->resid);
+#ifdef WE_TRUST_AUTO_GOOD_STATUS
+            resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
+#endif
+        }
+        if (status == SCSI_STATUS_CHECK_COND) {
+            int i;
+
+            rsp[2] |= 0x200;    /* XXXX NEED MNEMONIC!!!! */
+            rsp[4] = htobe32(MPT_SENSE_SIZE);
+            if (sense_data) {
+                memcpy(&rsp[8], sense_data, MPT_SENSE_SIZE);
+            } else {
+                mpt_prt(mpt, "mpt_scsi_tgt_status: CHECK CONDI"
+                    "TION but no sense data?\n");
+                memset(&rsp[8], 0, MPT_SENSE_SIZE);
+            }
+            for (i = 8; i < (8 + (MPT_SENSE_SIZE >> 2)); i++) {
+                rsp[i] = htobe32(rsp[i]);
+            }
+#ifdef WE_TRUST_AUTO_GOOD_STATUS
+            resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
+#endif
+        }
+#ifndef WE_TRUST_AUTO_GOOD_STATUS
+        resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
+#endif
+        rsp[2] = htobe32(rsp[2]);
+    } else if (mpt->is_sas) {
+        PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
+            (PTR_MPI_TARGET_SSP_CMD_BUFFER) cmd_vbuf;
+        memcpy(tp->LUN, ssp->LogicalUnitNumber, sizeof (tp->LUN));
+    } else {
+        PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
+            (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) cmd_vbuf;
+        tp->StatusCode = status;
+        tp->QueueTag = htole16(sp->Tag);
+        memcpy(tp->LUN, sp->LogicalUnitNumber, sizeof (tp->LUN));
+    }
+
+    tp->ReplyWord = htole32(tgt->reply_desc);
+    tp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
+
+#ifdef WE_CAN_USE_AUTO_REPOST
+    tp->MsgFlags = TARGET_STATUS_SEND_FLAGS_REPOST_CMD_BUFFER;
+#endif
+    if (status == SCSI_STATUS_OK && resplen == 0) {
+        tp->MsgFlags |= TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS;
+    } else {
+        tp->StatusDataSGE.u.Address32 = htole32((uint32_t) paddr);
+        fl =
+            MPI_SGE_FLAGS_HOST_TO_IOC |
+            MPI_SGE_FLAGS_SIMPLE_ELEMENT |
+            MPI_SGE_FLAGS_LAST_ELEMENT |
+            MPI_SGE_FLAGS_END_OF_LIST |
+            MPI_SGE_FLAGS_END_OF_BUFFER;
+        fl <<= MPI_SGE_FLAGS_SHIFT;
+        fl |= resplen;
+        tp->StatusDataSGE.FlagsLength = htole32(fl);
+    }
+
+    mpt_lprt(mpt, MPT_PRT_DEBUG,
+        "STATUS_CCB %p (wit%s sense) tag %x req %p:%u resid %u\n",
+        ccb, sense_data?"h" : "hout", ccb? ccb->csio.tag_id : -1, req,
+        req->serno, tgt->resid);
+    if (ccb) {
+        ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
+        mpt_req_timeout(req, 60 * hz, mpt_timeout, ccb);
+    }
+    mpt_send_cmd(mpt, req);
+}
+
+static void
+mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *mpt, request_t *req, mpt_task_mgmt_t fc,
+    tgt_resource_t *trtp, int init_id)
+{
+    struct ccb_immed_notify *inot;
+    mpt_tgt_state_t *tgt;
+
+    tgt = MPT_TGT_STATE(mpt, req);
+    inot = (struct ccb_immed_notify *) STAILQ_FIRST(&trtp->inots);
+    if (inot == NULL) {
+        mpt_lprt(mpt, MPT_PRT_WARN, "no INOTSs- sending back BSY\n");
+        mpt_scsi_tgt_status(mpt, NULL, req, SCSI_STATUS_BUSY, NULL);
+        return;
+    }
+    STAILQ_REMOVE_HEAD(&trtp->inots, sim_links.stqe);
+    mpt_lprt(mpt, MPT_PRT_DEBUG1,
+        "Get FREE INOT %p lun %d\n", inot, inot->ccb_h.target_lun);
+
+    memset(&inot->sense_data, 0, sizeof (inot->sense_data));
+    inot->sense_len = 0;
+    memset(inot->message_args, 0, sizeof (inot->message_args));
+    inot->initiator_id = init_id;    /* XXX */
+
+    /*
+     * This is a somewhat grotesque attempt to map from task management
+     * to old style SCSI messages.  God help us all.
+     */
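The switch that follows is a fixed one-to-one mapping, so it could equally be written as a lookup table. A stand-alone sketch of that alternative; the enum and MSG_* values here are illustrative placeholders, not the driver's mpt_task_mgmt_t or the real CAM message codes:

    #include <stdint.h>
    #include <stdio.h>

    enum tmf { TMF_ABORT_TASK_SET, TMF_CLEAR_TASK_SET, TMF_TARGET_RESET,
               TMF_CLEAR_ACA, TMF_TERMINATE_TASK, TMF_COUNT };

    /* Placeholder message codes standing in for the CAM MSG_* values. */
    enum { MSG_NOOP_ = 0x08, MSG_TARGET_RESET_ = 0x0c,
           MSG_ABORT_TAG_ = 0x0d, MSG_CLEAR_TASK_SET_ = 0x0e,
           MSG_CLEAR_ACA_ = 0x16 };

    static const uint8_t tmf2msg[TMF_COUNT] = {
        [TMF_ABORT_TASK_SET] = MSG_ABORT_TAG_,
        [TMF_CLEAR_TASK_SET] = MSG_CLEAR_TASK_SET_,
        [TMF_TARGET_RESET]   = MSG_TARGET_RESET_,
        [TMF_CLEAR_ACA]      = MSG_CLEAR_ACA_,
        [TMF_TERMINATE_TASK] = MSG_ABORT_TAG_,  /* no closer equivalent */
    };

    static uint8_t
    tmf_to_msg(int fc)
    {
        if (fc < 0 || fc >= TMF_COUNT)
            return (MSG_NOOP_);    /* default, as in the switch */
        return (tmf2msg[fc]);
    }

    int
    main(void)
    {
        printf("0x%02x\n", tmf_to_msg(TMF_TARGET_RESET));
        return (0);
    }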
+    switch (fc) {
+    case MPT_ABORT_TASK_SET:
+        inot->message_args[0] = MSG_ABORT_TAG;
+        break;
+    case MPT_CLEAR_TASK_SET:
+        inot->message_args[0] = MSG_CLEAR_TASK_SET;
+        break;
+    case MPT_TARGET_RESET:
+        inot->message_args[0] = MSG_TARGET_RESET;
+        break;
+    case MPT_CLEAR_ACA:
+        inot->message_args[0] = MSG_CLEAR_ACA;
+        break;
+    case MPT_TERMINATE_TASK:
+        inot->message_args[0] = MSG_ABORT_TAG;
+        break;
+    default:
+        inot->message_args[0] = MSG_NOOP;
+        break;
+    }
+    tgt->ccb = (union ccb *) inot;
+    inot->ccb_h.status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
+    MPTLOCK_2_CAMLOCK(mpt);
+    xpt_done((union ccb *)inot);
+    CAMLOCK_2_MPTLOCK(mpt);
+}
+
+static void
+mpt_scsi_tgt_atio(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc)
+{
+    static uint8_t null_iqd[SHORT_INQUIRY_LENGTH] = {
+        0x7f, 0x00, 0x02, 0x02, 0x20, 0x00, 0x00, 0x32,
+        'F', 'R', 'E', 'E', 'B', 'S', 'D', ' ',
+        'L', 'S', 'I', '-', 'L', 'O', 'G', 'I',
+        'C', ' ', 'N', 'U', 'L', 'D', 'E', 'V',
+        '0', '0', '0', '1'
+    };
+    struct ccb_accept_tio *atiop;
+    lun_id_t lun;
+    int tag_action = 0;
+    mpt_tgt_state_t *tgt;
+    tgt_resource_t *trtp = NULL;
+    U8 *lunptr;
+    U8 *vbuf;
+    U16 itag;
+    U16 ioindex;
+    mpt_task_mgmt_t fct = MPT_NIL_TMT_VALUE;
+    uint8_t *cdbp;
+
+    /*
+     * First, DMA sync the received command-
+     * which is in the *request* phys area.
+     *
+     * XXX: We could optimize this for a range
+     */
+    bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
+        BUS_DMASYNC_POSTREAD);
+
+    /*
+     * Stash info for the current command where we can get at it later.
+     */
+    vbuf = req->req_vbuf;
+    vbuf += MPT_RQSL(mpt);
+
+    /*
+     * Get our state pointer set up.
+     */
+    tgt = MPT_TGT_STATE(mpt, req);
+    if (tgt->state != TGT_STATE_LOADED) {
+        mpt_tgt_dump_req_state(mpt, req);
+        panic("bad target state in mpt_scsi_tgt_atio");
+    }
+    memset(tgt, 0, sizeof (mpt_tgt_state_t));
+    tgt->state = TGT_STATE_IN_CAM;
+    tgt->reply_desc = reply_desc;
+    ioindex = GET_IO_INDEX(reply_desc);
+    if (mpt->verbose >= MPT_PRT_DEBUG) {
+        mpt_dump_data(mpt, "mpt_scsi_tgt_atio response", vbuf,
+            max(sizeof (MPI_TARGET_FCP_CMD_BUFFER),
+            max(sizeof (MPI_TARGET_SSP_CMD_BUFFER),
+            sizeof (MPI_TARGET_SCSI_SPI_CMD_BUFFER))));
+    }
+    if (mpt->is_fc) {
+        PTR_MPI_TARGET_FCP_CMD_BUFFER fc;
+        fc = (PTR_MPI_TARGET_FCP_CMD_BUFFER) vbuf;
+        if (fc->FcpCntl[2]) {
+            /*
+             * Task Management Request
+             */
+            switch (fc->FcpCntl[2]) {
+            case 0x2:
+                fct = MPT_ABORT_TASK_SET;
+                break;
+            case 0x4:
+                fct = MPT_CLEAR_TASK_SET;
+                break;
+            case 0x20:
+                fct = MPT_TARGET_RESET;
+                break;
+            case 0x40:
+                fct = MPT_CLEAR_ACA;
+                break;
+            case 0x80:
+                fct = MPT_TERMINATE_TASK;
+                break;
+            default:
+                mpt_prt(mpt, "CORRUPTED TASK MGMT BITS: 0x%x\n",
+                    fc->FcpCntl[2]);
+                mpt_scsi_tgt_status(mpt, 0, req,
+                    SCSI_STATUS_OK, 0);
+                return;
+            }
+        } else {
+            switch (fc->FcpCntl[1]) {
+            case 0:
+                tag_action = MSG_SIMPLE_Q_TAG;
+                break;
+            case 1:
+                tag_action = MSG_HEAD_OF_Q_TAG;
+                break;
+            case 2:
+                tag_action = MSG_ORDERED_Q_TAG;
+                break;
+            default:
+                /*
+                 * Bah.  Ignore Untagged Queuing and ACA
Ignore Untagged Queuing and ACA + */ + tag_action = MSG_SIMPLE_Q_TAG; + break; + } + } + tgt->resid = be32toh(fc->FcpDl); + cdbp = fc->FcpCdb; + lunptr = fc->FcpLun; + itag = be16toh(fc->OptionalOxid); + } else if (mpt->is_sas) { + PTR_MPI_TARGET_SSP_CMD_BUFFER ssp; + ssp = (PTR_MPI_TARGET_SSP_CMD_BUFFER) vbuf; + cdbp = ssp->CDB; + lunptr = ssp->LogicalUnitNumber; + itag = ssp->InitiatorTag; + } else { + PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp; + sp = (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) vbuf; + cdbp = sp->CDB; + lunptr = sp->LogicalUnitNumber; + itag = sp->Tag; + } + + /* + * Generate a simple lun + */ + switch (lunptr[0] & 0xc0) { + case 0x40: + lun = ((lunptr[0] & 0x3f) << 8) | lunptr[1]; + break; + case 0: + lun = lunptr[1]; + break; + default: + mpt_lprt(mpt, MPT_PRT_ERROR, "cannot handle this type lun\n"); + lun = 0xffff; + break; + } + + /* + * Deal with non-enabled or bad luns here. + */ + if (lun >= MPT_MAX_LUNS || mpt->tenabled == 0 || + mpt->trt[lun].enabled == 0) { + if (mpt->twildcard) { + trtp = &mpt->trt_wildcard; + } else if (fct == MPT_NIL_TMT_VALUE) { + /* + * In this case, we haven't got an upstream listener + * for either a specific lun or wildcard luns. We + * have to make some sensible response. For regular + * inquiry, just return some NOT HERE inquiry data. + * For VPD inquiry, report illegal field in cdb. + * For REQUEST SENSE, just return NO SENSE data. + * REPORT LUNS gets illegal command. + * All other commands get 'no such device'. + */ + uint8_t *sp, cond, buf[MPT_SENSE_SIZE]; + size_t len; + + memset(buf, 0, MPT_SENSE_SIZE); + cond = SCSI_STATUS_CHECK_COND; + buf[0] = 0xf0; + buf[2] = 0x5; + buf[7] = 0x8; + sp = buf; + tgt->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex); + + switch (cdbp[0]) { + case INQUIRY: + { + if (cdbp[1] != 0) { + buf[12] = 0x26; + buf[13] = 0x01; + break; + } + len = min(tgt->resid, cdbp[4]); + len = min(len, sizeof (null_iqd)); + mpt_lprt(mpt, MPT_PRT_DEBUG, + "local inquiry %ld bytes\n", (long) len); + mpt_scsi_tgt_local(mpt, req, lun, 1, + null_iqd, len); + return; + } + case REQUEST_SENSE: + { + buf[2] = 0x0; + len = min(tgt->resid, cdbp[4]); + len = min(len, sizeof (buf)); + mpt_lprt(mpt, MPT_PRT_DEBUG, + "local reqsense %ld bytes\n", (long) len); + mpt_scsi_tgt_local(mpt, req, lun, 1, + buf, len); + return; + } + case REPORT_LUNS: + mpt_lprt(mpt, MPT_PRT_DEBUG, "REPORT LUNS\n"); + buf[12] = 0x26; + return; + default: + mpt_lprt(mpt, MPT_PRT_DEBUG, + "CMD 0x%x to unmanaged lun %u\n", + cdbp[0], lun); + buf[12] = 0x25; + break; + } + mpt_scsi_tgt_status(mpt, NULL, req, cond, sp); + return; + } + /* otherwise, leave trtp NULL */ + } else { + trtp = &mpt->trt[lun]; + } + + /* + * Deal with any task management + */ + if (fct != MPT_NIL_TMT_VALUE) { + if (trtp == NULL) { + mpt_prt(mpt, "task mgmt function %x but no listener\n", + fct); + mpt_scsi_tgt_status(mpt, 0, req, + SCSI_STATUS_OK, 0); + } else { + mpt_scsi_tgt_tsk_mgmt(mpt, req, fct, trtp, + GET_INITIATOR_INDEX(reply_desc)); + } + return; + } + + + atiop = (struct ccb_accept_tio *) STAILQ_FIRST(&trtp->atios); + if (atiop == NULL) { + mpt_lprt(mpt, MPT_PRT_WARN, + "no ATIOs for lun %u- sending back %s\n", lun, + mpt->tenabled? "QUEUE FULL" : "BUSY"); + mpt_scsi_tgt_status(mpt, NULL, req, + mpt->tenabled?
SCSI_STATUS_QUEUE_FULL : SCSI_STATUS_BUSY, + NULL); + return; + } + STAILQ_REMOVE_HEAD(&trtp->atios, sim_links.stqe); + mpt_lprt(mpt, MPT_PRT_DEBUG1, + "Get FREE ATIO %p lun %d\n", atiop, atiop->ccb_h.target_lun); + atiop->ccb_h.ccb_mpt_ptr = mpt; + atiop->ccb_h.status = CAM_CDB_RECVD; + atiop->ccb_h.target_lun = lun; + atiop->sense_len = 0; + atiop->init_id = GET_INITIATOR_INDEX(reply_desc); + atiop->cdb_len = mpt_cdblen(cdbp[0], 16); + memcpy(atiop->cdb_io.cdb_bytes, cdbp, atiop->cdb_len); + + /* + * The tag we construct here allows us to find the + * original request that the command came in with. + * + * This way we don't have to depend on anything but the + * tag to find things when CCBs show back up from CAM. + */ + atiop->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex); + tgt->tag_id = atiop->tag_id; + if (tag_action) { + atiop->tag_action = tag_action; + atiop->ccb_h.flags = CAM_TAG_ACTION_VALID; + } + if (mpt->verbose >= MPT_PRT_DEBUG) { + int i; + mpt_prt(mpt, "START_CCB %p for lun %u CDB=<", atiop, + atiop->ccb_h.target_lun); + for (i = 0; i < atiop->cdb_len; i++) { + mpt_prtc(mpt, "%02x%c", cdbp[i] & 0xff, + (i == (atiop->cdb_len - 1))? '>' : ' '); + } + mpt_prtc(mpt, " itag %x tag %x rdesc %x dl=%u\n", + itag, atiop->tag_id, tgt->reply_desc, tgt->resid); + } + + MPTLOCK_2_CAMLOCK(mpt); + xpt_done((union ccb *)atiop); + CAMLOCK_2_MPTLOCK(mpt); +} + +static void +mpt_tgt_dump_tgt_state(struct mpt_softc *mpt, request_t *req) +{ + mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req); + + mpt_prt(mpt, "req %p:%u tgt:rdesc 0x%x resid %u xfrd %u ccb %p treq %p " + "nx %d tag 0x%08x state=%d\n", req, req->serno, tgt->reply_desc, + tgt->resid, tgt->bytes_xfered, tgt->ccb, tgt->req, tgt->nxfers, + tgt->tag_id, tgt->state); +} + +static void +mpt_tgt_dump_req_state(struct mpt_softc *mpt, request_t *req) +{ + mpt_prt(mpt, "req %p:%u index %u (%x) state %x\n", req, req->serno, + req->index, req->index, req->state); + mpt_tgt_dump_tgt_state(mpt, req); +} + +static int +mpt_scsi_tgt_reply_handler(struct mpt_softc *mpt, request_t *req, + uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) +{ + int dbg; + union ccb *ccb; + U16 status; + + if (reply_frame == NULL) { + /* + * Figure out what the state of the command is. + */ + mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req); + +#ifdef INVARIANTS + mpt_req_spcl(mpt, req, "turbo scsi_tgt_reply", __LINE__); + if (tgt->req) { + mpt_req_not_spcl(mpt, tgt->req, + "turbo scsi_tgt_reply associated req", __LINE__); + } +#endif + switch(tgt->state) { + case TGT_STATE_LOADED: + /* + * This is a new command starting. 
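+ * A turbo (context-only) reply for a request still in the LOADED + * state means the IOC has deposited a fresh command into the buffer + * we posted, so hand it to the ATIO path below.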
+ */ + mpt_scsi_tgt_atio(mpt, req, reply_desc); + break; + case TGT_STATE_MOVING_DATA: + { + uint8_t *sp = NULL, sense[MPT_SENSE_SIZE]; + + ccb = tgt->ccb; + if (tgt->req == NULL) { + panic("mpt: turbo target reply with null " + "associated request moving data"); + /* NOTREACHED */ + } + if (ccb == NULL) { + if (tgt->is_local == 0) { + panic("mpt: turbo target reply with " + "null associated ccb moving data"); + /* NOTREACHED */ + } + mpt_lprt(mpt, MPT_PRT_DEBUG, + "TARGET_ASSIST local done\n"); + TAILQ_REMOVE(&mpt->request_pending_list, + tgt->req, links); + mpt_free_request(mpt, tgt->req); + tgt->req = NULL; + mpt_scsi_tgt_status(mpt, NULL, req, + 0, NULL); + return (TRUE); + } + tgt->ccb = NULL; + tgt->nxfers++; + mpt_req_untimeout(req, mpt_timeout, ccb); + mpt_lprt(mpt, MPT_PRT_DEBUG, + "TARGET_ASSIST %p (req %p:%u) done tag 0x%x\n", + ccb, tgt->req, tgt->req->serno, ccb->csio.tag_id); + /* + * Free the Target Assist Request + */ + KASSERT(tgt->req->ccb == ccb, + ("tgt->req %p:%u tgt->req->ccb %p", tgt->req, + tgt->req->serno, tgt->req->ccb)); + TAILQ_REMOVE(&mpt->request_pending_list, + tgt->req, links); + mpt_free_request(mpt, tgt->req); + tgt->req = NULL; + + /* + * Do we need to send status now? That is, are + * we done with all our data transfers? + */ + if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) { + mpt_set_ccb_status(ccb, CAM_REQ_CMP); + ccb->ccb_h.status &= ~CAM_SIM_QUEUED; + KASSERT(ccb->ccb_h.status, + ("zero ccb sts at %d\n", __LINE__)); + tgt->state = TGT_STATE_IN_CAM; + if (mpt->outofbeer) { + ccb->ccb_h.status |= CAM_RELEASE_SIMQ; + mpt->outofbeer = 0; + mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n"); + } + MPTLOCK_2_CAMLOCK(mpt); + xpt_done(ccb); + CAMLOCK_2_MPTLOCK(mpt); + break; + } + /* + * Otherwise, send status (and sense) + */ + if (ccb->ccb_h.flags & CAM_SEND_SENSE) { + sp = sense; + memcpy(sp, &ccb->csio.sense_data, + min(ccb->csio.sense_len, MPT_SENSE_SIZE)); + } + mpt_scsi_tgt_status(mpt, ccb, req, + ccb->csio.scsi_status, sp); + break; + } + case TGT_STATE_SENDING_STATUS: + case TGT_STATE_MOVING_DATA_AND_STATUS: + { + int ioindex; + ccb = tgt->ccb; + + if (tgt->req == NULL) { + panic("mpt: turbo target reply with null " + "associated request sending status"); + /* NOTREACHED */ + } + + if (ccb) { + tgt->ccb = NULL; + if (tgt->state == + TGT_STATE_MOVING_DATA_AND_STATUS) { + tgt->nxfers++; + } + mpt_req_untimeout(req, mpt_timeout, ccb); + if (ccb->ccb_h.flags & CAM_SEND_SENSE) { + ccb->ccb_h.status |= CAM_SENT_SENSE; + } + mpt_lprt(mpt, MPT_PRT_DEBUG, + "TARGET_STATUS tag %x sts %x flgs %x req " + "%p\n", ccb->csio.tag_id, ccb->ccb_h.status, + ccb->ccb_h.flags, tgt->req); + /* + * Free the Target Send Status Request + */ + KASSERT(tgt->req->ccb == ccb, + ("tgt->req %p:%u tgt->req->ccb %p", + tgt->req, tgt->req->serno, tgt->req->ccb)); + /* + * Notify CAM that we're done + */ + mpt_set_ccb_status(ccb, CAM_REQ_CMP); + ccb->ccb_h.status &= ~CAM_SIM_QUEUED; + KASSERT(ccb->ccb_h.status, + ("ZERO ccb sts at %d\n", __LINE__)); + tgt->ccb = NULL; + } else { + mpt_lprt(mpt, MPT_PRT_DEBUG, + "TARGET_STATUS non-CAM for req %p:%u\n", + tgt->req, tgt->req->serno); + } + TAILQ_REMOVE(&mpt->request_pending_list, + tgt->req, links); + mpt_free_request(mpt, tgt->req); + tgt->req = NULL; + + /* + * And re-post the Command Buffer. + * This will reset the state. 
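+ * (The request should cycle back through TGT_STATE_LOADING until the + * TARGET_CMD_BUFFER_POST reply below acknowledges the post and marks + * it LOADED again.)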
+ */ + ioindex = GET_IO_INDEX(reply_desc); + TAILQ_REMOVE(&mpt->request_pending_list, req, links); + tgt->is_local = 0; + mpt_post_target_command(mpt, req, ioindex); + + /* + * And post a done for anyone who cares + */ + if (ccb) { + if (mpt->outofbeer) { + ccb->ccb_h.status |= CAM_RELEASE_SIMQ; + mpt->outofbeer = 0; + mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n"); + } + MPTLOCK_2_CAMLOCK(mpt); + xpt_done(ccb); + CAMLOCK_2_MPTLOCK(mpt); + } + break; + } + case TGT_STATE_NIL: /* XXX This Never Happens XXX */ + tgt->state = TGT_STATE_LOADED; + break; + default: + mpt_prt(mpt, "Unknown Target State 0x%x in Context " + "Reply Function\n", tgt->state); + } + return (TRUE); + } + + status = le16toh(reply_frame->IOCStatus); + if (status != MPI_IOCSTATUS_SUCCESS) { + dbg = MPT_PRT_ERROR; + } else { + dbg = MPT_PRT_DEBUG1; + } + + mpt_lprt(mpt, dbg, + "SCSI_TGT REPLY: req=%p:%u reply=%p func=%x IOCstatus 0x%x\n", + req, req->serno, reply_frame, reply_frame->Function, status); + + switch (reply_frame->Function) { + case MPI_FUNCTION_TARGET_CMD_BUFFER_POST: + { + mpt_tgt_state_t *tgt; +#ifdef INVARIANTS + mpt_req_spcl(mpt, req, "tgt reply BUFFER POST", __LINE__); +#endif + if (status != MPI_IOCSTATUS_SUCCESS) { + /* + * XXX What to do? + */ + break; + } + tgt = MPT_TGT_STATE(mpt, req); + KASSERT(tgt->state == TGT_STATE_LOADING, + ("bad state 0x%x on reply to buffer post\n", tgt->state)); + mpt_assign_serno(mpt, req); + tgt->state = TGT_STATE_LOADED; + break; + } + case MPI_FUNCTION_TARGET_ASSIST: +#ifdef INVARIANTS + mpt_req_not_spcl(mpt, req, "tgt reply TARGET ASSIST", __LINE__); +#endif + mpt_prt(mpt, "target assist completion\n"); + TAILQ_REMOVE(&mpt->request_pending_list, req, links); + mpt_free_request(mpt, req); + break; + case MPI_FUNCTION_TARGET_STATUS_SEND: +#ifdef INVARIANTS + mpt_req_not_spcl(mpt, req, "tgt reply STATUS SEND", __LINE__); +#endif + mpt_prt(mpt, "status send completion\n"); + TAILQ_REMOVE(&mpt->request_pending_list, req, links); + mpt_free_request(mpt, req); + break; + case MPI_FUNCTION_TARGET_MODE_ABORT: + { + PTR_MSG_TARGET_MODE_ABORT_REPLY abtrp = + (PTR_MSG_TARGET_MODE_ABORT_REPLY) reply_frame; + PTR_MSG_TARGET_MODE_ABORT abtp = + (PTR_MSG_TARGET_MODE_ABORT) req->req_vbuf; + uint32_t cc = GET_IO_INDEX(le32toh(abtp->ReplyWord)); +#ifdef INVARIANTS + mpt_req_not_spcl(mpt, req, "tgt reply TMODE ABORT", __LINE__); +#endif + mpt_prt(mpt, "ABORT RX_ID 0x%x Complete; status 0x%x cnt %u\n", + cc, le16toh(abtrp->IOCStatus), le32toh(abtrp->AbortCount)); + TAILQ_REMOVE(&mpt->request_pending_list, req, links); + mpt_free_request(mpt, req); + break; + } + default: + mpt_prt(mpt, "Unknown Target Address Reply Function code: " + "0x%x\n", reply_frame->Function); + break; + } + return (TRUE); +} diff --git a/sys/dev/disk/mpt/mpt_cam.h b/sys/dev/disk/mpt/mpt_cam.h new file mode 100644 index 0000000000..d8c7a4b0c9 --- /dev/null +++ b/sys/dev/disk/mpt/mpt_cam.h @@ -0,0 +1,159 @@ +/* $FreeBSD: src/sys/dev/mpt/mpt_cam.h,v 1.6 2007/05/05 20:18:24 mjacob Exp $ */ +/*- + * LSI MPT Host Adapter FreeBSD Wrapper Definitions (CAM version) + * + * Copyright (c) 2000, 2001 by Greg Ansley, Adam Prewett + * + * Partially derived from Matty Jacobs ISP driver. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice immediately at the beginning of the file, without modification, + * this list of conditions, and the following disclaimer. + * 2. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR + * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + */ +/*- + * Copyright (c) 2002, 2006 by Matthew Jacob + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon including + * a substantially similar Disclaimer requirement for further binary + * redistribution. + * 3. Neither the names of the above listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT + * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Support from Chris Ellsworth in order to make SAS adapters work + * is gratefully acknowledged. + * + * Support from LSI-Logic has also gone a great deal toward making this a + * workable subsystem and is gratefully acknowledged. + */ +/*- + * Copyright (c) 2004, Avid Technology, Inc. and its contributors. + * Copyright (c) 2005, WHEEL Sp. z o.o. + * Copyright (c) 2004, 2005 Justin T. Gibbs + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon including + * a substantially similar Disclaimer requirement for further binary + * redistribution. + * 3. Neither the names of the above listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT + * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef _MPT_CAM_H_ +#define _MPT_CAM_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define ccb_mpt_ptr sim_priv.entries[0].ptr +#define ccb_req_ptr sim_priv.entries[1].ptr + +/************************** CCB Manipulation Routines *************************/ +static __inline void mpt_freeze_ccb(union ccb *ccb); +static __inline void mpt_set_ccb_status(union ccb *ccb, cam_status status); + +static __inline void +mpt_freeze_ccb(union ccb *ccb) +{ + if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { + ccb->ccb_h.status |= CAM_DEV_QFRZN; + xpt_freeze_devq(ccb->ccb_h.path, /*count*/1); + } +} + +static __inline void +mpt_set_ccb_status(union ccb *ccb, cam_status status) +{ + ccb->ccb_h.status &= ~CAM_STATUS_MASK; + ccb->ccb_h.status |= status; +} + +/****************************** Timeout Recovery ******************************/ +/* + * The longest timeout specified for a Task Management command. + */ +#define MPT_TMF_MAX_TIMEOUT (20000) + +static __inline void +mpt_wakeup_recovery_thread(struct mpt_softc *mpt) +{ + wakeup(mpt); +} + +/************************** Version Compatibility *************************/ +#ifdef __DragonFly__ +#define mpt_sim_alloc(a, b, c, mpt, e, f, g) \ + cam_sim_alloc(a, b, c, mpt, (mpt)->unit, &sim_mplock, e, f, g) +#else +#if __FreeBSD_version < 700031 +#define mpt_sim_alloc(a, b, c, mpt, e, f, g) \ + cam_sim_alloc(a, b, c, mpt, (mpt)->unit, e, f, g) +#else +#define mpt_sim_alloc(a, b, c, mpt, e, f, g) \ + cam_sim_alloc(a, b, c, mpt, (mpt)->unit, &(mpt)->mpt_lock, e, f, g) +#endif +#endif /* __DragonFly__ */ +#endif /* _MPT_CAM_H_ */ diff --git a/sys/dev/disk/mpt/mpt_debug.c b/sys/dev/disk/mpt/mpt_debug.c index 2a6337fc47..cf39fb3d0f 100644 --- a/sys/dev/disk/mpt/mpt_debug.c +++ b/sys/dev/disk/mpt/mpt_debug.c @@ -61,19 +61,19 @@ * * Support from LSI-Logic has also gone a great deal toward making this a * workable subsystem and is gratefully acknowledged.
+ * $FreeBSD: src/sys/dev/mpt/mpt_debug.c,v 1.18 2006/12/07 22:02:28 mjacob Exp $ */ #include -__FBSDID("$FreeBSD: src/sys/dev/mpt/mpt_debug.c,v 1.18 2006/12/07 22:02:28 mjacob Exp $"); -#include +#include -#include -#include -#include -#include +#include +#include +#include +#include -#include +#include #include /* for use by mpt_prt below */ @@ -218,7 +218,7 @@ mpt_ioc_status(int code) return status->Error_String; status++; } - snprintf(buf, sizeof buf, "Unknown (0x%08x)", code); + ksnprintf(buf, sizeof buf, "Unknown (0x%08x)", code); return buf; } @@ -230,10 +230,10 @@ mpt_ioc_diag(u_int32_t code) char *ptr = buf; char *end = &buf[128]; buf[0] = '\0'; - ptr += snprintf(buf, sizeof buf, "(0x%08x)", code); + ptr += ksnprintf(buf, sizeof buf, "(0x%08x)", code); while (status->Error_Code >= 0) { if ((status->Error_Code & code) != 0) - ptr += snprintf(ptr, (size_t)(end-ptr), "%s ", + ptr += ksnprintf(ptr, (size_t)(end-ptr), "%s ", status->Error_String); status++; } @@ -250,7 +250,7 @@ mpt_ioc_function(int code) return status->Error_String; status++; } - snprintf(buf, sizeof buf, "Unknown (0x%08x)", code); + ksnprintf(buf, sizeof buf, "Unknown (0x%08x)", code); return buf; } @@ -264,7 +264,7 @@ mpt_ioc_event(int code) return status->Error_String; status++; } - snprintf(buf, sizeof buf, "Unknown (0x%08x)", code); + ksnprintf(buf, sizeof buf, "Unknown (0x%08x)", code); return buf; } @@ -276,10 +276,10 @@ mpt_scsi_state(int code) char *ptr = buf; char *end = &buf[128]; buf[0] = '\0'; - ptr += snprintf(buf, sizeof buf, "(0x%08x)", code); + ptr += ksnprintf(buf, sizeof buf, "(0x%08x)", code); while (status->Error_Code >= 0) { if ((status->Error_Code & code) != 0) - ptr += snprintf(ptr, (size_t)(end-ptr), "%s ", + ptr += ksnprintf(ptr, (size_t)(end-ptr), "%s ", status->Error_String); status++; } @@ -295,7 +295,7 @@ mpt_scsi_status(int code) return status->Error_String; status++; } - snprintf(buf, sizeof buf, "Unknown (0x%08x)", code); + ksnprintf(buf, sizeof buf, "Unknown (0x%08x)", code); return buf; } static char * @@ -340,14 +340,14 @@ mpt_scsi_tm_type(int code) return status->Error_String; status++; } - snprintf(buf, sizeof buf, "Unknown (0x%08x)", code); + ksnprintf(buf, sizeof buf, "Unknown (0x%08x)", code); return buf; } void mpt_print_db(u_int32_t mb) { - printf("mpt mailbox: (0x%x) State %s WhoInit %s\n", + kprintf("mpt mailbox: (0x%x) State %s WhoInit %s\n", mb, mpt_state(mb), mpt_who(MPT_WHO(mb))); } @@ -357,67 +357,67 @@ mpt_print_db(u_int32_t mb) static void mpt_print_reply_hdr(MSG_DEFAULT_REPLY *msg) { - printf("%s Reply @ %p\n", mpt_ioc_function(msg->Function), msg); - printf("\tIOC Status %s\n", mpt_ioc_status(msg->IOCStatus)); - printf("\tIOCLogInfo 0x%08x\n", msg->IOCLogInfo); - printf("\tMsgLength 0x%02x\n", msg->MsgLength); - printf("\tMsgFlags 0x%02x\n", msg->MsgFlags); - printf("\tMsgContext 0x%08x\n", msg->MsgContext); + kprintf("%s Reply @ %p\n", mpt_ioc_function(msg->Function), msg); + kprintf("\tIOC Status %s\n", mpt_ioc_status(msg->IOCStatus)); + kprintf("\tIOCLogInfo 0x%08x\n", msg->IOCLogInfo); + kprintf("\tMsgLength 0x%02x\n", msg->MsgLength); + kprintf("\tMsgFlags 0x%02x\n", msg->MsgFlags); + kprintf("\tMsgContext 0x%08x\n", msg->MsgContext); } static void mpt_print_init_reply(MSG_IOC_INIT_REPLY *msg) { mpt_print_reply_hdr((MSG_DEFAULT_REPLY *)msg); - printf("\tWhoInit %s\n", mpt_who(msg->WhoInit)); - printf("\tMaxDevices 0x%02x\n", msg->MaxDevices); - printf("\tMaxBuses 0x%02x\n", msg->MaxBuses); + kprintf("\tWhoInit %s\n", mpt_who(msg->WhoInit)); + 
kprintf("\tMaxDevices 0x%02x\n", msg->MaxDevices); + kprintf("\tMaxBuses 0x%02x\n", msg->MaxBuses); } static void mpt_print_ioc_facts(MSG_IOC_FACTS_REPLY *msg) { mpt_print_reply_hdr((MSG_DEFAULT_REPLY *)msg); - printf("\tIOCNumber %d\n", msg->IOCNumber); - printf("\tMaxChainDepth %d\n", msg->MaxChainDepth); - printf("\tWhoInit %s\n", mpt_who(msg->WhoInit)); - printf("\tBlockSize %d\n", msg->BlockSize); - printf("\tFlags %d\n", msg->Flags); - printf("\tReplyQueueDepth %d\n", msg->ReplyQueueDepth); - printf("\tReqFrameSize 0x%04x\n", msg->RequestFrameSize); - printf("\tFW Version 0x%08x\n", msg->FWVersion.Word); - printf("\tProduct ID 0x%04x\n", msg->ProductID); - printf("\tCredits 0x%04x\n", msg->GlobalCredits); - printf("\tPorts %d\n", msg->NumberOfPorts); - printf("\tEventState 0x%02x\n", msg->EventState); - printf("\tHostMFA_HA 0x%08x\n", msg->CurrentHostMfaHighAddr); - printf("\tSenseBuf_HA 0x%08x\n", + kprintf("\tIOCNumber %d\n", msg->IOCNumber); + kprintf("\tMaxChainDepth %d\n", msg->MaxChainDepth); + kprintf("\tWhoInit %s\n", mpt_who(msg->WhoInit)); + kprintf("\tBlockSize %d\n", msg->BlockSize); + kprintf("\tFlags %d\n", msg->Flags); + kprintf("\tReplyQueueDepth %d\n", msg->ReplyQueueDepth); + kprintf("\tReqFrameSize 0x%04x\n", msg->RequestFrameSize); + kprintf("\tFW Version 0x%08x\n", msg->FWVersion.Word); + kprintf("\tProduct ID 0x%04x\n", msg->ProductID); + kprintf("\tCredits 0x%04x\n", msg->GlobalCredits); + kprintf("\tPorts %d\n", msg->NumberOfPorts); + kprintf("\tEventState 0x%02x\n", msg->EventState); + kprintf("\tHostMFA_HA 0x%08x\n", msg->CurrentHostMfaHighAddr); + kprintf("\tSenseBuf_HA 0x%08x\n", msg->CurrentSenseBufferHighAddr); - printf("\tRepFrameSize 0x%04x\n", msg->CurReplyFrameSize); - printf("\tMaxDevices 0x%02x\n", msg->MaxDevices); - printf("\tMaxBuses 0x%02x\n", msg->MaxBuses); - printf("\tFWImageSize 0x%04x\n", msg->FWImageSize); + kprintf("\tRepFrameSize 0x%04x\n", msg->CurReplyFrameSize); + kprintf("\tMaxDevices 0x%02x\n", msg->MaxDevices); + kprintf("\tMaxBuses 0x%02x\n", msg->MaxBuses); + kprintf("\tFWImageSize 0x%04x\n", msg->FWImageSize); } static void mpt_print_enable_reply(MSG_PORT_ENABLE_REPLY *msg) { mpt_print_reply_hdr((MSG_DEFAULT_REPLY *)msg); - printf("\tPort: %d\n", msg->PortNumber); + kprintf("\tPort: %d\n", msg->PortNumber); } static void mpt_print_scsi_io_reply(MSG_SCSI_IO_REPLY *msg) { mpt_print_reply_hdr((MSG_DEFAULT_REPLY *)msg); - printf("\tBus: %d\n", msg->Bus); - printf("\tTargetID %d\n", msg->TargetID); - printf("\tCDBLength %d\n", msg->CDBLength); - printf("\tSCSI Status: %s\n", mpt_scsi_status(msg->SCSIStatus)); - printf("\tSCSI State: %s\n", mpt_scsi_state(msg->SCSIState)); - printf("\tTransferCnt 0x%04x\n", msg->TransferCount); - printf("\tSenseCnt 0x%04x\n", msg->SenseCount); - printf("\tResponseInfo 0x%08x\n", msg->ResponseInfo); + kprintf("\tBus: %d\n", msg->Bus); + kprintf("\tTargetID %d\n", msg->TargetID); + kprintf("\tCDBLength %d\n", msg->CDBLength); + kprintf("\tSCSI Status: %s\n", mpt_scsi_status(msg->SCSIStatus)); + kprintf("\tSCSI State: %s\n", mpt_scsi_state(msg->SCSIState)); + kprintf("\tTransferCnt 0x%04x\n", msg->TransferCount); + kprintf("\tSenseCnt 0x%04x\n", msg->SenseCount); + kprintf("\tResponseInfo 0x%08x\n", msg->ResponseInfo); } @@ -426,51 +426,51 @@ static void mpt_print_event_notice(MSG_EVENT_NOTIFY_REPLY *msg) { mpt_print_reply_hdr((MSG_DEFAULT_REPLY *)msg); - printf("\tEvent: %s\n", mpt_ioc_event(msg->Event)); - printf("\tEventContext 0x%04x\n", msg->EventContext); - printf("\tAckRequired %d\n", 
msg->AckRequired); - printf("\tEventDataLength %d\n", msg->EventDataLength); - printf("\tContinuation %d\n", msg->MsgFlags & 0x80); + kprintf("\tEvent: %s\n", mpt_ioc_event(msg->Event)); + kprintf("\tEventContext 0x%04x\n", msg->EventContext); + kprintf("\tAckRequired %d\n", msg->AckRequired); + kprintf("\tEventDataLength %d\n", msg->EventDataLength); + kprintf("\tContinuation %d\n", msg->MsgFlags & 0x80); switch(msg->Event) { case MPI_EVENT_LOG_DATA: - printf("\tEvtLogData: 0x%04x\n", msg->Data[0]); + kprintf("\tEvtLogData: 0x%04x\n", msg->Data[0]); break; case MPI_EVENT_UNIT_ATTENTION: - printf("\tTargetID: 0x%04x\n", + kprintf("\tTargetID: 0x%04x\n", msg->Data[0] & 0xff); - printf("\tBus: 0x%04x\n", + kprintf("\tBus: 0x%04x\n", (msg->Data[0] >> 8) & 0xff); break; case MPI_EVENT_IOC_BUS_RESET: case MPI_EVENT_EXT_BUS_RESET: case MPI_EVENT_RESCAN: - printf("\tPort: %d\n", + kprintf("\tPort: %d\n", (msg->Data[0] >> 8) & 0xff); break; case MPI_EVENT_LINK_STATUS_CHANGE: - printf("\tLinkState: %d\n", + kprintf("\tLinkState: %d\n", msg->Data[0] & 0xff); - printf("\tPort: %d\n", + kprintf("\tPort: %d\n", (msg->Data[1] >> 8) & 0xff); break; case MPI_EVENT_LOOP_STATE_CHANGE: - printf("\tType: %d\n", + kprintf("\tType: %d\n", (msg->Data[0] >> 16) & 0xff); - printf("\tChar3: 0x%02x\n", + kprintf("\tChar3: 0x%02x\n", (msg->Data[0] >> 8) & 0xff); - printf("\tChar4: 0x%02x\n", + kprintf("\tChar4: 0x%02x\n", (msg->Data[0] ) & 0xff); - printf("\tPort: %d\n", + kprintf("\tPort: %d\n", (msg->Data[1] >> 8) & 0xff); break; case MPI_EVENT_LOGOUT: - printf("\tN_PortId: 0x%04x\n", msg->Data[0]); - printf("\tPort: %d\n", + kprintf("\tN_PortId: 0x%04x\n", msg->Data[0]); + kprintf("\tPort: %d\n", (msg->Data[1] >> 8) & 0xff); break; } @@ -511,10 +511,10 @@ mpt_print_reply(void *vmsg) static void mpt_print_request_hdr(MSG_REQUEST_HEADER *req) { - printf("%s @ %p\n", mpt_ioc_function(req->Function), req); - printf("\tChain Offset 0x%02x\n", req->ChainOffset); - printf("\tMsgFlags 0x%02x\n", req->MsgFlags); - printf("\tMsgContext 0x%08x\n", req->MsgContext); + kprintf("%s @ %p\n", mpt_ioc_function(req->Function), req); + kprintf("\tChain Offset 0x%02x\n", req->ChainOffset); + kprintf("\tMsgFlags 0x%02x\n", req->MsgFlags); + kprintf("\tMsgContext 0x%08x\n", req->MsgContext); } void @@ -525,14 +525,14 @@ mpt_print_scsi_io_request(MSG_SCSI_IO_REQUEST *orig_msg) bcopy(orig_msg, msg, sizeof (MSG_SCSI_IO_REQUEST)); mpt_print_request_hdr((MSG_REQUEST_HEADER *)msg); - printf("\tBus: %d\n", msg->Bus); - printf("\tTargetID %d\n", msg->TargetID); - printf("\tSenseBufferLength %d\n", msg->SenseBufferLength); - printf("\tLUN: 0x%0x\n", msg->LUN[1]); - printf("\tControl 0x%08x ", msg->Control); + kprintf("\tBus: %d\n", msg->Bus); + kprintf("\tTargetID %d\n", msg->TargetID); + kprintf("\tSenseBufferLength %d\n", msg->SenseBufferLength); + kprintf("\tLUN: 0x%0x\n", msg->LUN[1]); + kprintf("\tControl 0x%08x ", msg->Control); #define MPI_PRINT_FIELD(x) \ case MPI_SCSIIO_CONTROL_ ## x : \ - printf(" " #x " "); \ + kprintf(" " #x " "); \ break switch (msg->Control & MPI_SCSIIO_CONTROL_DATADIRECTION_MASK) { @@ -540,7 +540,7 @@ mpt_print_scsi_io_request(MSG_SCSI_IO_REQUEST *orig_msg) MPI_PRINT_FIELD(WRITE); MPI_PRINT_FIELD(READ); default: - printf(" Invalid DIR! "); + kprintf(" Invalid DIR! "); break; } switch (msg->Control & MPI_SCSIIO_CONTROL_TASKATTRIBUTE_MASK) { @@ -551,19 +551,19 @@ mpt_print_scsi_io_request(MSG_SCSI_IO_REQUEST *orig_msg) MPI_PRINT_FIELD(UNTAGGED); MPI_PRINT_FIELD(NO_DISCONNECT); default: - printf(" Unknown attribute! 
"); + kprintf(" Unknown attribute! "); break; } - printf("\n"); + kprintf("\n"); #undef MPI_PRINT_FIELD - printf("\tDataLength\t0x%08x\n", msg->DataLength); - printf("\tSenseBufAddr\t0x%08x\n", msg->SenseBufferLowAddr); - printf("\tCDB[0:%d]\t", msg->CDBLength); + kprintf("\tDataLength\t0x%08x\n", msg->DataLength); + kprintf("\tSenseBufAddr\t0x%08x\n", msg->SenseBufferLowAddr); + kprintf("\tCDB[0:%d]\t", msg->CDBLength); for (i = 0; i < msg->CDBLength; i++) - printf("%02x ", msg->CDB[i]); - printf("\n"); + kprintf("%02x ", msg->CDB[i]); + kprintf("\n"); if ((msg->Control & MPI_SCSIIO_CONTROL_DATADIRECTION_MASK) != MPI_SCSIIO_CONTROL_NODATATRANSFER ) { @@ -576,9 +576,9 @@ static void mpt_print_scsi_tmf_request(MSG_SCSI_TASK_MGMT *msg) { mpt_print_request_hdr((MSG_REQUEST_HEADER *)msg); - printf("\tLun 0x%02x\n", msg->LUN[1]); - printf("\tTaskType %s\n", mpt_scsi_tm_type(msg->TaskType)); - printf("\tTaskMsgContext 0x%08x\n", msg->TaskMsgContext); + kprintf("\tLun 0x%02x\n", msg->LUN[1]); + kprintf("\tTaskType %s\n", mpt_scsi_tm_type(msg->TaskType)); + kprintf("\tTaskMsgContext 0x%08x\n", msg->TaskMsgContext); } @@ -586,13 +586,13 @@ static void mpt_print_scsi_target_assist_request(PTR_MSG_TARGET_ASSIST_REQUEST msg) { mpt_print_request_hdr((MSG_REQUEST_HEADER *)msg); - printf("\tStatusCode 0x%02x\n", msg->StatusCode); - printf("\tTargetAssist 0x%02x\n", msg->TargetAssistFlags); - printf("\tQueueTag 0x%04x\n", msg->QueueTag); - printf("\tReplyWord 0x%08x\n", msg->ReplyWord); - printf("\tLun 0x%02x\n", msg->LUN[1]); - printf("\tRelativeOff 0x%08x\n", msg->RelativeOffset); - printf("\tDataLength 0x%08x\n", msg->DataLength); + kprintf("\tStatusCode 0x%02x\n", msg->StatusCode); + kprintf("\tTargetAssist 0x%02x\n", msg->TargetAssistFlags); + kprintf("\tQueueTag 0x%04x\n", msg->QueueTag); + kprintf("\tReplyWord 0x%08x\n", msg->ReplyWord); + kprintf("\tLun 0x%02x\n", msg->LUN[1]); + kprintf("\tRelativeOff 0x%08x\n", msg->RelativeOffset); + kprintf("\tDataLength 0x%08x\n", msg->DataLength); mpt_dump_sgl(msg->SGL, 0); } @@ -601,11 +601,11 @@ mpt_print_scsi_target_status_send_request(MSG_TARGET_STATUS_SEND_REQUEST *msg) { SGE_IO_UNION x; mpt_print_request_hdr((MSG_REQUEST_HEADER *)msg); - printf("\tStatusCode 0x%02x\n", msg->StatusCode); - printf("\tStatusFlags 0x%02x\n", msg->StatusFlags); - printf("\tQueueTag 0x%04x\n", msg->QueueTag); - printf("\tReplyWord 0x%08x\n", msg->ReplyWord); - printf("\tLun 0x%02x\n", msg->LUN[1]); + kprintf("\tStatusCode 0x%02x\n", msg->StatusCode); + kprintf("\tStatusFlags 0x%02x\n", msg->StatusFlags); + kprintf("\tQueueTag 0x%04x\n", msg->QueueTag); + kprintf("\tReplyWord 0x%08x\n", msg->ReplyWord); + kprintf("\tLun 0x%02x\n", msg->LUN[1]); x.u.Simple = msg->StatusDataSGE; mpt_dump_sgl(&x, 0); } @@ -652,12 +652,12 @@ mpt_decode_value(mpt_decode_entry_t *table, u_int num_entries, } if (*cur_column >= wrap_point) { - printf("\n"); + kprintf("\n"); *cur_column = 0; } - printed = printf("%s[0x%x]", name, value); + printed = kprintf("%s[0x%x]", name, value); if (table == NULL) { - printed += printf(" "); + printed += kprintf(" "); *cur_column += printed; return (printed); } @@ -672,7 +672,7 @@ mpt_decode_value(mpt_decode_entry_t *table, u_int num_entries, == table[entry].mask)) continue; - printed += printf("%s%s", + printed += kprintf("%s%s", printed_mask == 0 ? 
":(" : "|", table[entry].name); printed_mask |= table[entry].mask; @@ -682,9 +682,9 @@ mpt_decode_value(mpt_decode_entry_t *table, u_int num_entries, break; } if (printed_mask != 0) - printed += printf(") "); + printed += kprintf(") "); else - printed += printf(" "); + printed += kprintf(" "); *cur_column += printed; return (printed); } @@ -727,10 +727,10 @@ mpt_dump_sgl(SGE_IO_UNION *su, int offset) do { int iprt; - printf("\t"); + kprintf("\t"); if (memcmp(se, allfox, 4) == 0) { uint32_t *nxt = (uint32_t *)se; - printf("PAD %p\n", se); + kprintf("PAD %p\n", se); nxtaddr = nxt + 1; se = nxtaddr; flags = 0; @@ -742,20 +742,20 @@ mpt_dump_sgl(SGE_IO_UNION *su, int offset) case MPI_SGE_FLAGS_SIMPLE_ELEMENT: if (flags & MPI_SGE_FLAGS_64_BIT_ADDRESSING) { SGE_SIMPLE64 *se64 = (SGE_SIMPLE64 *)se; - printf("SE64 %p: Addr=0x%08x%08x FlagsLength" + kprintf("SE64 %p: Addr=0x%08x%08x FlagsLength" "=0x%0x\n", se64, se64->Address.High, se64->Address.Low, se64->FlagsLength); nxtaddr = se64 + 1; } else { - printf("SE32 %p: Addr=0x%0x FlagsLength=0x%0x" + kprintf("SE32 %p: Addr=0x%0x FlagsLength=0x%0x" "\n", se, se->Address, se->FlagsLength); } - printf(" "); + kprintf(" "); break; case MPI_SGE_FLAGS_CHAIN_ELEMENT: if (flags & MPI_SGE_FLAGS_64_BIT_ADDRESSING) { SGE_CHAIN64 *ce64 = (SGE_CHAIN64 *) se; - printf("CE64 %p: Addr=0x%08x%08x NxtChnO=0x%x " + kprintf("CE64 %p: Addr=0x%08x%08x NxtChnO=0x%x " "Flgs=0x%x Len=0x%0x\n", ce64, ce64->Address.High, ce64->Address.Low, ce64->NextChainOffset, @@ -763,14 +763,14 @@ mpt_dump_sgl(SGE_IO_UNION *su, int offset) nxtaddr = ce64 + 1; } else { SGE_CHAIN32 *ce = (SGE_CHAIN32 *) se; - printf("CE32 %p: Addr=0x%0x NxtChnO=0x%x " + kprintf("CE32 %p: Addr=0x%0x NxtChnO=0x%x " " Flgs=0x%x Len=0x%0x\n", ce, ce->Address, ce->NextChainOffset, ce->Flags, ce->Length); } flags = 0; break; case MPI_SGE_FLAGS_TRANSACTION_ELEMENT: - printf("TE32 @ %p\n", se); + kprintf("TE32 @ %p\n", se); flags = 0; break; } @@ -778,10 +778,10 @@ mpt_dump_sgl(SGE_IO_UNION *su, int offset) #define MPT_PRINT_FLAG(x) \ if (flags & MPI_SGE_FLAGS_ ## x ) { \ if (iprt == 0) { \ - printf("\t"); \ + kprintf("\t"); \ } \ - printf(" "); \ - printf( #x ); \ + kprintf(" "); \ + kprintf( #x ); \ iprt++; \ } MPT_PRINT_FLAG(LOCAL_ADDRESS); @@ -792,7 +792,7 @@ mpt_dump_sgl(SGE_IO_UNION *su, int offset) MPT_PRINT_FLAG(END_OF_LIST); #undef MPT_PRINT_FLAG if (iprt) - printf("\n"); + kprintf("\n"); se = nxtaddr; if ((flags & LAST_SGE) == LAST_SGE) { break; @@ -841,23 +841,23 @@ mpt_dump_request(struct mpt_softc *mpt, request_t *req) void mpt_lprt(struct mpt_softc *mpt, int level, const char *fmt, ...) { - va_list ap; + __va_list ap; if (level <= mpt->verbose) { - printf("%s: ", device_get_nameunit(mpt->dev)); - va_start(ap, fmt); - vprintf(fmt, ap); - va_end(ap); + kprintf("%s: ", device_get_nameunit(mpt->dev)); + __va_start(ap, fmt); + kvprintf(fmt, ap); + __va_end(ap); } } void mpt_lprtc(struct mpt_softc *mpt, int level, const char *fmt, ...) { - va_list ap; + __va_list ap; if (level <= mpt->verbose) { - va_start(ap, fmt); - vprintf(fmt, ap); - va_end(ap); + __va_start(ap, fmt); + kvprintf(fmt, ap); + __va_end(ap); } } #endif @@ -865,20 +865,20 @@ mpt_lprtc(struct mpt_softc *mpt, int level, const char *fmt, ...) void mpt_prt(struct mpt_softc *mpt, const char *fmt, ...) 
{ - va_list ap; + __va_list ap; - printf("%s: ", device_get_nameunit(mpt->dev)); - va_start(ap, fmt); - vprintf(fmt, ap); - va_end(ap); + kprintf("%s: ", device_get_nameunit(mpt->dev)); + __va_start(ap, fmt); + kvprintf(fmt, ap); + __va_end(ap); } void mpt_prtc(struct mpt_softc *mpt, const char *fmt, ...) { - va_list ap; + __va_list ap; - va_start(ap, fmt); - vprintf(fmt, ap); - va_end(ap); + __va_start(ap, fmt); + kvprintf(fmt, ap); + __va_end(ap); } diff --git a/sys/dev/disk/mpt/mpt_freebsd.c b/sys/dev/disk/mpt/mpt_freebsd.c deleted file mode 100644 index 5e68b7a679..0000000000 --- a/sys/dev/disk/mpt/mpt_freebsd.c +++ /dev/null @@ -1,1482 +0,0 @@ -/* $FreeBSD: src/sys/dev/mpt/mpt_freebsd.c,v 1.3.2.3 2002/09/24 21:37:25 mjacob Exp $ */ -/* $DragonFly: src/sys/dev/disk/mpt/mpt_freebsd.c,v 1.11 2008/05/18 20:30:22 pavalos Exp $ */ -/* - * FreeBSD/CAM specific routines for LSI '909 FC adapters. - * FreeBSD Version. - * - * Copyright (c) 2000, 2001 by Greg Ansley - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice immediately at the beginning of the file, without modification, - * this list of conditions, and the following disclaimer. - * 2. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR - * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - */ -/* - * Additional Copyright (c) 2002 by Matthew Jacob under same license. - */ - -#include "mpt_freebsd.h" - -static void mpt_poll(struct cam_sim *); -static timeout_t mpttimeout; -static timeout_t mpttimeout2; -static void mpt_action(struct cam_sim *, union ccb *); -static int mpt_setwidth(mpt_softc_t *, int, int); -static int mpt_setsync(mpt_softc_t *, int, int, int); - -void -mpt_cam_attach(mpt_softc_t *mpt) -{ - struct cam_sim *sim; - int maxq; - - mpt->bus = 0; - maxq = (mpt->mpt_global_credits < MPT_MAX_REQUESTS(mpt))? - mpt->mpt_global_credits : MPT_MAX_REQUESTS(mpt); - - - /* - * Construct our SIM entry. - */ - sim = cam_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, - mpt->unit, &sim_mplock, 1, maxq, NULL); - if (sim == NULL) - return; - - /* - * Register exactly the bus. 
- */ - - if (xpt_bus_register(sim, 0) != CAM_SUCCESS) { - cam_sim_free(sim); - return; - } - - if (xpt_create_path(&mpt->path, NULL, cam_sim_path(sim), - CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { - xpt_bus_deregister(cam_sim_path(sim)); - cam_sim_free(sim); - return; - } - mpt->sim = sim; -} - -void -mpt_cam_detach(mpt_softc_t *mpt) -{ - if (mpt->sim != NULL) { - xpt_free_path(mpt->path); - xpt_bus_deregister(cam_sim_path(mpt->sim)); - cam_sim_free(mpt->sim); - mpt->sim = NULL; - } -} - -/* This routine is used after a system crash to dump core onto the - * swap device. - */ -static void -mpt_poll(struct cam_sim *sim) -{ - mpt_softc_t *mpt = (mpt_softc_t *) cam_sim_softc(sim); - MPT_LOCK(mpt); - mpt_intr(mpt); - MPT_UNLOCK(mpt); -} - -/* - * This routine is called if the 9x9 does not return completion status - * for a command after a CAM specified time. - */ -static void -mpttimeout(void *arg) -{ - request_t *req; - union ccb *ccb = arg; - u_int32_t oseq; - mpt_softc_t *mpt; - - mpt = ccb->ccb_h.ccb_mpt_ptr; - MPT_LOCK(mpt); - req = ccb->ccb_h.ccb_req_ptr; - oseq = req->sequence; - mpt->timeouts++; - if (mpt_intr(mpt)) { - if (req->sequence != oseq) { - device_printf(mpt->dev, "bullet missed in timeout\n"); - MPT_UNLOCK(mpt); - return; - } - device_printf(mpt->dev, "bullet U-turned in timeout: got us\n"); - } - device_printf(mpt->dev, - "time out on request index = 0x%02x sequence = 0x%08x\n", - req->index, req->sequence); - mpt_check_doorbell(mpt); - device_printf(mpt->dev, "Status %08X; Mask %08X; Doorbell %08X\n", - mpt_read(mpt, MPT_OFFSET_INTR_STATUS), - mpt_read(mpt, MPT_OFFSET_INTR_MASK), - mpt_read(mpt, MPT_OFFSET_DOORBELL) ); - kprintf("request state %s\n", mpt_req_state(req->debug)); - if (ccb != req->ccb) { - kprintf("time out: ccb %p != req->ccb %p\n", - ccb,req->ccb); - } - mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf); - req->debug = REQ_TIMEOUT; - req->ccb = NULL; - req->link.sle_next = (void *) mpt; - callout_reset(&req->timeout, hz / 10, mpttimeout2, req); - ccb->ccb_h.status = CAM_CMD_TIMEOUT; - ccb->ccb_h.status |= CAM_RELEASE_SIMQ; - mpt->outofbeer = 0; - xpt_done(ccb); - MPT_UNLOCK(mpt); -} - -static void -mpttimeout2(void *arg) -{ - request_t *req = arg; - if (req->debug == REQ_TIMEOUT) { - mpt_softc_t *mpt = (mpt_softc_t *) req->link.sle_next; - - MPT_LOCK(mpt); - mpt_free_request(mpt, req); - MPT_UNLOCK(mpt); - } -} - -/* - * Callback routine from "bus_dmamap_load" or in simple case called directly. - * - * Takes a list of physical segments and builds the SGL for SCSI IO command - * and forwards the commard to the IOC after one last check that CAM has not - * aborted the transaction. 
- */ -static void -mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) -{ - request_t *req; - union ccb *ccb; - mpt_softc_t *mpt; - MSG_SCSI_IO_REQUEST *mpt_req; - SGE_SIMPLE32 *se; - - req = (request_t *)arg; - ccb = req->ccb; - - mpt = ccb->ccb_h.ccb_mpt_ptr; - req = ccb->ccb_h.ccb_req_ptr; - mpt_req = req->req_vbuf; - - MPT_LOCK(mpt); - - if (error == 0 && nseg > MPT_SGL_MAX) { - error = EFBIG; - } - - if (error != 0) { - if (error != EFBIG) - device_printf(mpt->dev, "bus_dmamap_load returned %d\n", - error); - if (ccb->ccb_h.status == CAM_REQ_INPROG) { - xpt_freeze_devq(ccb->ccb_h.path, 1); - ccb->ccb_h.status = CAM_DEV_QFRZN; - if (error == EFBIG) - ccb->ccb_h.status |= CAM_REQ_TOO_BIG; - else - ccb->ccb_h.status |= CAM_REQ_CMP_ERR; - } - ccb->ccb_h.status &= ~CAM_SIM_QUEUED; - xpt_done(ccb); - mpt_free_request(mpt, req); - MPT_UNLOCK(mpt); - return; - } - - if (nseg > MPT_NSGL_FIRST(mpt)) { - int i, nleft = nseg; - u_int32_t flags; - bus_dmasync_op_t op; - SGE_CHAIN32 *ce; - - mpt_req->DataLength = ccb->csio.dxfer_len; - flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT; - if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) - flags |= MPI_SGE_FLAGS_HOST_TO_IOC; - - se = (SGE_SIMPLE32 *) &mpt_req->SGL; - for (i = 0; i < MPT_NSGL_FIRST(mpt) - 1; i++, se++, dm_segs++) { - u_int32_t tf; - - bzero(se, sizeof (*se)); - se->Address = dm_segs->ds_addr; - MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len); - tf = flags; - if (i == MPT_NSGL_FIRST(mpt) - 2) { - tf |= MPI_SGE_FLAGS_LAST_ELEMENT; - } - MPI_pSGE_SET_FLAGS(se, tf); - nleft -= 1; - } - - /* - * Tell the IOC where to find the first chain element - */ - mpt_req->ChainOffset = ((char *)se - (char *)mpt_req) >> 2; - - /* - * Until we're finished with all segments... - */ - while (nleft) { - int ntodo; - /* - * Construct the chain element that point to the - * next segment. 
- */ - ce = (SGE_CHAIN32 *) se++; - if (nleft > MPT_NSGL(mpt)) { - ntodo = MPT_NSGL(mpt) - 1; - ce->NextChainOffset = (MPT_RQSL(mpt) - - sizeof (SGE_SIMPLE32)) >> 2; - ce->Length = MPT_NSGL(mpt) - * sizeof(SGE_SIMPLE32); - } else { - ntodo = nleft; - ce->NextChainOffset = 0; - ce->Length = ntodo * sizeof (SGE_SIMPLE32); - } - ce->Address = req->req_pbuf + - ((char *)se - (char *)mpt_req); - ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT; - for (i = 0; i < ntodo; i++, se++, dm_segs++) { - u_int32_t tf; - - bzero(se, sizeof (*se)); - se->Address = dm_segs->ds_addr; - MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len); - tf = flags; - if (i == ntodo - 1) { - tf |= MPI_SGE_FLAGS_LAST_ELEMENT; - if (ce->NextChainOffset == 0) { - tf |= - MPI_SGE_FLAGS_END_OF_LIST | - MPI_SGE_FLAGS_END_OF_BUFFER; - } - } - MPI_pSGE_SET_FLAGS(se, tf); - nleft -= 1; - } - - } - - if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) - op = BUS_DMASYNC_PREREAD; - else - op = BUS_DMASYNC_PREWRITE; - if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) { - bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op); - } - } else if (nseg > 0) { - int i; - u_int32_t flags; - bus_dmasync_op_t op; - - mpt_req->DataLength = ccb->csio.dxfer_len; - flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT; - if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) - flags |= MPI_SGE_FLAGS_HOST_TO_IOC; - - /* Copy the segments into our SG list */ - se = (SGE_SIMPLE32 *) &mpt_req->SGL; - for (i = 0; i < nseg; i++, se++, dm_segs++) { - u_int32_t tf; - - bzero(se, sizeof (*se)); - se->Address = dm_segs->ds_addr; - MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len); - tf = flags; - if (i == nseg - 1) { - tf |= - MPI_SGE_FLAGS_LAST_ELEMENT | - MPI_SGE_FLAGS_END_OF_BUFFER | - MPI_SGE_FLAGS_END_OF_LIST; - } - MPI_pSGE_SET_FLAGS(se, tf); - } - - if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) - op = BUS_DMASYNC_PREREAD; - else - op = BUS_DMASYNC_PREWRITE; - if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) { - bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op); - } - } else { - se = (SGE_SIMPLE32 *) &mpt_req->SGL; - /* - * No data to transfer so we just make a single simple SGL - * with zero length. - */ - MPI_pSGE_SET_FLAGS(se, - (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER | - MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST)); - } - - /* - * Last time we need to check if this CCB needs to be aborted. 
- */ - if (ccb->ccb_h.status != CAM_REQ_INPROG) { - if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) - bus_dmamap_unload(mpt->buffer_dmat, req->dmap); - mpt_free_request(mpt, req); - xpt_done(ccb); - MPT_UNLOCK(mpt); - return; - } - - ccb->ccb_h.status |= CAM_SIM_QUEUED; - if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) { - callout_reset(&ccb->ccb_h.timeout_ch, - (ccb->ccb_h.timeout * hz) / 1000, mpttimeout, ccb); - } - if (mpt->verbose > 1) - mpt_print_scsi_io_request(mpt_req); - mpt_send_cmd(mpt, req); - MPT_UNLOCK(mpt); -} - -static void -mpt_start(union ccb *ccb) -{ - request_t *req; - struct mpt_softc *mpt; - MSG_SCSI_IO_REQUEST *mpt_req; - struct ccb_scsiio *csio = &ccb->csio; - struct ccb_hdr *ccbh = &ccb->ccb_h; - - /* Get the pointer for the physical addapter */ - mpt = ccb->ccb_h.ccb_mpt_ptr; - MPT_LOCK(mpt); - - /* Get a request structure off the free list */ - if ((req = mpt_get_request(mpt)) == NULL) { - if (mpt->outofbeer == 0) { - mpt->outofbeer = 1; - xpt_freeze_simq(mpt->sim, 1); - if (mpt->verbose > 1) { - device_printf(mpt->dev, "FREEZEQ\n"); - } - } - ccb->ccb_h.status = CAM_REQUEUE_REQ; - xpt_done(ccb); - MPT_UNLOCK(mpt); - return; - } - - /* Link the ccb and the request structure so we can find */ - /* the other knowing either the request or the ccb */ - req->ccb = ccb; - ccb->ccb_h.ccb_req_ptr = req; - - /* Now we build the command for the IOC */ - mpt_req = req->req_vbuf; - bzero(mpt_req, sizeof *mpt_req); - - mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST; - mpt_req->Bus = mpt->bus; - - mpt_req->SenseBufferLength = - (csio->sense_len < MPT_SENSE_SIZE) ? - csio->sense_len : MPT_SENSE_SIZE; - - /* We use the message context to find the request structure when we */ - /* Get the command competion interrupt from the FC IOC. */ - mpt_req->MsgContext = req->index; - - /* Which physical device to do the I/O on */ - mpt_req->TargetID = ccb->ccb_h.target_id; - mpt_req->LUN[1] = ccb->ccb_h.target_lun; - - /* Set the direction of the transfer */ - if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) - mpt_req->Control = MPI_SCSIIO_CONTROL_READ; - else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) - mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE; - else - mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER; - - if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) { - switch(ccb->csio.tag_action) { - case MSG_HEAD_OF_Q_TAG: - mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ; - break; - case MSG_ACA_TASK: - mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ; - break; - case MSG_ORDERED_Q_TAG: - mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ; - break; - case MSG_SIMPLE_Q_TAG: - default: - mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ; - break; - } - } else { - if (mpt->is_fc) - mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ; - else - mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED; - } - - if (mpt->is_fc == 0) { - if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) { - mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT; - } - } - - /* Copy the scsi command block into place */ - if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) - bcopy(csio->cdb_io.cdb_ptr, mpt_req->CDB, csio->cdb_len); - else - bcopy(csio->cdb_io.cdb_bytes, mpt_req->CDB, csio->cdb_len); - - mpt_req->CDBLength = csio->cdb_len; - mpt_req->DataLength = csio->dxfer_len; - mpt_req->SenseBufferLowAddr = req->sense_pbuf; - - /* - * If we have any data to send with this command, - * map it into bus space. 
- */ - - if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) { - if ((ccbh->flags & CAM_SCATTER_VALID) == 0) { - /* - * We've been given a pointer to a single buffer. - */ - if ((ccbh->flags & CAM_DATA_PHYS) == 0) { - /* - * Virtual address that needs to translated into - * one or more physical pages. - */ - int error; - - error = bus_dmamap_load(mpt->buffer_dmat, - req->dmap, csio->data_ptr, csio->dxfer_len, - mpt_execute_req, req, 0); - if (error == EINPROGRESS) { - /* - * So as to maintain ordering, - * freeze the controller queue - * until our mapping is - * returned. - */ - xpt_freeze_simq(mpt->sim, 1); - ccbh->status |= CAM_RELEASE_SIMQ; - } - } else { - /* - * We have been given a pointer to single - * physical buffer. - */ - struct bus_dma_segment seg; - seg.ds_addr = (bus_addr_t)csio->data_ptr; - seg.ds_len = csio->dxfer_len; - mpt_execute_req(req, &seg, 1, 0); - } - } else { - /* - * We have been given a list of addresses. - * These case could be easily done but they are not - * currently generated by the CAM subsystem so there - * is no point in wasting the time right now. - */ - struct bus_dma_segment *segs; - if ((ccbh->flags & CAM_SG_LIST_PHYS) == 0) { - mpt_execute_req(req, NULL, 0, EFAULT); - } else { - /* Just use the segments provided */ - segs = (struct bus_dma_segment *)csio->data_ptr; - mpt_execute_req(req, segs, csio->sglist_cnt, - (csio->sglist_cnt < MPT_SGL_MAX)? - 0 : EFBIG); - } - } - } else { - mpt_execute_req(req, NULL, 0, 0); - } - MPT_UNLOCK(mpt); -} - -static int -mpt_bus_reset(union ccb *ccb) -{ - int error; - request_t *req; - mpt_softc_t *mpt; - MSG_SCSI_TASK_MGMT *reset_req; - - /* Get the pointer for the physical adapter */ - mpt = ccb->ccb_h.ccb_mpt_ptr; - - /* Get a request structure off the free list */ - if ((req = mpt_get_request(mpt)) == NULL) { - return (CAM_REQUEUE_REQ); - } - - /* Link the ccb and the request structure so we can find */ - /* the other knowing either the request or the ccb */ - req->ccb = ccb; - ccb->ccb_h.ccb_req_ptr = req; - - reset_req = req->req_vbuf; - bzero(reset_req, sizeof *reset_req); - - reset_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT; - reset_req->MsgContext = req->index; - reset_req->TaskType = MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS; - if (mpt->is_fc) { - /* - * Should really be TARGET_RESET_OPTION - */ - reset_req->MsgFlags = - MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION; - } - /* Which physical device Reset */ - reset_req->TargetID = ccb->ccb_h.target_id; - reset_req->LUN[1] = ccb->ccb_h.target_lun; - - ccb->ccb_h.status |= CAM_SIM_QUEUED; - - error = mpt_send_handshake_cmd(mpt, - sizeof (MSG_SCSI_TASK_MGMT), reset_req); - if (error) { - device_printf(mpt->dev, - "mpt_bus_reset: mpt_send_handshake return %d\n", error); - return (CAM_REQ_CMP_ERR); - } else { - return (CAM_REQ_CMP); - } -} - -/* - * Process an asynchronous event from the IOC. 
- */ -static void mpt_ctlop(mpt_softc_t *, void *, u_int32_t); -static void mpt_event_notify_reply(mpt_softc_t *mpt, MSG_EVENT_NOTIFY_REPLY *); - -void -mpt_ctlop(mpt_softc_t *mpt, void *vmsg, u_int32_t reply) -{ - MSG_DEFAULT_REPLY *dmsg = vmsg; - - if (dmsg->Function == MPI_FUNCTION_EVENT_NOTIFICATION) { - mpt_event_notify_reply(mpt, vmsg); - mpt_free_reply(mpt, (reply << 1)); - } else if (dmsg->Function == MPI_FUNCTION_EVENT_ACK) { - mpt_free_reply(mpt, (reply << 1)); - } else if (dmsg->Function == MPI_FUNCTION_PORT_ENABLE) { - MSG_PORT_ENABLE_REPLY *msg = vmsg; - int index = msg->MsgContext & ~0x80000000; - if (mpt->verbose > 1) { - device_printf(mpt->dev, "enable port reply idx %d\n", - index); - } - if (index >= 0 && index < MPT_MAX_REQUESTS(mpt)) { - request_t *req = &mpt->request_pool[index]; - req->debug = REQ_DONE; - } - mpt_free_reply(mpt, (reply << 1)); - } else if (dmsg->Function == MPI_FUNCTION_CONFIG) { - MSG_CONFIG_REPLY *msg = vmsg; - int index = msg->MsgContext & ~0x80000000; - if (index >= 0 && index < MPT_MAX_REQUESTS(mpt)) { - request_t *req = &mpt->request_pool[index]; - req->debug = REQ_DONE; - req->sequence = reply; - } else { - mpt_free_reply(mpt, (reply << 1)); - } - } else { - device_printf(mpt->dev, "unknown mpt_ctlop: %x\n", - dmsg->Function); - } -} - -static void -mpt_event_notify_reply(mpt_softc_t *mpt, MSG_EVENT_NOTIFY_REPLY *msg) -{ - switch(msg->Event) { - case MPI_EVENT_LOG_DATA: - /* Some error occured that LSI wants logged */ - device_printf(mpt->dev, - "\tEvtLogData: IOCLogInfo: 0x%08x\n", - msg->IOCLogInfo); - device_printf(mpt->dev, "\tEvtLogData: Event Data:"); - { - int i; - for (i = 0; i < msg->EventDataLength; i++) { - device_printf(mpt->dev, - " %08X", msg->Data[i]); - } - } - device_printf(mpt->dev, "\n"); - break; - - case MPI_EVENT_UNIT_ATTENTION: - device_printf(mpt->dev, - "Bus: 0x%02x TargetID: 0x%02x\n", - (msg->Data[0] >> 8) & 0xff, msg->Data[0] & 0xff); - break; - - case MPI_EVENT_IOC_BUS_RESET: - /* We generated a bus reset */ - device_printf(mpt->dev, "IOC Bus Reset Port: %d\n", - (msg->Data[0] >> 8) & 0xff); - break; - - case MPI_EVENT_EXT_BUS_RESET: - /* Someone else generated a bus reset */ - device_printf(mpt->dev, "Ext Bus Reset\n"); - /* - * These replies don't return EventData like the MPI - * spec says they do - */ -/* xpt_async(AC_BUS_RESET, path, NULL); */ - break; - - case MPI_EVENT_RESCAN: - /* - * In general this means a device has been added - * to the loop. - */ - device_printf(mpt->dev, - "Rescan Port: %d\n", (msg->Data[0] >> 8) & 0xff); -/* xpt_async(AC_FOUND_DEVICE, path, NULL); */ - break; - - case MPI_EVENT_LINK_STATUS_CHANGE: - device_printf(mpt->dev, "Port %d: LinkState: %s\n", - (msg->Data[1] >> 8) & 0xff, - ((msg->Data[0] & 0xff) == 0)? 
"Failed" : "Active"); - break; - - case MPI_EVENT_LOOP_STATE_CHANGE: - switch ((msg->Data[0] >> 16) & 0xff) { - case 0x01: - device_printf(mpt->dev, - "Port 0x%x: FC LinkEvent: LIP(%02X,%02X) (Loop Initialization)\n", - (msg->Data[1] >> 8) & 0xff, - (msg->Data[0] >> 8) & 0xff, - (msg->Data[0] ) & 0xff); - switch ((msg->Data[0] >> 8) & 0xff) { - case 0xF7: - if ((msg->Data[0] & 0xff) == 0xF7) { - kprintf("Device needs AL_PA\n"); - } else { - kprintf("Device %02X doesn't like FC performance\n", - msg->Data[0] & 0xFF); - } - break; - case 0xF8: - if ((msg->Data[0] & 0xff) == 0xF7) { - kprintf("Device had loop failure at its receiver prior to acquiring AL_PA\n"); - } else { - kprintf("Device %02X detected loop failure at its receiver\n", - msg->Data[0] & 0xFF); - } - break; - default: - kprintf("Device %02X requests that device %02X reset itself\n", - msg->Data[0] & 0xFF, - (msg->Data[0] >> 8) & 0xFF); - break; - } - break; - case 0x02: - device_printf(mpt->dev, "Port 0x%x: FC LinkEvent: LPE(%02X,%02X) (Loop Port Enable)\n", - (msg->Data[1] >> 8) & 0xff, /* Port */ - (msg->Data[0] >> 8) & 0xff, /* Character 3 */ - (msg->Data[0] ) & 0xff /* Character 4 */ - ); - break; - case 0x03: - device_printf(mpt->dev, "Port 0x%x: FC LinkEvent: LPB(%02X,%02X) (Loop Port Bypass)\n", - (msg->Data[1] >> 8) & 0xff, /* Port */ - (msg->Data[0] >> 8) & 0xff, /* Character 3 */ - (msg->Data[0] ) & 0xff /* Character 4 */ - ); - break; - default: - device_printf(mpt->dev, "Port 0x%x: FC LinkEvent: Unknown FC event (%02X %02X %02X)\n", - (msg->Data[1] >> 8) & 0xff, /* Port */ - (msg->Data[0] >> 16) & 0xff, /* Event */ - (msg->Data[0] >> 8) & 0xff, /* Character 3 */ - (msg->Data[0] ) & 0xff /* Character 4 */ - ); - } - break; - - case MPI_EVENT_LOGOUT: - device_printf(mpt->dev, "FC Logout Port: %d N_PortID: %02X\n", - (msg->Data[1] >> 8) & 0xff, - msg->Data[0]); - break; - case MPI_EVENT_EVENT_CHANGE: - /* This is just an acknowledgement of our - mpt_send_event_request */ - break; - default: - device_printf(mpt->dev, "Unknown event %X\n", msg->Event); - } - if (msg->AckRequired) { - MSG_EVENT_ACK *ackp; - request_t *req; - if ((req = mpt_get_request(mpt)) == NULL) { - panic("unable to get request to acknowledge notify"); - } - ackp = (MSG_EVENT_ACK *) req->req_vbuf; - bzero(ackp, sizeof *ackp); - ackp->Function = MPI_FUNCTION_EVENT_ACK; - ackp->Event = msg->Event; - ackp->EventContext = msg->EventContext; - ackp->MsgContext = req->index | 0x80000000; - mpt_check_doorbell(mpt); - mpt_send_cmd(mpt, req); - } -} - -void -mpt_done(mpt_softc_t *mpt, u_int32_t reply) -{ - int index; - request_t *req; - union ccb *ccb; - MSG_REQUEST_HEADER *mpt_req; - MSG_SCSI_IO_REPLY *mpt_reply; - - index = -1; /* Shutup the complier */ - - if ((reply & MPT_CONTEXT_REPLY) == 0) { - /* context reply */ - mpt_reply = NULL; - index = reply & MPT_CONTEXT_MASK; - } else { - unsigned *pReply; - - bus_dmamap_sync(mpt->reply_dmat, mpt->reply_dmap, - BUS_DMASYNC_POSTREAD); - /* address reply (Error) */ - mpt_reply = MPT_REPLY_PTOV(mpt, reply); - if (mpt->verbose > 1) { - pReply = (unsigned *) mpt_reply; - device_printf(mpt->dev, "Address Reply (index %u)\n", - mpt_reply->MsgContext & 0xffff); - device_printf(mpt->dev, "%08X %08X %08X %08X\n", - pReply[0], pReply[1], pReply[2], pReply[3]); - device_printf(mpt->dev, "%08X %08X %08X %08X\n", - pReply[4], pReply[5], pReply[6], pReply[7]); - device_printf(mpt->dev, "%08X %08X %08X %08X\n\n", - pReply[8], pReply[9], pReply[10], pReply[11]); - } - index = mpt_reply->MsgContext; - } - - /* - * Address reply 
with MessageContext high bit set - * This is most likely a notify message so we try - * to process it then free it - */ - if ((index & 0x80000000) != 0) { - if (mpt_reply != NULL) { - mpt_ctlop(mpt, mpt_reply, reply); - } else { - device_printf(mpt->dev, - "mpt_done: index 0x%x, NULL reply\n", index); - } - return; - } - - /* Did we end up with a valid index into the table? */ - if (index < 0 || index >= MPT_MAX_REQUESTS(mpt)) { - kprintf("mpt_done: invalid index (%x) in reply\n", index); - return; - } - - req = &mpt->request_pool[index]; - - /* Make sure memory hasn't been trashed */ - if (req->index != index) { - kprintf("mpt_done: corrupted request struct"); - return; - } - - /* Short cut for task management replys; nothing more for us to do */ - mpt_req = req->req_vbuf; - if (mpt_req->Function == MPI_FUNCTION_SCSI_TASK_MGMT) { - if (mpt->verbose > 1) { - device_printf(mpt->dev, "mpt_done: TASK MGMT\n"); - } - goto done; - } - - if (mpt_req->Function == MPI_FUNCTION_PORT_ENABLE) { - goto done; - } - - /* - * At this point it better be a SCSI IO command, but don't - * crash if it isn't - */ - if (mpt_req->Function != MPI_FUNCTION_SCSI_IO_REQUEST) { - goto done; - } - - /* Recover the CAM control block from the request structure */ - ccb = req->ccb; - - /* Can't have had a SCSI command with out a CAM control block */ - if (ccb == NULL || (ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) { - device_printf(mpt->dev, - "mpt_done: corrupted ccb, index = 0x%02x seq = 0x%08x", - req->index, req->sequence); - kprintf(" request state %s\nmpt_request:\n", - mpt_req_state(req->debug)); - mpt_print_scsi_io_request((MSG_SCSI_IO_REQUEST *)req->req_vbuf); - - if (mpt_reply != NULL) { - kprintf("\nmpt_done: reply:\n"); - mpt_print_reply(MPT_REPLY_PTOV(mpt, reply)); - } else { - kprintf("\nmpt_done: context reply: 0x%08x\n", reply); - } - goto done; - } - - callout_stop(&ccb->ccb_h.timeout_ch); - - if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { - bus_dmasync_op_t op; - - if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { - op = BUS_DMASYNC_POSTREAD; - } else { - op = BUS_DMASYNC_POSTWRITE; - } - bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op); - bus_dmamap_unload(mpt->buffer_dmat, req->dmap); - } - ccb->csio.resid = 0; - - if (mpt_reply == NULL) { - /* Context reply; report that the command was successfull */ - ccb->ccb_h.status = CAM_REQ_CMP; - ccb->csio.scsi_status = SCSI_STATUS_OK; - ccb->ccb_h.status &= ~CAM_SIM_QUEUED; - if (mpt->outofbeer) { - ccb->ccb_h.status |= CAM_RELEASE_SIMQ; - mpt->outofbeer = 0; - if (mpt->verbose > 1) { - device_printf(mpt->dev, "THAWQ\n"); - } - } - MPT_LOCK(mpt); - xpt_done(ccb); - MPT_UNLOCK(mpt); - goto done; - } - - ccb->csio.scsi_status = mpt_reply->SCSIStatus; - switch(mpt_reply->IOCStatus) { - case MPI_IOCSTATUS_SCSI_DATA_OVERRUN: - ccb->ccb_h.status = CAM_DATA_RUN_ERR; - break; - - case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN: - /* - * Yikes, Tagged queue full comes through this path! - * - * So we'll change it to a status error and anything - * that returns status should probably be a status - * error as well. 
- */ - ccb->csio.resid = - ccb->csio.dxfer_len - mpt_reply->TransferCount; - if (mpt_reply->SCSIState & MPI_SCSI_STATE_NO_SCSI_STATUS) { - ccb->ccb_h.status = CAM_DATA_RUN_ERR; - break; - } -#if 0 -device_printf(mpt->dev, "underrun, scsi status is %x\n", ccb->csio.scsi_status); - ccb->csio.scsi_status = SCSI_STATUS_QUEUE_FULL; -#endif - /* Fall through */ - case MPI_IOCSTATUS_SUCCESS: - case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR: - switch (ccb->csio.scsi_status) { - case SCSI_STATUS_OK: - ccb->ccb_h.status = CAM_REQ_CMP; - break; - default: - ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; - break; - } - break; - case MPI_IOCSTATUS_BUSY: - case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES: - ccb->ccb_h.status = CAM_BUSY; - break; - - case MPI_IOCSTATUS_SCSI_INVALID_BUS: - case MPI_IOCSTATUS_SCSI_INVALID_TARGETID: - case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE: - ccb->ccb_h.status = CAM_DEV_NOT_THERE; - break; - - case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: - ccb->ccb_h.status = CAM_DATA_RUN_ERR; - break; - - case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR: - case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR: - ccb->ccb_h.status = CAM_UNCOR_PARITY; - break; - - case MPI_IOCSTATUS_SCSI_TASK_TERMINATED: - ccb->ccb_h.status = CAM_REQ_CMP; - break; - - case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED: - ccb->ccb_h.status = CAM_UA_TERMIO; - break; - - case MPI_IOCSTATUS_SCSI_IOC_TERMINATED: - ccb->ccb_h.status = CAM_REQ_TERMIO; - break; - - case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: - ccb->ccb_h.status = CAM_SCSI_BUS_RESET; - break; - - default: - ccb->ccb_h.status = CAM_UNREC_HBA_ERROR; - break; - } - - if ((mpt_reply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID) != 0) { - if (ccb->ccb_h.flags & (CAM_SENSE_PHYS | CAM_SENSE_PTR)) { - ccb->ccb_h.status |= CAM_AUTOSENSE_FAIL; - } else { - ccb->ccb_h.status |= CAM_AUTOSNS_VALID; - ccb->csio.sense_resid = mpt_reply->SenseCount; - bcopy(req->sense_vbuf, &ccb->csio.sense_data, - ccb->csio.sense_len); - } - } else if (mpt_reply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_FAILED) { - ccb->ccb_h.status &= ~CAM_STATUS_MASK; - ccb->ccb_h.status |= CAM_AUTOSENSE_FAIL; - } - - if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { - if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { - ccb->ccb_h.status |= CAM_DEV_QFRZN; - xpt_freeze_devq(ccb->ccb_h.path, 1); - } - } - - - ccb->ccb_h.status &= ~CAM_SIM_QUEUED; - if (mpt->outofbeer) { - ccb->ccb_h.status |= CAM_RELEASE_SIMQ; - mpt->outofbeer = 0; - if (mpt->verbose > 1) { - device_printf(mpt->dev, "THAWQ\n"); - } - } - MPT_LOCK(mpt); - xpt_done(ccb); - MPT_UNLOCK(mpt); - -done: - /* If IOC done with this request free it up */ - if (mpt_reply == NULL || (mpt_reply->MsgFlags & 0x80) == 0) - mpt_free_request(mpt, req); - - /* If address reply; give the buffer back to the IOC */ - if (mpt_reply != NULL) - mpt_free_reply(mpt, (reply << 1)); -} - -static void -mpt_action(struct cam_sim *sim, union ccb *ccb) -{ - int tgt, error; - mpt_softc_t *mpt; - struct ccb_trans_settings *cts; - - CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mpt_action\n")); - - mpt = (mpt_softc_t *)cam_sim_softc(sim); - - ccb->ccb_h.ccb_mpt_ptr = mpt; - - switch (ccb->ccb_h.func_code) { - case XPT_RESET_BUS: - if (mpt->verbose > 1) - device_printf(mpt->dev, "XPT_RESET_BUS\n"); - error = mpt_bus_reset(ccb); - switch (error) { - case CAM_REQ_INPROG: - break; - case CAM_REQUEUE_REQ: - if (mpt->outofbeer == 0) { - mpt->outofbeer = 1; - xpt_freeze_simq(sim, 1); - if (mpt->verbose > 1) { - device_printf(mpt->dev, "FREEZEQ\n"); - } - } - ccb->ccb_h.status = CAM_REQUEUE_REQ; - MPT_LOCK(mpt); - xpt_done(ccb); 
- MPT_UNLOCK(mpt); - break; - - case CAM_REQ_CMP: - ccb->ccb_h.status &= ~CAM_SIM_QUEUED; - ccb->ccb_h.status |= CAM_REQ_CMP; - if (mpt->outofbeer) { - ccb->ccb_h.status |= CAM_RELEASE_SIMQ; - mpt->outofbeer = 0; - if (mpt->verbose > 1) { - device_printf(mpt->dev, "THAWQ\n"); - } - } - MPT_LOCK(mpt); - xpt_done(ccb); - MPT_UNLOCK(mpt); - break; - - default: - ccb->ccb_h.status = CAM_REQ_CMP_ERR; - MPT_LOCK(mpt); - xpt_done(ccb); - MPT_UNLOCK(mpt); - } - break; - - case XPT_SCSI_IO: /* Execute the requested I/O operation */ - /* - * Do a couple of preliminary checks... - */ - if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { - if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) { - ccb->ccb_h.status = CAM_REQ_INVALID; - MPT_LOCK(mpt); - xpt_done(ccb); - MPT_UNLOCK(mpt); - break; - } - } - /* Max supported CDB length is 16 bytes */ - if (ccb->csio.cdb_len > - sizeof (((PTR_MSG_SCSI_IO_REQUEST)0)->CDB)) { - ccb->ccb_h.status = CAM_REQ_INVALID; - MPT_LOCK(mpt); - xpt_done(ccb); - MPT_UNLOCK(mpt); - return; - } - ccb->csio.scsi_status = SCSI_STATUS_OK; - mpt_start(ccb); - break; - - case XPT_ABORT: - /* - * XXX: Need to implement - */ - ccb->ccb_h.status = CAM_UA_ABORT; - MPT_LOCK(mpt); - xpt_done(ccb); - MPT_UNLOCK(mpt); - break; - -#define IS_CURRENT_SETTINGS(c) (c->type == CTS_TYPE_CURRENT_SETTINGS) -#define DP_DISC_ENABLE 0x1 -#define DP_DISC_DISABL 0x2 -#define DP_DISC (DP_DISC_ENABLE|DP_DISC_DISABL) - -#define DP_TQING_ENABLE 0x4 -#define DP_TQING_DISABL 0x8 -#define DP_TQING (DP_TQING_ENABLE|DP_TQING_DISABL) - -#define DP_WIDE 0x10 -#define DP_NARROW 0x20 -#define DP_WIDTH (DP_WIDE|DP_NARROW) - -#define DP_SYNC 0x40 - - case XPT_SET_TRAN_SETTINGS: /* Nexus Settings */ - cts = &ccb->cts; - if (!IS_CURRENT_SETTINGS(cts)) { - ccb->ccb_h.status = CAM_REQ_INVALID; - MPT_LOCK(mpt); - xpt_done(ccb); - MPT_UNLOCK(mpt); - break; - } - tgt = cts->ccb_h.target_id; - if (mpt->is_fc == 0) { - u_int8_t dval = 0; - u_int period = 0, offset = 0; - struct ccb_trans_settings_scsi *scsi = - &cts->proto_specific.scsi; - struct ccb_trans_settings_spi *spi = - &cts->xport_specific.spi; - - if ((spi->valid & CTS_SPI_VALID_DISC) != 0) { - if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) - dval |= DP_DISC_ENABLE; - else - dval |= DP_DISC_DISABL; - } - - if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) { - if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) - dval |= DP_TQING_ENABLE; - else - dval |= DP_TQING_DISABL; - } - - if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) { - if (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) - dval |= DP_WIDE; - else - dval |= DP_NARROW; - } - - if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) && - (spi->valid & CTS_SPI_VALID_SYNC_RATE) && - (spi->sync_period && spi->sync_offset)) { - dval |= DP_SYNC; - period = spi->sync_period; - offset = spi->sync_offset; - } - if (dval & DP_DISC_ENABLE) { - mpt->mpt_disc_enable |= (1 << tgt); - } else if (dval & DP_DISC_DISABL) { - mpt->mpt_disc_enable &= ~(1 << tgt); - } - if (dval & DP_TQING_ENABLE) { - mpt->mpt_tag_enable |= (1 << tgt); - } else if (dval & DP_TQING_DISABL) { - mpt->mpt_tag_enable &= ~(1 << tgt); - } - if (dval & DP_WIDTH) { - if (mpt_setwidth(mpt, tgt, dval & DP_WIDE)) { - ccb->ccb_h.status = CAM_REQ_CMP_ERR; - MPT_LOCK(mpt); - xpt_done(ccb); - MPT_UNLOCK(mpt); - break; - } - } - if (dval & DP_SYNC) { - if (mpt_setsync(mpt, tgt, period, offset)) { - ccb->ccb_h.status = CAM_REQ_CMP_ERR; - MPT_LOCK(mpt); - xpt_done(ccb); - MPT_UNLOCK(mpt); - break; - } - } - if (mpt->verbose > 1) { - device_printf(mpt->dev, - "SET tgt %d flags %x period %x off 
%x\n", - tgt, dval, period, offset); - } - } - ccb->ccb_h.status = CAM_REQ_CMP; - MPT_LOCK(mpt); - xpt_done(ccb); - MPT_UNLOCK(mpt); - break; - - case XPT_GET_TRAN_SETTINGS: - cts = &ccb->cts; - tgt = cts->ccb_h.target_id; - if (mpt->is_fc) { - struct ccb_trans_settings_fc *fc = - &cts->xport_specific.fc; - - cts->protocol = PROTO_SCSI; - cts->protocol_version = SCSI_REV_2; - cts->transport = XPORT_FC; - cts->transport_version = 0; - - fc->valid = CTS_FC_VALID_SPEED; - fc->bitrate = 100000; /* XXX: Need for 2Gb/s */ - /* XXX: need a port database for each target */ - } else { - struct ccb_trans_settings_scsi *scsi = - &cts->proto_specific.scsi; - struct ccb_trans_settings_spi *spi = - &cts->xport_specific.spi; - u_int8_t dval, pval, oval; - - /* - * We aren't going off of Port PAGE2 params for - * tagged queuing or disconnect capabilities - * for current settings. For goal settings, - * we assert all capabilities- we've had some - * problems with reading NVRAM data. - */ - if (IS_CURRENT_SETTINGS(cts)) { - fCONFIG_PAGE_SCSI_DEVICE_0 tmp; - dval = 0; - - tmp = mpt->mpt_dev_page0[tgt]; - if (mpt_read_cfg_page(mpt, tgt, &tmp.Header)) { - device_printf(mpt->dev, - "cannot get target %d DP0\n", tgt); - } else { - if (mpt->verbose > 1) { - device_printf(mpt->dev, - "SPI Tgt %d Page 0: NParms %x Information %x\n", - tgt, - tmp.NegotiatedParameters, - tmp.Information); - } - } - - if (tmp.NegotiatedParameters & - MPI_SCSIDEVPAGE0_NP_WIDE) - dval |= DP_WIDE; - - if (mpt->mpt_disc_enable & (1 << tgt)) { - dval |= DP_DISC_ENABLE; - } - if (mpt->mpt_tag_enable & (1 << tgt)) { - dval |= DP_TQING_ENABLE; - } - oval = (tmp.NegotiatedParameters >> 16) & 0xff; - pval = (tmp.NegotiatedParameters >> 8) & 0xff; - } else { - /* - * XXX: Fix wrt NVRAM someday. Attempts - * XXX: to read port page2 device data - * XXX: just returns zero in these areas. - */ - dval = DP_WIDE|DP_DISC|DP_TQING; - oval = (mpt->mpt_port_page0.Capabilities >> 16); - pval = (mpt->mpt_port_page0.Capabilities >> 8); - } - cts->protocol = PROTO_SCSI; - cts->protocol_version = SCSI_REV_2; - cts->transport = XPORT_SPI; - cts->transport_version = 2; - - scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; - spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB; - if (dval & DP_DISC_ENABLE) { - spi->flags |= CTS_SPI_FLAGS_DISC_ENB; - } - if (dval & DP_TQING_ENABLE) { - scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; - } - if (oval && pval) { - spi->sync_offset = oval; - spi->sync_period = pval; - spi->valid |= CTS_SPI_VALID_SYNC_OFFSET; - spi->valid |= CTS_SPI_VALID_SYNC_RATE; - } - spi->valid |= CTS_SPI_VALID_BUS_WIDTH; - if (dval & DP_WIDE) { - spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT; - } else { - spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; - } - if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) { - scsi->valid = CTS_SCSI_VALID_TQ; - spi->valid |= CTS_SPI_VALID_DISC; - } else { - scsi->valid = 0; - } - if (mpt->verbose > 1) { - device_printf(mpt->dev, - "GET %s tgt %d flags %x period %x off %x\n", - IS_CURRENT_SETTINGS(cts)? 
"ACTIVE" : - "NVRAM", tgt, dval, pval, oval); - } - } - ccb->ccb_h.status = CAM_REQ_CMP; - MPT_LOCK(mpt); - xpt_done(ccb); - MPT_UNLOCK(mpt); - break; - - case XPT_CALC_GEOMETRY: - { - struct ccb_calc_geometry *ccg; - u_int32_t secs_per_cylinder; - u_int32_t size_mb; - - ccg = &ccb->ccg; - if (ccg->block_size == 0) { - ccb->ccb_h.status = CAM_REQ_INVALID; - MPT_LOCK(mpt); - xpt_done(ccb); - MPT_UNLOCK(mpt); - break; - } - - size_mb = ccg->volume_size /((1024L * 1024L) / ccg->block_size); - if (size_mb > 1024) { - ccg->heads = 255; - ccg->secs_per_track = 63; - } else { - ccg->heads = 64; - ccg->secs_per_track = 32; - } - secs_per_cylinder = ccg->heads * ccg->secs_per_track; - ccg->cylinders = ccg->volume_size / secs_per_cylinder; - ccb->ccb_h.status = CAM_REQ_CMP; - MPT_LOCK(mpt); - xpt_done(ccb); - MPT_UNLOCK(mpt); - break; - } - case XPT_PATH_INQ: /* Path routing inquiry */ - { - struct ccb_pathinq *cpi = &ccb->cpi; - - cpi->version_num = 1; - cpi->target_sprt = 0; - cpi->hba_eng_cnt = 0; - cpi->max_lun = 7; - cpi->bus_id = cam_sim_bus(sim); - if (mpt->is_fc) { - cpi->max_target = 255; - cpi->hba_misc = PIM_NOBUSRESET; - cpi->initiator_id = cpi->max_target + 1; - cpi->base_transfer_speed = 100000; - cpi->hba_inquiry = PI_TAG_ABLE; - } else { - cpi->initiator_id = mpt->mpt_ini_id; - cpi->base_transfer_speed = 3300; - cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16; - cpi->hba_misc = 0; - cpi->max_target = 15; - } - - strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); - strncpy(cpi->hba_vid, "LSI", HBA_IDLEN); - strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); - cpi->unit_number = cam_sim_unit(sim); - cpi->ccb_h.status = CAM_REQ_CMP; - MPT_LOCK(mpt); - xpt_done(ccb); - MPT_UNLOCK(mpt); - break; - } - default: - ccb->ccb_h.status = CAM_REQ_INVALID; - MPT_LOCK(mpt); - xpt_done(ccb); - MPT_UNLOCK(mpt); - break; - } -} - -static int -mpt_setwidth(mpt_softc_t *mpt, int tgt, int onoff) -{ - fCONFIG_PAGE_SCSI_DEVICE_1 tmp; - tmp = mpt->mpt_dev_page1[tgt]; - if (onoff) { - tmp.RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE; - } else { - tmp.RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE; - } - if (mpt_write_cfg_page(mpt, tgt, &tmp.Header)) { - return (-1); - } - if (mpt_read_cfg_page(mpt, tgt, &tmp.Header)) { - return (-1); - } - mpt->mpt_dev_page1[tgt] = tmp; - if (mpt->verbose > 1) { - device_printf(mpt->dev, - "SPI Target %d Page 1: RequestedParameters %x Config %x\n", - tgt, mpt->mpt_dev_page1[tgt].RequestedParameters, - mpt->mpt_dev_page1[tgt].Configuration); - } - return (0); -} - -static int -mpt_setsync(mpt_softc_t *mpt, int tgt, int period, int offset) -{ - fCONFIG_PAGE_SCSI_DEVICE_1 tmp; - tmp = mpt->mpt_dev_page1[tgt]; - tmp.RequestedParameters &= - ~MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK; - tmp.RequestedParameters &= - ~MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK; - tmp.RequestedParameters &= - ~MPI_SCSIDEVPAGE1_RP_DT; - tmp.RequestedParameters &= - ~MPI_SCSIDEVPAGE1_RP_QAS; - tmp.RequestedParameters &= - ~MPI_SCSIDEVPAGE1_RP_IU; - /* - * XXX: For now, we're ignoring specific settings - */ - if (period && offset) { - int factor, offset, np; - factor = (mpt->mpt_port_page0.Capabilities >> 8) & 0xff; - offset = (mpt->mpt_port_page0.Capabilities >> 16) & 0xff; - np = 0; - if (factor < 0x9) { - np |= MPI_SCSIDEVPAGE1_RP_QAS; - np |= MPI_SCSIDEVPAGE1_RP_IU; - } - if (factor < 0xa) { - np |= MPI_SCSIDEVPAGE1_RP_DT; - } - np |= (factor << 8) | (offset << 16); - tmp.RequestedParameters |= np; - } - if (mpt_write_cfg_page(mpt, tgt, &tmp.Header)) { - return (-1); - } - if 
(mpt_read_cfg_page(mpt, tgt, &tmp.Header)) { - return (-1); - } - mpt->mpt_dev_page1[tgt] = tmp; - if (mpt->verbose > 1) { - device_printf(mpt->dev, - "SPI Target %d Page 1: RParams %x Config %x\n", - tgt, mpt->mpt_dev_page1[tgt].RequestedParameters, - mpt->mpt_dev_page1[tgt].Configuration); - } - return (0); -} diff --git a/sys/dev/disk/mpt/mpt_freebsd.h b/sys/dev/disk/mpt/mpt_freebsd.h deleted file mode 100644 index 8d8511d9b6..0000000000 --- a/sys/dev/disk/mpt/mpt_freebsd.h +++ /dev/null @@ -1,282 +0,0 @@ -/* $FreeBSD: src/sys/dev/mpt/mpt_freebsd.h,v 1.3.2.3 2002/09/24 21:37:25 mjacob Exp $ */ -/* $DragonFly: src/sys/dev/disk/mpt/mpt_freebsd.h,v 1.11 2008/01/06 01:29:00 swildner Exp $ */ -/* - * LSI MPT Host Adapter FreeBSD Wrapper Definitions (CAM version) - * - * Copyright (c) 2000, 2001 by Greg Ansley, Adam Prewett - * - * Partially derived from Matty Jacobs ISP driver. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice immediately at the beginning of the file, without modification, - * this list of conditions, and the following disclaimer. - * 2. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR - * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - */ -/* - * Additional Copyright (c) 2002 by Matthew Jacob under same license. - */ - -#ifndef _MPT_FREEBSD_H_ -#define _MPT_FREEBSD_H_ - -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "opt_ddb.h" - -#include "mpilib/mpi_type.h" -#include "mpilib/mpi.h" -#include "mpilib/mpi_cnfg.h" -#include "mpilib/mpi_fc.h" -#include "mpilib/mpi_init.h" -#include "mpilib/mpi_ioc.h" -#include "mpilib/mpi_lan.h" -#include "mpilib/mpi_targ.h" - - -#define INLINE __inline - -#define MPT_IFLAGS 0 -#define MPT_LOCK(mpt) crit_enter() -#define MPT_UNLOCK(mpt) crit_exit() -#define MPT_LOCK_SETUP(mpt) -#define MPT_LOCK_DESTROY(mpt) - -/* Max MPT Reply we are willing to accept (must be power of 2) */ -#define MPT_REPLY_SIZE 128 - -#define MPT_MAX_REQUESTS(mpt) ((mpt)->is_fc? 1024 : 256) -#define MPT_REQUEST_AREA 512 -#define MPT_SENSE_SIZE 32 /* included in MPT_REQUEST_SIZE */ -#define MPT_REQ_MEM_SIZE(mpt) (MPT_MAX_REQUESTS(mpt) * MPT_REQUEST_AREA) - -/* - * We cannot tell prior to getting IOC facts how big the IOC's request - * area is. Because of this we cannot tell at compile time how many - * simple SG elements we can fit within an IOC request prior to having - * to put in a chain element. 
- * - * Experimentally we know that the Ultra4 parts have a 96 byte request - * element size and the Fibre Channel units have a 144 byte request - * element size. Therefore, if we have 512-32 (== 480) bytes of request - * area to play with, we have room for between 3 and 5 request sized - * regions- the first of which is the command plus a simple SG list, - * the rest of which are chained continuation SG lists. Given that the - * normal request we use is 48 bytes w/o the first SG element, we can - * assume we have 480-48 == 432 bytes to have simple SG elements and/or - * chain elements. If we assume 32 bit addressing, this works out to - * 54 SG or chain elements. If we assume 5 chain elements, then we have - * a maximum of 49 separate actual SG segments. - */ - -#define MPT_SGL_MAX 49 - -#define MPT_RQSL(mpt) (mpt->request_frame_size << 2) -#define MPT_NSGL(mpt) (MPT_RQSL(mpt) / sizeof (SGE_SIMPLE32)) - -#define MPT_NSGL_FIRST(mpt) \ - (((mpt->request_frame_size << 2) - \ - sizeof (MSG_SCSI_IO_REQUEST) - \ - sizeof (SGE_IO_UNION)) / sizeof (SGE_SIMPLE32)) - -/* - * Convert a physical address returned from IOC to kvm address - * needed to access the data. - */ -#define MPT_REPLY_PTOV(m, x) \ - ((void *)(&m->reply[((x << 1) - m->reply_phys)])) - -#define ccb_mpt_ptr sim_priv.entries[0].ptr -#define ccb_req_ptr sim_priv.entries[1].ptr - -enum mpt_req_state { - REQ_FREE, REQ_IN_PROGRESS, REQ_TIMEOUT, REQ_ON_CHIP, REQ_DONE -}; -typedef struct req_entry { - u_int16_t index; /* Index of this entry */ - union ccb * ccb; /* CAM request */ - void * req_vbuf; /* Virtual Address of Entry */ - void * sense_vbuf; /* Virtual Address of sense data */ - bus_addr_t req_pbuf; /* Physical Address of Entry */ - bus_addr_t sense_pbuf; /* Physical Address of sense data */ - bus_dmamap_t dmap; /* DMA map for data buffer */ - SLIST_ENTRY(req_entry) link; /* Pointer to next in list */ - enum mpt_req_state debug; /* Debugging */ - u_int32_t sequence; /* Sequence Number */ - struct callout timeout; -} request_t; - - -/* Structure for saving proper values for modifyable PCI configuration registers */ -struct mpt_pci_cfg { - u_int16_t Command; - u_int16_t LatencyTimer_LineSize; - u_int32_t IO_BAR; - u_int32_t Mem0_BAR[2]; - u_int32_t Mem1_BAR[2]; - u_int32_t ROM_BAR; - u_int8_t IntLine; - u_int32_t PMCSR; -}; - -typedef struct mpt_softc { - device_t dev; - u_int32_t : 16, - unit : 8, - verbose : 3, - outofbeer : 1, - mpt_locksetup : 1, - disabled : 1, - is_fc : 1, - is_sas : 1, - bus : 1; /* FC929/1030 have two busses */ - - /* - * IOC Facts - */ - u_int16_t mpt_global_credits; - u_int16_t request_frame_size; - u_int8_t mpt_max_devices; - u_int8_t mpt_max_buses; - - /* - * Port Facts - */ - u_int16_t mpt_ini_id; - - - /* - * Device Configuration Information - */ - union { - struct mpt_spi_cfg { - fCONFIG_PAGE_SCSI_PORT_0 _port_page0; - fCONFIG_PAGE_SCSI_PORT_1 _port_page1; - fCONFIG_PAGE_SCSI_PORT_2 _port_page2; - fCONFIG_PAGE_SCSI_DEVICE_0 _dev_page0[16]; - fCONFIG_PAGE_SCSI_DEVICE_1 _dev_page1[16]; - uint16_t _tag_enable; - uint16_t _disc_enable; - uint16_t _update_params0; - uint16_t _update_params1; - } spi; -#define mpt_port_page0 cfg.spi._port_page0 -#define mpt_port_page1 cfg.spi._port_page1 -#define mpt_port_page2 cfg.spi._port_page2 -#define mpt_dev_page0 cfg.spi._dev_page0 -#define mpt_dev_page1 cfg.spi._dev_page1 -#define mpt_tag_enable cfg.spi._tag_enable -#define mpt_disc_enable cfg.spi._disc_enable -#define mpt_update_params0 cfg.spi._update_params0 -#define mpt_update_params1 cfg.spi._update_params1 - 
struct mpi_fc_cfg { - u_int8_t nada; - } fc; - } cfg; - - /* - * PCI Hardware info - */ - struct resource * pci_irq; /* Interrupt map for chip */ - void * ih; /* Interupt handle */ - struct mpt_pci_cfg pci_cfg; /* saved PCI conf registers */ - - /* - * DMA Mapping Stuff - */ - - struct resource * pci_reg; /* Register map for chip */ - int pci_reg_id; /* Resource ID */ - bus_space_tag_t pci_st; /* Bus tag for registers */ - bus_space_handle_t pci_sh; /* Bus handle for registers */ - vm_offset_t pci_pa; /* Physical Address */ - - bus_dma_tag_t parent_dmat; /* DMA tag for parent PCI bus */ - bus_dma_tag_t reply_dmat; /* DMA tag for reply memory */ - bus_dmamap_t reply_dmap; /* DMA map for reply memory */ - char * reply; /* KVA of reply memory */ - bus_addr_t reply_phys; /* BusAddr of reply memory */ - - - bus_dma_tag_t buffer_dmat; /* DMA tag for buffers */ - bus_dma_tag_t request_dmat; /* DMA tag for request memroy */ - bus_dmamap_t request_dmap; /* DMA map for request memroy */ - char * request; /* KVA of Request memory */ - bus_addr_t request_phys; /* BusADdr of request memory */ - - /* - * CAM && Software Management - */ - - request_t * request_pool; - SLIST_HEAD(req_queue, req_entry) request_free_list; - - struct cam_sim * sim; - struct cam_path * path; - - u_int32_t sequence; /* Sequence Number */ - u_int32_t timeouts; /* timeout count */ - u_int32_t success; /* successes afer timeout */ - - /* Opposing port in a 929 or 1030, or NULL */ - struct mpt_softc * mpt2; - -} mpt_softc_t; - -#include "mpt.h" - - -static INLINE void mpt_write(mpt_softc_t *, size_t, u_int32_t); -static INLINE u_int32_t mpt_read(mpt_softc_t *, int); - -static INLINE void -mpt_write(mpt_softc_t *mpt, size_t offset, u_int32_t val) -{ - bus_space_write_4(mpt->pci_st, mpt->pci_sh, offset, val); -} - -static INLINE u_int32_t -mpt_read(mpt_softc_t *mpt, int offset) -{ - return (bus_space_read_4(mpt->pci_st, mpt->pci_sh, offset)); -} - -void mpt_cam_attach(mpt_softc_t *); -void mpt_cam_detach(mpt_softc_t *); -void mpt_done(mpt_softc_t *, u_int32_t); -void mpt_set_config_regs(mpt_softc_t *); - -#endif /* _MPT_FREEBSD_H */ diff --git a/sys/dev/disk/mpt/mpt_pci.c b/sys/dev/disk/mpt/mpt_pci.c index 397fd6ba3f..02999e087a 100644 --- a/sys/dev/disk/mpt/mpt_pci.c +++ b/sys/dev/disk/mpt/mpt_pci.c @@ -96,14 +96,14 @@ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * $FreeBSD: src/sys/dev/mpt/mpt_pci.c,v 1.54 2009/07/10 08:18:08 scottl Exp $ */ #include -__FBSDID("$FreeBSD: src/sys/dev/mpt/mpt_pci.c,v 1.54 2009/07/10 08:18:08 scottl Exp $"); -#include -#include -#include +#include +#include +#include #if __FreeBSD_version < 700000 #define pci_msix_count(x) 0 @@ -287,31 +287,31 @@ mpt_set_options(struct mpt_softc *mpt) int bitmap; bitmap = 0; - if (getenv_int("mpt_disable", &bitmap)) { + if (kgetenv_int("mpt_disable", &bitmap)) { if (bitmap & (1 << mpt->unit)) { mpt->disabled = 1; } } bitmap = 0; - if (getenv_int("mpt_debug", &bitmap)) { + if (kgetenv_int("mpt_debug", &bitmap)) { if (bitmap & (1 << mpt->unit)) { mpt->verbose = MPT_PRT_DEBUG; } } bitmap = 0; - if (getenv_int("mpt_debug1", &bitmap)) { + if (kgetenv_int("mpt_debug1", &bitmap)) { if (bitmap & (1 << mpt->unit)) { mpt->verbose = MPT_PRT_DEBUG1; } } bitmap = 0; - if (getenv_int("mpt_debug2", &bitmap)) { + if (kgetenv_int("mpt_debug2", &bitmap)) { if (bitmap & (1 << mpt->unit)) { mpt->verbose = MPT_PRT_DEBUG2; } } bitmap = 0; - if (getenv_int("mpt_debug3", &bitmap)) { + if (kgetenv_int("mpt_debug3", &bitmap)) { if (bitmap & (1 << mpt->unit)) { mpt->verbose = MPT_PRT_DEBUG3; } @@ -319,21 +319,21 @@ mpt_set_options(struct mpt_softc *mpt) mpt->cfg_role = MPT_ROLE_DEFAULT; bitmap = 0; - if (getenv_int("mpt_nil_role", &bitmap)) { + if (kgetenv_int("mpt_nil_role", &bitmap)) { if (bitmap & (1 << mpt->unit)) { mpt->cfg_role = 0; } mpt->do_cfg_role = 1; } bitmap = 0; - if (getenv_int("mpt_tgt_role", &bitmap)) { + if (kgetenv_int("mpt_tgt_role", &bitmap)) { if (bitmap & (1 << mpt->unit)) { mpt->cfg_role |= MPT_ROLE_TARGET; } mpt->do_cfg_role = 1; } bitmap = 0; - if (getenv_int("mpt_ini_role", &bitmap)) { + if (kgetenv_int("mpt_ini_role", &bitmap)) { if (bitmap & (1 << mpt->unit)) { mpt->cfg_role |= MPT_ROLE_INITIATOR; } @@ -576,7 +576,7 @@ mpt_pci_attach(device_t dev) } /* Allocate dma memory */ -/* XXX JGibbs -Should really be done based on IOCFacts. */ + /* XXX JGibbs -Should really be done based on IOCFacts. */ if (mpt_dma_mem_alloc(mpt)) { mpt_prt(mpt, "Could not allocate DMA memory\n"); goto bad; @@ -728,14 +728,14 @@ mpt_dma_mem_alloc(struct mpt_softc *mpt) len = sizeof (request_t) * MPT_MAX_REQUESTS(mpt); #ifdef RELENG_4 - mpt->request_pool = (request_t *)malloc(len, M_DEVBUF, M_WAITOK); + mpt->request_pool = (request_t *)kmalloc(len, M_DEVBUF, M_WAITOK); if (mpt->request_pool == NULL) { mpt_prt(mpt, "cannot allocate request pool\n"); return (1); } memset(mpt->request_pool, 0, len); #else - mpt->request_pool = (request_t *)malloc(len, M_DEVBUF, M_WAITOK|M_ZERO); + mpt->request_pool = (request_t *)kmalloc(len, M_DEVBUF, M_WAITOK|M_ZERO); if (mpt->request_pool == NULL) { mpt_prt(mpt, "cannot allocate request pool\n"); return (1); @@ -896,7 +896,7 @@ mpt_dma_mem_free(struct mpt_softc *mpt) bus_dma_tag_destroy(mpt->reply_dmat); bus_dma_tag_destroy(mpt->parent_dmat); mpt->reply_dmat = 0; - free(mpt->request_pool, M_DEVBUF); + kfree(mpt->request_pool, M_DEVBUF); mpt->request_pool = 0; } diff --git a/sys/dev/disk/mpt/mpt_raid.c b/sys/dev/disk/mpt/mpt_raid.c new file mode 100644 index 0000000000..fcb97677e3 --- /dev/null +++ b/sys/dev/disk/mpt/mpt_raid.c @@ -0,0 +1,1842 @@ +/*- + * Routines for handling the integrated RAID features LSI MPT Fusion adapters. + * + * Copyright (c) 2005, WHEEL Sp. z o.o. + * Copyright (c) 2005 Justin T. Gibbs. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon including + * a substantially similar Disclaimer requirement for further binary + * redistribution. + * 3. Neither the names of the above listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT + * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +/*- + * Some Breakage and Bug Fixing added later. + * Copyright (c) 2006, by Matthew Jacob + * All Rights Reserved + * + * Support from LSI-Logic has also gone a great deal toward making this a + * workable subsystem and is gratefully acknowledged. + * $FreeBSD: src/sys/dev/mpt/mpt_raid.c,v 1.20 2009/05/21 12:36:40 jhb Exp $ + */ + +#include + +#include +#include + +#include "dev/disk/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! 
*/ +#include "dev/disk/mpt/mpilib/mpi_raid.h" + +#include +#include +#include +#include +#include + +#if __FreeBSD_version < 500000 +#include +#define GIANT_REQUIRED +#endif +#include + +#include +#include +#include + +#include + +struct mpt_raid_action_result +{ + union { + MPI_RAID_VOL_INDICATOR indicator_struct; + uint32_t new_settings; + uint8_t phys_disk_num; + } action_data; + uint16_t action_status; +}; + +#define REQ_TO_RAID_ACTION_RESULT(req) ((struct mpt_raid_action_result *) \ + (((MSG_RAID_ACTION_REQUEST *)(req->req_vbuf)) + 1)) + +#define REQ_IOCSTATUS(req) ((req)->IOCStatus & MPI_IOCSTATUS_MASK) + + +static mpt_probe_handler_t mpt_raid_probe; +static mpt_attach_handler_t mpt_raid_attach; +static mpt_enable_handler_t mpt_raid_enable; +static mpt_event_handler_t mpt_raid_event; +static mpt_shutdown_handler_t mpt_raid_shutdown; +static mpt_reset_handler_t mpt_raid_ioc_reset; +static mpt_detach_handler_t mpt_raid_detach; + +static struct mpt_personality mpt_raid_personality = +{ + .name = "mpt_raid", + .probe = mpt_raid_probe, + .attach = mpt_raid_attach, + .enable = mpt_raid_enable, + .event = mpt_raid_event, + .reset = mpt_raid_ioc_reset, + .shutdown = mpt_raid_shutdown, + .detach = mpt_raid_detach, +}; + +DECLARE_MPT_PERSONALITY(mpt_raid, SI_ORDER_THIRD); +MPT_PERSONALITY_DEPEND(mpt_raid, mpt_cam, 1, 1, 1); + +static mpt_reply_handler_t mpt_raid_reply_handler; +static int mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req, + MSG_DEFAULT_REPLY *reply_frame); +static int mpt_spawn_raid_thread(struct mpt_softc *mpt); +static void mpt_terminate_raid_thread(struct mpt_softc *mpt); +static void mpt_raid_thread(void *arg); +static timeout_t mpt_raid_timer; +#if 0 +static void mpt_enable_vol(struct mpt_softc *mpt, + struct mpt_raid_volume *mpt_vol, int enable); +#endif +static void mpt_verify_mwce(struct mpt_softc *, struct mpt_raid_volume *); +static void mpt_adjust_queue_depth(struct mpt_softc *, struct mpt_raid_volume *, + struct cam_path *); +#if __FreeBSD_version < 500000 +#define mpt_raid_sysctl_attach(x) do { } while (0) +#else +static void mpt_raid_sysctl_attach(struct mpt_softc *); +#endif + +static uint32_t raid_handler_id = MPT_HANDLER_ID_NONE; + +const char * +mpt_vol_type(struct mpt_raid_volume *vol) +{ + switch (vol->config_page->VolumeType) { + case MPI_RAID_VOL_TYPE_IS: + return ("RAID-0"); + case MPI_RAID_VOL_TYPE_IME: + return ("RAID-1E"); + case MPI_RAID_VOL_TYPE_IM: + return ("RAID-1"); + default: + return ("Unknown"); + } +} + +const char * +mpt_vol_state(struct mpt_raid_volume *vol) +{ + switch (vol->config_page->VolumeStatus.State) { + case MPI_RAIDVOL0_STATUS_STATE_OPTIMAL: + return ("Optimal"); + case MPI_RAIDVOL0_STATUS_STATE_DEGRADED: + return ("Degraded"); + case MPI_RAIDVOL0_STATUS_STATE_FAILED: + return ("Failed"); + default: + return ("Unknown"); + } +} + +const char * +mpt_disk_state(struct mpt_raid_disk *disk) +{ + switch (disk->config_page.PhysDiskStatus.State) { + case MPI_PHYSDISK0_STATUS_ONLINE: + return ("Online"); + case MPI_PHYSDISK0_STATUS_MISSING: + return ("Missing"); + case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE: + return ("Incompatible"); + case MPI_PHYSDISK0_STATUS_FAILED: + return ("Failed"); + case MPI_PHYSDISK0_STATUS_INITIALIZING: + return ("Initializing"); + case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED: + return ("Offline Requested"); + case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED: + return ("Failed per Host Request"); + case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE: + return ("Offline"); + default: + return ("Unknown"); + } +} + 
+void
+mpt_vol_prt(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
+    const char *fmt, ...)
+{
+	__va_list ap;
+
+	kprintf("%s:vol%d(%s:%d:%d): ", device_get_nameunit(mpt->dev),
+	    (u_int)(vol - mpt->raid_volumes), device_get_nameunit(mpt->dev),
+	    vol->config_page->VolumeBus, vol->config_page->VolumeID);
+	__va_start(ap, fmt);
+	kvprintf(fmt, ap);
+	__va_end(ap);
+}
+
+void
+mpt_disk_prt(struct mpt_softc *mpt, struct mpt_raid_disk *disk,
+    const char *fmt, ...)
+{
+	__va_list ap;
+
+	if (disk->volume != NULL) {
+		kprintf("(%s:vol%d:%d): ",
+		    device_get_nameunit(mpt->dev),
+		    disk->volume->config_page->VolumeID,
+		    disk->member_number);
+	} else {
+		kprintf("(%s:%d:%d): ", device_get_nameunit(mpt->dev),
+		    disk->config_page.PhysDiskBus,
+		    disk->config_page.PhysDiskID);
+	}
+	__va_start(ap, fmt);
+	kvprintf(fmt, ap);
+	__va_end(ap);
+}
+
+static void
+mpt_raid_async(void *callback_arg, u_int32_t code,
+    struct cam_path *path, void *arg)
+{
+	struct mpt_softc *mpt;
+
+	mpt = (struct mpt_softc*)callback_arg;
+	switch (code) {
+	case AC_FOUND_DEVICE:
+	{
+		struct ccb_getdev *cgd;
+		struct mpt_raid_volume *mpt_vol;
+
+		cgd = (struct ccb_getdev *)arg;
+		if (cgd == NULL) {
+			break;
+		}
+
+		mpt_lprt(mpt, MPT_PRT_DEBUG, "Callback for %d\n",
+		    cgd->ccb_h.target_id);
+
+		RAID_VOL_FOREACH(mpt, mpt_vol) {
+			if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
+				continue;
+
+			if (mpt_vol->config_page->VolumeID
+			 == cgd->ccb_h.target_id) {
+				mpt_adjust_queue_depth(mpt, mpt_vol, path);
+				break;
+			}
+		}
+	}
+	default:
+		break;
+	}
+}
+
+int
+mpt_raid_probe(struct mpt_softc *mpt)
+{
+	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
+		return (ENODEV);
+	}
+	return (0);
+}
+
+int
+mpt_raid_attach(struct mpt_softc *mpt)
+{
+	struct ccb_setasync csa;
+	mpt_handler_t	handler;
+	int		error;
+
+	mpt_callout_init(&mpt->raid_timer);
+
+	error = mpt_spawn_raid_thread(mpt);
+	if (error != 0) {
+		mpt_prt(mpt, "Unable to spawn RAID thread!\n");
+		goto cleanup;
+	}
+
+	MPT_LOCK(mpt);
+	handler.reply_handler = mpt_raid_reply_handler;
+	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
+	    &raid_handler_id);
+	if (error != 0) {
+		mpt_prt(mpt, "Unable to register RAID handler!\n");
+		goto cleanup;
+	}
+
+	xpt_setup_ccb(&csa.ccb_h, mpt->path, 5);
+	csa.ccb_h.func_code = XPT_SASYNC_CB;
+	csa.event_enable = AC_FOUND_DEVICE;
+	csa.callback = mpt_raid_async;
+	csa.callback_arg = mpt;
+	xpt_action((union ccb *)&csa);
+	if (csa.ccb_h.status != CAM_REQ_CMP) {
+		mpt_prt(mpt, "mpt_raid_attach: Unable to register "
+		    "CAM async handler.\n");
+	}
+	MPT_UNLOCK(mpt);
+
+	mpt_raid_sysctl_attach(mpt);
+	return (0);
+cleanup:
+	MPT_UNLOCK(mpt);
+	mpt_raid_detach(mpt);
+	return (error);
+}
+
+int
+mpt_raid_enable(struct mpt_softc *mpt)
+{
+	return (0);
+}
+
+void
+mpt_raid_detach(struct mpt_softc *mpt)
+{
+	struct ccb_setasync csa;
+	mpt_handler_t handler;
+
+	callout_stop(&mpt->raid_timer);
+	MPT_LOCK(mpt);
+	mpt_terminate_raid_thread(mpt);
+
+	handler.reply_handler = mpt_raid_reply_handler;
+	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
+	    raid_handler_id);
+	xpt_setup_ccb(&csa.ccb_h, mpt->path, /*priority*/5);
+	csa.ccb_h.func_code = XPT_SASYNC_CB;
+	csa.event_enable = 0;
+	csa.callback = mpt_raid_async;
+	csa.callback_arg = mpt;
+	xpt_action((union ccb *)&csa);
+	MPT_UNLOCK(mpt);
+}
+
+static void
+mpt_raid_ioc_reset(struct mpt_softc *mpt, int type)
+{
+	/* Nothing to do yet. */
+}
+
+static const char *raid_event_txt[] =
+{
+	"Volume Created",
+	"Volume Deleted",
+	"Volume Settings Changed",
+	"Volume Status Changed",
+	"Volume Physical Disk Membership Changed",
+	"Physical Disk Created",
+	"Physical Disk Deleted",
+	"Physical Disk Settings Changed",
+	"Physical Disk Status Changed",
+	"Domain Validation Required",
+	"SMART Data Received",
+	"Replace Action Started",
+};
+
+static int
+mpt_raid_event(struct mpt_softc *mpt, request_t *req,
+    MSG_EVENT_NOTIFY_REPLY *msg)
+{
+	EVENT_DATA_RAID *raid_event;
+	struct mpt_raid_volume *mpt_vol;
+	struct mpt_raid_disk *mpt_disk;
+	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
+	int i;
+	int print_event;
+
+	if (msg->Event != MPI_EVENT_INTEGRATED_RAID) {
+		return (0);
+	}
+
+	raid_event = (EVENT_DATA_RAID *)&msg->Data;
+
+	mpt_vol = NULL;
+	vol_pg = NULL;
+	if (mpt->raid_volumes != NULL && mpt->ioc_page2 != NULL) {
+		for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
+			mpt_vol = &mpt->raid_volumes[i];
+			vol_pg = mpt_vol->config_page;
+
+			if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
+				continue;
+
+			if (vol_pg->VolumeID == raid_event->VolumeID
+			 && vol_pg->VolumeBus == raid_event->VolumeBus)
+				break;
+		}
+		if (i >= mpt->ioc_page2->MaxVolumes) {
+			mpt_vol = NULL;
+			vol_pg = NULL;
+		}
+	}
+
+	mpt_disk = NULL;
+	if (raid_event->PhysDiskNum != 0xFF && mpt->raid_disks != NULL) {
+		mpt_disk = mpt->raid_disks + raid_event->PhysDiskNum;
+		if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0) {
+			mpt_disk = NULL;
+		}
+	}
+
+	print_event = 1;
+	switch(raid_event->ReasonCode) {
+	case MPI_EVENT_RAID_RC_VOLUME_CREATED:
+	case MPI_EVENT_RAID_RC_VOLUME_DELETED:
+		break;
+	case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED:
+		if (mpt_vol != NULL) {
+			if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0) {
+				mpt_vol->flags &= ~MPT_RVF_UP2DATE;
+			} else {
+				/*
+				 * Coalesce status messages into one
+				 * per background run of our RAID thread.
+				 * This removes "spurious" status messages
+				 * from our output.
+				 */
+				print_event = 0;
+			}
+		}
+		break;
+	case MPI_EVENT_RAID_RC_VOLUME_SETTINGS_CHANGED:
+	case MPI_EVENT_RAID_RC_VOLUME_PHYSDISK_CHANGED:
+		mpt->raid_rescan++;
+		if (mpt_vol != NULL) {
+			mpt_vol->flags &= ~(MPT_RVF_UP2DATE|MPT_RVF_ANNOUNCED);
+		}
+		break;
+	case MPI_EVENT_RAID_RC_PHYSDISK_CREATED:
+	case MPI_EVENT_RAID_RC_PHYSDISK_DELETED:
+		mpt->raid_rescan++;
+		break;
+	case MPI_EVENT_RAID_RC_PHYSDISK_SETTINGS_CHANGED:
+	case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED:
+		mpt->raid_rescan++;
+		if (mpt_disk != NULL) {
+			mpt_disk->flags &= ~MPT_RDF_UP2DATE;
+		}
+		break;
+	case MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED:
+		mpt->raid_rescan++;
+		break;
+	case MPI_EVENT_RAID_RC_SMART_DATA:
+	case MPI_EVENT_RAID_RC_REPLACE_ACTION_STARTED:
+		break;
+	}
+
+	if (print_event) {
+		if (mpt_disk != NULL) {
+			mpt_disk_prt(mpt, mpt_disk, "");
+		} else if (mpt_vol != NULL) {
+			mpt_vol_prt(mpt, mpt_vol, "");
+		} else {
+			mpt_prt(mpt, "Volume(%d:%d", raid_event->VolumeBus,
+			    raid_event->VolumeID);
+
+			if (raid_event->PhysDiskNum != 0xFF)
+				mpt_prtc(mpt, ":%d): ",
+				    raid_event->PhysDiskNum);
+			else
+				mpt_prtc(mpt, "): ");
+		}
+
+		if (raid_event->ReasonCode >= NUM_ELEMENTS(raid_event_txt))
+			mpt_prtc(mpt, "Unhandled RaidEvent %#x\n",
+			    raid_event->ReasonCode);
+		else
+			mpt_prtc(mpt, "%s\n",
+			    raid_event_txt[raid_event->ReasonCode]);
+	}
+
+	if (raid_event->ReasonCode == MPI_EVENT_RAID_RC_SMART_DATA) {
+		/* XXX Use CAM's print sense for this... */
+		if (mpt_disk != NULL)
+			mpt_disk_prt(mpt, mpt_disk, "");
+		else
+			mpt_prt(mpt, "Volume(%d:%d:%d: ",
+			    raid_event->VolumeBus, raid_event->VolumeID,
+			    raid_event->PhysDiskNum);
+		mpt_prtc(mpt, "ASC 0x%x, ASCQ 0x%x)\n",
+		    raid_event->ASC, raid_event->ASCQ);
+	}
+
+	mpt_raid_wakeup(mpt);
+	return (1);
+}
+
+static void
+mpt_raid_shutdown(struct mpt_softc *mpt)
+{
+	struct mpt_raid_volume *mpt_vol;
+
+	if (mpt->raid_mwce_setting != MPT_RAID_MWCE_REBUILD_ONLY) {
+		return;
+	}
+
+	mpt->raid_mwce_setting = MPT_RAID_MWCE_OFF;
+	RAID_VOL_FOREACH(mpt, mpt_vol) {
+		mpt_verify_mwce(mpt, mpt_vol);
+	}
+}
+
+static int
+mpt_raid_reply_handler(struct mpt_softc *mpt, request_t *req,
+    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
+{
+	int free_req;
+
+	if (req == NULL)
+		return (TRUE);
+
+	free_req = TRUE;
+	if (reply_frame != NULL)
+		free_req = mpt_raid_reply_frame_handler(mpt, req, reply_frame);
+#ifdef NOTYET
+	else if (req->ccb != NULL) {
+		/* Complete Quiesce CCB with error... */
+	}
+#endif
+
+	req->state &= ~REQ_STATE_QUEUED;
+	req->state |= REQ_STATE_DONE;
+	TAILQ_REMOVE(&mpt->request_pending_list, req, links);
+
+	if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
+		wakeup(req);
+	} else if (free_req) {
+		mpt_free_request(mpt, req);
+	}
+
+	return (TRUE);
+}
+
+/*
+ * Parse additional completion information in the reply
+ * frame for RAID I/O requests.
+ */
+static int
+mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
+    MSG_DEFAULT_REPLY *reply_frame)
+{
+	MSG_RAID_ACTION_REPLY *reply;
+	struct mpt_raid_action_result *action_result;
+	MSG_RAID_ACTION_REQUEST *rap;
+
+	reply = (MSG_RAID_ACTION_REPLY *)reply_frame;
+	req->IOCStatus = le16toh(reply->IOCStatus);
+	rap = (MSG_RAID_ACTION_REQUEST *)req->req_vbuf;
+
+	switch (rap->Action) {
+	case MPI_RAID_ACTION_QUIESCE_PHYS_IO:
+		mpt_prt(mpt, "QUIESCE PHYSIO DONE\n");
+		break;
+	case MPI_RAID_ACTION_ENABLE_PHYS_IO:
+		mpt_prt(mpt, "ENABLE PHYSIO DONE\n");
+		break;
+	default:
+		break;
+	}
+	action_result = REQ_TO_RAID_ACTION_RESULT(req);
+	memcpy(&action_result->action_data, &reply->ActionData,
+	    sizeof(action_result->action_data));
+	action_result->action_status = le16toh(reply->ActionStatus);
+	return (TRUE);
+}
+
+/*
+ * Utility routine to perform a RAID action command.
+ */
+int
+mpt_issue_raid_req(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
+    struct mpt_raid_disk *disk, request_t *req, u_int Action,
+    uint32_t ActionDataWord, bus_addr_t addr, bus_size_t len,
+    int write, int wait)
+{
+	MSG_RAID_ACTION_REQUEST *rap;
+	SGE_SIMPLE32 *se;
+
+	rap = req->req_vbuf;
+	memset(rap, 0, sizeof *rap);
+	rap->Action = Action;
+	rap->ActionDataWord = htole32(ActionDataWord);
+	rap->Function = MPI_FUNCTION_RAID_ACTION;
+	rap->VolumeID = vol->config_page->VolumeID;
+	rap->VolumeBus = vol->config_page->VolumeBus;
+	if (disk != 0)
+		rap->PhysDiskNum = disk->config_page.PhysDiskNum;
+	else
+		rap->PhysDiskNum = 0xFF;
+	se = (SGE_SIMPLE32 *)&rap->ActionDataSGE;
+	se->Address = htole32(addr);
+	MPI_pSGE_SET_LENGTH(se, len);
+	MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
+	    MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
+	    MPI_SGE_FLAGS_END_OF_LIST |
+	    (write ? MPI_SGE_FLAGS_HOST_TO_IOC : MPI_SGE_FLAGS_IOC_TO_HOST)));
+	se->FlagsLength = htole32(se->FlagsLength);
+	rap->MsgContext = htole32(req->index | raid_handler_id);
+
+	mpt_check_doorbell(mpt);
+	mpt_send_cmd(mpt, req);
+
+	if (wait) {
+		return (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
+		    /*sleep_ok*/FALSE, /*time_ms*/2000));
+	} else {
+		return (0);
+	}
+}
+
+/*************************** RAID Status Monitoring ***************************/
+static int
+mpt_spawn_raid_thread(struct mpt_softc *mpt)
+{
+	int error;
+
+	/*
+	 * Freeze out any CAM transactions until our thread
+	 * is able to run at least once.  We need to update
+	 * our RAID pages before accepting I/O or we may
+	 * reject I/O to an ID we later determine is for a
+	 * hidden physdisk.
+	 */
+	MPT_LOCK(mpt);
+	xpt_freeze_simq(mpt->phydisk_sim, 1);
+	MPT_UNLOCK(mpt);
+	error = mpt_kthread_create(mpt_raid_thread, mpt,
+	    &mpt->raid_thread, /*flags*/0, /*altstack*/0,
+	    "mpt_raid%d", mpt->unit);
+	if (error != 0) {
+		MPT_LOCK(mpt);
+		xpt_release_simq(mpt->phydisk_sim, /*run_queue*/FALSE);
+		MPT_UNLOCK(mpt);
+	}
+	return (error);
+}
+
+static void
+mpt_terminate_raid_thread(struct mpt_softc *mpt)
+{
+
+	if (mpt->raid_thread == NULL) {
+		return;
+	}
+	mpt->shutdwn_raid = 1;
+	wakeup(mpt->raid_volumes);
+	/*
+	 * Sleep on a slightly different location
+	 * for this interlock just for added safety.
+	 */
+	mpt_sleep(mpt, &mpt->raid_thread, PUSER, "thtrm", 0);
+}
+
+static void
+mpt_cam_rescan_callback(struct cam_periph *periph, union ccb *ccb)
+{
+
+	xpt_free_path(ccb->ccb_h.path);
+	xpt_free_ccb(ccb);
+}
+
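/*
 * Illustrative sketch, not part of the submitted driver: how a
 * synchronous caller typically combines mpt_get_request(),
 * mpt_issue_raid_req() and REQ_TO_RAID_ACTION_RESULT().  The result
 * structure lives directly behind the MSG_RAID_ACTION_REQUEST in the
 * request buffer, which is why a sleeping caller can read it after
 * mpt_wait_req() returns.  mpt_raid_action_sync() is a hypothetical
 * name; the real callers are mpt_verify_mwce() and friends below.
 */
static int
mpt_raid_action_sync(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
    u_int action, uint32_t action_word)
{
	struct mpt_raid_action_result *ar;
	request_t *req;
	int rv;

	req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
	if (req == NULL)
		return (ENOMEM);
	rv = mpt_issue_raid_req(mpt, vol, /*disk*/NULL, req, action,
	    action_word, /*addr*/0, /*len*/0, /*write*/FALSE, /*wait*/TRUE);
	if (rv == ETIMEDOUT) {
		/* The IOC may still own the request; do not recycle it. */
		return (rv);
	}
	ar = REQ_TO_RAID_ACTION_RESULT(req);
	if (rv != 0
	 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
	 || ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)
		rv = EIO;
	mpt_free_request(mpt, req);
	return (rv);
}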
+static void
+mpt_raid_thread(void *arg)
+{
+	struct mpt_softc *mpt;
+	int firstrun;
+
+	mpt = (struct mpt_softc *)arg;
+	firstrun = 1;
+	MPT_LOCK(mpt);
+	while (mpt->shutdwn_raid == 0) {
+
+		if (mpt->raid_wakeup == 0) {
+			mpt_sleep(mpt, &mpt->raid_volumes, PUSER, "idle", 0);
+			continue;
+		}
+
+		mpt->raid_wakeup = 0;
+
+		if (mpt_refresh_raid_data(mpt)) {
+			mpt_schedule_raid_refresh(mpt); /* XXX NOT QUITE RIGHT */
+			continue;
+		}
+
+		/*
+		 * Now that we have our first snapshot of RAID data,
+		 * allow CAM to access our physical disk bus.
+		 */
+		if (firstrun) {
+			firstrun = 0;
+			MPTLOCK_2_CAMLOCK(mpt);
+			xpt_release_simq(mpt->phydisk_sim, TRUE);
+			CAMLOCK_2_MPTLOCK(mpt);
+		}
+
+		if (mpt->raid_rescan != 0) {
+			union ccb *ccb;
+			struct cam_path *path;
+			int error;
+
+			mpt->raid_rescan = 0;
+			MPT_UNLOCK(mpt);
+
+			ccb = xpt_alloc_ccb();
+
+			MPT_LOCK(mpt);
+			error = xpt_create_path(&path, xpt_periph,
+			    cam_sim_path(mpt->phydisk_sim),
+			    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
+			if (error != CAM_REQ_CMP) {
+				xpt_free_ccb(ccb);
+				mpt_prt(mpt, "Unable to rescan RAID Bus!\n");
+			} else {
+				xpt_setup_ccb(&ccb->ccb_h, path, 5);
+				ccb->ccb_h.func_code = XPT_SCAN_BUS;
+				ccb->ccb_h.cbfcnp = mpt_cam_rescan_callback;
+				ccb->crcn.flags = CAM_FLAG_NONE;
+				MPTLOCK_2_CAMLOCK(mpt);
+				xpt_action(ccb);
+				CAMLOCK_2_MPTLOCK(mpt);
+			}
+		}
+	}
+	mpt->raid_thread = NULL;
+	wakeup(&mpt->raid_thread);
+	MPT_UNLOCK(mpt);
+	mpt_kthread_exit(0);
+}
+
+#if 0
+static void
+mpt_raid_quiesce_timeout(void *arg)
+{
+	/* Complete the CCB with error */
+	/* COWWWW */
+}
+
+static timeout_t mpt_raid_quiesce_timeout;
+cam_status
+mpt_raid_quiesce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk,
+    request_t *req)
+{
+	union ccb *ccb;
+
+	ccb = req->ccb;
+	if ((mpt_disk->flags & MPT_RDF_QUIESCED) != 0)
+		return (CAM_REQ_CMP);
+
+	if ((mpt_disk->flags & MPT_RDF_QUIESCING) == 0) {
+		int rv;
+
+		mpt_disk->flags |= MPT_RDF_QUIESCING;
+		xpt_freeze_devq(ccb->ccb_h.path, 1);
+
+		rv = mpt_issue_raid_req(mpt, mpt_disk->volume, mpt_disk, req,
+		    MPI_RAID_ACTION_QUIESCE_PHYS_IO,
+		    /*ActionData*/0, /*addr*/0,
+		    /*len*/0, /*write*/FALSE,
+		    /*wait*/FALSE);
+		if (rv != 0)
+			return (CAM_REQ_CMP_ERR);
+
+		mpt_req_timeout(req, mpt_raid_quiesce_timeout, ccb, 5 * hz);
+#if 0
+		if (rv == ETIMEDOUT) {
+			mpt_disk_prt(mpt, mpt_disk, "mpt_raid_quiesce_disk: "
+			    "Quiesce Timed-out\n");
+			xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
+			return (CAM_REQ_CMP_ERR);
+		}
+
+		ar = REQ_TO_RAID_ACTION_RESULT(req);
+		if (rv != 0
+		 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
+		 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
+			mpt_disk_prt(mpt, mpt_disk, "Quiesce Failed"
+			    "%d:%x:%x\n", rv, req->IOCStatus,
+			    ar->action_status);
+			xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0);
+			return (CAM_REQ_CMP_ERR);
+		}
+#endif
+		return (CAM_REQ_INPROG);
+	}
+	return (CAM_REQUEUE_REQ);
+}
+#endif
+
+/* XXX Ignores that there may be multiple busses/IOCs involved. */
+cam_status
+mpt_map_physdisk(struct mpt_softc *mpt, union ccb *ccb, u_int *tgt)
+{
+	struct mpt_raid_disk *mpt_disk;
+
+	mpt_disk = mpt->raid_disks + ccb->ccb_h.target_id;
+	if (ccb->ccb_h.target_id < mpt->raid_max_disks
+	 && (mpt_disk->flags & MPT_RDF_ACTIVE) != 0) {
+		*tgt = mpt_disk->config_page.PhysDiskID;
+		return (0);
+	}
+	mpt_lprt(mpt, MPT_PRT_DEBUG1, "mpt_map_physdisk(%d) - Not Active\n",
+	    ccb->ccb_h.target_id);
+	return (-1);
+}
+
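/*
 * Illustrative sketch, not part of the submitted driver:
 * mpt_map_physdisk() above and mpt_is_raid_volume() below are the two
 * halves of target-ID translation.  CAM reaches hidden RAID member
 * disks through a separate pass-thru SIM whose target IDs index
 * mpt->raid_disks[], while volume IDs live on the regular SIM.
 * mpt_classify_target() is a hypothetical helper showing how an I/O
 * path might combine them.
 */
static int
mpt_classify_target(struct mpt_softc *mpt, union ccb *ccb, u_int *real_tgt)
{
	if (mpt_is_raid_volume(mpt, ccb->ccb_h.target_id))
		return (0);		/* logical volume: use ID as-is */
	if (mpt_map_physdisk(mpt, ccb, real_tgt) == 0)
		return (1);		/* member disk: ID was translated */
	return (-1);			/* neither: not an active target */
}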
+/* XXX Ignores that there may be multiple busses/IOCs involved. */
+int
+mpt_is_raid_volume(struct mpt_softc *mpt, int tgt)
+{
+	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
+	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol;
+
+	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
+		return (0);
+	}
+	ioc_vol = mpt->ioc_page2->RaidVolume;
+	ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
+	for (;ioc_vol != ioc_last_vol; ioc_vol++) {
+		if (ioc_vol->VolumeID == tgt) {
+			return (1);
+		}
+	}
+	return (0);
+}
+
+#if 0
+static void
+mpt_enable_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
+    int enable)
+{
+	request_t *req;
+	struct mpt_raid_action_result *ar;
+	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
+	int enabled;
+	int rv;
+
+	vol_pg = mpt_vol->config_page;
+	enabled = vol_pg->VolumeStatus.Flags & MPI_RAIDVOL0_STATUS_FLAG_ENABLED;
+
+	/*
+	 * If the setting matches the configuration,
+	 * there is nothing to do.
+	 */
+	if ((enabled && enable)
+	 || (!enabled && !enable))
+		return;
+
+	req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
+	if (req == NULL) {
+		mpt_vol_prt(mpt, mpt_vol,
+		    "mpt_enable_vol: Get request failed!\n");
+		return;
+	}
+
+	rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
+	    enable ? MPI_RAID_ACTION_ENABLE_VOLUME
+	    : MPI_RAID_ACTION_DISABLE_VOLUME,
+	    /*data*/0, /*addr*/0, /*len*/0,
+	    /*write*/FALSE, /*wait*/TRUE);
+	if (rv == ETIMEDOUT) {
+		mpt_vol_prt(mpt, mpt_vol, "mpt_enable_vol: "
+		    "%s Volume Timed-out\n",
+		    enable ? "Enable" : "Disable");
+		return;
+	}
+	ar = REQ_TO_RAID_ACTION_RESULT(req);
+	if (rv != 0
+	 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
+	 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
+		mpt_vol_prt(mpt, mpt_vol, "%s Volume Failed: %d:%x:%x\n",
+		    enable ? "Enable" : "Disable",
+		    rv, req->IOCStatus, ar->action_status);
+	}
+
+	mpt_free_request(mpt, req);
+}
+#endif
+
+static void
+mpt_verify_mwce(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
+{
+	request_t *req;
+	struct mpt_raid_action_result *ar;
+	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
+	uint32_t data;
+	int rv;
+	int resyncing;
+	int mwce;
+
+	vol_pg = mpt_vol->config_page;
+	resyncing = vol_pg->VolumeStatus.Flags
+	    & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS;
+	mwce = vol_pg->VolumeSettings.Settings
+	    & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
+
+	/*
+	 * If the setting matches the configuration,
+	 * there is nothing to do.
+	 */
+	switch (mpt->raid_mwce_setting) {
+	case MPT_RAID_MWCE_REBUILD_ONLY:
+		if ((resyncing && mwce) || (!resyncing && !mwce)) {
+			return;
+		}
+		mpt_vol->flags ^= MPT_RVF_WCE_CHANGED;
+		if ((mpt_vol->flags & MPT_RVF_WCE_CHANGED) == 0) {
+			/*
+			 * Wait one more status update to see if
+			 * resyncing gets enabled.  It gets disabled
+			 * temporarily when WCE is changed.
+			 */
+			return;
+		}
+		break;
+	case MPT_RAID_MWCE_ON:
+		if (mwce)
+			return;
+		break;
+	case MPT_RAID_MWCE_OFF:
+		if (!mwce)
+			return;
+		break;
+	case MPT_RAID_MWCE_NC:
+		return;
+	}
+
+	req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
+	if (req == NULL) {
+		mpt_vol_prt(mpt, mpt_vol,
+		    "mpt_verify_mwce: Get request failed!\n");
+		return;
+	}
+
+	vol_pg->VolumeSettings.Settings ^=
+	    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
+	memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
+	vol_pg->VolumeSettings.Settings ^=
+	    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
+	rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
+	    MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
+	    data, /*addr*/0, /*len*/0,
+	    /*write*/FALSE, /*wait*/TRUE);
+	if (rv == ETIMEDOUT) {
+		mpt_vol_prt(mpt, mpt_vol, "mpt_verify_mwce: "
+		    "Write Cache Enable Timed-out\n");
+		return;
+	}
+	ar = REQ_TO_RAID_ACTION_RESULT(req);
+	if (rv != 0
+	 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
+	 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
+		mpt_vol_prt(mpt, mpt_vol, "Write Cache Enable Failed: "
+		    "%d:%x:%x\n", rv, req->IOCStatus,
+		    ar->action_status);
+	} else {
+		vol_pg->VolumeSettings.Settings ^=
+		    MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE;
+	}
+	mpt_free_request(mpt, req);
+}
+
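/*
 * Illustrative sketch, not part of the submitted driver:
 * mpt_verify_mwce() above uses an XOR-snapshot-XOR pattern.  The WCE
 * bit is toggled just long enough to memcpy() the whole VolumeSettings
 * word into the 32-bit action data, then toggled back, so the cached
 * page is only updated once the IOC acknowledges the
 * CHANGE_VOLUME_SETTINGS action.  The same pattern reappears for the
 * resync-priority bit below.  mpt_settings_with_toggled_bit() is a
 * hypothetical helper isolating that step.
 */
static uint32_t
mpt_settings_with_toggled_bit(CONFIG_PAGE_RAID_VOL_0 *vol_pg, uint32_t bit)
{
	uint32_t data;

	vol_pg->VolumeSettings.Settings ^= bit;	/* propose new setting */
	memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
	vol_pg->VolumeSettings.Settings ^= bit;	/* restore cached state */
	return (data);
}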
+static void
+mpt_verify_resync_rate(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol)
+{
+	request_t *req;
+	struct mpt_raid_action_result *ar;
+	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
+	u_int prio;
+	int rv;
+
+	vol_pg = mpt_vol->config_page;
+
+	if (mpt->raid_resync_rate == MPT_RAID_RESYNC_RATE_NC)
+		return;
+
+	/*
+	 * If the current RAID resync rate does not
+	 * match our configured rate, update it.
+	 */
+	prio = vol_pg->VolumeSettings.Settings
+	    & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
+	if (vol_pg->ResyncRate != 0
+	 && vol_pg->ResyncRate != mpt->raid_resync_rate) {
+
+		req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
+		if (req == NULL) {
+			mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
+			    "Get request failed!\n");
+			return;
+		}
+
+		rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
+		    MPI_RAID_ACTION_SET_RESYNC_RATE,
+		    mpt->raid_resync_rate, /*addr*/0,
+		    /*len*/0, /*write*/FALSE, /*wait*/TRUE);
+		if (rv == ETIMEDOUT) {
+			mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_data: "
+			    "Resync Rate Setting Timed-out\n");
+			return;
+		}
+
+		ar = REQ_TO_RAID_ACTION_RESULT(req);
+		if (rv != 0
+		 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
+		 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
+			mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
+			    "%d:%x:%x\n", rv, req->IOCStatus,
+			    ar->action_status);
+		} else
+			vol_pg->ResyncRate = mpt->raid_resync_rate;
+		mpt_free_request(mpt, req);
+	} else if ((prio && mpt->raid_resync_rate < 128)
+	    || (!prio && mpt->raid_resync_rate >= 128)) {
+		uint32_t data;
+
+		req = mpt_get_request(mpt, /*sleep_ok*/TRUE);
+		if (req == NULL) {
+			mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: "
+			    "Get request failed!\n");
+			return;
+		}
+
+		vol_pg->VolumeSettings.Settings ^=
+		    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
+		memcpy(&data, &vol_pg->VolumeSettings, sizeof(data));
+		vol_pg->VolumeSettings.Settings ^=
+		    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
+		rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req,
+		    MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS,
+		    data, /*addr*/0, /*len*/0,
+		    /*write*/FALSE, /*wait*/TRUE);
+		if (rv == ETIMEDOUT) {
+			mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_data: "
+			    "Resync Rate Setting Timed-out\n");
+			return;
+		}
+		ar = REQ_TO_RAID_ACTION_RESULT(req);
+		if (rv != 0
+		 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS
+		 || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) {
+			mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: "
+			    "%d:%x:%x\n", rv, req->IOCStatus,
+			    ar->action_status);
+		} else {
+			vol_pg->VolumeSettings.Settings ^=
+			    MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;
+		}
+
+		mpt_free_request(mpt, req);
+	}
+}
+
+static void
+mpt_adjust_queue_depth(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
+    struct cam_path *path)
+{
+	struct ccb_relsim crs;
+
+	xpt_setup_ccb(&crs.ccb_h, path, /*priority*/5);
+	crs.ccb_h.func_code = XPT_REL_SIMQ;
+	crs.release_flags = RELSIM_ADJUST_OPENINGS;
+	crs.openings = mpt->raid_queue_depth;
+	xpt_action((union ccb *)&crs);
+	if (crs.ccb_h.status != CAM_REQ_CMP)
+		mpt_vol_prt(mpt, mpt_vol, "mpt_adjust_queue_depth failed "
+		    "with CAM status %#x\n", crs.ccb_h.status);
+}
+
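/*
 * Illustrative sketch, not part of the submitted driver:
 * mpt_verify_resync_rate() above treats the configured rate as a single
 * byte where values >= 128 additionally imply the high-priority resync
 * setting; hence the paired (prio, rate < 128) and (!prio, rate >= 128)
 * mismatch tests.  mpt_resync_prio_matches() is a hypothetical
 * predicate capturing the same rule.
 */
static int
mpt_resync_prio_matches(CONFIG_PAGE_RAID_VOL_0 *vol_pg, u_int rate)
{
	u_int prio = vol_pg->VolumeSettings.Settings
	    & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC;

	return ((prio != 0) == (rate >= 128));
}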
+ "Primary" : "Secondary"); + } else { + mpt_prtc(mpt, "Stripe Position %d", + mpt_disk->member_number); + } + f = disk_pg->PhysDiskStatus.Flags; + s = disk_pg->PhysDiskStatus.State; + if (f & MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC) { + mpt_prtc(mpt, " Out of Sync"); + } + if (f & MPI_PHYSDISK0_STATUS_FLAG_QUIESCED) { + mpt_prtc(mpt, " Quiesced"); + } + if (f & MPI_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME) { + mpt_prtc(mpt, " Inactive"); + } + if (f & MPI_PHYSDISK0_STATUS_FLAG_OPTIMAL_PREVIOUS) { + mpt_prtc(mpt, " Was Optimal"); + } + if (f & MPI_PHYSDISK0_STATUS_FLAG_NOT_OPTIMAL_PREVIOUS) { + mpt_prtc(mpt, " Was Non-Optimal"); + } + switch (s) { + case MPI_PHYSDISK0_STATUS_ONLINE: + mpt_prtc(mpt, " Online"); + break; + case MPI_PHYSDISK0_STATUS_MISSING: + mpt_prtc(mpt, " Missing"); + break; + case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE: + mpt_prtc(mpt, " Incompatible"); + break; + case MPI_PHYSDISK0_STATUS_FAILED: + mpt_prtc(mpt, " Failed"); + break; + case MPI_PHYSDISK0_STATUS_INITIALIZING: + mpt_prtc(mpt, " Initializing"); + break; + case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED: + mpt_prtc(mpt, " Requested Offline"); + break; + case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED: + mpt_prtc(mpt, " Requested Failed"); + break; + case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE: + default: + mpt_prtc(mpt, " Offline Other (%x)", s); + break; + } + mpt_prtc(mpt, "\n"); + } +} + +static void +mpt_announce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk) +{ + CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg; + int rd_bus = cam_sim_bus(mpt->sim); + int pt_bus = cam_sim_bus(mpt->phydisk_sim); + u_int i; + + disk_pg = &mpt_disk->config_page; + mpt_disk_prt(mpt, mpt_disk, + "Physical (%s:%d:%d:0), Pass-thru (%s:%d:%d:0)\n", + device_get_nameunit(mpt->dev), rd_bus, + disk_pg->PhysDiskID, device_get_nameunit(mpt->dev), + pt_bus, mpt_disk - mpt->raid_disks); + if (disk_pg->PhysDiskSettings.HotSparePool == 0) + return; + mpt_disk_prt(mpt, mpt_disk, "Member of Hot Spare Pool%s", + powerof2(disk_pg->PhysDiskSettings.HotSparePool) + ? 
":" : "s:"); + for (i = 0; i < 8; i++) { + u_int mask; + + mask = 0x1 << i; + if ((disk_pg->PhysDiskSettings.HotSparePool & mask) == 0) + continue; + mpt_prtc(mpt, " %d", i); + } + mpt_prtc(mpt, "\n"); +} + +static void +mpt_refresh_raid_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk, + IOC_3_PHYS_DISK *ioc_disk) +{ + int rv; + + rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_PHYSDISK, + /*PageNumber*/0, ioc_disk->PhysDiskNum, + &mpt_disk->config_page.Header, + /*sleep_ok*/TRUE, /*timeout_ms*/5000); + if (rv != 0) { + mpt_prt(mpt, "mpt_refresh_raid_disk: " + "Failed to read RAID Disk Hdr(%d)\n", + ioc_disk->PhysDiskNum); + return; + } + rv = mpt_read_cur_cfg_page(mpt, ioc_disk->PhysDiskNum, + &mpt_disk->config_page.Header, + sizeof(mpt_disk->config_page), + /*sleep_ok*/TRUE, /*timeout_ms*/5000); + if (rv != 0) + mpt_prt(mpt, "mpt_refresh_raid_disk: " + "Failed to read RAID Disk Page(%d)\n", + ioc_disk->PhysDiskNum); + mpt2host_config_page_raid_phys_disk_0(&mpt_disk->config_page); +} + +static void +mpt_refresh_raid_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol, + CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol) +{ + CONFIG_PAGE_RAID_VOL_0 *vol_pg; + struct mpt_raid_action_result *ar; + request_t *req; + int rv; + int i; + + vol_pg = mpt_vol->config_page; + mpt_vol->flags &= ~MPT_RVF_UP2DATE; + + rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_VOLUME, 0, + ioc_vol->VolumePageNumber, &vol_pg->Header, TRUE, 5000); + if (rv != 0) { + mpt_vol_prt(mpt, mpt_vol, + "mpt_refresh_raid_vol: Failed to read RAID Vol Hdr(%d)\n", + ioc_vol->VolumePageNumber); + return; + } + + rv = mpt_read_cur_cfg_page(mpt, ioc_vol->VolumePageNumber, + &vol_pg->Header, mpt->raid_page0_len, TRUE, 5000); + if (rv != 0) { + mpt_vol_prt(mpt, mpt_vol, + "mpt_refresh_raid_vol: Failed to read RAID Vol Page(%d)\n", + ioc_vol->VolumePageNumber); + return; + } + mpt2host_config_page_raid_vol_0(vol_pg); + + mpt_vol->flags |= MPT_RVF_ACTIVE; + + /* Update disk entry array data. */ + for (i = 0; i < vol_pg->NumPhysDisks; i++) { + struct mpt_raid_disk *mpt_disk; + mpt_disk = mpt->raid_disks + vol_pg->PhysDisk[i].PhysDiskNum; + mpt_disk->volume = mpt_vol; + mpt_disk->member_number = vol_pg->PhysDisk[i].PhysDiskMap; + if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM) { + mpt_disk->member_number--; + } + } + + if ((vol_pg->VolumeStatus.Flags + & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0) + return; + + req = mpt_get_request(mpt, TRUE); + if (req == NULL) { + mpt_vol_prt(mpt, mpt_vol, + "mpt_refresh_raid_vol: Get request failed!\n"); + return; + } + rv = mpt_issue_raid_req(mpt, mpt_vol, NULL, req, + MPI_RAID_ACTION_INDICATOR_STRUCT, 0, 0, 0, FALSE, TRUE); + if (rv == ETIMEDOUT) { + mpt_vol_prt(mpt, mpt_vol, + "mpt_refresh_raid_vol: Progress Indicator fetch timeout\n"); + mpt_free_request(mpt, req); + return; + } + + ar = REQ_TO_RAID_ACTION_RESULT(req); + if (rv == 0 + && ar->action_status == MPI_RAID_ACTION_ASTATUS_SUCCESS + && REQ_IOCSTATUS(req) == MPI_IOCSTATUS_SUCCESS) { + memcpy(&mpt_vol->sync_progress, + &ar->action_data.indicator_struct, + sizeof(mpt_vol->sync_progress)); + mpt2host_mpi_raid_vol_indicator(&mpt_vol->sync_progress); + } else { + mpt_vol_prt(mpt, mpt_vol, + "mpt_refresh_raid_vol: Progress indicator fetch failed!\n"); + } + mpt_free_request(mpt, req); +} + +/* + * Update in-core information about RAID support. We update any entries + * that didn't previously exists or have been marked as needing to + * be updated by our event handler. 
Interesting changes are displayed + * to the console. + */ +int +mpt_refresh_raid_data(struct mpt_softc *mpt) +{ + CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol; + CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol; + IOC_3_PHYS_DISK *ioc_disk; + IOC_3_PHYS_DISK *ioc_last_disk; + CONFIG_PAGE_RAID_VOL_0 *vol_pg; + size_t len; + int rv; + int i; + u_int nonopt_volumes; + + if (mpt->ioc_page2 == NULL || mpt->ioc_page3 == NULL) { + return (0); + } + + /* + * Mark all items as unreferenced by the configuration. + * This allows us to find, report, and discard stale + * entries. + */ + for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) { + mpt->raid_disks[i].flags &= ~MPT_RDF_REFERENCED; + } + for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) { + mpt->raid_volumes[i].flags &= ~MPT_RVF_REFERENCED; + } + + /* + * Get Physical Disk information. + */ + len = mpt->ioc_page3->Header.PageLength * sizeof(uint32_t); + rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0, + &mpt->ioc_page3->Header, len, + /*sleep_ok*/TRUE, /*timeout_ms*/5000); + if (rv) { + mpt_prt(mpt, + "mpt_refresh_raid_data: Failed to read IOC Page 3\n"); + return (-1); + } + mpt2host_config_page_ioc3(mpt->ioc_page3); + + ioc_disk = mpt->ioc_page3->PhysDisk; + ioc_last_disk = ioc_disk + mpt->ioc_page3->NumPhysDisks; + for (; ioc_disk != ioc_last_disk; ioc_disk++) { + struct mpt_raid_disk *mpt_disk; + + mpt_disk = mpt->raid_disks + ioc_disk->PhysDiskNum; + mpt_disk->flags |= MPT_RDF_REFERENCED; + if ((mpt_disk->flags & (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE)) + != (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE)) { + + mpt_refresh_raid_disk(mpt, mpt_disk, ioc_disk); + + } + mpt_disk->flags |= MPT_RDF_ACTIVE; + mpt->raid_rescan++; + } + + /* + * Refresh volume data. + */ + len = mpt->ioc_page2->Header.PageLength * sizeof(uint32_t); + rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0, + &mpt->ioc_page2->Header, len, + /*sleep_ok*/TRUE, /*timeout_ms*/5000); + if (rv) { + mpt_prt(mpt, "mpt_refresh_raid_data: " + "Failed to read IOC Page 2\n"); + return (-1); + } + mpt2host_config_page_ioc2(mpt->ioc_page2); + + ioc_vol = mpt->ioc_page2->RaidVolume; + ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes; + for (;ioc_vol != ioc_last_vol; ioc_vol++) { + struct mpt_raid_volume *mpt_vol; + + mpt_vol = mpt->raid_volumes + ioc_vol->VolumePageNumber; + mpt_vol->flags |= MPT_RVF_REFERENCED; + vol_pg = mpt_vol->config_page; + if (vol_pg == NULL) + continue; + if (((mpt_vol->flags & (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE)) + != (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE)) + || (vol_pg->VolumeStatus.Flags + & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) != 0) { + + mpt_refresh_raid_vol(mpt, mpt_vol, ioc_vol); + } + mpt_vol->flags |= MPT_RVF_ACTIVE; + } + + nonopt_volumes = 0; + for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) { + struct mpt_raid_volume *mpt_vol; + uint64_t total; + uint64_t left; + int m; + u_int prio; + + mpt_vol = &mpt->raid_volumes[i]; + + if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) { + continue; + } + + vol_pg = mpt_vol->config_page; + if ((mpt_vol->flags & (MPT_RVF_REFERENCED|MPT_RVF_ANNOUNCED)) + == MPT_RVF_ANNOUNCED) { + mpt_vol_prt(mpt, mpt_vol, "No longer configured\n"); + mpt_vol->flags = 0; + continue; + } + + if ((mpt_vol->flags & MPT_RVF_ANNOUNCED) == 0) { + mpt_announce_vol(mpt, mpt_vol); + mpt_vol->flags |= MPT_RVF_ANNOUNCED; + } + + if (vol_pg->VolumeStatus.State != + MPI_RAIDVOL0_STATUS_STATE_OPTIMAL) + nonopt_volumes++; + + if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0) + continue; + + mpt_vol->flags |= MPT_RVF_UP2DATE; + mpt_vol_prt(mpt, mpt_vol, "%s - %s\n", + 
mpt_vol_type(mpt_vol), mpt_vol_state(mpt_vol)); + mpt_verify_mwce(mpt, mpt_vol); + + if (vol_pg->VolumeStatus.Flags == 0) { + continue; + } + + mpt_vol_prt(mpt, mpt_vol, "Status ("); + for (m = 1; m <= 0x80; m <<= 1) { + switch (vol_pg->VolumeStatus.Flags & m) { + case MPI_RAIDVOL0_STATUS_FLAG_ENABLED: + mpt_prtc(mpt, " Enabled"); + break; + case MPI_RAIDVOL0_STATUS_FLAG_QUIESCED: + mpt_prtc(mpt, " Quiesced"); + break; + case MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS: + mpt_prtc(mpt, " Re-Syncing"); + break; + case MPI_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE: + mpt_prtc(mpt, " Inactive"); + break; + default: + break; + } + } + mpt_prtc(mpt, " )\n"); + + if ((vol_pg->VolumeStatus.Flags + & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0) + continue; + + mpt_verify_resync_rate(mpt, mpt_vol); + + left = MPT_U64_2_SCALAR(mpt_vol->sync_progress.BlocksRemaining); + total = MPT_U64_2_SCALAR(mpt_vol->sync_progress.TotalBlocks); + if (vol_pg->ResyncRate != 0) { + + prio = ((u_int)vol_pg->ResyncRate * 100000) / 0xFF; + mpt_vol_prt(mpt, mpt_vol, "Rate %d.%d%%\n", + prio / 1000, prio % 1000); + } else { + prio = vol_pg->VolumeSettings.Settings + & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC; + mpt_vol_prt(mpt, mpt_vol, "%s Priority Re-Sync\n", + prio ? "High" : "Low"); + } +#if __FreeBSD_version >= 500000 + mpt_vol_prt(mpt, mpt_vol, "%ju of %ju " + "blocks remaining\n", (uintmax_t)left, + (uintmax_t)total); +#else + mpt_vol_prt(mpt, mpt_vol, "%llu of %llu " + "blocks remaining\n", (uint64_t)left, + (uint64_t)total); +#endif + + /* Periodically report on sync progress. */ + mpt_schedule_raid_refresh(mpt); + } + + for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) { + struct mpt_raid_disk *mpt_disk; + CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg; + int m; + + mpt_disk = &mpt->raid_disks[i]; + disk_pg = &mpt_disk->config_page; + + if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0) + continue; + + if ((mpt_disk->flags & (MPT_RDF_REFERENCED|MPT_RDF_ANNOUNCED)) + == MPT_RDF_ANNOUNCED) { + mpt_disk_prt(mpt, mpt_disk, "No longer configured\n"); + mpt_disk->flags = 0; + mpt->raid_rescan++; + continue; + } + + if ((mpt_disk->flags & MPT_RDF_ANNOUNCED) == 0) { + + mpt_announce_disk(mpt, mpt_disk); + mpt_disk->flags |= MPT_RDF_ANNOUNCED; + } + + if ((mpt_disk->flags & MPT_RDF_UP2DATE) != 0) + continue; + + mpt_disk->flags |= MPT_RDF_UP2DATE; + mpt_disk_prt(mpt, mpt_disk, "%s\n", mpt_disk_state(mpt_disk)); + if (disk_pg->PhysDiskStatus.Flags == 0) + continue; + + mpt_disk_prt(mpt, mpt_disk, "Status ("); + for (m = 1; m <= 0x80; m <<= 1) { + switch (disk_pg->PhysDiskStatus.Flags & m) { + case MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC: + mpt_prtc(mpt, " Out-Of-Sync"); + break; + case MPI_PHYSDISK0_STATUS_FLAG_QUIESCED: + mpt_prtc(mpt, " Quiesced"); + break; + default: + break; + } + } + mpt_prtc(mpt, " )\n"); + } + + mpt->raid_nonopt_volumes = nonopt_volumes; + return (0); +} + +static void +mpt_raid_timer(void *arg) +{ + struct mpt_softc *mpt; + + mpt = (struct mpt_softc *)arg; + MPT_LOCK(mpt); + mpt_raid_wakeup(mpt); + MPT_UNLOCK(mpt); +} + +void +mpt_schedule_raid_refresh(struct mpt_softc *mpt) +{ + callout_reset(&mpt->raid_timer, MPT_RAID_SYNC_REPORT_INTERVAL, + mpt_raid_timer, mpt); +} + +void +mpt_raid_free_mem(struct mpt_softc *mpt) +{ + + if (mpt->raid_volumes) { + struct mpt_raid_volume *mpt_raid; + int i; + for (i = 0; i < mpt->raid_max_volumes; i++) { + mpt_raid = &mpt->raid_volumes[i]; + if (mpt_raid->config_page) { + kfree(mpt_raid->config_page, M_DEVBUF); + mpt_raid->config_page = NULL; + } + } + kfree(mpt->raid_volumes, 
M_DEVBUF); + mpt->raid_volumes = NULL; + } + if (mpt->raid_disks) { + kfree(mpt->raid_disks, M_DEVBUF); + mpt->raid_disks = NULL; + } + if (mpt->ioc_page2) { + kfree(mpt->ioc_page2, M_DEVBUF); + mpt->ioc_page2 = NULL; + } + if (mpt->ioc_page3) { + kfree(mpt->ioc_page3, M_DEVBUF); + mpt->ioc_page3 = NULL; + } + mpt->raid_max_volumes = 0; + mpt->raid_max_disks = 0; +} + +#if __FreeBSD_version >= 500000 +static int +mpt_raid_set_vol_resync_rate(struct mpt_softc *mpt, u_int rate) +{ + struct mpt_raid_volume *mpt_vol; + + if ((rate > MPT_RAID_RESYNC_RATE_MAX + || rate < MPT_RAID_RESYNC_RATE_MIN) + && rate != MPT_RAID_RESYNC_RATE_NC) + return (EINVAL); + + MPT_LOCK(mpt); + mpt->raid_resync_rate = rate; + RAID_VOL_FOREACH(mpt, mpt_vol) { + if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) { + continue; + } + mpt_verify_resync_rate(mpt, mpt_vol); + } + MPT_UNLOCK(mpt); + return (0); +} + +static int +mpt_raid_set_vol_queue_depth(struct mpt_softc *mpt, u_int vol_queue_depth) +{ + struct mpt_raid_volume *mpt_vol; + + if (vol_queue_depth > 255 || vol_queue_depth < 1) + return (EINVAL); + + MPT_LOCK(mpt); + mpt->raid_queue_depth = vol_queue_depth; + RAID_VOL_FOREACH(mpt, mpt_vol) { + struct cam_path *path; + int error; + + if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) + continue; + + mpt->raid_rescan = 0; + + MPTLOCK_2_CAMLOCK(mpt); + error = xpt_create_path(&path, xpt_periph, + cam_sim_path(mpt->sim), + mpt_vol->config_page->VolumeID, + /*lun*/0); + if (error != CAM_REQ_CMP) { + CAMLOCK_2_MPTLOCK(mpt); + mpt_vol_prt(mpt, mpt_vol, "Unable to allocate path!\n"); + continue; + } + mpt_adjust_queue_depth(mpt, mpt_vol, path); + xpt_free_path(path); + CAMLOCK_2_MPTLOCK(mpt); + } + MPT_UNLOCK(mpt); + return (0); +} + +static int +mpt_raid_set_vol_mwce(struct mpt_softc *mpt, mpt_raid_mwce_t mwce) +{ + struct mpt_raid_volume *mpt_vol; + int force_full_resync; + + MPT_LOCK(mpt); + if (mwce == mpt->raid_mwce_setting) { + MPT_UNLOCK(mpt); + return (0); + } + + /* + * Catch MWCE being left on due to a failed shutdown. Since + * sysctls cannot be set by the loader, we treat the first + * setting of this variable specially and force a full volume + * resync if MWCE is enabled and a resync is in progress. + */ + force_full_resync = 0; + if (mpt->raid_mwce_set == 0 + && mpt->raid_mwce_setting == MPT_RAID_MWCE_NC + && mwce == MPT_RAID_MWCE_REBUILD_ONLY) + force_full_resync = 1; + + mpt->raid_mwce_setting = mwce; + RAID_VOL_FOREACH(mpt, mpt_vol) { + CONFIG_PAGE_RAID_VOL_0 *vol_pg; + int resyncing; + int mwce; + + if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) + continue; + + vol_pg = mpt_vol->config_page; + resyncing = vol_pg->VolumeStatus.Flags + & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS; + mwce = vol_pg->VolumeSettings.Settings + & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE; + if (force_full_resync && resyncing && mwce) { + + /* + * XXX disable/enable volume should force a resync, + * but we'll need to quiesce, drain, and restart + * I/O to do that. + */ + mpt_vol_prt(mpt, mpt_vol, "WARNING - Unsafe shutdown " + "detected. 
Suggest full resync.\n"); + } + mpt_verify_mwce(mpt, mpt_vol); + } + mpt->raid_mwce_set = 1; + MPT_UNLOCK(mpt); + return (0); +} +const char *mpt_vol_mwce_strs[] = +{ + "On", + "Off", + "On-During-Rebuild", + "NC" +}; + +static int +mpt_raid_sysctl_vol_member_wce(SYSCTL_HANDLER_ARGS) +{ + char inbuf[20]; + struct mpt_softc *mpt; + const char *str; + int error; + u_int size; + u_int i; + + GIANT_REQUIRED; + + mpt = (struct mpt_softc *)arg1; + str = mpt_vol_mwce_strs[mpt->raid_mwce_setting]; + error = SYSCTL_OUT(req, str, strlen(str) + 1); + if (error || !req->newptr) { + return (error); + } + + size = req->newlen - req->newidx; + if (size >= sizeof(inbuf)) { + return (EINVAL); + } + + error = SYSCTL_IN(req, inbuf, size); + if (error) { + return (error); + } + inbuf[size] = '\0'; + for (i = 0; i < NUM_ELEMENTS(mpt_vol_mwce_strs); i++) { + if (strcmp(mpt_vol_mwce_strs[i], inbuf) == 0) { + return (mpt_raid_set_vol_mwce(mpt, i)); + } + } + return (EINVAL); +} + +static int +mpt_raid_sysctl_vol_resync_rate(SYSCTL_HANDLER_ARGS) +{ + struct mpt_softc *mpt; + u_int raid_resync_rate; + int error; + + GIANT_REQUIRED; + + mpt = (struct mpt_softc *)arg1; + raid_resync_rate = mpt->raid_resync_rate; + + error = sysctl_handle_int(oidp, &raid_resync_rate, 0, req); + if (error || !req->newptr) { + return error; + } + + return (mpt_raid_set_vol_resync_rate(mpt, raid_resync_rate)); +} + +static int +mpt_raid_sysctl_vol_queue_depth(SYSCTL_HANDLER_ARGS) +{ + struct mpt_softc *mpt; + u_int raid_queue_depth; + int error; + + GIANT_REQUIRED; + + mpt = (struct mpt_softc *)arg1; + raid_queue_depth = mpt->raid_queue_depth; + + error = sysctl_handle_int(oidp, &raid_queue_depth, 0, req); + if (error || !req->newptr) { + return error; + } + + return (mpt_raid_set_vol_queue_depth(mpt, raid_queue_depth)); +} + +static void +mpt_raid_sysctl_attach(struct mpt_softc *mpt) +{ + struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev); + struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev); + + SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, + "vol_member_wce", CTLTYPE_STRING | CTLFLAG_RW, mpt, 0, + mpt_raid_sysctl_vol_member_wce, "A", + "volume member WCE(On,Off,On-During-Rebuild,NC)"); + + SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, + "vol_queue_depth", CTLTYPE_INT | CTLFLAG_RW, mpt, 0, + mpt_raid_sysctl_vol_queue_depth, "I", + "default volume queue depth"); + + SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, + "vol_resync_rate", CTLTYPE_INT | CTLFLAG_RW, mpt, 0, + mpt_raid_sysctl_vol_resync_rate, "I", + "volume resync priority (0 == NC, 1 - 255)"); + SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, + "nonoptimal_volumes", CTLFLAG_RD, + &mpt->raid_nonopt_volumes, 0, + "number of nonoptimal volumes"); +} +#endif diff --git a/sys/dev/disk/mpt/mpt_raid.h b/sys/dev/disk/mpt/mpt_raid.h new file mode 100644 index 0000000000..24f18e43a1 --- /dev/null +++ b/sys/dev/disk/mpt/mpt_raid.h @@ -0,0 +1,103 @@ +/* $FreeBSD: src/sys/dev/mpt/mpt_raid.h,v 1.7 2006/07/16 03:31:01 mjacob Exp $ */ +/*- + * Definitions for the integrated RAID features LSI MPT Fusion adapters. + * + * Copyright (c) 2005, WHEEL Sp. z o.o. + * Copyright (c) 2004, 2005 Justin T. Gibbs + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon including + * a substantially similar Disclaimer requirement for further binary + * redistribution. + * 3. Neither the names of the above listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT + * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +/*- + * Some Breakage and Bug Fixing added later. + * Copyright (c) 2006, by Matthew Jacob + * All Rights Reserved + * + * Support from LSI-Logic has also gone a great deal toward making this a + * workable subsystem and is gratefully acknowledged. + */ +#ifndef _MPT_RAID_H_ +#define _MPT_RAID_H_ + +#include +union ccb; + +typedef enum { + MPT_RAID_MWCE_ON, + MPT_RAID_MWCE_OFF, + MPT_RAID_MWCE_REBUILD_ONLY, + MPT_RAID_MWCE_NC +} mpt_raid_mwce_t; + +const char *mpt_vol_type(struct mpt_raid_volume *); +const char *mpt_vol_state(struct mpt_raid_volume *); +const char *mpt_disk_state(struct mpt_raid_disk *); +void +mpt_vol_prt(struct mpt_softc *, struct mpt_raid_volume *, const char *fmt, ...); +void +mpt_disk_prt(struct mpt_softc *, struct mpt_raid_disk *, const char *, ...); + +int +mpt_issue_raid_req(struct mpt_softc *, struct mpt_raid_volume *, + struct mpt_raid_disk *, request_t *, u_int, uint32_t, bus_addr_t, + bus_size_t, int, int); + +cam_status +mpt_map_physdisk(struct mpt_softc *, union ccb *, target_id_t *); +int mpt_is_raid_volume(struct mpt_softc *, int); +#if 0 +cam_status +mpt_raid_quiesce_disk(struct mpt_softc *, struct mpt_raid_disk *, request_t *); +#endif + +int mpt_refresh_raid_data(struct mpt_softc *); +void mpt_schedule_raid_refresh(struct mpt_softc *); +void mpt_raid_free_mem(struct mpt_softc *); + +static __inline void +mpt_raid_wakeup(struct mpt_softc *mpt) +{ + mpt->raid_wakeup++; + wakeup(&mpt->raid_volumes); +} + +#define MPT_RAID_SYNC_REPORT_INTERVAL (15 * 60 * hz) +#define MPT_RAID_RESYNC_RATE_MAX (255) +#define MPT_RAID_RESYNC_RATE_MIN (1) +#define MPT_RAID_RESYNC_RATE_NC (0) +#define MPT_RAID_RESYNC_RATE_DEFAULT MPT_RAID_RESYNC_RATE_NC + +#define MPT_RAID_QUEUE_DEPTH_DEFAULT (128) + +#define MPT_RAID_MWCE_DEFAULT MPT_RAID_MWCE_NC + +#define RAID_VOL_FOREACH(mpt, mpt_vol) \ + for (mpt_vol = (mpt)->raid_volumes; \ + mpt_vol != (mpt)->raid_volumes + (mpt)->raid_max_volumes; \ + mpt_vol++) + +#endif /*_MPT_RAID_H_ */ diff --git a/sys/dev/disk/mpt/mpt_reg.h b/sys/dev/disk/mpt/mpt_reg.h new file mode 100644 index 0000000000..6476c6019b --- /dev/null +++ b/sys/dev/disk/mpt/mpt_reg.h @@ -0,0 +1,159 @@ +/* $FreeBSD: 
src/sys/dev/mpt/mpt_reg.h,v 1.4 2006/05/29 20:34:28 mjacob Exp $ */ +/*- + * Generic defines for LSI '909 FC adapters. + * FreeBSD Version. + * + * Copyright (c) 2000, 2001 by Greg Ansley + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice immediately at the beginning of the file, without modification, + * this list of conditions, and the following disclaimer. + * 2. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR + * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ +/*- + * Copyright (c) 2002, 2006 by Matthew Jacob + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon including + * a substantially similar Disclaimer requirement for further binary + * redistribution. + * 3. Neither the names of the above listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT + * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Support from Chris Ellsworth in order to make SAS adapters work + * is gratefully acknowledged. + * + * Support from LSI-Logic has also gone a great deal toward making this a + * workable subsystem and is gratefully acknowledged. 
+ */ +#ifndef _MPT_REG_H_ +#define _MPT_REG_H_ + +#define MPT_OFFSET_DOORBELL 0x00 +#define MPT_OFFSET_SEQUENCE 0x04 +#define MPT_OFFSET_DIAGNOSTIC 0x08 +#define MPT_OFFSET_TEST 0x0C +#define MPT_OFFSET_DIAG_DATA 0x10 +#define MPT_OFFSET_DIAG_ADDR 0x14 +#define MPT_OFFSET_INTR_STATUS 0x30 +#define MPT_OFFSET_INTR_MASK 0x34 +#define MPT_OFFSET_REQUEST_Q 0x40 +#define MPT_OFFSET_REPLY_Q 0x44 +#define MPT_OFFSET_HOST_INDEX 0x50 +#define MPT_OFFSET_FUBAR 0x90 + +/* Bit Maps for DOORBELL register */ +enum DB_STATE_BITS { + MPT_DB_STATE_RESET = 0x00000000, + MPT_DB_STATE_READY = 0x10000000, + MPT_DB_STATE_RUNNING = 0x20000000, + MPT_DB_STATE_FAULT = 0x40000000, + MPT_DB_STATE_MASK = 0xf0000000 +}; + +#define MPT_STATE(v) ((enum DB_STATE_BITS)((v) & MPT_DB_STATE_MASK)) + +#define MPT_DB_LENGTH_SHIFT (16) +#define MPT_DB_DATA_MASK (0xffff) + +#define MPT_DB_DB_USED 0x08000000 +#define MPT_DB_IS_IN_USE(v) (((v) & MPT_DB_DB_USED) != 0) + +/* + * "Whom" initializer values + */ +#define MPT_DB_INIT_NOONE 0x00 +#define MPT_DB_INIT_BIOS 0x01 +#define MPT_DB_INIT_ROMBIOS 0x02 +#define MPT_DB_INIT_PCIPEER 0x03 +#define MPT_DB_INIT_HOST 0x04 +#define MPT_DB_INIT_MANUFACTURE 0x05 + +#define MPT_WHO(v) \ + ((v & MPI_DOORBELL_WHO_INIT_MASK) >> MPI_DOORBELL_WHO_INIT_SHIFT) + +/* Function Maps for DOORBELL register */ +enum DB_FUNCTION_BITS { + MPT_FUNC_IOC_RESET = 0x40000000, + MPT_FUNC_UNIT_RESET = 0x41000000, + MPT_FUNC_HANDSHAKE = 0x42000000, + MPT_FUNC_REPLY_REMOVE = 0x43000000, + MPT_FUNC_MASK = 0xff000000 +}; + +/* Function Maps for INTERRUPT request register */ +enum _MPT_INTR_REQ_BITS { + MPT_INTR_DB_BUSY = 0x80000000, + MPT_INTR_REPLY_READY = 0x00000008, + MPT_INTR_DB_READY = 0x00000001 +}; + +#define MPT_DB_IS_BUSY(v) (((v) & MPT_INTR_DB_BUSY) != 0) +#define MPT_DB_INTR(v) (((v) & MPT_INTR_DB_READY) != 0) +#define MPT_REPLY_INTR(v) (((v) & MPT_INTR_REPLY_READY) != 0) + +/* Function Maps for INTERRUPT mask register */ +enum _MPT_INTR_MASK_BITS { + MPT_INTR_REPLY_MASK = 0x00000008, + MPT_INTR_DB_MASK = 0x00000001 +}; + +/* Magic addresses in diagnostic memory space */ +#define MPT_DIAG_IOP_BASE (0x00000000) +#define MPT_DIAG_IOP_SIZE (0x00002000) +#define MPT_DIAG_GPIO (0x00030010) +#define MPT_DIAG_IOPQ_REG_BASE0 (0x00050004) +#define MPT_DIAG_IOPQ_REG_BASE1 (0x00051004) +#define MPT_DIAG_CTX0_BASE (0x000E0000) +#define MPT_DIAG_CTX0_SIZE (0x00002000) +#define MPT_DIAG_CTX1_BASE (0x001E0000) +#define MPT_DIAG_CTX1_SIZE (0x00002000) +#define MPT_DIAG_FLASH_BASE (0x00800000) +#define MPT_DIAG_RAM_BASE (0x01000000) +#define MPT_DIAG_RAM_SIZE (0x00400000) +#define MPT_DIAG_MEM_CFG_BASE (0x3F000000) +#define MPT_DIAG_MEM_CFG_BADFL (0x04000000) + +/* GPIO bit assignments */ +#define MPT_DIAG_GPIO_SCL (0x00010000) +#define MPT_DIAG_GPIO_SDA_OUT (0x00008000) +#define MPT_DIAG_GPIO_SDA_IN (0x00004000) + +#define MPT_REPLY_EMPTY (0xFFFFFFFF) /* Reply Queue Empty Symbol */ +#endif /* _MPT_REG_H_ */ diff --git a/sys/dev/disk/mpt/mpt_user.c b/sys/dev/disk/mpt/mpt_user.c new file mode 100644 index 0000000000..a11d880268 --- /dev/null +++ b/sys/dev/disk/mpt/mpt_user.c @@ -0,0 +1,795 @@ +/*- + * Copyright (c) 2008 Yahoo!, Inc. + * All rights reserved. + * Written by: John Baldwin + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the author nor the names of any co-contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * LSI MPT-Fusion Host Adapter FreeBSD userland interface + * $FreeBSD: src/sys/dev/mpt/mpt_user.c,v 1.4 2009/05/20 17:29:21 imp Exp $ + */ + +#include + +#include +#include +#include +#include +#include +#include + +#include + +struct mpt_user_raid_action_result { + uint32_t volume_status; + uint32_t action_data[4]; + uint16_t action_status; +}; + +struct mpt_page_memory { + bus_dma_tag_t tag; + bus_dmamap_t map; + bus_addr_t paddr; + void *vaddr; +}; + +static mpt_probe_handler_t mpt_user_probe; +static mpt_attach_handler_t mpt_user_attach; +static mpt_enable_handler_t mpt_user_enable; +static mpt_ready_handler_t mpt_user_ready; +static mpt_event_handler_t mpt_user_event; +static mpt_reset_handler_t mpt_user_reset; +static mpt_detach_handler_t mpt_user_detach; + +static struct mpt_personality mpt_user_personality = { + .name = "mpt_user", + .probe = mpt_user_probe, + .attach = mpt_user_attach, + .enable = mpt_user_enable, + .ready = mpt_user_ready, + .event = mpt_user_event, + .reset = mpt_user_reset, + .detach = mpt_user_detach, +}; + +DECLARE_MPT_PERSONALITY(mpt_user, SI_ORDER_SECOND); + +static mpt_reply_handler_t mpt_user_reply_handler; + +static int mpt_open(struct dev_open_args *ap); +static int mpt_close(struct dev_close_args *ap); +static int mpt_ioctl(struct dev_ioctl_args *ap); + +static struct dev_ops mpt_cdevsw = { + .d_open = mpt_open, + .d_close = mpt_close, + .d_ioctl = mpt_ioctl, +}; + +static MALLOC_DEFINE(M_MPTUSER, "mpt_user", "Buffers for mpt(4) ioctls"); + +static uint32_t user_handler_id = MPT_HANDLER_ID_NONE; + +int +mpt_user_probe(struct mpt_softc *mpt) +{ + + /* Attach to every controller. 
+ */ + return (0); +} + +int +mpt_user_attach(struct mpt_softc *mpt) +{ + mpt_handler_t handler; + int error, unit; + + MPT_LOCK(mpt); + handler.reply_handler = mpt_user_reply_handler; + error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler, + &user_handler_id); + MPT_UNLOCK(mpt); + if (error != 0) { + mpt_prt(mpt, "Unable to register user handler!\n"); + return (error); + } + unit = device_get_unit(mpt->dev); + mpt->cdev = make_dev(&mpt_cdevsw, unit, UID_ROOT, GID_OPERATOR, 0640, + "mpt%d", unit); + if (mpt->cdev == NULL) { + MPT_LOCK(mpt); + mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler, + user_handler_id); + MPT_UNLOCK(mpt); + return (ENOMEM); + } + mpt->cdev->si_drv1 = mpt; + return (0); +} + +int +mpt_user_enable(struct mpt_softc *mpt) +{ + + return (0); +} + +void +mpt_user_ready(struct mpt_softc *mpt) +{ +} + +int +mpt_user_event(struct mpt_softc *mpt, request_t *req, + MSG_EVENT_NOTIFY_REPLY *msg) +{ + + /* Someday we may want to let a user daemon listen for events? */ + return (0); +} + +void +mpt_user_reset(struct mpt_softc *mpt, int type) +{ +} + +void +mpt_user_detach(struct mpt_softc *mpt) +{ + mpt_handler_t handler; + + /* XXX: do a purge of pending requests? */ + destroy_dev(mpt->cdev); + + MPT_LOCK(mpt); + handler.reply_handler = mpt_user_reply_handler; + mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler, + user_handler_id); + MPT_UNLOCK(mpt); +} + +static int +mpt_open(struct dev_open_args *ap) +{ + + return (0); +} + +static int +mpt_close(struct dev_close_args *ap) +{ + + return (0); +} + +static int +mpt_alloc_buffer(struct mpt_softc *mpt, struct mpt_page_memory *page_mem, + size_t len) +{ + struct mpt_map_info mi; + int error; + + page_mem->vaddr = NULL; + + /* Limit requests to 16M. */ + if (len > 16 * 1024 * 1024) + return (ENOSPC); + error = mpt_dma_tag_create(mpt, mpt->parent_dmat, 1, 0, + BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, + len, 1, len, 0, &page_mem->tag); + if (error) + return (error); + error = bus_dmamem_alloc(page_mem->tag, &page_mem->vaddr, + BUS_DMA_NOWAIT, &page_mem->map); + if (error) { + bus_dma_tag_destroy(page_mem->tag); + return (error); + } + mi.mpt = mpt; + error = bus_dmamap_load(page_mem->tag, page_mem->map, page_mem->vaddr, + len, mpt_map_rquest, &mi, BUS_DMA_NOWAIT); + if (error == 0) + error = mi.error; + if (error) { + bus_dmamem_free(page_mem->tag, page_mem->vaddr, page_mem->map); + bus_dma_tag_destroy(page_mem->tag); + page_mem->vaddr = NULL; + return (error); + } + page_mem->paddr = mi.phys; + return (0); +} + +static void +mpt_free_buffer(struct mpt_page_memory *page_mem) +{ + + if (page_mem->vaddr == NULL) + return; + bus_dmamap_unload(page_mem->tag, page_mem->map); + bus_dmamem_free(page_mem->tag, page_mem->vaddr, page_mem->map); + bus_dma_tag_destroy(page_mem->tag); + page_mem->vaddr = NULL; +} + +static int +mpt_user_read_cfg_header(struct mpt_softc *mpt, + struct mpt_cfg_page_req *page_req) +{ + request_t *req; + cfgparms_t params; + MSG_CONFIG *cfgp; + int error; + + req = mpt_get_request(mpt, TRUE); + if (req == NULL) { + mpt_prt(mpt, "mpt_user_read_cfg_header: Get request failed!\n"); + return (ENOMEM); + } + + params.Action = MPI_CONFIG_ACTION_PAGE_HEADER; + params.PageVersion = 0; + params.PageLength = 0; + params.PageNumber = page_req->header.PageNumber; + params.PageType = page_req->header.PageType; + params.PageAddress = le32toh(page_req->page_address); + error = mpt_issue_cfg_req(mpt, req, &params, /*addr*/0, /*len*/0, + TRUE, 5000); + if (error != 0) { + /* + * Leave the request. 
Without resetting the chip, it's + * still owned by it and we'll just get into trouble + * freeing it now. Mark it as abandoned so that if it + * shows up later it can be freed. + */ + mpt_prt(mpt, "read_cfg_header timed out\n"); + return (ETIMEDOUT); + } + + page_req->ioc_status = htole16(req->IOCStatus); + if ((req->IOCStatus & MPI_IOCSTATUS_MASK) == MPI_IOCSTATUS_SUCCESS) { + cfgp = req->req_vbuf; + bcopy(&cfgp->Header, &page_req->header, + sizeof(page_req->header)); + } + mpt_free_request(mpt, req); + return (0); +} + +static int +mpt_user_read_cfg_page(struct mpt_softc *mpt, struct mpt_cfg_page_req *page_req, + struct mpt_page_memory *mpt_page) +{ + CONFIG_PAGE_HEADER *hdr; + request_t *req; + cfgparms_t params; + int error; + + req = mpt_get_request(mpt, TRUE); + if (req == NULL) { + mpt_prt(mpt, "mpt_user_read_cfg_page: Get request failed!\n"); + return (ENOMEM); + } + + hdr = mpt_page->vaddr; + params.Action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; + params.PageVersion = hdr->PageVersion; + params.PageLength = hdr->PageLength; + params.PageNumber = hdr->PageNumber; + params.PageType = hdr->PageType & MPI_CONFIG_PAGETYPE_MASK; + params.PageAddress = le32toh(page_req->page_address); + error = mpt_issue_cfg_req(mpt, req, &params, mpt_page->paddr, + le32toh(page_req->len), TRUE, 5000); + if (error != 0) { + mpt_prt(mpt, "mpt_user_read_cfg_page timed out\n"); + return (ETIMEDOUT); + } + + page_req->ioc_status = htole16(req->IOCStatus); + if ((req->IOCStatus & MPI_IOCSTATUS_MASK) == MPI_IOCSTATUS_SUCCESS) + bus_dmamap_sync(mpt_page->tag, mpt_page->map, + BUS_DMASYNC_POSTREAD); + mpt_free_request(mpt, req); + return (0); +} + +static int +mpt_user_read_extcfg_header(struct mpt_softc *mpt, + struct mpt_ext_cfg_page_req *ext_page_req) +{ + request_t *req; + cfgparms_t params; + MSG_CONFIG_REPLY *cfgp; + int error; + + req = mpt_get_request(mpt, TRUE); + if (req == NULL) { + mpt_prt(mpt, "mpt_user_read_extcfg_header: Get request failed!\n"); + return (ENOMEM); + } + + params.Action = MPI_CONFIG_ACTION_PAGE_HEADER; + params.PageVersion = ext_page_req->header.PageVersion; + params.PageLength = 0; + params.PageNumber = ext_page_req->header.PageNumber; + params.PageType = MPI_CONFIG_PAGETYPE_EXTENDED; + params.PageAddress = le32toh(ext_page_req->page_address); + params.ExtPageType = ext_page_req->header.ExtPageType; + params.ExtPageLength = 0; + error = mpt_issue_cfg_req(mpt, req, &params, /*addr*/0, /*len*/0, + TRUE, 5000); + if (error != 0) { + /* + * Leave the request. Without resetting the chip, it's + * still owned by it and we'll just get into trouble + * freeing it now. Mark it as abandoned so that if it + * shows up later it can be freed. 
+ */ + mpt_prt(mpt, "mpt_user_read_extcfg_header timed out\n"); + return (ETIMEDOUT); + } + + ext_page_req->ioc_status = htole16(req->IOCStatus); + if ((req->IOCStatus & MPI_IOCSTATUS_MASK) == MPI_IOCSTATUS_SUCCESS) { + cfgp = req->req_vbuf; + ext_page_req->header.PageVersion = cfgp->Header.PageVersion; + ext_page_req->header.PageNumber = cfgp->Header.PageNumber; + ext_page_req->header.PageType = cfgp->Header.PageType; + ext_page_req->header.ExtPageLength = cfgp->ExtPageLength; + ext_page_req->header.ExtPageType = cfgp->ExtPageType; + } + mpt_free_request(mpt, req); + return (0); +} + +static int +mpt_user_read_extcfg_page(struct mpt_softc *mpt, + struct mpt_ext_cfg_page_req *ext_page_req, struct mpt_page_memory *mpt_page) +{ + CONFIG_EXTENDED_PAGE_HEADER *hdr; + request_t *req; + cfgparms_t params; + int error; + + req = mpt_get_request(mpt, TRUE); + if (req == NULL) { + mpt_prt(mpt, "mpt_user_read_extcfg_page: Get request failed!\n"); + return (ENOMEM); + } + + hdr = mpt_page->vaddr; + params.Action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; + params.PageVersion = hdr->PageVersion; + params.PageLength = 0; + params.PageNumber = hdr->PageNumber; + params.PageType = MPI_CONFIG_PAGETYPE_EXTENDED; + params.PageAddress = le32toh(ext_page_req->page_address); + params.ExtPageType = hdr->ExtPageType; + params.ExtPageLength = hdr->ExtPageLength; + error = mpt_issue_cfg_req(mpt, req, &params, mpt_page->paddr, + le32toh(ext_page_req->len), TRUE, 5000); + if (error != 0) { + mpt_prt(mpt, "mpt_user_read_extcfg_page timed out\n"); + return (ETIMEDOUT); + } + + ext_page_req->ioc_status = htole16(req->IOCStatus); + if ((req->IOCStatus & MPI_IOCSTATUS_MASK) == MPI_IOCSTATUS_SUCCESS) + bus_dmamap_sync(mpt_page->tag, mpt_page->map, + BUS_DMASYNC_POSTREAD); + mpt_free_request(mpt, req); + return (0); +} + +static int +mpt_user_write_cfg_page(struct mpt_softc *mpt, + struct mpt_cfg_page_req *page_req, struct mpt_page_memory *mpt_page) +{ + CONFIG_PAGE_HEADER *hdr; + request_t *req; + cfgparms_t params; + u_int hdr_attr; + int error; + + hdr = mpt_page->vaddr; + hdr_attr = hdr->PageType & MPI_CONFIG_PAGEATTR_MASK; + if (hdr_attr != MPI_CONFIG_PAGEATTR_CHANGEABLE && + hdr_attr != MPI_CONFIG_PAGEATTR_PERSISTENT) { + mpt_prt(mpt, "page type 0x%x not changeable\n", + hdr->PageType & MPI_CONFIG_PAGETYPE_MASK); + return (EINVAL); + } + +#if 0 + /* + * We shouldn't mask off other bits here. + */ + hdr->PageType &= ~MPI_CONFIG_PAGETYPE_MASK; +#endif + + req = mpt_get_request(mpt, TRUE); + if (req == NULL) + return (ENOMEM); + + bus_dmamap_sync(mpt_page->tag, mpt_page->map, BUS_DMASYNC_PREWRITE); + + /* + * There isn't any point in restoring stripped out attributes + * if you then mask them going down to issue the request. 
+ */ + + params.Action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT; + params.PageVersion = hdr->PageVersion; + params.PageLength = hdr->PageLength; + params.PageNumber = hdr->PageNumber; + params.PageAddress = le32toh(page_req->page_address); +#if 0 + /* Restore stripped out attributes */ + hdr->PageType |= hdr_attr; + params.PageType = hdr->PageType & MPI_CONFIG_PAGETYPE_MASK; +#else + params.PageType = hdr->PageType; +#endif + error = mpt_issue_cfg_req(mpt, req, &params, mpt_page->paddr, + le32toh(page_req->len), TRUE, 5000); + if (error != 0) { + mpt_prt(mpt, "mpt_write_cfg_page timed out\n"); + return (ETIMEDOUT); + } + + page_req->ioc_status = htole16(req->IOCStatus); + mpt_free_request(mpt, req); + return (0); +} + +static int +mpt_user_reply_handler(struct mpt_softc *mpt, request_t *req, + uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) +{ + MSG_RAID_ACTION_REPLY *reply; + struct mpt_user_raid_action_result *res; + + if (req == NULL) + return (TRUE); + + if (reply_frame != NULL) { + bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap, + BUS_DMASYNC_POSTREAD); + reply = (MSG_RAID_ACTION_REPLY *)reply_frame; + req->IOCStatus = le16toh(reply->IOCStatus); + res = (struct mpt_user_raid_action_result *) + (((uint8_t *)req->req_vbuf) + MPT_RQSL(mpt)); + res->action_status = reply->ActionStatus; + res->volume_status = reply->VolumeStatus; + bcopy(&reply->ActionData, res->action_data, + sizeof(res->action_data)); + } + + req->state &= ~REQ_STATE_QUEUED; + req->state |= REQ_STATE_DONE; + TAILQ_REMOVE(&mpt->request_pending_list, req, links); + + if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) { + wakeup(req); + } else if ((req->state & REQ_STATE_TIMEDOUT) != 0) { + /* + * Whew- we can free this request (late completion) + */ + mpt_free_request(mpt, req); + } + + return (TRUE); +} + +/* + * We use the first part of the request buffer after the request frame + * to hold the action data and action status from the RAID reply. The + * rest of the request buffer is used to hold the buffer for the + * action SGE. + */ +static int +mpt_user_raid_action(struct mpt_softc *mpt, struct mpt_raid_action *raid_act, + struct mpt_page_memory *mpt_page) +{ + request_t *req; + struct mpt_user_raid_action_result *res; + MSG_RAID_ACTION_REQUEST *rap; + SGE_SIMPLE32 *se; + int error; + + req = mpt_get_request(mpt, TRUE); + if (req == NULL) + return (ENOMEM); + rap = req->req_vbuf; + memset(rap, 0, sizeof *rap); + rap->Action = raid_act->action; + rap->ActionDataWord = raid_act->action_data_word; + rap->Function = MPI_FUNCTION_RAID_ACTION; + rap->VolumeID = raid_act->volume_id; + rap->VolumeBus = raid_act->volume_bus; + rap->PhysDiskNum = raid_act->phys_disk_num; + se = (SGE_SIMPLE32 *)&rap->ActionDataSGE; + if (mpt_page->vaddr != NULL && raid_act->len != 0) { + bus_dmamap_sync(mpt_page->tag, mpt_page->map, + BUS_DMASYNC_PREWRITE); + se->Address = htole32(mpt_page->paddr); + MPI_pSGE_SET_LENGTH(se, le32toh(raid_act->len)); + MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT | + MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER | + MPI_SGE_FLAGS_END_OF_LIST | + (raid_act->write ? MPI_SGE_FLAGS_HOST_TO_IOC : + MPI_SGE_FLAGS_IOC_TO_HOST))); + } + se->FlagsLength = htole32(se->FlagsLength); + rap->MsgContext = htole32(req->index | user_handler_id); + + mpt_check_doorbell(mpt); + mpt_send_cmd(mpt, req); + + error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, TRUE, + 2000); + if (error != 0) { + /* + * Leave request so it can be cleaned up later. 
+ */ + mpt_prt(mpt, "mpt_user_raid_action timed out\n"); + return (error); + } + + raid_act->ioc_status = htole16(req->IOCStatus); + if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) { + mpt_free_request(mpt, req); + return (0); + } + + res = (struct mpt_user_raid_action_result *) + (((uint8_t *)req->req_vbuf) + MPT_RQSL(mpt)); + raid_act->volume_status = res->volume_status; + raid_act->action_status = res->action_status; + bcopy(res->action_data, raid_act->action_data, + sizeof(res->action_data)); + if (mpt_page->vaddr != NULL) + bus_dmamap_sync(mpt_page->tag, mpt_page->map, + BUS_DMASYNC_POSTREAD); + mpt_free_request(mpt, req); + return (0); +} + +#ifdef __amd64__ +#define PTRIN(p) ((void *)(uintptr_t)(p)) +#define PTROUT(v) ((u_int32_t)(uintptr_t)(v)) +#endif + +static int +mpt_ioctl(struct dev_ioctl_args *ap) +{ + struct mpt_softc *mpt; + struct mpt_cfg_page_req *page_req; + struct mpt_ext_cfg_page_req *ext_page_req; + struct mpt_raid_action *raid_act; + struct mpt_page_memory mpt_page; +#ifdef __amd64__ + struct mpt_cfg_page_req32 *page_req32; + struct mpt_cfg_page_req page_req_swab; + struct mpt_ext_cfg_page_req32 *ext_page_req32; + struct mpt_ext_cfg_page_req ext_page_req_swab; + struct mpt_raid_action32 *raid_act32; + struct mpt_raid_action raid_act_swab; +#endif + u_long cmd = ap->a_cmd; + caddr_t arg = ap->a_data; + struct cdev *kdev = ap->a_head.a_dev; + int error; + + mpt = kdev->si_drv1; + page_req = (void *)arg; + ext_page_req = (void *)arg; + raid_act = (void *)arg; + mpt_page.vaddr = NULL; + +#ifdef __amd64__ + /* Convert 32-bit structs to native ones. */ + page_req32 = (void *)arg; + ext_page_req32 = (void *)arg; + raid_act32 = (void *)arg; + switch (cmd) { + case MPTIO_READ_CFG_HEADER32: + case MPTIO_READ_CFG_PAGE32: + case MPTIO_WRITE_CFG_PAGE32: + page_req = &page_req_swab; + page_req->header = page_req32->header; + page_req->page_address = page_req32->page_address; + page_req->buf = PTRIN(page_req32->buf); + page_req->len = page_req32->len; + page_req->ioc_status = page_req32->ioc_status; + break; + case MPTIO_READ_EXT_CFG_HEADER32: + case MPTIO_READ_EXT_CFG_PAGE32: + ext_page_req = &ext_page_req_swab; + ext_page_req->header = ext_page_req32->header; + ext_page_req->page_address = ext_page_req32->page_address; + ext_page_req->buf = PTRIN(ext_page_req32->buf); + ext_page_req->len = ext_page_req32->len; + ext_page_req->ioc_status = ext_page_req32->ioc_status; + break; + case MPTIO_RAID_ACTION32: + raid_act = &raid_act_swab; + raid_act->action = raid_act32->action; + raid_act->volume_bus = raid_act32->volume_bus; + raid_act->volume_id = raid_act32->volume_id; + raid_act->phys_disk_num = raid_act32->phys_disk_num; + raid_act->action_data_word = raid_act32->action_data_word; + raid_act->buf = PTRIN(raid_act32->buf); + raid_act->len = raid_act32->len; + raid_act->volume_status = raid_act32->volume_status; + bcopy(raid_act32->action_data, raid_act->action_data, + sizeof(raid_act->action_data)); + raid_act->action_status = raid_act32->action_status; + raid_act->ioc_status = raid_act32->ioc_status; + raid_act->write = raid_act32->write; + break; + } +#endif + + switch (cmd) { +#ifdef __amd64__ + case MPTIO_READ_CFG_HEADER32: +#endif + case MPTIO_READ_CFG_HEADER: + MPT_LOCK(mpt); + error = mpt_user_read_cfg_header(mpt, page_req); + MPT_UNLOCK(mpt); + break; +#ifdef __amd64__ + case MPTIO_READ_CFG_PAGE32: +#endif + case MPTIO_READ_CFG_PAGE: + error = mpt_alloc_buffer(mpt, &mpt_page, page_req->len); + if (error) + break; + error = copyin(page_req->buf, 
mpt_page.vaddr, + sizeof(CONFIG_PAGE_HEADER)); + if (error) + break; + MPT_LOCK(mpt); + error = mpt_user_read_cfg_page(mpt, page_req, &mpt_page); + MPT_UNLOCK(mpt); + if (error) + break; + error = copyout(mpt_page.vaddr, page_req->buf, page_req->len); + break; +#ifdef __amd64__ + case MPTIO_READ_EXT_CFG_HEADER32: +#endif + case MPTIO_READ_EXT_CFG_HEADER: + MPT_LOCK(mpt); + error = mpt_user_read_extcfg_header(mpt, ext_page_req); + MPT_UNLOCK(mpt); + break; +#ifdef __amd64__ + case MPTIO_READ_EXT_CFG_PAGE32: +#endif + case MPTIO_READ_EXT_CFG_PAGE: + error = mpt_alloc_buffer(mpt, &mpt_page, ext_page_req->len); + if (error) + break; + error = copyin(ext_page_req->buf, mpt_page.vaddr, + sizeof(CONFIG_EXTENDED_PAGE_HEADER)); + if (error) + break; + MPT_LOCK(mpt); + error = mpt_user_read_extcfg_page(mpt, ext_page_req, &mpt_page); + MPT_UNLOCK(mpt); + if (error) + break; + error = copyout(mpt_page.vaddr, ext_page_req->buf, + ext_page_req->len); + break; +#ifdef __amd64__ + case MPTIO_WRITE_CFG_PAGE32: +#endif + case MPTIO_WRITE_CFG_PAGE: + error = mpt_alloc_buffer(mpt, &mpt_page, page_req->len); + if (error) + break; + error = copyin(page_req->buf, mpt_page.vaddr, page_req->len); + if (error) + break; + MPT_LOCK(mpt); + error = mpt_user_write_cfg_page(mpt, page_req, &mpt_page); + MPT_UNLOCK(mpt); + break; +#ifdef __amd64__ + case MPTIO_RAID_ACTION32: +#endif + case MPTIO_RAID_ACTION: + if (raid_act->buf != NULL) { + error = mpt_alloc_buffer(mpt, &mpt_page, raid_act->len); + if (error) + break; + error = copyin(raid_act->buf, mpt_page.vaddr, + raid_act->len); + if (error) + break; + } + MPT_LOCK(mpt); + error = mpt_user_raid_action(mpt, raid_act, &mpt_page); + MPT_UNLOCK(mpt); + if (error) + break; + if (raid_act->buf != NULL) + error = copyout(mpt_page.vaddr, raid_act->buf, + raid_act->len); + break; + default: + error = ENOIOCTL; + break; + } + + mpt_free_buffer(&mpt_page); + + if (error) + return (error); + +#ifdef __amd64__ + /* Convert native structs to 32-bit ones. 
*/ + switch (cmd) { + case MPTIO_READ_CFG_HEADER32: + case MPTIO_READ_CFG_PAGE32: + case MPTIO_WRITE_CFG_PAGE32: + page_req32->header = page_req->header; + page_req32->page_address = page_req->page_address; + page_req32->buf = PTROUT(page_req->buf); + page_req32->len = page_req->len; + page_req32->ioc_status = page_req->ioc_status; + break; + case MPTIO_READ_EXT_CFG_HEADER32: + case MPTIO_READ_EXT_CFG_PAGE32: + ext_page_req32->header = ext_page_req->header; + ext_page_req32->page_address = ext_page_req->page_address; + ext_page_req32->buf = PTROUT(ext_page_req->buf); + ext_page_req32->len = ext_page_req->len; + ext_page_req32->ioc_status = ext_page_req->ioc_status; + break; + case MPTIO_RAID_ACTION32: + raid_act32->action = raid_act->action; + raid_act32->volume_bus = raid_act->volume_bus; + raid_act32->volume_id = raid_act->volume_id; + raid_act32->phys_disk_num = raid_act->phys_disk_num; + raid_act32->action_data_word = raid_act->action_data_word; + raid_act32->buf = PTROUT(raid_act->buf); + raid_act32->len = raid_act->len; + raid_act32->volume_status = raid_act->volume_status; + bcopy(raid_act->action_data, raid_act32->action_data, + sizeof(raid_act->action_data)); + raid_act32->action_status = raid_act->action_status; + raid_act32->ioc_status = raid_act->ioc_status; + raid_act32->write = raid_act->write; + break; + } +#endif + + return (0); +} diff --git a/sys/sys/mpt_ioctl.h b/sys/sys/mpt_ioctl.h new file mode 100644 index 0000000000..77c73aec3a --- /dev/null +++ b/sys/sys/mpt_ioctl.h @@ -0,0 +1,132 @@ +/*- + * Copyright (c) 2008 Yahoo!, Inc. + * All rights reserved. + * Written by: John Baldwin + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the author nor the names of any co-contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * LSI MPT-Fusion Host Adapter FreeBSD userland interface + * + * $FreeBSD: src/sys/sys/mpt_ioctl.h,v 1.1 2008/05/06 20:49:52 jhb Exp $ + */ + +#ifndef _MPT_IOCTL_H_ +#define _MPT_IOCTL_H_ + +#include +#include +#include + +/* + * For the read header requests, the header should include the page + * type or extended page type, page number, and page version. The + * buffer and length are unused. 
The completed header is returned in + * the 'header' member. + * + * For the read page and write page requests, 'buf' should point to a + * buffer of 'len' bytes which holds the entire page (including the + * header). + * + * All requests specify the page address in 'page_address'. + */ +struct mpt_cfg_page_req { + CONFIG_PAGE_HEADER header; + uint32_t page_address; + void *buf; + int len; + uint16_t ioc_status; +}; + +struct mpt_ext_cfg_page_req { + CONFIG_EXTENDED_PAGE_HEADER header; + uint32_t page_address; + void *buf; + int len; + uint16_t ioc_status; +}; + +struct mpt_raid_action { + uint8_t action; + uint8_t volume_bus; + uint8_t volume_id; + uint8_t phys_disk_num; + uint32_t action_data_word; + void *buf; + int len; + uint32_t volume_status; + uint32_t action_data[4]; + uint16_t action_status; + uint16_t ioc_status; + uint8_t write; +}; + +#define MPTIO_READ_CFG_HEADER _IOWR('M', 100, struct mpt_cfg_page_req) +#define MPTIO_READ_CFG_PAGE _IOWR('M', 101, struct mpt_cfg_page_req) +#define MPTIO_READ_EXT_CFG_HEADER _IOWR('M', 102, struct mpt_ext_cfg_page_req) +#define MPTIO_READ_EXT_CFG_PAGE _IOWR('M', 103, struct mpt_ext_cfg_page_req) +#define MPTIO_WRITE_CFG_PAGE _IOWR('M', 104, struct mpt_cfg_page_req) +#define MPTIO_RAID_ACTION _IOWR('M', 105, struct mpt_raid_action) + +#if defined(__amd64__) +struct mpt_cfg_page_req32 { + CONFIG_PAGE_HEADER header; + uint32_t page_address; + uint32_t buf; + int len; + uint16_t ioc_status; +}; + +struct mpt_ext_cfg_page_req32 { + CONFIG_EXTENDED_PAGE_HEADER header; + uint32_t page_address; + uint32_t buf; + int len; + uint16_t ioc_status; +}; + +struct mpt_raid_action32 { + uint8_t action; + uint8_t volume_bus; + uint8_t volume_id; + uint8_t phys_disk_num; + uint32_t action_data_word; + uint32_t buf; + int len; + uint32_t volume_status; + uint32_t action_data[4]; + uint16_t action_status; + uint16_t ioc_status; + uint8_t write; +}; + +#define MPTIO_READ_CFG_HEADER32 _IOWR('M', 100, struct mpt_cfg_page_req32) +#define MPTIO_READ_CFG_PAGE32 _IOWR('M', 101, struct mpt_cfg_page_req32) +#define MPTIO_READ_EXT_CFG_HEADER32 _IOWR('M', 102, struct mpt_ext_cfg_page_req32) +#define MPTIO_READ_EXT_CFG_PAGE32 _IOWR('M', 103, struct mpt_ext_cfg_page_req32) +#define MPTIO_WRITE_CFG_PAGE32 _IOWR('M', 104, struct mpt_cfg_page_req32) +#define MPTIO_RAID_ACTION32 _IOWR('M', 105, struct mpt_raid_action32) +#endif + +#endif /* !_MPT_IOCTL_H_ */
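Usage sketch (not part of the patch itself): the interface above is driven in two steps, a header fetch (MPTIO_READ_CFG_HEADER) followed by a full page read (MPTIO_READ_CFG_PAGE) into a caller buffer that begins with the completed header, exactly as the comment in mpt_ioctl.h describes. The minimal userland example below is hypothetical; it assumes a controller node at /dev/mpt0 (created by mpt_user_attach() in mpt_user.c above), takes MPI_CONFIG_PAGETYPE_IOC from the mpilib headers pulled in by <sys/mpt_ioctl.h>, and abbreviates error handling.

/*
 * Minimal sketch: read IOC Page 2 through the new mpt(4) ioctls.
 * Illustrative only; assumes a controller at /dev/mpt0.
 */
#include <sys/ioctl.h>
#include <sys/mpt_ioctl.h>

#include <err.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	struct mpt_cfg_page_req req;
	void *buf;
	int fd;

	fd = open("/dev/mpt0", O_RDWR);
	if (fd < 0)
		err(1, "open");

	/* Step 1: fetch the header; the driver fills in the rest of it. */
	memset(&req, 0, sizeof(req));
	req.header.PageType = MPI_CONFIG_PAGETYPE_IOC;
	req.header.PageNumber = 2;
	req.page_address = 0;
	if (ioctl(fd, MPTIO_READ_CFG_HEADER, &req) < 0)
		err(1, "MPTIO_READ_CFG_HEADER");

	/* Step 2: read the whole page; PageLength counts 32-bit words. */
	req.len = req.header.PageLength * 4;
	if ((buf = calloc(1, req.len)) == NULL)
		err(1, "calloc");
	/* The page buffer must begin with the completed header. */
	memcpy(buf, &req.header, sizeof(req.header));
	req.buf = buf;
	if (ioctl(fd, MPTIO_READ_CFG_PAGE, &req) < 0)
		err(1, "MPTIO_READ_CFG_PAGE");

	printf("IOC Page 2 read: %d bytes, ioc_status 0x%x\n",
	    req.len, req.ioc_status);
	free(buf);
	close(fd);
	return (0);
}

The extended-page ioctls follow the same header-then-page sequence, and MPTIO_RAID_ACTION takes its parameters directly in struct mpt_raid_action, with an optional data buffer described by 'buf' and 'len' and the transfer direction selected by 'write'.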