mpt(4): Use MSI on SAS adapters that support it.
[dragonfly.git] / sys / dev / disk / mpt / mpt.h
1/* $FreeBSD: src/sys/dev/mpt/mpt.h,v 1.60 2012/03/24 00:30:17 marius Exp $ */
2/*-
3 * Generic defines for LSI '909 FC adapters.
4 * FreeBSD Version.
5 *
6 * Copyright (c) 2000, 2001 by Greg Ansley
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice immediately at the beginning of the file, without modification,
13 * this list of conditions, and the following disclaimer.
14 * 2. The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29/*-
30 * Copyright (c) 2002, 2006 by Matthew Jacob
31 * All rights reserved.
32 *
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions are
35 * met:
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
39 * substantially similar to the "NO WARRANTY" disclaimer below
40 * ("Disclaimer") and any redistribution must be conditioned upon including
41 * a substantially similar Disclaimer requirement for further binary
42 * redistribution.
43 * 3. Neither the names of the above listed copyright holders nor the names
44 * of any contributors may be used to endorse or promote products derived
45 * from this software without specific prior written permission.
46 *
47 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
48 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
49 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
50 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
51 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
52 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
53 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
54 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
55 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
56 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
57 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
58 *
59 * Support from Chris Ellsworth in order to make SAS adapters work
60 * is gratefully acknowledged.
61 *
62 *
63 * Support from LSI-Logic has also gone a long way toward making this a
64 * workable subsystem and is gratefully acknowledged.
65 */
66/*
67 * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
68 * Copyright (c) 2004, 2005 Justin T. Gibbs
69 * Copyright (c) 2005, WHEEL Sp. z o.o.
70 * All rights reserved.
71 *
72 * Redistribution and use in source and binary forms, with or without
73 * modification, are permitted provided that the following conditions are
74 * met:
75 * 1. Redistributions of source code must retain the above copyright
76 * notice, this list of conditions and the following disclaimer.
77 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
78 * substantially similar to the "NO WARRANTY" disclaimer below
79 * ("Disclaimer") and any redistribution must be conditioned upon including
80 * a substantially similar Disclaimer requirement for further binary
81 * redistribution.
82 * 3. Neither the names of the above listed copyright holders nor the names
83 * of any contributors may be used to endorse or promote products derived
84 * from this software without specific prior written permission.
85 *
86 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
87 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
88 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
89 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
90 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
91 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
92 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
93 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
94 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
95 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
96 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
97 */
98
99#ifndef _MPT_H_
100#define _MPT_H_
101
102/********************************* OS Includes ********************************/
103#include <sys/types.h>
104#include <sys/param.h>
105#include <sys/systm.h>
106#include <sys/endian.h>
107#include <sys/eventhandler.h>
108#include <sys/kernel.h>
109#include <sys/queue.h>
110#include <sys/malloc.h>
111#include <sys/devicestat.h>
112#include <sys/proc.h>
113#include <sys/bus.h>
114#include <sys/module.h>
115#include <sys/thread2.h>
116#include <sys/mplock2.h>
117
118#include <sys/rman.h>
119#include <sys/sysctl.h>
120
121#include <bus/pci/pcireg.h>
122#include <bus/pci/pcivar.h>
123
124/**************************** Register Definitions ****************************/
125#include <dev/disk/mpt/mpt_reg.h>
126
127/******************************* MPI Definitions ******************************/
128#include <dev/disk/mpt/mpilib/mpi_type.h>
129#include <dev/disk/mpt/mpilib/mpi.h>
130#include <dev/disk/mpt/mpilib/mpi_cnfg.h>
131#include <dev/disk/mpt/mpilib/mpi_ioc.h>
132#include <dev/disk/mpt/mpilib/mpi_raid.h>
133
134/* XXX For mpt_debug.c */
135#include <dev/disk/mpt/mpilib/mpi_init.h>
136
137#define MPT_S64_2_SCALAR(y) ((((int64_t)y.High) << 32) | (y.Low))
138#define MPT_U64_2_SCALAR(y) ((((uint64_t)y.High) << 32) | (y.Low))
139
140/****************************** Misc Definitions ******************************/
141/* #define MPT_TEST_MULTIPATH 1 */
142#define MPT_OK (0)
143#define MPT_FAIL (0x10000)
144
145#define NUM_ELEMENTS(array) (sizeof(array) / sizeof(*array))
146
147#define MPT_ROLE_NONE 0
148#define MPT_ROLE_INITIATOR 1
149#define MPT_ROLE_TARGET 2
150#define MPT_ROLE_BOTH 3
151#define MPT_ROLE_DEFAULT MPT_ROLE_INITIATOR
152
153#define MPT_INI_ID_NONE -1
154
155/**************************** Forward Declarations ****************************/
156struct mpt_softc;
157struct mpt_personality;
158typedef struct req_entry request_t;
159
160/************************* Personality Module Support *************************/
161typedef int mpt_load_handler_t(struct mpt_personality *);
162typedef int mpt_probe_handler_t(struct mpt_softc *);
163typedef int mpt_attach_handler_t(struct mpt_softc *);
164typedef int mpt_enable_handler_t(struct mpt_softc *);
165typedef void mpt_ready_handler_t(struct mpt_softc *);
166typedef int mpt_event_handler_t(struct mpt_softc *, request_t *,
167 MSG_EVENT_NOTIFY_REPLY *);
168typedef void mpt_reset_handler_t(struct mpt_softc *, int /*type*/);
169/* XXX Add return value and use for veto? */
170typedef void mpt_shutdown_handler_t(struct mpt_softc *);
171typedef void mpt_detach_handler_t(struct mpt_softc *);
172typedef int mpt_unload_handler_t(struct mpt_personality *);
173
174struct mpt_personality
175{
176 const char *name;
177 uint32_t id; /* Assigned identifier. */
178 u_int use_count; /* Instances using personality */
179 mpt_load_handler_t *load; /* configure personality */
180#define MPT_PERS_FIRST_HANDLER(pers) (&(pers)->load)
181 mpt_probe_handler_t *probe; /* probe for device support */
182 mpt_attach_handler_t *attach; /* initialize device instance */
183 mpt_enable_handler_t *enable; /* enable device */
184 mpt_ready_handler_t *ready; /* final open for business */
185 mpt_event_handler_t *event; /* Handle MPI event. */
186 mpt_reset_handler_t *reset; /* Re-init after reset. */
187 mpt_shutdown_handler_t *shutdown; /* Shutdown instance. */
188 mpt_detach_handler_t *detach; /* release device instance */
189 mpt_unload_handler_t *unload; /* Shutdown personality */
190#define MPT_PERS_LAST_HANDLER(pers) (&(pers)->unload)
191};
192
193int mpt_modevent(module_t, int, void *);
194
195/* Maximum supported number of personalities. */
196#define MPT_MAX_PERSONALITIES (15)
197
198#define MPT_PERSONALITY_DEPEND(name, dep, vmin, vpref, vmax) \
199 MODULE_DEPEND(name, dep, vmin, vpref, vmax)
200
201#define DECLARE_MPT_PERSONALITY(name, order) \
202 static moduledata_t name##_mod = { \
203 #name, mpt_modevent, &name##_personality \
204 }; \
205 DECLARE_MODULE(name, name##_mod, SI_SUB_DRIVERS, order); \
206 MODULE_VERSION(name, 1); \
207 MPT_PERSONALITY_DEPEND(name, mpt_core, 1, 1, 1)
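
/*
 * Illustrative sketch (not part of the driver): a hypothetical personality
 * "mpt_foo" fills in a struct mpt_personality with its handlers and then
 * registers itself with the macro above.  All names below are examples
 * only; handlers it does not implement are left NULL.
 *
 *	static struct mpt_personality mpt_foo_personality = {
 *		.name	= "mpt_foo",
 *		.load	= mpt_foo_load,
 *		.attach	= mpt_foo_attach,
 *		.event	= mpt_foo_event,
 *		.reset	= mpt_foo_reset,
 *		.detach	= mpt_foo_detach,
 *		.unload	= mpt_foo_unload,
 *	};
 *	DECLARE_MPT_PERSONALITY(mpt_foo, SI_ORDER_SECOND);
 */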
208
209/******************************* Bus DMA Support ******************************/
210/* XXX Need to update bus_dmamap_sync to take a range argument. */
211#define bus_dmamap_sync_range(dma_tag, dmamap, offset, len, op) \
212 bus_dmamap_sync(dma_tag, dmamap, op)
213
214#define mpt_dma_tag_create(mpt, parent_tag, alignment, boundary, \
215 lowaddr, highaddr, filter, filterarg, \
216 maxsize, nsegments, maxsegsz, flags, \
217 dma_tagp) \
218 bus_dma_tag_create(parent_tag, alignment, boundary, \
219 lowaddr, highaddr, filter, filterarg, \
220 maxsize, nsegments, maxsegsz, flags, \
221 dma_tagp)
222
223struct mpt_map_info {
224 struct mpt_softc *mpt;
225 int error;
226 uint32_t phys;
227};
228
229void mpt_map_rquest(void *, bus_dma_segment_t *, int, int);
230/* **************************** NewBUS interrupt Crock ************************/
231#define mpt_setup_intr(d, i, f, U, if, ifa, hp) \
232 bus_setup_intr(d, i, f, if, ifa, hp, NULL)
233
234/* **************************** NewBUS CAM Support ****************************/
235#define mpt_xpt_bus_register(sim, parent, bus) \
236 xpt_bus_register(sim, bus)
237
238/**************************** Kernel Thread Support ***************************/
239#define mpt_kthread_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg) \
240 kthread_create(func, farg, proc_ptr, fmtstr, arg)
241#define mpt_kthread_exit(status) \
242 kthread_exit()
243
244/********************************* Endianness *********************************/
245#define MPT_2_HOST64(ptr, tag) ptr->tag = le64toh(ptr->tag)
246#define MPT_2_HOST32(ptr, tag) ptr->tag = le32toh(ptr->tag)
247#define MPT_2_HOST16(ptr, tag) ptr->tag = le16toh(ptr->tag)
248
249#define HOST_2_MPT64(ptr, tag) ptr->tag = htole64(ptr->tag)
250#define HOST_2_MPT32(ptr, tag) ptr->tag = htole32(ptr->tag)
251#define HOST_2_MPT16(ptr, tag) ptr->tag = htole16(ptr->tag)
252
253#if _BYTE_ORDER == _BIG_ENDIAN
254void mpt2host_sge_simple_union(SGE_SIMPLE_UNION *);
255void mpt2host_iocfacts_reply(MSG_IOC_FACTS_REPLY *);
256void mpt2host_portfacts_reply(MSG_PORT_FACTS_REPLY *);
257void mpt2host_config_page_ioc2(CONFIG_PAGE_IOC_2 *);
258void mpt2host_config_page_ioc3(CONFIG_PAGE_IOC_3 *);
259void mpt2host_config_page_scsi_port_0(CONFIG_PAGE_SCSI_PORT_0 *);
260void mpt2host_config_page_scsi_port_1(CONFIG_PAGE_SCSI_PORT_1 *);
261void host2mpt_config_page_scsi_port_1(CONFIG_PAGE_SCSI_PORT_1 *);
262void mpt2host_config_page_scsi_port_2(CONFIG_PAGE_SCSI_PORT_2 *);
263void mpt2host_config_page_scsi_device_0(CONFIG_PAGE_SCSI_DEVICE_0 *);
264void mpt2host_config_page_scsi_device_1(CONFIG_PAGE_SCSI_DEVICE_1 *);
265void host2mpt_config_page_scsi_device_1(CONFIG_PAGE_SCSI_DEVICE_1 *);
266void mpt2host_config_page_fc_port_0(CONFIG_PAGE_FC_PORT_0 *);
267void mpt2host_config_page_fc_port_1(CONFIG_PAGE_FC_PORT_1 *);
268void host2mpt_config_page_fc_port_1(CONFIG_PAGE_FC_PORT_1 *);
269void mpt2host_config_page_raid_vol_0(CONFIG_PAGE_RAID_VOL_0 *);
270void mpt2host_config_page_raid_phys_disk_0(CONFIG_PAGE_RAID_PHYS_DISK_0 *);
271void mpt2host_mpi_raid_vol_indicator(MPI_RAID_VOL_INDICATOR *);
272#else
273#define mpt2host_sge_simple_union(x) do { ; } while (0)
274#define mpt2host_iocfacts_reply(x) do { ; } while (0)
275#define mpt2host_portfacts_reply(x) do { ; } while (0)
276#define mpt2host_config_page_ioc2(x) do { ; } while (0)
277#define mpt2host_config_page_ioc3(x) do { ; } while (0)
278#define mpt2host_config_page_scsi_port_0(x) do { ; } while (0)
279#define mpt2host_config_page_scsi_port_1(x) do { ; } while (0)
280#define host2mpt_config_page_scsi_port_1(x) do { ; } while (0)
281#define mpt2host_config_page_scsi_port_2(x) do { ; } while (0)
282#define mpt2host_config_page_scsi_device_0(x) do { ; } while (0)
283#define mpt2host_config_page_scsi_device_1(x) do { ; } while (0)
284#define host2mpt_config_page_scsi_device_1(x) do { ; } while (0)
285#define mpt2host_config_page_fc_port_0(x) do { ; } while (0)
286#define mpt2host_config_page_fc_port_1(x) do { ; } while (0)
287#define host2mpt_config_page_fc_port_1(x) do { ; } while (0)
288#define mpt2host_config_page_raid_vol_0(x) do { ; } while (0)
289#define mpt2host_config_page_raid_phys_disk_0(x) \
290 do { ; } while (0)
291#define mpt2host_mpi_raid_vol_indicator(x) do { ; } while (0)
292#endif
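
/*
 * Usage sketch (illustrative): the conversion macros above are applied in
 * place to a message before its fields are interpreted, e.g. for a generic
 * reply frame:
 *
 *	MSG_DEFAULT_REPLY *reply = ...;
 *
 *	MPT_2_HOST16(reply, IOCStatus);
 *	MPT_2_HOST32(reply, IOCLogInfo);
 *
 * On little-endian hosts the macros reduce to self-assignments, so callers
 * can use them unconditionally.
 */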
293
294/**************************** MPI Transaction State ***************************/
295typedef enum {
296 REQ_STATE_NIL = 0x00,
297 REQ_STATE_FREE = 0x01,
298 REQ_STATE_ALLOCATED = 0x02,
299 REQ_STATE_QUEUED = 0x04,
300 REQ_STATE_DONE = 0x08,
301 REQ_STATE_TIMEDOUT = 0x10,
302 REQ_STATE_NEED_WAKEUP = 0x20,
303 REQ_STATE_LOCKED = 0x80, /* can't be freed */
304 REQ_STATE_MASK = 0xFF
305} mpt_req_state_t;
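
/*
 * Usage note (illustrative): the flag values above are compared against a
 * (state, mask) pair, e.g. a caller waiting for completion typically asks
 * mpt_wait_req() for
 *
 *	mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, FALSE, 5000);
 *
 * i.e. "any state in which REQ_STATE_DONE is set", regardless of the other
 * bits.
 */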
306
307struct req_entry {
308 TAILQ_ENTRY(req_entry) links; /* Pointer to next in list */
309 mpt_req_state_t state; /* Request State Information */
310 uint16_t index; /* Index of this entry */
311 uint16_t IOCStatus; /* Completion status */
312 uint16_t ResponseCode; /* TMF Response Code */
313 uint16_t serno; /* serial number */
314 union ccb *ccb; /* CAM request */
315 void *req_vbuf; /* Virtual Address of Entry */
316 void *sense_vbuf; /* Virtual Address of sense data */
317 bus_addr_t req_pbuf; /* Physical Address of Entry */
318 bus_addr_t sense_pbuf; /* Physical Address of sense data */
319 bus_dmamap_t dmap; /* DMA map for data buffers */
320 struct req_entry *chain; /* for SGE overallocations */
321 struct callout callout; /* Timeout for the request */
322};
323
324typedef struct mpt_config_params {
325 u_int Action;
326 u_int PageVersion;
327 u_int PageLength;
328 u_int PageNumber;
329 u_int PageType;
330 u_int PageAddress;
331 u_int ExtPageLength;
332 u_int ExtPageType;
333} cfgparms_t;
334
335/**************************** MPI Target State Info ***************************/
336
337typedef struct {
338 uint32_t reply_desc; /* current reply descriptor */
339 uint32_t resid; /* current data residual */
340 uint32_t bytes_xfered; /* current relative offset */
341 union ccb *ccb; /* pointer to currently active ccb */
342 request_t *req; /* pointer to currently active assist request */
343 uint32_t
344 is_local : 1,
345 nxfers : 31;
346 uint32_t tag_id;
347 enum {
348 TGT_STATE_NIL,
349 TGT_STATE_LOADING,
350 TGT_STATE_LOADED,
351 TGT_STATE_IN_CAM,
352 TGT_STATE_SETTING_UP_FOR_DATA,
353 TGT_STATE_MOVING_DATA,
354 TGT_STATE_MOVING_DATA_AND_STATUS,
355 TGT_STATE_SENDING_STATUS
356 } state;
357} mpt_tgt_state_t;
358
359/*
360 * When we get an incoming command it has its own tag which is called the
361 * IoIndex. This is the value we gave that particular command buffer when
362 * we originally assigned it. It's just a number, really. The FC card uses
363 * it as an RX_ID. We can use it to index into mpt->tgt_cmd_ptrs, which
364 * contains pointers to the request_t structures related to that IoIndex.
365 *
366 * What *we* do is construct a tag out of the index for the target command
367 * which owns the incoming ATIO plus a rolling sequence number.
368 */
369#define MPT_MAKE_TAGID(mpt, req, ioindex) \
370 ((ioindex << 18) | (((mpt->sequence++) & 0x3f) << 12) | (req->index & 0xfff))
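
/*
 * Tag layout sketch (follows directly from MPT_MAKE_TAGID above): bits
 * 31..18 carry the IoIndex, bits 17..12 a 6-bit rolling sequence number,
 * and bits 11..0 the request index.  For example, IoIndex 5, sequence
 * 0x21 and request index 0x00a combine to
 *
 *	(5 << 18) | ((0x21 & 0x3f) << 12) | (0x00a & 0xfff) == 0x0016100a
 *
 * and MPT_TAG_2_REQ() recovers the owning command by shifting the tag
 * right by 18 bits again.
 */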
371
372#ifdef INVARIANTS
373#define MPT_TAG_2_REQ(a, b) mpt_tag_2_req(a, (uint32_t) b)
374#else
375#define MPT_TAG_2_REQ(mpt, tag) mpt->tgt_cmd_ptrs[tag >> 18]
376#endif
377
378#define MPT_TGT_STATE(mpt, req) ((mpt_tgt_state_t *) \
379 (&((uint8_t *)req->req_vbuf)[MPT_RQSL(mpt) - sizeof (mpt_tgt_state_t)]))
380
381STAILQ_HEAD(mpt_hdr_stailq, ccb_hdr);
382#define MPT_MAX_LUNS 256
383typedef struct {
384 struct mpt_hdr_stailq atios;
385 struct mpt_hdr_stailq inots;
386 int enabled;
387} tgt_resource_t;
388#define MPT_MAX_ELS 64
389
390/**************************** Handler Registration ****************************/
391/*
392 * Global table of registered reply handlers. The
393 * handler is indicated by byte 3 of the request
394 * index submitted to the IOC. This allows the
395 * driver core to perform generic processing without
396 * any knowledge of per-personality behavior.
397 *
398 * MPT_NUM_REPLY_HANDLERS must be a power of 2
399 * to allow the easy generation of a mask.
400 *
401 * The handler offsets used by the core are hard coded
402 * allowing faster code generation when assigning a handler
403 * to a request. All "personalities" must use the
404 * handler registration mechanism.
405 *
406 * The IOC handlers that are rarely executed are placed
407 * at the tail of the table to make it more likely that
408 * all commonly executed handlers fit in a single cache
409 * line.
410 */
411#define MPT_NUM_REPLY_HANDLERS (32)
412#define MPT_REPLY_HANDLER_EVENTS MPT_CBI_TO_HID(0)
413#define MPT_REPLY_HANDLER_CONFIG MPT_CBI_TO_HID(MPT_NUM_REPLY_HANDLERS-1)
414#define MPT_REPLY_HANDLER_HANDSHAKE MPT_CBI_TO_HID(MPT_NUM_REPLY_HANDLERS-2)
415typedef int mpt_reply_handler_t(struct mpt_softc *mpt, request_t *request,
416 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame);
417typedef union {
418 mpt_reply_handler_t *reply_handler;
419} mpt_handler_t;
420
421typedef enum {
422 MPT_HANDLER_REPLY,
423 MPT_HANDLER_EVENT,
424 MPT_HANDLER_RESET,
425 MPT_HANDLER_SHUTDOWN
426} mpt_handler_type;
427
428struct mpt_handler_record
429{
430 LIST_ENTRY(mpt_handler_record) links;
431 mpt_handler_t handler;
432};
433
434LIST_HEAD(mpt_handler_list, mpt_handler_record);
435
436/*
437 * The handler_id is currently unused but would contain the
438 * handler ID used in the MsgContext field to allow direction
439 * of replies to the handler. Registrations that don't require
440 * a handler id can pass in NULL for the handler_id.
441 *
442 * Deregistrations for handlers without a handler id should
443 * pass in MPT_HANDLER_ID_NONE.
444 */
445#define MPT_HANDLER_ID_NONE (0xFFFFFFFF)
446int mpt_register_handler(struct mpt_softc *, mpt_handler_type,
447 mpt_handler_t, uint32_t *);
448int mpt_deregister_handler(struct mpt_softc *, mpt_handler_type,
449 mpt_handler_t, uint32_t);
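
/*
 * Registration sketch (illustrative; the handler function name is
 * hypothetical): a personality wraps its reply handler in an mpt_handler_t
 * and keeps the returned id for building MsgContext values and for later
 * deregistration:
 *
 *	mpt_handler_t handler;
 *	uint32_t handler_id;
 *	int error;
 *
 *	handler.reply_handler = mpt_foo_reply_handler;
 *	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
 *	    &handler_id);
 *	...
 *	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler, handler_id);
 */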
450
451/******************* Per-Controller Instance Data Structures ******************/
452TAILQ_HEAD(req_queue, req_entry);
453
454/* Structure for saving proper values for modifiable PCI config registers */
455struct mpt_pci_cfg {
456 uint16_t Command;
457 uint16_t LatencyTimer_LineSize;
458 uint32_t IO_BAR;
459 uint32_t Mem0_BAR[2];
460 uint32_t Mem1_BAR[2];
461 uint32_t ROM_BAR;
462 uint8_t IntLine;
463 uint32_t PMCSR;
464};
465
466typedef enum {
467 MPT_RVF_NONE = 0x0,
468 MPT_RVF_ACTIVE = 0x1,
469 MPT_RVF_ANNOUNCED = 0x2,
470 MPT_RVF_UP2DATE = 0x4,
471 MPT_RVF_REFERENCED = 0x8,
472 MPT_RVF_WCE_CHANGED = 0x10
473} mpt_raid_volume_flags;
474
475struct mpt_raid_volume {
476 CONFIG_PAGE_RAID_VOL_0 *config_page;
477 MPI_RAID_VOL_INDICATOR sync_progress;
478 mpt_raid_volume_flags flags;
479 u_int quiesced_disks;
480};
481
482typedef enum {
483 MPT_RDF_NONE = 0x00,
484 MPT_RDF_ACTIVE = 0x01,
485 MPT_RDF_ANNOUNCED = 0x02,
486 MPT_RDF_UP2DATE = 0x04,
487 MPT_RDF_REFERENCED = 0x08,
488 MPT_RDF_QUIESCING = 0x10,
489 MPT_RDF_QUIESCED = 0x20
490} mpt_raid_disk_flags;
491
492struct mpt_raid_disk {
493 CONFIG_PAGE_RAID_PHYS_DISK_0 config_page;
494 struct mpt_raid_volume *volume;
495 u_int member_number;
496 u_int pass_thru_active;
497 mpt_raid_disk_flags flags;
498};
499
500struct mpt_evtf_record {
501 MSG_EVENT_NOTIFY_REPLY reply;
502 uint32_t context;
503 LIST_ENTRY(mpt_evtf_record) links;
504};
505
506LIST_HEAD(mpt_evtf_list, mpt_evtf_record);
507
508struct mptsas_devinfo {
509 uint16_t dev_handle;
510 uint16_t parent_dev_handle;
511 uint16_t enclosure_handle;
512 uint16_t slot;
513 uint8_t phy_num;
514 uint8_t physical_port;
515 uint8_t target_id;
516 uint8_t bus;
517 uint64_t sas_address;
518 uint32_t device_info;
519};
520
521struct mptsas_phyinfo {
522 uint16_t handle;
523 uint8_t phy_num;
524 uint8_t port_id;
525 uint8_t negotiated_link_rate;
526 uint8_t hw_link_rate;
527 uint8_t programmed_link_rate;
528 uint8_t sas_port_add_phy;
529 struct mptsas_devinfo identify;
530 struct mptsas_devinfo attached;
531};
532
533struct mptsas_portinfo {
534 uint16_t num_phys;
535 struct mptsas_phyinfo *phy_info;
536};
537
538struct mpt_softc {
539 device_t dev;
540 struct lock mpt_lock;
541 int mpt_locksetup;
542 uint32_t mpt_pers_mask;
543 uint32_t
544 : 7,
545 unit : 8,
546 ready : 1,
547 fw_uploaded : 1,
548 msi_enable : 1,
549 twildcard : 1,
550 tenabled : 1,
551 do_cfg_role : 1,
552 raid_enabled : 1,
553 raid_mwce_set : 1,
554 getreqwaiter : 1,
555 shutdwn_raid : 1,
556 shutdwn_recovery: 1,
557 outofbeer : 1,
558 disabled : 1,
559 is_spi : 1,
560 is_sas : 1,
561 is_fc : 1,
562 is_1078 : 1;
563
564 u_int cfg_role;
565 u_int role; /* role: none, ini, target, both */
566
567 u_int verbose;
568#ifdef MPT_TEST_MULTIPATH
569 int failure_id;
570#endif
571
572 /*
573 * IOC Facts
574 */
575 MSG_IOC_FACTS_REPLY ioc_facts;
576
577 /*
578 * Port Facts
579 */
580 MSG_PORT_FACTS_REPLY * port_facts;
581#define mpt_max_tgtcmds port_facts[0].MaxPostedCmdBuffers
582
583 /*
584 * Device Configuration Information
585 */
586 union {
587 struct mpt_spi_cfg {
588 CONFIG_PAGE_SCSI_PORT_0 _port_page0;
589 CONFIG_PAGE_SCSI_PORT_1 _port_page1;
590 CONFIG_PAGE_SCSI_PORT_2 _port_page2;
591 CONFIG_PAGE_SCSI_DEVICE_0 _dev_page0[16];
592 CONFIG_PAGE_SCSI_DEVICE_1 _dev_page1[16];
593 int _ini_id;
594 uint16_t _tag_enable;
595 uint16_t _disc_enable;
596 } spi;
597#define mpt_port_page0 cfg.spi._port_page0
598#define mpt_port_page1 cfg.spi._port_page1
599#define mpt_port_page2 cfg.spi._port_page2
600#define mpt_dev_page0 cfg.spi._dev_page0
601#define mpt_dev_page1 cfg.spi._dev_page1
602#define mpt_ini_id cfg.spi._ini_id
603#define mpt_tag_enable cfg.spi._tag_enable
604#define mpt_disc_enable cfg.spi._disc_enable
605 struct mpi_fc_cfg {
606 CONFIG_PAGE_FC_PORT_0 _port_page0;
607 uint32_t _port_speed;
608#define mpt_fcport_page0 cfg.fc._port_page0
609#define mpt_fcport_speed cfg.fc._port_speed
610 } fc;
611 } cfg;
612 /*
613 * Device config information stored up for sysctl to access
614 */
615 union {
616 struct {
617 unsigned int initiator_id;
618 } spi;
619 struct {
620 char wwnn[19];
621 char wwpn[19];
622 } fc;
623 } scinfo;
624
625 /* Controller IOC pages carrying RAID information */
626 CONFIG_PAGE_IOC_2 * ioc_page2;
627 CONFIG_PAGE_IOC_3 * ioc_page3;
628
629 /* Raid Data */
630 struct mpt_raid_volume* raid_volumes;
631 struct mpt_raid_disk* raid_disks;
632 u_int raid_max_volumes;
633 u_int raid_max_disks;
634 u_int raid_page0_len;
635 u_int raid_wakeup;
636 u_int raid_rescan;
637 u_int raid_resync_rate;
638 u_int raid_mwce_setting;
639 u_int raid_queue_depth;
640 u_int raid_nonopt_volumes;
641 thread_t raid_thread;
642 struct callout raid_timer;
643
644 /*
645 * PCI Hardware info
646 */
647#ifdef OLD_MSI
648 int pci_msi_count;
649#endif
650 int irq_type; /* Interrupt type */
651 struct resource * pci_irq; /* Interrupt map for chip */
652 void * ih; /* Interrupt handle */
653#if 0
654 struct mpt_pci_cfg pci_cfg; /* saved PCI conf registers */
655#endif
656
657 /*
658 * DMA Mapping Stuff
659 */
660 struct resource * pci_reg; /* Register map for chip */
661 bus_space_tag_t pci_st; /* Bus tag for registers */
662 bus_space_handle_t pci_sh; /* Bus handle for registers */
663 /* PIO versions of above. */
664 struct resource * pci_pio_reg;
665 bus_space_tag_t pci_pio_st;
666 bus_space_handle_t pci_pio_sh;
667
668 bus_dma_tag_t parent_dmat; /* DMA tag for parent PCI bus */
669 bus_dma_tag_t reply_dmat; /* DMA tag for reply memory */
670 bus_dmamap_t reply_dmap; /* DMA map for reply memory */
671 uint8_t *reply; /* KVA of reply memory */
672 bus_addr_t reply_phys; /* BusAddr of reply memory */
673
674 bus_dma_tag_t buffer_dmat; /* DMA tag for buffers */
675 bus_dma_tag_t request_dmat; /* DMA tag for request memory */
676 bus_dmamap_t request_dmap; /* DMA map for request memory */
677 uint8_t *request; /* KVA of Request memory */
678 bus_addr_t request_phys; /* BusAddr of request memory */
679
680 uint32_t max_seg_cnt; /* calculated after IOC facts */
681 uint32_t max_cam_seg_cnt; /* calculated from MAXPHYS */
682
683 /*
684 * Hardware management
685 */
686 u_int reset_cnt;
687
688 /*
689 * CAM && Software Management
690 */
691 request_t *request_pool;
692 struct req_queue request_free_list;
693 struct req_queue request_pending_list;
694 struct req_queue request_timeout_list;
695
696
697 struct cam_sim *sim;
698 struct cam_path *path;
699
700 struct cam_sim *phydisk_sim;
701 struct cam_path *phydisk_path;
702
703 thread_t recovery_thread;
704 request_t *tmf_req;
705
706 /*
707 * Deferred frame acks due to resource shortage.
708 */
709 struct mpt_evtf_list ack_frames;
710
711 /*
712 * Target Mode Support
713 */
714 uint32_t scsi_tgt_handler_id;
715 request_t ** tgt_cmd_ptrs;
716 request_t ** els_cmd_ptrs; /* FC only */
717
718 /*
719 * *snork*- this is chosen to be here *just in case* somebody
720 * forgets to point to it exactly and we index off of trt with
721 * CAM_LUN_WILDCARD.
722 */
723 tgt_resource_t trt_wildcard; /* wildcard luns */
724 tgt_resource_t trt[MPT_MAX_LUNS];
725 uint16_t tgt_cmds_allocated;
726 uint16_t els_cmds_allocated; /* FC only */
727
728 uint16_t timeouts; /* timeout count */
729 uint16_t success; /* successes after timeout */
730 uint16_t sequence; /* Sequence Number */
731 uint16_t pad3;
732
733
734 /* Paired port in some dual-adapter configurations */
735 struct mpt_softc * mpt2;
736
737 /* FW Image management */
738 uint32_t fw_image_size;
739 uint8_t *fw_image;
740 bus_dma_tag_t fw_dmat; /* DMA tag for firmware image */
741 bus_dmamap_t fw_dmap; /* DMA map for firmware image */
742 bus_addr_t fw_phys; /* BusAddr of firmware image */
743
744 /* SAS Topology */
745 struct mptsas_portinfo *sas_portinfo;
746
747 /* Shutdown Event Handler. */
748 eventhandler_tag eh;
749
750 /* Userland management interface. */
751 struct cdev *cdev;
752
753 struct sysctl_ctx_list mpt_sysctl_ctx;
754 struct sysctl_oid *mpt_sysctl_tree;
755
756 TAILQ_ENTRY(mpt_softc) links;
757};
758
759static __inline void mpt_assign_serno(struct mpt_softc *, request_t *);
760
761static __inline void
762mpt_assign_serno(struct mpt_softc *mpt, request_t *req)
763{
764 if ((req->serno = mpt->sequence++) == 0) {
765 req->serno = mpt->sequence++;
766 }
767}
768
769/***************************** Locking Primitives *****************************/
770#define MPT_IFLAGS INTR_MPSAFE
771#define MPT_LOCK_SETUP(mpt) \
772 lockinit(&mpt->mpt_lock, "mpt", 0, LK_CANRECURSE); \
773 mpt->mpt_locksetup = 1
774#define MPT_LOCK_DESTROY(mpt) \
775 if (mpt->mpt_locksetup) { \
776 lockuninit(&mpt->mpt_lock); \
777 mpt->mpt_locksetup = 0; \
778 }
779
780#define MPT_LOCK(mpt) lockmgr(&(mpt)->mpt_lock, LK_EXCLUSIVE)
781#define MPT_UNLOCK(mpt) lockmgr(&(mpt)->mpt_lock, LK_RELEASE)
782#define MPT_OWNED(mpt) lockstatus(&(mpt)->mpt_lock, curthread)
783#define MPT_LOCK_ASSERT(mpt) KKASSERT(lockstatus(&(mpt)->mpt_lock, curthread) != 0)
784#define MPTLOCK_2_CAMLOCK(mpt)
785#define CAMLOCK_2_MPTLOCK(mpt)
786#define mpt_sleep(mpt, ident, priority, wmesg, timo) \
787 lksleep(ident, &(mpt)->mpt_lock, priority, wmesg, timo)
788#define mpt_req_timeout(req, ticks, func, arg) \
789 callout_reset(&(req)->callout, (ticks), (func), (arg))
790#define mpt_req_untimeout(req, func, arg) \
791 callout_stop(&(req)->callout)
792#define mpt_callout_init(mpt, c) \
793 callout_init(c)
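
/*
 * Locking sketch (illustrative): code that touches the softc brackets the
 * work with the per-controller lock and may sleep on it via mpt_sleep();
 * helpers that assume the lock is held can verify that with
 * MPT_LOCK_ASSERT():
 *
 *	MPT_LOCK(mpt);
 *	req = mpt_get_request(mpt, TRUE);
 *	... build the request, send it, wait with mpt_sleep() ...
 *	MPT_UNLOCK(mpt);
 */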
794
795/******************************* Register Access ******************************/
796static __inline void mpt_write(struct mpt_softc *, size_t, uint32_t);
797static __inline uint32_t mpt_read(struct mpt_softc *, int);
798static __inline void mpt_pio_write(struct mpt_softc *, size_t, uint32_t);
799static __inline uint32_t mpt_pio_read(struct mpt_softc *, int);
800
801static __inline void
802mpt_write(struct mpt_softc *mpt, size_t offset, uint32_t val)
803{
804 bus_space_write_4(mpt->pci_st, mpt->pci_sh, offset, val);
805}
806
807static __inline uint32_t
808mpt_read(struct mpt_softc *mpt, int offset)
809{
810 return (bus_space_read_4(mpt->pci_st, mpt->pci_sh, offset));
811}
812
813/*
814 * Some operations (e.g., diagnostic register writes while the ARM processor
815 * is disabled) must be performed using "PCI pio" operations. On non-PCI
816 * busses, these operations likely map to normal register accesses.
817 */
818static __inline void
819mpt_pio_write(struct mpt_softc *mpt, size_t offset, uint32_t val)
820{
821 KASSERT(mpt->pci_pio_reg != NULL, ("no PIO resource"));
822 bus_space_write_4(mpt->pci_pio_st, mpt->pci_pio_sh, offset, val);
823}
824
825static __inline uint32_t
826mpt_pio_read(struct mpt_softc *mpt, int offset)
827{
828 KASSERT(mpt->pci_pio_reg != NULL, ("no PIO resource"));
829 return (bus_space_read_4(mpt->pci_pio_st, mpt->pci_pio_sh, offset));
830}
831/*********************** Reply Frame/Request Management ***********************/
832/* Max MPT Reply we are willing to accept (must be power of 2) */
833#define MPT_REPLY_SIZE 256
834
835/*
836 * Must be less than 16384 in order for target mode to work
837 */
838#define MPT_MAX_REQUESTS(mpt) 512
839#define MPT_REQUEST_AREA 512
840#define MPT_SENSE_SIZE 32 /* included in MPT_REQUEST_AREA */
841#define MPT_REQ_MEM_SIZE(mpt) (MPT_MAX_REQUESTS(mpt) * MPT_REQUEST_AREA)
842
843#define MPT_CONTEXT_CB_SHIFT (16)
844#define MPT_CBI(handle) (handle >> MPT_CONTEXT_CB_SHIFT)
845#define MPT_CBI_TO_HID(cbi) ((cbi) << MPT_CONTEXT_CB_SHIFT)
846#define MPT_CONTEXT_TO_CBI(x) \
847 (((x) >> MPT_CONTEXT_CB_SHIFT) & (MPT_NUM_REPLY_HANDLERS - 1))
848#define MPT_CONTEXT_REQI_MASK 0xFFFF
849#define MPT_CONTEXT_TO_REQI(x) ((x) & MPT_CONTEXT_REQI_MASK)
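
/*
 * MsgContext layout sketch (follows from the macros above): the upper bits
 * select the reply handler ("callback index"), the low 16 bits the request
 * index.  For example, handler index 3 and request index 0x002a combine
 * and split as:
 *
 *	context = MPT_CBI_TO_HID(3) | 0x002a;	  (0x0003002a)
 *	cbi     = MPT_CONTEXT_TO_CBI(context);	  (3)
 *	req_idx = MPT_CONTEXT_TO_REQI(context);	  (0x002a)
 */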
850
851/*
852 * Convert a 32bit physical address returned from IOC to an
853 * offset into our reply frame memory or the kvm address needed
854 * to access the data. The returned address is only the low
855 * 32 bits, so mask our base physical address accordingly.
856 */
857#define MPT_REPLY_BADDR(x) \
858 (x << 1)
859#define MPT_REPLY_OTOV(m, i) \
860 ((void *)(&m->reply[i]))
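
/*
 * Worked example (illustrative, mirroring how the interrupt path is
 * expected to use these macros): when a reply descriptor carries an
 * address, the frame is located by undoing the shift and subtracting the
 * low 32 bits of the reply area's bus address:
 *
 *	offset = MPT_REPLY_BADDR(reply_desc) -
 *	    (mpt->reply_phys & 0xffffffff);
 *	frame  = MPT_REPLY_OTOV(mpt, offset);
 */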
861
862#define MPT_DUMP_REPLY_FRAME(mpt, reply_frame) \
863do { \
864 if (mpt->verbose > MPT_PRT_DEBUG) \
865 mpt_dump_reply_frame(mpt, reply_frame); \
866} while(0)
867
868static __inline uint32_t mpt_pop_reply_queue(struct mpt_softc *mpt);
869static __inline void mpt_free_reply(struct mpt_softc *mpt, uint32_t ptr);
870
871/*
872 * Give the reply buffer back to the IOC after we have
873 * finished processing it.
874 */
875static __inline void
876mpt_free_reply(struct mpt_softc *mpt, uint32_t ptr)
877{
878 mpt_write(mpt, MPT_OFFSET_REPLY_Q, ptr);
879}
880
881/* Get a reply from the IOC */
882static __inline uint32_t
883mpt_pop_reply_queue(struct mpt_softc *mpt)
884{
885 return mpt_read(mpt, MPT_OFFSET_REPLY_Q);
886}
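
/*
 * Poll-loop sketch (illustrative; assumes the MPT_REPLY_EMPTY marker from
 * mpt_reg.h): an interrupt or poll routine drains the reply FIFO until the
 * IOC reports it empty, handing each frame back once it has been handled:
 *
 *	uint32_t reply_desc;
 *
 *	while ((reply_desc = mpt_pop_reply_queue(mpt)) != MPT_REPLY_EMPTY) {
 *		... look up the handler and process the frame ...
 *		mpt_free_reply(mpt, MPT_REPLY_BADDR(reply_desc));
 *	}
 */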
887
888void
889mpt_complete_request_chain(struct mpt_softc *, struct req_queue *, u_int);
890
891/************************** Scatter Gather Management **************************/
892/* MPT_RQSL- size of request frame, in bytes */
893#define MPT_RQSL(mpt) (mpt->ioc_facts.RequestFrameSize << 2)
894
895/* MPT_NSGL- how many SG entries can fit in a request frame size */
896#define MPT_NSGL(mpt) (MPT_RQSL(mpt) / sizeof (SGE_IO_UNION))
897
898/* MPT_NRFM- how many request frames can fit in each request alloc we make */
899#define MPT_NRFM(mpt) (MPT_REQUEST_AREA / MPT_RQSL(mpt))
900
901/*
902 * MPT_NSGL_FIRST- # of SG elements that can fit after
903 * an I/O request but still within the request frame.
904 * Do this safely based upon SGE_IO_UNION.
905 *
906 * Note that the first element is *within* the SCSI request.
907 */
908#define MPT_NSGL_FIRST(mpt) \
909 ((MPT_RQSL(mpt) - sizeof (MSG_SCSI_IO_REQUEST) + sizeof (SGE_IO_UNION)) / \
910 sizeof (SGE_IO_UNION))
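
/*
 * Worked numbers (illustrative; assumes an IOC-reported RequestFrameSize
 * of 32 32-bit words and a 12-byte SGE_IO_UNION): MPT_RQSL() is then 128
 * bytes, MPT_NRFM() is 512 / 128 = 4 request frames per allocation, and
 * MPT_NSGL() is 128 / 12 = 10 SG elements per frame.  The real values are
 * always derived from the IOC facts gathered at attach time.
 */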
911
912/***************************** IOC Initialization *****************************/
913int mpt_reset(struct mpt_softc *, int /*reinit*/);
914
915/****************************** Debugging ************************************/
916void mpt_dump_data(struct mpt_softc *, const char *, void *, int);
917void mpt_dump_request(struct mpt_softc *, request_t *);
918
919enum {
920 MPT_PRT_ALWAYS,
921 MPT_PRT_FATAL,
922 MPT_PRT_ERROR,
923 MPT_PRT_WARN,
924 MPT_PRT_INFO,
925 MPT_PRT_NEGOTIATION,
926 MPT_PRT_DEBUG,
927 MPT_PRT_DEBUG1,
928 MPT_PRT_DEBUG2,
929 MPT_PRT_DEBUG3,
930 MPT_PRT_TRACE,
931 MPT_PRT_NONE=100
932};
933
934#define mpt_lprt(mpt, level, ...) \
935do { \
936 if (level <= (mpt)->verbose) \
937 mpt_prt(mpt, __VA_ARGS__); \
938} while (0)
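
/*
 * Usage sketch (illustrative): debug output is gated on the per-instance
 * verbosity, e.g.
 *
 *	mpt_lprt(mpt, MPT_PRT_DEBUG, "IOCStatus 0x%x\n", req->IOCStatus);
 *
 * only calls mpt_prt() when mpt->verbose is at least MPT_PRT_DEBUG.
 */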
939
940#if 0
941#define mpt_lprtc(mpt, level, ...) \
942do { \
943 if (level <= (mpt)->verbose) \
944 mpt_prtc(mpt, __VA_ARGS__); \
945} while (0)
946#endif
947void mpt_prt(struct mpt_softc *, const char *, ...)
948 __printflike(2, 3);
949void mpt_prtc(struct mpt_softc *, const char *, ...)
950 __printflike(2, 3);
951
952/**************************** Target Mode Related ***************************/
953static __inline int mpt_cdblen(uint8_t, int);
954static __inline int
955mpt_cdblen(uint8_t cdb0, int maxlen)
956{
957 int group = cdb0 >> 5;
958 switch (group) {
959 case 0:
960 return (6);
961 case 1:
962 return (10);
963 case 4:
964 case 5:
965 return (12);
966 default:
967 return (16);
968 }
969}
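
/*
 * Example (illustrative): opcode 0x2a (WRITE(10)) is in CDB group 1, so
 * mpt_cdblen(0x2a, 16) returns 10; a group-0 opcode such as 0x12 (INQUIRY)
 * returns 6.
 */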
970#ifdef INVARIANTS
971static __inline request_t * mpt_tag_2_req(struct mpt_softc *, uint32_t);
972static __inline request_t *
973mpt_tag_2_req(struct mpt_softc *mpt, uint32_t tag)
974{
975 uint16_t rtg = (tag >> 18);
976 KASSERT(rtg < mpt->tgt_cmds_allocated, ("bad tag %d", tag));
977 KASSERT(mpt->tgt_cmd_ptrs, ("no cmd backpointer array"));
978 KASSERT(mpt->tgt_cmd_ptrs[rtg], ("no cmd backpointer"));
979 return (mpt->tgt_cmd_ptrs[rtg]);
980}
981#endif
982
983static __inline int
984mpt_req_on_free_list(struct mpt_softc *, request_t *);
985static __inline int
986mpt_req_on_pending_list(struct mpt_softc *, request_t *);
987
988/*
989 * Is request on freelist?
990 */
991static __inline int
992mpt_req_on_free_list(struct mpt_softc *mpt, request_t *req)
993{
994 request_t *lrq;
995
996 TAILQ_FOREACH(lrq, &mpt->request_free_list, links) {
997 if (lrq == req) {
998 return (1);
999 }
1000 }
1001 return (0);
1002}
1003
1004/*
1005 * Is request on pending list?
1006 */
1007static __inline int
1008mpt_req_on_pending_list(struct mpt_softc *mpt, request_t *req)
1009{
1010 request_t *lrq;
1011
1012 TAILQ_FOREACH(lrq, &mpt->request_pending_list, links) {
1013 if (lrq == req) {
1014 return (1);
1015 }
1016 }
1017 return (0);
1018}
1019
1020#ifdef INVARIANTS
1021static __inline void
1022mpt_req_spcl(struct mpt_softc *, request_t *, const char *, int);
1023static __inline void
1024mpt_req_not_spcl(struct mpt_softc *, request_t *, const char *, int);
1025
1026/*
1027 * Make sure that req *is* part of one of the special lists
1028 */
1029static __inline void
1030mpt_req_spcl(struct mpt_softc *mpt, request_t *req, const char *s, int line)
1031{
1032 int i;
1033 for (i = 0; i < mpt->els_cmds_allocated; i++) {
1034 if (req == mpt->els_cmd_ptrs[i]) {
1035 return;
1036 }
1037 }
1038 for (i = 0; i < mpt->tgt_cmds_allocated; i++) {
1039 if (req == mpt->tgt_cmd_ptrs[i]) {
1040 return;
1041 }
1042 }
1043 panic("%s(%d): req %p:%u function %x not in els or tgt ptrs",
1044 s, line, req, req->serno,
1045 ((PTR_MSG_REQUEST_HEADER)req->req_vbuf)->Function);
1046}
1047
1048/*
1049 * Make sure that req is *not* part of one of the special lists.
1050 */
1051static __inline void
1052mpt_req_not_spcl(struct mpt_softc *mpt, request_t *req, const char *s, int line)
1053{
1054 int i;
1055 for (i = 0; i < mpt->els_cmds_allocated; i++) {
1056 KASSERT(req != mpt->els_cmd_ptrs[i],
1057 ("%s(%d): req %p:%u func %x in els ptrs at ioindex %d",
1058 s, line, req, req->serno,
1059 ((PTR_MSG_REQUEST_HEADER)req->req_vbuf)->Function, i));
1060 }
1061 for (i = 0; i < mpt->tgt_cmds_allocated; i++) {
1062 KASSERT(req != mpt->tgt_cmd_ptrs[i],
1063 ("%s(%d): req %p:%u func %x in tgt ptrs at ioindex %d",
1064 s, line, req, req->serno,
1065 ((PTR_MSG_REQUEST_HEADER)req->req_vbuf)->Function, i));
1066 }
1067}
1068#endif
1069
1070/*
1071 * Task Management Types, purely for internal consumption
1072 */
1073typedef enum {
1074 MPT_ABORT_TASK_SET=1234,
1075 MPT_CLEAR_TASK_SET,
1076 MPT_TARGET_RESET,
1077 MPT_CLEAR_ACA,
1078 MPT_TERMINATE_TASK,
1079 MPT_NIL_TMT_VALUE=5678
1080} mpt_task_mgmt_t;
1081
1082/**************************** Unclassified Routines ***************************/
1083void mpt_send_cmd(struct mpt_softc *mpt, request_t *req);
1084int mpt_recv_handshake_reply(struct mpt_softc *mpt,
1085 size_t reply_len, void *reply);
1086int mpt_wait_req(struct mpt_softc *mpt, request_t *req,
1087 mpt_req_state_t state, mpt_req_state_t mask,
1088 int sleep_ok, int time_ms);
1089void mpt_enable_ints(struct mpt_softc *mpt);
1090void mpt_disable_ints(struct mpt_softc *mpt);
1091int mpt_attach(struct mpt_softc *mpt);
1092int mpt_shutdown(struct mpt_softc *mpt);
1093int mpt_detach(struct mpt_softc *mpt);
1094int mpt_send_handshake_cmd(struct mpt_softc *mpt,
1095 size_t len, void *cmd);
1096request_t * mpt_get_request(struct mpt_softc *mpt, int sleep_ok);
1097void mpt_free_request(struct mpt_softc *mpt, request_t *req);
1098void mpt_intr(void *arg);
1099void mpt_check_doorbell(struct mpt_softc *mpt);
1100void mpt_dump_reply_frame(struct mpt_softc *mpt,
1101 MSG_DEFAULT_REPLY *reply_frame);
1102
1103int mpt_issue_cfg_req(struct mpt_softc */*mpt*/, request_t */*req*/,
1104 cfgparms_t *params,
1105 bus_addr_t /*addr*/, bus_size_t/*len*/,
1106 int /*sleep_ok*/, int /*timeout_ms*/);
1107int mpt_read_extcfg_header(struct mpt_softc *mpt, int PageVersion,
1108 int PageNumber, uint32_t PageAddress,
1109 int ExtPageType,
1110 CONFIG_EXTENDED_PAGE_HEADER *rslt,
1111 int sleep_ok, int timeout_ms);
1112int mpt_read_extcfg_page(struct mpt_softc *mpt, int Action,
1113 uint32_t PageAddress,
1114 CONFIG_EXTENDED_PAGE_HEADER *hdr,
1115 void *buf, size_t len, int sleep_ok,
1116 int timeout_ms);
1117int mpt_read_cfg_header(struct mpt_softc *, int /*PageType*/,
1118 int /*PageNumber*/,
1119 uint32_t /*PageAddress*/,
1120 CONFIG_PAGE_HEADER *,
1121 int /*sleep_ok*/, int /*timeout_ms*/);
1122int mpt_read_cfg_page(struct mpt_softc *t, int /*Action*/,
1123 uint32_t /*PageAddress*/,
1124 CONFIG_PAGE_HEADER *, size_t /*len*/,
1125 int /*sleep_ok*/, int /*timeout_ms*/);
1126int mpt_write_cfg_page(struct mpt_softc *, int /*Action*/,
1127 uint32_t /*PageAddress*/,
1128 CONFIG_PAGE_HEADER *, size_t /*len*/,
1129 int /*sleep_ok*/, int /*timeout_ms*/);
1130static __inline int
1131mpt_read_cur_cfg_page(struct mpt_softc *mpt, uint32_t PageAddress,
1132 CONFIG_PAGE_HEADER *hdr, size_t len,
1133 int sleep_ok, int timeout_ms)
1134{
1135 return (mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
1136 PageAddress, hdr, len, sleep_ok, timeout_ms));
1137}
1138
1139static __inline int
1140mpt_write_cur_cfg_page(struct mpt_softc *mpt, uint32_t PageAddress,
1141 CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok,
1142 int timeout_ms)
1143{
1144 return (mpt_write_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT,
1145 PageAddress, hdr, len, sleep_ok,
1146 timeout_ms));
1147}
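
/*
 * Typical flow sketch (illustrative; the page type, page address and
 * timeouts are examples only): a configuration page is fetched by first
 * reading its header and then pulling the current values into a buffer
 * that starts with that header, e.g. for SCSI port page 0:
 *
 *	CONFIG_PAGE_HEADER hdr;
 *
 *	if (mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 0, 0,
 *	    &hdr, FALSE, 5000) == 0) {
 *		mpt->mpt_port_page0.Header = hdr;
 *		mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page0.Header,
 *		    sizeof(mpt->mpt_port_page0), FALSE, 5000);
 *	}
 */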
1148/* mpt_debug.c functions */
1149void mpt_print_reply(void *vmsg);
1150void mpt_print_db(uint32_t mb);
1151void mpt_print_config_reply(void *vmsg);
1152char *mpt_ioc_diag(uint32_t diag);
1153void mpt_req_state(mpt_req_state_t state);
1154void mpt_print_config_request(void *vmsg);
1155void mpt_print_request(void *vmsg);
1156void mpt_dump_sgl(SGE_IO_UNION *se, int offset);
1157#endif /* _MPT_H_ */