Fix a couple of NULL dereferences in error paths.
[dragonfly.git] / sys / dev / raid / mrsas / mrsas.c
... / ...
CommitLineData
1/*
2 * Copyright (c) 2014, LSI Corp.
3 * All rights reserved.
4 * Author: Marian Choy
5 * Support: freebsdraid@lsi.com
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * 3. Neither the name of the <ORGANIZATION> nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
29 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
31 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 * POSSIBILITY OF SUCH DAMAGE.
33 *
34 * The views and conclusions contained in the software and documentation
35 * are those of the authors and should not be interpreted as representing
36 * official policies,either expressed or implied, of the FreeBSD Project.
37 *
38 * Send feedback to: <megaraidfbsd@lsi.com>
39 * Mail to: LSI Corporation, 1621 Barber Lane, Milpitas, CA 95035
40 * ATTN: MegaRaid FreeBSD
41 *
42 * $FreeBSD: head/sys/dev/mrsas/mrsas.c 265555 2014-05-07 16:16:49Z ambrisko $
43 */
44
45#include <dev/raid/mrsas/mrsas.h>
46#include <dev/raid/mrsas/mrsas_ioctl.h>
47
48#include <bus/cam/cam.h>
49#include <bus/cam/cam_ccb.h>
50
51#include <sys/sysctl.h>
52#include <sys/types.h>
53#include <sys/kthread.h>
54#include <sys/taskqueue.h>
55#include <sys/device.h>
56#include <sys/spinlock2.h>
57
58
59/*
60 * Function prototypes
61 */
62static d_open_t mrsas_open;
63static d_close_t mrsas_close;
64static d_read_t mrsas_read;
65static d_write_t mrsas_write;
66static d_ioctl_t mrsas_ioctl;
67
68static struct mrsas_ident *mrsas_find_ident(device_t);
69static void mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode);
70static void mrsas_flush_cache(struct mrsas_softc *sc);
71static void mrsas_reset_reply_desc(struct mrsas_softc *sc);
72static void mrsas_ocr_thread(void *arg);
73static int mrsas_get_map_info(struct mrsas_softc *sc);
74static int mrsas_get_ld_map_info(struct mrsas_softc *sc);
75static int mrsas_sync_map_info(struct mrsas_softc *sc);
76static int mrsas_get_pd_list(struct mrsas_softc *sc);
77static int mrsas_get_ld_list(struct mrsas_softc *sc);
78static int mrsas_setup_irq(struct mrsas_softc *sc);
79static int mrsas_alloc_mem(struct mrsas_softc *sc);
80static int mrsas_init_fw(struct mrsas_softc *sc);
81static int mrsas_setup_raidmap(struct mrsas_softc *sc);
82static int mrsas_complete_cmd(struct mrsas_softc *sc);
83static int mrsas_clear_intr(struct mrsas_softc *sc);
84static int mrsas_get_ctrl_info(struct mrsas_softc *sc,
85 struct mrsas_ctrl_info *ctrl_info);
86static int mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
87 struct mrsas_mfi_cmd *cmd_to_abort);
88u_int32_t mrsas_read_reg(struct mrsas_softc *sc, int offset);
89u_int8_t mrsas_build_mptmfi_passthru(struct mrsas_softc *sc,
90 struct mrsas_mfi_cmd *mfi_cmd);
91int mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr);
92int mrsas_init_adapter(struct mrsas_softc *sc);
93int mrsas_alloc_mpt_cmds(struct mrsas_softc *sc);
94int mrsas_alloc_ioc_cmd(struct mrsas_softc *sc);
95int mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc);
96int mrsas_ioc_init(struct mrsas_softc *sc);
97int mrsas_bus_scan(struct mrsas_softc *sc);
98int mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
99int mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
100int mrsas_reset_ctrl(struct mrsas_softc *sc);
101int mrsas_wait_for_outstanding(struct mrsas_softc *sc);
102int mrsas_issue_blocked_cmd(struct mrsas_softc *sc,
103 struct mrsas_mfi_cmd *cmd);
104int mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, struct mrsas_tmp_dcmd *tcmd,
105 int size);
106void mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd);
107void mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
108void mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
109void mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
110void mrsas_disable_intr(struct mrsas_softc *sc);
111void mrsas_enable_intr(struct mrsas_softc *sc);
112void mrsas_free_ioc_cmd(struct mrsas_softc *sc);
113void mrsas_free_mem(struct mrsas_softc *sc);
114void mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp);
115void mrsas_isr(void *arg);
116void mrsas_teardown_intr(struct mrsas_softc *sc);
117void mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error);
118void mrsas_kill_hba (struct mrsas_softc *sc);
119void mrsas_aen_handler(struct mrsas_softc *sc);
120void mrsas_write_reg(struct mrsas_softc *sc, int offset,
121 u_int32_t value);
122void mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
123 u_int32_t req_desc_hi);
124void mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc);
125void mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc,
126 struct mrsas_mfi_cmd *cmd, u_int8_t status);
127void mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status,
128 u_int8_t extStatus);
129struct mrsas_mfi_cmd* mrsas_get_mfi_cmd(struct mrsas_softc *sc);
130MRSAS_REQUEST_DESCRIPTOR_UNION * mrsas_build_mpt_cmd(struct mrsas_softc *sc,
131 struct mrsas_mfi_cmd *cmd);
132
133extern int mrsas_cam_attach(struct mrsas_softc *sc);
134extern void mrsas_cam_detach(struct mrsas_softc *sc);
135extern void mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd);
136extern void mrsas_free_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
137extern int mrsas_alloc_mfi_cmds(struct mrsas_softc *sc);
138extern void mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd);
139extern struct mrsas_mpt_cmd *mrsas_get_mpt_cmd(struct mrsas_softc *sc);
140extern int mrsas_passthru(struct mrsas_softc *sc, void *arg);
141extern uint8_t MR_ValidateMapInfo(struct mrsas_softc *sc);
142extern u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_FW_RAID_MAP_ALL *map);
143extern MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_FW_RAID_MAP_ALL *map);
144extern void mrsas_xpt_freeze(struct mrsas_softc *sc);
145extern void mrsas_xpt_release(struct mrsas_softc *sc);
146extern MRSAS_REQUEST_DESCRIPTOR_UNION *mrsas_get_request_desc(struct mrsas_softc *sc,
147 u_int16_t index);
148extern int mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim);
149static int mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc);
150static void mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc);
151SYSCTL_NODE(_hw, OID_AUTO, mrsas, CTLFLAG_RD, 0, "MRSAS Driver Parameters");
152
153
154/**
155 * PCI device struct and table
156 *
157 */
/*
 * One row per PCI identity this driver claims.  Matched by
 * mrsas_find_ident(); a subvendor/subdevice of 0xffff in a row acts as
 * a wildcard there.
 */
typedef struct mrsas_ident {
	uint16_t vendor;	/* PCI vendor ID (0x1000 = LSI) */
	uint16_t device;	/* PCI device ID */
	uint16_t subvendor;	/* subsystem vendor ID, 0xffff = any */
	uint16_t subdevice;	/* subsystem device ID, 0xffff = any */
	const char *desc;	/* human-readable controller name */
} MRSAS_CTLR_ID;

MRSAS_CTLR_ID device_table[] = {
	{0x1000, MRSAS_TBOLT, 0xffff, 0xffff, "LSI Thunderbolt SAS Controller"},
	{0x1000, MRSAS_INVADER, 0xffff, 0xffff, "LSI Invader SAS Controller"},
	{0x1000, MRSAS_FURY, 0xffff, 0xffff, "LSI Fury SAS Controller"},
	{0, 0, 0, 0, NULL}	/* sentinel terminating the probe scan */
};
172
173/**
174 * Character device entry points
175 *
176 */
/*
 * Character-device switch for the /dev/mrsas%u node created in
 * mrsas_attach().  The entry points locate the softc via si_drv1.
 */
static struct dev_ops mrsas_ops = {
	{ "mrsas", 0, D_MPSAFE },
	.d_open = mrsas_open,
	.d_close = mrsas_close,
	.d_read = mrsas_read,
	.d_write = mrsas_write,
	.d_ioctl = mrsas_ioctl,
};

MALLOC_DEFINE(M_MRSAS, "mrsasbuf", "Buffers for the MRSAS driver");

/* hw.mrsas.mfi_enable: when set, cede Thunderbolt cards to mfi(4) (see mrsas_probe). */
static int mrsas_mfi_enable = 0;
TUNABLE_INT("hw.mrsas.mfi_enable", &mrsas_mfi_enable);

/* hw.mrsas.msi.enable: MSI interrupt enable, on by default. */
static int mrsas_msi_enable = 1;
TUNABLE_INT("hw.mrsas.msi.enable", &mrsas_msi_enable);
193
194/**
195 * In the cdevsw routines, we find our softc by using the si_drv1 member
196 * of struct cdev. We set this variable to point to our softc in our
197 * attach routine when we create the /dev entry.
198 */
/*
 * mrsas_open: character-device open entry point.
 *
 * No per-open state is needed; the softc is reachable through si_drv1
 * when required.  Always succeeds.  (The former set-but-unused softc
 * lookup was removed to silence compiler warnings.)
 */
int
mrsas_open(struct dev_open_args *ap)
{
	return (0);
}
208
/*
 * mrsas_close: character-device close entry point.
 *
 * Nothing to tear down per open; always succeeds.  (The former
 * set-but-unused softc lookup was removed to silence compiler warnings.)
 */
int
mrsas_close(struct dev_close_args *ap)
{
	return (0);
}
218
/*
 * mrsas_read: character-device read entry point.
 *
 * The device transfers no data through read(2); this is a stub that
 * always succeeds.  (Dead softc lookup removed.)
 */
int
mrsas_read(struct dev_read_args *ap)
{
	return (0);
}
/*
 * mrsas_write: character-device write entry point.
 *
 * The device transfers no data through write(2); this is a stub that
 * always succeeds.  (Dead softc lookup removed.)
 */
int
mrsas_write(struct dev_write_args *ap)
{
	return (0);
}
237
238/**
239 * Register Read/Write Functions
240 *
241 */
242void
243mrsas_write_reg(struct mrsas_softc *sc, int offset,
244 u_int32_t value)
245{
246 bus_space_tag_t bus_tag = sc->bus_tag;
247 bus_space_handle_t bus_handle = sc->bus_handle;
248
249 bus_space_write_4(bus_tag, bus_handle, offset, value);
250}
251
252u_int32_t
253mrsas_read_reg(struct mrsas_softc *sc, int offset)
254{
255 bus_space_tag_t bus_tag = sc->bus_tag;
256 bus_space_handle_t bus_handle = sc->bus_handle;
257
258 return((u_int32_t)bus_space_read_4(bus_tag, bus_handle, offset));
259}
260
261
262/**
263 * Interrupt Disable/Enable/Clear Functions
264 *
265 */
266void mrsas_disable_intr(struct mrsas_softc *sc)
267{
268 u_int32_t mask = 0xFFFFFFFF;
269 u_int32_t status;
270
271 mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), mask);
272 /* Dummy read to force pci flush */
273 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
274}
275
276void mrsas_enable_intr(struct mrsas_softc *sc)
277{
278 u_int32_t mask = MFI_FUSION_ENABLE_INTERRUPT_MASK;
279 u_int32_t status;
280
281 mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), ~0);
282 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
283
284 mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), ~mask);
285 status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
286}
287
/*
 * mrsas_clear_intr: read and classify the controller interrupt status.
 * input: Adapter instance soft state
 *
 * Returns 1 when the interrupt is ours (a firmware state-change
 * interrupt, which is acknowledged here by writing the status back, or
 * a fusion reply interrupt) and 0 when the status bits show the
 * interrupt belongs to another device on the line.  If the firmware
 * scratch pad reports the FAULT state, the OCR recovery thread is woken.
 */
static int mrsas_clear_intr(struct mrsas_softc *sc)
{
	u_int32_t status, fw_status, fw_state;

	/* Read received interrupt */
	status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));

	/* If FW state change interrupt is received, write to it again to clear */
	if (status & MRSAS_FW_STATE_CHNG_INTERRUPT) {
		fw_status = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
		    outbound_scratch_pad));
		fw_state = fw_status & MFI_STATE_MASK;
		if (fw_state == MFI_STATE_FAULT) {
			device_printf(sc->mrsas_dev, "FW is in FAULT state!\n");
			if(sc->ocr_thread_active)
				wakeup(&sc->ocr_chan);
		}
		/* Write-back acknowledges; the readback flushes the posted write. */
		mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), status);
		mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));
		return(1);
	}

	/* Not our interrupt, so just return */
	if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
		return(0);

	/* We got a reply interrupt */
	return(1);
}
317
318/**
319 * PCI Support Functions
320 *
321 */
322static struct mrsas_ident * mrsas_find_ident(device_t dev)
323{
324 struct mrsas_ident *pci_device;
325
326 for (pci_device=device_table; pci_device->vendor != 0; pci_device++)
327 {
328 if ((pci_device->vendor == pci_get_vendor(dev)) &&
329 (pci_device->device == pci_get_device(dev)) &&
330 ((pci_device->subvendor == pci_get_subvendor(dev)) ||
331 (pci_device->subvendor == 0xffff)) &&
332 ((pci_device->subdevice == pci_get_subdevice(dev)) ||
333 (pci_device->subdevice == 0xffff)))
334 return (pci_device);
335 }
336 return (NULL);
337}
338
/*
 * mrsas_probe: PCI probe entry point.
 * input: device struct pointer
 *
 * Claims the device when it appears in device_table.  If the
 * hw.mrsas.mfi_enable tunable is set, Thunderbolt controllers are left
 * to the mfi(4) driver by returning ENXIO.  A driver-version banner is
 * printed once, for the first controller probed.
 */
static int mrsas_probe(device_t dev)
{
	static u_int8_t first_ctrl = 1;	/* print the banner only once */
	struct mrsas_ident *id;

	if ((id = mrsas_find_ident(dev)) != NULL) {
		/* give priority to mfi(4) if tunable set */
		TUNABLE_INT_FETCH("hw.mrsas.mfi_enable", &mrsas_mfi_enable);
		if ((id->device == MRSAS_TBOLT) && mrsas_mfi_enable) {
			return (ENXIO);
		} else {
			if (first_ctrl) {
				kprintf("LSI MegaRAID SAS FreeBSD mrsas driver version: %s\n",
				    MRSAS_VERSION);
				first_ctrl = 0;
			}
			device_set_desc(dev, id->desc);
			return (BUS_PROBE_DEFAULT);
		}
	}
	return (ENXIO);
}
361
362/**
363 * mrsas_setup_sysctl: setup sysctl values for mrsas
364 * input: Adapter instance soft state
365 *
366 * Setup sysctl entries for mrsas driver.
367 */
static void
mrsas_setup_sysctl(struct mrsas_softc *sc)
{
	struct sysctl_ctx_list *sysctl_ctx = NULL;
	struct sysctl_oid *sysctl_tree = NULL;
	char tmpstr[80], tmpstr2[80];

	/*
	 * Setup the sysctl variable so the user can change the debug level
	 * on the fly.
	 */
	ksnprintf(tmpstr, sizeof(tmpstr), "MRSAS controller %d",
	    device_get_unit(sc->mrsas_dev));
	ksnprintf(tmpstr2, sizeof(tmpstr2), "mrsas%d", device_get_unit(sc->mrsas_dev));

	/* Prefer the bus-provided per-device sysctl context/tree. */
	sysctl_ctx = device_get_sysctl_ctx(sc->mrsas_dev);
	if (sysctl_ctx != NULL)
		sysctl_tree = device_get_sysctl_tree(sc->mrsas_dev);

	/*
	 * Fall back to a private context rooted directly under hw.mrsas%d
	 * when the bus did not supply a tree.  sc->sysctl_ctx is then
	 * freed in mrsas_detach().
	 */
	if (sysctl_tree == NULL) {
		sysctl_ctx_init(&sc->sysctl_ctx);
		sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
		    SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO, tmpstr2,
		    CTLFLAG_RD, 0, tmpstr);
		if (sc->sysctl_tree == NULL)
			return;
		sysctl_ctx = &sc->sysctl_ctx;
		sysctl_tree = sc->sysctl_tree;
	}
	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "disable_ocr", CTLFLAG_RW, &sc->disableOnlineCtrlReset, 0,
	    "Disable the use of OCR");

	SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "driver_version", CTLFLAG_RD, MRSAS_VERSION,
	    strlen(MRSAS_VERSION), "driver version");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "reset_count", CTLFLAG_RD,
	    &sc->reset_count, 0, "number of ocr from start of the day");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "fw_outstanding", CTLFLAG_RD,
	    &sc->fw_outstanding, 0, "FW outstanding commands");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "io_cmds_highwater", CTLFLAG_RD,
	    &sc->io_cmds_highwater, 0, "Max FW outstanding commands");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "mrsas_debug", CTLFLAG_RW, &sc->mrsas_debug, 0,
	    "Driver debug level");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "mrsas_io_timeout", CTLFLAG_RW, &sc->mrsas_io_timeout,
	    0, "Driver IO timeout value in mili-second.");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "mrsas_fw_fault_check_delay", CTLFLAG_RW,
	    &sc->mrsas_fw_fault_check_delay,
	    0, "FW fault check thread delay in seconds. <default is 1 sec>");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "reset_in_progress", CTLFLAG_RD,
	    &sc->reset_in_progress, 0, "ocr in progress status");

}
435
436/**
437 * mrsas_get_tunables: get tunable parameters.
438 * input: Adapter instance soft state
439 *
440 * Get tunable parameters. This will help to debug driver at boot time.
441 */
static void
mrsas_get_tunables(struct mrsas_softc *sc)
{
	char tmpstr[80];

	/* Defaults applied before any tunable overrides. */
	/* XXX default to some debugging for now */
	sc->mrsas_debug = MRSAS_FAULT;
	sc->mrsas_io_timeout = MRSAS_IO_TIMEOUT;
	sc->mrsas_fw_fault_check_delay = 1;
	sc->reset_count = 0;
	sc->reset_in_progress = 0;

	/*
	 * Grab the global variables.
	 */
	TUNABLE_INT_FETCH("hw.mrsas.debug_level", &sc->mrsas_debug);

	/* Grab the unit-instance variables (override the global setting) */
	ksnprintf(tmpstr, sizeof(tmpstr), "dev.mrsas.%d.debug_level",
	    device_get_unit(sc->mrsas_dev));
	TUNABLE_INT_FETCH(tmpstr, &sc->mrsas_debug);
}
464
465/**
466 * mrsas_alloc_evt_log_info cmd: Allocates memory to get event log information.
467 * Used to get sequence number at driver load time.
468 * input: Adapter soft state
469 *
470 * Allocates DMAable memory for the event log info internal command.
471 */
472int mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc)
473{
474 int el_info_size;
475
476 /* Allocate get event log info command */
477 el_info_size = sizeof(struct mrsas_evt_log_info);
478 if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
479 1, 0, // algnmnt, boundary
480 BUS_SPACE_MAXADDR_32BIT,// lowaddr
481 BUS_SPACE_MAXADDR, // highaddr
482 NULL, NULL, // filter, filterarg
483 el_info_size, // maxsize
484 1, // msegments
485 el_info_size, // maxsegsize
486 BUS_DMA_ALLOCNOW, // flags
487 &sc->el_info_tag)) {
488 device_printf(sc->mrsas_dev, "Cannot allocate event log info tag\n");
489 return (ENOMEM);
490 }
491 if (bus_dmamem_alloc(sc->el_info_tag, (void **)&sc->el_info_mem,
492 BUS_DMA_NOWAIT, &sc->el_info_dmamap)) {
493 device_printf(sc->mrsas_dev, "Cannot allocate event log info cmd mem\n");
494 return (ENOMEM);
495 }
496 if (bus_dmamap_load(sc->el_info_tag, sc->el_info_dmamap,
497 sc->el_info_mem, el_info_size, mrsas_addr_cb,
498 &sc->el_info_phys_addr, BUS_DMA_NOWAIT)) {
499 device_printf(sc->mrsas_dev, "Cannot load event log info cmd mem\n");
500 return (ENOMEM);
501 }
502
503 memset(sc->el_info_mem, 0, el_info_size);
504 return (0);
505}
506
507/**
508 * mrsas_free_evt_info_cmd: Free memory for Event log info command
509 * input: Adapter soft state
510 *
511 * Deallocates memory for the event log info internal command.
512 */
513void mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc)
514{
515 if (sc->el_info_phys_addr)
516 bus_dmamap_unload(sc->el_info_tag, sc->el_info_dmamap);
517 if (sc->el_info_mem != NULL)
518 bus_dmamem_free(sc->el_info_tag, sc->el_info_mem, sc->el_info_dmamap);
519 if (sc->el_info_tag != NULL)
520 bus_dma_tag_destroy(sc->el_info_tag);
521}
522
523/**
524 * mrsas_get_seq_num: Get latest event sequence number
525 * @sc: Adapter soft state
526 * @eli: Firmware event log sequence number information.
527 * Firmware maintains a log of all events in a non-volatile area.
528 * Driver get the sequence number using DCMD
529 * "MR_DCMD_CTRL_EVENT_GET_INFO" at driver load time.
530 */
531
532static int
533mrsas_get_seq_num(struct mrsas_softc *sc,
534 struct mrsas_evt_log_info *eli)
535{
536 struct mrsas_mfi_cmd *cmd;
537 struct mrsas_dcmd_frame *dcmd;
538
539 cmd = mrsas_get_mfi_cmd(sc);
540
541 if (!cmd) {
542 device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
543 return -ENOMEM;
544 }
545
546 dcmd = &cmd->frame->dcmd;
547
548 if (mrsas_alloc_evt_log_info_cmd(sc) != SUCCESS) {
549 device_printf(sc->mrsas_dev, "Cannot allocate evt log info cmd\n");
550 mrsas_release_mfi_cmd(cmd);
551 return -ENOMEM;
552 }
553
554 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
555
556 dcmd->cmd = MFI_CMD_DCMD;
557 dcmd->cmd_status = 0x0;
558 dcmd->sge_count = 1;
559 dcmd->flags = MFI_FRAME_DIR_READ;
560 dcmd->timeout = 0;
561 dcmd->pad_0 = 0;
562 dcmd->data_xfer_len = sizeof(struct mrsas_evt_log_info);
563 dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO;
564 dcmd->sgl.sge32[0].phys_addr = sc->el_info_phys_addr;
565 dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_log_info);
566
567 mrsas_issue_blocked_cmd(sc, cmd);
568
569 /*
570 * Copy the data back into callers buffer
571 */
572 memcpy(eli, sc->el_info_mem, sizeof(struct mrsas_evt_log_info));
573 mrsas_free_evt_log_info_cmd(sc);
574 mrsas_release_mfi_cmd(cmd);
575
576 return 0;
577}
578
579
580/**
581 * mrsas_register_aen: Register for asynchronous event notification
582 * @sc: Adapter soft state
583 * @seq_num: Starting sequence number
584 * @class_locale: Class of the event
585 * This function subscribes for events beyond the @seq_num
586 * and type @class_locale.
587 *
588 * */
/*
 * mrsas_register_aen: Register for asynchronous event notification
 * @sc: Adapter soft state
 * @seq_num: Starting sequence number
 * @class_locale_word: Class/locale of the event, packed as one word
 *
 * This function subscribes for events beyond @seq_num and of type
 * @class_locale_word.  Returns 0 on success (including the case where
 * an existing registration already covers the request), 1 when issuing
 * the DCMD fails, a negative errno when no MFI command is available or
 * aborting the previous AEN command fails.
 */
static int
mrsas_register_aen(struct mrsas_softc *sc, u_int32_t seq_num,
	u_int32_t class_locale_word)
{
	int ret_val;
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	union mrsas_evt_class_locale curr_aen;
	union mrsas_evt_class_locale prev_aen;

	/*
	 * If there an AEN pending already (aen_cmd), check if the
	 * class_locale of that pending AEN is inclusive of the new
	 * AEN request we currently have. If it is, then we don't have
	 * to do anything. In other words, whichever events the current
	 * AEN request is subscribing to, have already been subscribed
	 * to.
	 * If the old_cmd is _not_ inclusive, then we have to abort
	 * that command, form a class_locale that is superset of both
	 * old and current and re-issue to the FW
	 */

	curr_aen.word = class_locale_word;

	if (sc->aen_cmd) {

		prev_aen.word = sc->aen_cmd->frame->dcmd.mbox.w[1];

		/*
		 * A class whose enum value is smaller is inclusive of all
		 * higher values. If a PROGRESS (= -1) was previously
		 * registered, then a new registration requests for higher
		 * classes need not be sent to FW. They are automatically
		 * included.
		 * Locale numbers don't have such hierarchy. They are bitmap values
		 */
		if ((prev_aen.members.class <= curr_aen.members.class) &&
		    !((prev_aen.members.locale & curr_aen.members.locale) ^
		    curr_aen.members.locale)) {
			/*
			 * Previously issued event registration includes
			 * current request. Nothing to do.
			 */
			return 0;
		} else {
			/* Build the superset of old and new subscriptions. */
			curr_aen.members.locale |= prev_aen.members.locale;

			if (prev_aen.members.class < curr_aen.members.class)
				curr_aen.members.class = prev_aen.members.class;

			sc->aen_cmd->abort_aen = 1;
			ret_val = mrsas_issue_blocked_abort_cmd(sc,
			    sc->aen_cmd);

			if (ret_val) {
				kprintf("mrsas: Failed to abort "
				    "previous AEN command\n");
				return ret_val;
			}
		}
	}

	cmd = mrsas_get_mfi_cmd(sc);

	if (!cmd)
		return -ENOMEM;

	dcmd = &cmd->frame->dcmd;

	memset(sc->evt_detail_mem, 0, sizeof(struct mrsas_evt_detail));

	/*
	 * Prepare DCMD for aen registration
	 */
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0x0;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = sizeof(struct mrsas_evt_detail);
	dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT;
	dcmd->mbox.w[0] = seq_num;
	sc->last_seq_num = seq_num;
	dcmd->mbox.w[1] = curr_aen.word;
	dcmd->sgl.sge32[0].phys_addr = (u_int32_t) sc->evt_detail_phys_addr;
	dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_detail);

	/* Another registration appeared while we were building ours: drop ours. */
	if (sc->aen_cmd != NULL) {
		mrsas_release_mfi_cmd(cmd);
		return 0;
	}

	/*
	 * Store reference to the cmd used to register for AEN. When an
	 * application wants us to register for AEN, we have to abort this
	 * cmd and re-register with a new EVENT LOCALE supplied by that app
	 */
	sc->aen_cmd = cmd;

	/*
	 * Issue the aen registration frame
	 */
	if (mrsas_issue_dcmd(sc, cmd)){
		device_printf(sc->mrsas_dev, "Cannot issue AEN DCMD command.\n");
		return(1);
	}

	return 0;
}
701/**
702 * mrsas_start_aen - Subscribes to AEN during driver load time
703 * @instance: Adapter soft state
704 */
705static int mrsas_start_aen(struct mrsas_softc *sc)
706{
707 struct mrsas_evt_log_info eli;
708 union mrsas_evt_class_locale class_locale;
709
710
711 /* Get the latest sequence number from FW*/
712
713 memset(&eli, 0, sizeof(eli));
714
715 if (mrsas_get_seq_num(sc, &eli))
716 return -1;
717
718 /* Register AEN with FW for latest sequence number plus 1*/
719 class_locale.members.reserved = 0;
720 class_locale.members.locale = MR_EVT_LOCALE_ALL;
721 class_locale.members.class = MR_EVT_CLASS_DEBUG;
722
723 return mrsas_register_aen(sc, eli.newest_seq_num + 1,
724 class_locale.word);
725}
726
727/**
728 * mrsas_attach: PCI entry point
729 * input: device struct pointer
730 *
731 * Performs setup of PCI and registers, initializes mutexes and
732 * linked lists, registers interrupts and CAM, and initializes
733 * the adapter/controller to its proper state.
734 */
735static int mrsas_attach(device_t dev)
736{
737 struct mrsas_softc *sc = device_get_softc(dev);
738 uint32_t cmd, bar, error;
739
740 /* Look up our softc and initialize its fields. */
741 sc->mrsas_dev = dev;
742 sc->device_id = pci_get_device(dev);
743
744 mrsas_get_tunables(sc);
745
746 /*
747 * Set up PCI and registers
748 */
749 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
750 if ( (cmd & PCIM_CMD_PORTEN) == 0) {
751 return (ENXIO);
752 }
753 /* Force the busmaster enable bit on. */
754 cmd |= PCIM_CMD_BUSMASTEREN;
755 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
756
757 //bar = pci_read_config(dev, MRSAS_PCI_BAR0, 4);
758 bar = pci_read_config(dev, MRSAS_PCI_BAR1, 4);
759
760 sc->reg_res_id = MRSAS_PCI_BAR1; /* BAR1 offset */
761 if ((sc->reg_res = bus_alloc_resource(dev, SYS_RES_MEMORY,
762 &(sc->reg_res_id), 0, ~0, 1, RF_ACTIVE))
763 == NULL) {
764 device_printf(dev, "Cannot allocate PCI registers\n");
765 goto attach_fail;
766 }
767 sc->bus_tag = rman_get_bustag(sc->reg_res);
768 sc->bus_handle = rman_get_bushandle(sc->reg_res);
769
770 /* Intialize mutexes */
771 lockinit(&sc->sim_lock, "mrsas_sim_lock", 0, LK_CANRECURSE);
772 lockinit(&sc->pci_lock, "mrsas_pci_lock", 0, LK_CANRECURSE);
773 lockinit(&sc->io_lock, "mrsas_io_lock", 0, LK_CANRECURSE);
774 lockinit(&sc->aen_lock, "mrsas_aen_lock", 0, LK_CANRECURSE);
775 spin_init(&sc->ioctl_lock, "mrsasioctl");
776 lockinit(&sc->mpt_cmd_pool_lock, "mrsas_mpt_cmd_pool_lock", 0,
777 LK_CANRECURSE);
778 lockinit(&sc->mfi_cmd_pool_lock, "mrsas_mfi_cmd_pool_lock", 0,
779 LK_CANRECURSE);
780 lockinit(&sc->raidmap_lock, "mrsas_raidmap_lock", 0, LK_CANRECURSE);
781
782 /* Intialize linked list */
783 TAILQ_INIT(&sc->mrsas_mpt_cmd_list_head);
784 TAILQ_INIT(&sc->mrsas_mfi_cmd_list_head);
785
786 atomic_set(&sc->fw_outstanding,0);
787
788 sc->io_cmds_highwater = 0;
789
790 /* Create a /dev entry for this device. */
791 sc->mrsas_cdev = make_dev(&mrsas_ops, device_get_unit(dev), UID_ROOT,
792 GID_OPERATOR, (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP), "mrsas%u",
793 device_get_unit(dev));
794 if (sc->mrsas_cdev)
795 sc->mrsas_cdev->si_drv1 = sc;
796
797 sc->adprecovery = MRSAS_HBA_OPERATIONAL;
798 sc->UnevenSpanSupport = 0;
799
800 /* Initialize Firmware */
801 if (mrsas_init_fw(sc) != SUCCESS) {
802 goto attach_fail_fw;
803 }
804
805 /* Register SCSI mid-layer */
806 if ((mrsas_cam_attach(sc) != SUCCESS)) {
807 goto attach_fail_cam;
808 }
809
810 /* Register IRQs */
811 if (mrsas_setup_irq(sc) != SUCCESS) {
812 goto attach_fail_irq;
813 }
814
815 /* Enable Interrupts */
816 mrsas_enable_intr(sc);
817
818 error = kthread_create(mrsas_ocr_thread, sc, &sc->ocr_thread, "mrsas_ocr%d",
819 device_get_unit(sc->mrsas_dev));
820 if (error) {
821 kprintf("Error %d starting rescan thread\n", error);
822 goto attach_fail_irq;
823 }
824
825 mrsas_setup_sysctl(sc);
826
827 /* Initiate AEN (Asynchronous Event Notification)*/
828
829 if (mrsas_start_aen(sc)) {
830 kprintf("Error: start aen failed\n");
831 goto fail_start_aen;
832 }
833
834 return (0);
835
836fail_start_aen:
837attach_fail_irq:
838 mrsas_teardown_intr(sc);
839attach_fail_cam:
840 mrsas_cam_detach(sc);
841attach_fail_fw:
842//attach_fail_raidmap:
843 mrsas_free_mem(sc);
844 lockuninit(&sc->sim_lock);
845 lockuninit(&sc->aen_lock);
846 lockuninit(&sc->pci_lock);
847 lockuninit(&sc->io_lock);
848 spin_uninit(&sc->ioctl_lock);
849 lockuninit(&sc->mpt_cmd_pool_lock);
850 lockuninit(&sc->mfi_cmd_pool_lock);
851 lockuninit(&sc->raidmap_lock);
852attach_fail:
853 destroy_dev(sc->mrsas_cdev);
854 if (sc->reg_res){
855 bus_release_resource(sc->mrsas_dev, SYS_RES_MEMORY,
856 sc->reg_res_id, sc->reg_res);
857 }
858 return (ENXIO);
859}
860
861/**
862 * mrsas_detach: De-allocates and teardown resources
863 * input: device struct pointer
864 *
865 * This function is the entry point for device disconnect and detach. It
866 * performs memory de-allocations, shutdown of the controller and various
867 * teardown and destroy resource functions.
868 */
869static int mrsas_detach(device_t dev)
870{
871 struct mrsas_softc *sc;
872 int i = 0;
873
874 sc = device_get_softc(dev);
875 sc->remove_in_progress = 1;
876 if(sc->ocr_thread_active)
877 wakeup(&sc->ocr_chan);
878 while(sc->reset_in_progress){
879 i++;
880 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
881 mrsas_dprint(sc, MRSAS_INFO,
882 "[%2d]waiting for ocr to be finished\n",i);
883 }
884 tsleep(mrsas_detach, 0, "mr_shutdown", hz);
885 }
886 i = 0;
887 while(sc->ocr_thread_active){
888 i++;
889 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
890 mrsas_dprint(sc, MRSAS_INFO,
891 "[%2d]waiting for "
892 "mrsas_ocr thread to quit ocr %d\n",i,
893 sc->ocr_thread_active);
894 }
895 tsleep(mrsas_detach, 0, "mr_shutdown", hz);
896 }
897 mrsas_flush_cache(sc);
898 mrsas_shutdown_ctlr(sc, MR_DCMD_CTRL_SHUTDOWN);
899 mrsas_disable_intr(sc);
900 mrsas_cam_detach(sc);
901 mrsas_teardown_intr(sc);
902 mrsas_free_mem(sc);
903 lockuninit(&sc->sim_lock);
904 lockuninit(&sc->aen_lock);
905 lockuninit(&sc->pci_lock);
906 lockuninit(&sc->io_lock);
907 spin_uninit(&sc->ioctl_lock);
908 lockuninit(&sc->mpt_cmd_pool_lock);
909 lockuninit(&sc->mfi_cmd_pool_lock);
910 lockuninit(&sc->raidmap_lock);
911 if (sc->reg_res){
912 bus_release_resource(sc->mrsas_dev,
913 SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res);
914 }
915 destroy_dev(sc->mrsas_cdev);
916 if (sc->sysctl_tree != NULL)
917 sysctl_ctx_free(&sc->sysctl_ctx);
918 return (0);
919}
920
921/**
922 * mrsas_free_mem: Frees allocated memory
923 * input: Adapter instance soft state
924 *
925 * This function is called from mrsas_detach() to free previously allocated
926 * memory.
927 */
928void mrsas_free_mem(struct mrsas_softc *sc)
929{
930 int i;
931 u_int32_t max_cmd;
932 struct mrsas_mfi_cmd *mfi_cmd;
933 struct mrsas_mpt_cmd *mpt_cmd;
934
935 /*
936 * Free RAID map memory
937 */
938 for (i=0; i < 2; i++)
939 {
940 if (sc->raidmap_phys_addr[i])
941 bus_dmamap_unload(sc->raidmap_tag[i], sc->raidmap_dmamap[i]);
942 if (sc->raidmap_mem[i] != NULL)
943 bus_dmamem_free(sc->raidmap_tag[i], sc->raidmap_mem[i], sc->raidmap_dmamap[i]);
944 if (sc->raidmap_tag[i] != NULL)
945 bus_dma_tag_destroy(sc->raidmap_tag[i]);
946 }
947
948 /*
949 * Free version buffer memroy
950 */
951 if (sc->verbuf_phys_addr)
952 bus_dmamap_unload(sc->verbuf_tag, sc->verbuf_dmamap);
953 if (sc->verbuf_mem != NULL)
954 bus_dmamem_free(sc->verbuf_tag, sc->verbuf_mem, sc->verbuf_dmamap);
955 if (sc->verbuf_tag != NULL)
956 bus_dma_tag_destroy(sc->verbuf_tag);
957
958
959 /*
960 * Free sense buffer memory
961 */
962 if (sc->sense_phys_addr)
963 bus_dmamap_unload(sc->sense_tag, sc->sense_dmamap);
964 if (sc->sense_mem != NULL)
965 bus_dmamem_free(sc->sense_tag, sc->sense_mem, sc->sense_dmamap);
966 if (sc->sense_tag != NULL)
967 bus_dma_tag_destroy(sc->sense_tag);
968
969 /*
970 * Free chain frame memory
971 */
972 if (sc->chain_frame_phys_addr)
973 bus_dmamap_unload(sc->chain_frame_tag, sc->chain_frame_dmamap);
974 if (sc->chain_frame_mem != NULL)
975 bus_dmamem_free(sc->chain_frame_tag, sc->chain_frame_mem, sc->chain_frame_dmamap);
976 if (sc->chain_frame_tag != NULL)
977 bus_dma_tag_destroy(sc->chain_frame_tag);
978
979 /*
980 * Free IO Request memory
981 */
982 if (sc->io_request_phys_addr)
983 bus_dmamap_unload(sc->io_request_tag, sc->io_request_dmamap);
984 if (sc->io_request_mem != NULL)
985 bus_dmamem_free(sc->io_request_tag, sc->io_request_mem, sc->io_request_dmamap);
986 if (sc->io_request_tag != NULL)
987 bus_dma_tag_destroy(sc->io_request_tag);
988
989 /*
990 * Free Reply Descriptor memory
991 */
992 if (sc->reply_desc_phys_addr)
993 bus_dmamap_unload(sc->reply_desc_tag, sc->reply_desc_dmamap);
994 if (sc->reply_desc_mem != NULL)
995 bus_dmamem_free(sc->reply_desc_tag, sc->reply_desc_mem, sc->reply_desc_dmamap);
996 if (sc->reply_desc_tag != NULL)
997 bus_dma_tag_destroy(sc->reply_desc_tag);
998
999 /*
1000 * Free event detail memory
1001 */
1002 if (sc->evt_detail_phys_addr)
1003 bus_dmamap_unload(sc->evt_detail_tag, sc->evt_detail_dmamap);
1004 if (sc->evt_detail_mem != NULL)
1005 bus_dmamem_free(sc->evt_detail_tag, sc->evt_detail_mem, sc->evt_detail_dmamap);
1006 if (sc->evt_detail_tag != NULL)
1007 bus_dma_tag_destroy(sc->evt_detail_tag);
1008
1009 /*
1010 * Free MFI frames
1011 */
1012 if (sc->mfi_cmd_list) {
1013 for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
1014 mfi_cmd = sc->mfi_cmd_list[i];
1015 mrsas_free_frame(sc, mfi_cmd);
1016 }
1017 }
1018 if (sc->mficmd_frame_tag != NULL)
1019 bus_dma_tag_destroy(sc->mficmd_frame_tag);
1020
1021 /*
1022 * Free MPT internal command list
1023 */
1024 max_cmd = sc->max_fw_cmds;
1025 if (sc->mpt_cmd_list) {
1026 for (i = 0; i < max_cmd; i++) {
1027 mpt_cmd = sc->mpt_cmd_list[i];
1028 bus_dmamap_destroy(sc->data_tag, mpt_cmd->data_dmamap);
1029 kfree(sc->mpt_cmd_list[i], M_MRSAS);
1030 }
1031 kfree(sc->mpt_cmd_list, M_MRSAS);
1032 sc->mpt_cmd_list = NULL;
1033 }
1034
1035 /*
1036 * Free MFI internal command list
1037 */
1038
1039 if (sc->mfi_cmd_list) {
1040 for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
1041 kfree(sc->mfi_cmd_list[i], M_MRSAS);
1042 }
1043 kfree(sc->mfi_cmd_list, M_MRSAS);
1044 sc->mfi_cmd_list = NULL;
1045 }
1046
1047 /*
1048 * Free request descriptor memory
1049 */
1050 kfree(sc->req_desc, M_MRSAS);
1051 sc->req_desc = NULL;
1052
1053 /*
1054 * Destroy parent tag
1055 */
1056 if (sc->mrsas_parent_tag != NULL)
1057 bus_dma_tag_destroy(sc->mrsas_parent_tag);
1058}
1059
1060/**
1061 * mrsas_teardown_intr: Teardown interrupt
1062 * input: Adapter instance soft state
1063 *
1064 * This function is called from mrsas_detach() to teardown and release
1065 * bus interrupt resourse.
1066 */
1067void mrsas_teardown_intr(struct mrsas_softc *sc)
1068{
1069 if (sc->intr_handle)
1070 bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq, sc->intr_handle);
1071 if (sc->mrsas_irq != NULL)
1072 bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ, sc->irq_id, sc->mrsas_irq);
1073 if (sc->irq_type == PCI_INTR_TYPE_MSI)
1074 pci_release_msi(sc->mrsas_dev);
1075 sc->intr_handle = NULL;
1076}
1077
1078/**
1079 * mrsas_suspend: Suspend entry point
1080 * input: Device struct pointer
1081 *
1082 * This function is the entry point for system suspend from the OS.
1083 */
1084static int mrsas_suspend(device_t dev)
1085{
1086 struct mrsas_softc *sc;
1087
1088 sc = device_get_softc(dev);
1089 return (0);
1090}
1091
1092/**
1093 * mrsas_resume: Resume entry point
1094 * input: Device struct pointer
1095 *
1096 * This function is the entry point for system resume from the OS.
1097 */
1098static int mrsas_resume(device_t dev)
1099{
1100 struct mrsas_softc *sc;
1101
1102 sc = device_get_softc(dev);
1103 return (0);
1104}
1105
/**
 * mrsas_ioctl: IOCtl commands entry point.
 *
 * This function is the entry point for IOCtls from the OS.  It calls the
 * appropriate function for processing depending on the command received.
 * Returns ENOENT while a driver unload/shutdown is in progress.  If an
 * online controller reset (OCR) is running, the ioctl blocks until the
 * reset has finished before the command is processed.
 */
static int
mrsas_ioctl(struct dev_ioctl_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	u_long cmd = ap->a_cmd;
	caddr_t arg = ap->a_data;
	struct mrsas_softc *sc;
	int ret = 0, i = 0;

	sc = (struct mrsas_softc *)(dev->si_drv1);

	/* Refuse new ioctls once detach or shutdown has begun. */
	if (sc->remove_in_progress) {
		mrsas_dprint(sc, MRSAS_INFO,
		    "Driver remove or shutdown called.\n");
		return ENOENT;
	}

	/* Fast path: no reset in flight, process the command directly. */
	spin_lock(&sc->ioctl_lock);
	if (!sc->reset_in_progress) {
		spin_unlock(&sc->ioctl_lock);
		goto do_ioctl;
	}

	/* Release ioclt_lock, and wait for OCR
	 * to be finished */
	spin_unlock(&sc->ioctl_lock);
	while(sc->reset_in_progress){
		i++;
		if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
			mrsas_dprint(sc, MRSAS_INFO,
			    "[%2d]waiting for "
			    "OCR to be finished %d\n",i,
			    sc->ocr_thread_active);
		}
		/*
		 * Nothing wakes this channel explicitly; the hz-tick
		 * (one second) timeout makes this a periodic poll of
		 * reset_in_progress.
		 */
		tsleep(mrsas_ioctl, 0, "mr_ioctl", hz);
	}

do_ioctl:
	switch (cmd) {
	case MRSAS_IOC_FIRMWARE_PASS_THROUGH:
		ret = mrsas_passthru(sc, (void *)arg);
		break;
	case MRSAS_IOC_SCAN_BUS:
		ret = mrsas_bus_scan(sc);
		break;
	}
	/* Unknown commands fall through and return 0 (success). */

	return (ret);
}
1161
/**
 * mrsas_setup_irq: Set up interrupt.
 * input: Adapter instance soft state
 *
 * This function sets up interrupts as a bus resource, with flags indicating
 * resource permitting contemporaneous sharing and for resource to activate
 * atomically.  MSI is requested subject to the mrsas_msi_enable tunable,
 * falling back to a legacy line interrupt.  Returns FAIL if the IRQ
 * resource cannot be allocated or the handler cannot be attached; any
 * partially acquired resources are released by mrsas_teardown_intr().
 */
static int mrsas_setup_irq(struct mrsas_softc *sc)
{
	u_int irq_flags;

	sc->irq_id = 0;
	/* Selects MSI or legacy INTx and fills in irq_flags accordingly. */
	sc->irq_type = pci_alloc_1intr(sc->mrsas_dev, mrsas_msi_enable,
	    &sc->irq_id, &irq_flags);

	sc->mrsas_irq = bus_alloc_resource_any(sc->mrsas_dev, SYS_RES_IRQ,
	    &sc->irq_id, irq_flags);
	if (sc->mrsas_irq == NULL){
		device_printf(sc->mrsas_dev, "Cannot allocate interrupt\n");
		return (FAIL);
	}
	/* Attach mrsas_isr() as an MP-safe handler with sc as its argument. */
	if (bus_setup_intr(sc->mrsas_dev, sc->mrsas_irq, INTR_MPSAFE,
	    mrsas_isr, sc, &sc->intr_handle, NULL)) {
		device_printf(sc->mrsas_dev, "Cannot set up interrupt\n");
		return (FAIL);
	}

	return (0);
}
1192
1193/*
1194 * mrsas_isr: ISR entry point
1195 * input: argument pointer
1196 *
1197 * This function is the interrupt service routine entry point. There
1198 * are two types of interrupts, state change interrupt and response
1199 * interrupt. If an interrupt is not ours, we just return.
1200 */
1201void mrsas_isr(void *arg)
1202{
1203 struct mrsas_softc *sc = (struct mrsas_softc *)arg;
1204 int status;
1205
1206 /* Clear FW state change interrupt */
1207 status = mrsas_clear_intr(sc);
1208
1209 /* Not our interrupt */
1210 if (!status)
1211 return;
1212
1213 /* If we are resetting, bail */
1214 if (test_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags)) {
1215 kprintf(" Entered into ISR when OCR is going active. \n");
1216 mrsas_clear_intr(sc);
1217 return;
1218 }
1219 /* Process for reply request and clear response interrupt */
1220 if (mrsas_complete_cmd(sc) != SUCCESS)
1221 mrsas_clear_intr(sc);
1222
1223 return;
1224}
1225
/*
 * mrsas_complete_cmd: Process reply request
 * input: Adapter instance soft state
 *
 * This function is called from mrsas_isr() to process reply request and
 * clear response interrupt. Processing of the reply request entails
 * walking through the reply descriptor array for the command request
 * pended from Firmware. We look at the Function field to determine
 * the command type and perform the appropriate action. Before we
 * return, we clear the response interrupt.  Returns DONE when there is
 * nothing to do (HW fault or no completed descriptors), 0 otherwise.
 */
static int mrsas_complete_cmd(struct mrsas_softc *sc)
{
	Mpi2ReplyDescriptorsUnion_t *desc;
	MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc;
	MRSAS_RAID_SCSI_IO_REQUEST *scsi_io_req;
	struct mrsas_mpt_cmd *cmd_mpt;
	struct mrsas_mfi_cmd *cmd_mfi;
	u_int8_t arm, reply_descript_type;
	u_int16_t smid, num_completed;
	u_int8_t status, extStatus;
	union desc_value desc_val;
	PLD_LOAD_BALANCE_INFO lbinfo;
	u_int32_t device_id;
	int threshold_reply_count = 0;


	/* If we have a hardware error, not need to continue */
	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
		return (DONE);

	/* Resume the ring walk where the previous invocation stopped. */
	desc = sc->reply_desc_mem;
	desc += sc->last_reply_idx;

	reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;

	desc_val.word = desc->Words;
	num_completed = 0;

	reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;

	/* Find our reply descriptor for the command and process.
	 * All-ones Words marks an unconsumed/empty slot. */
	while((desc_val.u.low != 0xFFFFFFFF) && (desc_val.u.high != 0xFFFFFFFF))
	{
		smid = reply_desc->SMID;
		cmd_mpt = sc->mpt_cmd_list[smid -1];	/* SMIDs are 1-based */
		scsi_io_req = (MRSAS_RAID_SCSI_IO_REQUEST *)cmd_mpt->io_request;

		status = scsi_io_req->RaidContext.status;
		extStatus = scsi_io_req->RaidContext.exStatus;

		switch (scsi_io_req->Function)
		{
		case MPI2_FUNCTION_SCSI_IO_REQUEST :  /*Fast Path IO.*/
			/* Undo the RAID1 load-balance accounting for this arm. */
			device_id = cmd_mpt->ccb_ptr->ccb_h.target_id;
			lbinfo = &sc->load_balance_info[device_id];
			if (cmd_mpt->load_balance == MRSAS_LOAD_BALANCE_FLAG) {
				arm = lbinfo->raid1DevHandle[0] == scsi_io_req->DevHandle ? 0 : 1;
				atomic_dec(&lbinfo->scsi_pending_cmds[arm]);
				cmd_mpt->load_balance &= ~MRSAS_LOAD_BALANCE_FLAG;
			}
			/* FALLTHROUGH - fall thru and complete IO */
		case MRSAS_MPI2_FUNCTION_LD_IO_REQUEST:
			mrsas_map_mpt_cmd_status(cmd_mpt, status, extStatus);
			mrsas_cmd_done(sc, cmd_mpt);
			scsi_io_req->RaidContext.status = 0;
			scsi_io_req->RaidContext.exStatus = 0;
			atomic_dec(&sc->fw_outstanding);
			break;
		case MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /*MFI command */
			cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
			mrsas_complete_mptmfi_passthru(sc, cmd_mfi, status);
			cmd_mpt->flags = 0;
			mrsas_release_mpt_cmd(cmd_mpt);
			break;
		}

		sc->last_reply_idx++;
		if (sc->last_reply_idx >= sc->reply_q_depth)
			sc->last_reply_idx = 0;

		desc->Words = ~((uint64_t)0x00); /* set it back to all 0xFFFFFFFFs */
		num_completed++;
		threshold_reply_count++;

		/* Get the next reply descriptor */
		if (!sc->last_reply_idx)
			desc = sc->reply_desc_mem;	/* wrapped: back to ring start */
		else
			desc++;

		reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
		desc_val.word = desc->Words;

		reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;

		if(reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
			break;

		/*
		 * Write to reply post index after completing threshold reply count
		 * and still there are more replies in reply queue pending to be
		 * completed.
		 */
		if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) {
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, reply_post_host_index),
			    sc->last_reply_idx);
			threshold_reply_count = 0;
		}
	}

	/* No match, just return */
	if (num_completed == 0)
		return (DONE);

	/* Clear response interrupt */
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, reply_post_host_index),sc->last_reply_idx);

	return(0);
}
1346
/*
 * mrsas_map_mpt_cmd_status: Map FW command status to CAM CCB status.
 * input: MPT command, FW status byte, FW extended status byte
 *
 * This function is called from mrsas_complete_cmd(), for LD IO and FastPath IO.
 * It checks the command status and maps the appropriate CAM status for the CCB,
 * copying sense data back on SCSI errors.  extStatus is currently unused.
 */
void mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status, u_int8_t extStatus)
{
	struct mrsas_softc *sc = cmd->sc;
	u_int8_t *sense_data;

	switch (status) {
	case MFI_STAT_OK:
		cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP;
		break;
	case MFI_STAT_SCSI_IO_FAILED:
	case MFI_STAT_SCSI_DONE_WITH_ERROR:
		cmd->ccb_ptr->ccb_h.status = CAM_SCSI_STATUS_ERROR;
		sense_data = (u_int8_t *)&cmd->ccb_ptr->csio.sense_data;
		/* NOTE(review): address of an embedded struct member; this
		 * check can never be false. */
		if (sense_data) {
			/* For now just copy 18 bytes back */
			memcpy(sense_data, cmd->sense, 18);
			cmd->ccb_ptr->csio.sense_len = 18;
			cmd->ccb_ptr->ccb_h.status |= CAM_AUTOSNS_VALID;
		}
		break;
	case MFI_STAT_LD_OFFLINE:
	case MFI_STAT_DEVICE_NOT_FOUND:
		/* Non-zero LUN: invalid LUN; LUN 0: the device is gone. */
		if (cmd->ccb_ptr->ccb_h.target_lun)
			cmd->ccb_ptr->ccb_h.status |= CAM_LUN_INVALID;
		else
			cmd->ccb_ptr->ccb_h.status |= CAM_DEV_NOT_THERE;
		break;
	case MFI_STAT_CONFIG_SEQ_MISMATCH:
		/*send status to CAM layer to retry sending command without
		 * decrementing retry counter*/
		cmd->ccb_ptr->ccb_h.status |= CAM_REQUEUE_REQ;
		break;
	default:
		device_printf(sc->mrsas_dev, "FW cmd complete status %x\n", status);
		cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP_ERR;
		cmd->ccb_ptr->csio.scsi_status = status;
	}
	return;
}
1393
/*
 * mrsas_alloc_mem: Allocate DMAable memory.
 * input: Adapter instance soft state
 *
 * This function creates the parent DMA tag and allocates DMAable memory.
 * DMA tag describes constraints of DMA mapping. Memory allocated is mapped
 * into Kernel virtual address. Callback argument is physical memory address.
 * Returns ENOMEM at the first failure; whatever was allocated up to that
 * point is released later by mrsas_free_mem().
 */
static int mrsas_alloc_mem(struct mrsas_softc *sc)
{
	u_int32_t verbuf_size, io_req_size, reply_desc_size, sense_size,
	    chain_frame_size, evt_detail_size;

	/*
	 * Allocate parent DMA tag
	 */
	if (bus_dma_tag_create(NULL,           /* parent */
	    1,                  /* alignment */
	    0,                  /* boundary */
	    BUS_SPACE_MAXADDR,  /* lowaddr */
	    BUS_SPACE_MAXADDR,  /* highaddr */
	    NULL, NULL,         /* filter, filterarg */
	    MRSAS_MAX_IO_SIZE,  /* maxsize */
	    MRSAS_MAX_SGL,      /* nsegments */
	    MRSAS_MAX_IO_SIZE,  /* maxsegsize */
	    0,                  /* flags */
	    &sc->mrsas_parent_tag /* tag */
	    )) {
		device_printf(sc->mrsas_dev, "Cannot allocate parent DMA tag\n");
		return(ENOMEM);
	}

	/*
	 * Allocate for version buffer
	 */
	verbuf_size = MRSAS_MAX_NAME_LENGTH*(sizeof(bus_addr_t));
	if (bus_dma_tag_create(sc->mrsas_parent_tag, // parent
	    1, 0,                   // algnmnt, boundary
	    BUS_SPACE_MAXADDR_32BIT,// lowaddr
	    BUS_SPACE_MAXADDR,      // highaddr
	    NULL, NULL,             // filter, filterarg
	    verbuf_size,            // maxsize
	    1,                      // msegments
	    verbuf_size,            // maxsegsize
	    BUS_DMA_ALLOCNOW,       // flags
	    &sc->verbuf_tag)) {
		device_printf(sc->mrsas_dev, "Cannot allocate verbuf DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->verbuf_tag, (void **)&sc->verbuf_mem,
	    BUS_DMA_NOWAIT, &sc->verbuf_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot allocate verbuf memory\n");
		return (ENOMEM);
	}
	bzero(sc->verbuf_mem, verbuf_size);
	/* mrsas_addr_cb() records the physical address of the mapping. */
	if (bus_dmamap_load(sc->verbuf_tag, sc->verbuf_dmamap, sc->verbuf_mem,
	    verbuf_size, mrsas_addr_cb, &sc->verbuf_phys_addr, BUS_DMA_NOWAIT)){
		device_printf(sc->mrsas_dev, "Cannot load verbuf DMA map\n");
		return(ENOMEM);
	}

	/*
	 * Allocate IO Request Frames
	 */
	io_req_size = sc->io_frames_alloc_sz;
	if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
	    16, 0,                  // algnmnt, boundary
	    BUS_SPACE_MAXADDR_32BIT,// lowaddr
	    BUS_SPACE_MAXADDR,      // highaddr
	    NULL, NULL,             // filter, filterarg
	    io_req_size,            // maxsize
	    1,                      // msegments
	    io_req_size,            // maxsegsize
	    BUS_DMA_ALLOCNOW,       // flags
	    &sc->io_request_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create IO request tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->io_request_tag, (void **)&sc->io_request_mem,
	    BUS_DMA_NOWAIT, &sc->io_request_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc IO request memory\n");
		return (ENOMEM);
	}
	bzero(sc->io_request_mem, io_req_size);
	if (bus_dmamap_load(sc->io_request_tag, sc->io_request_dmamap,
	    sc->io_request_mem, io_req_size, mrsas_addr_cb,
	    &sc->io_request_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load IO request memory\n");
		return (ENOMEM);
	}

	/*
	 * Allocate Chain Frames
	 */
	chain_frame_size = sc->chain_frames_alloc_sz;
	if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
	    4, 0,                   // algnmnt, boundary
	    BUS_SPACE_MAXADDR_32BIT,// lowaddr
	    BUS_SPACE_MAXADDR,      // highaddr
	    NULL, NULL,             // filter, filterarg
	    chain_frame_size,       // maxsize
	    1,                      // msegments
	    chain_frame_size,       // maxsegsize
	    BUS_DMA_ALLOCNOW,       // flags
	    &sc->chain_frame_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create chain frame tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->chain_frame_tag, (void **)&sc->chain_frame_mem,
	    BUS_DMA_NOWAIT, &sc->chain_frame_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc chain frame memory\n");
		return (ENOMEM);
	}
	bzero(sc->chain_frame_mem, chain_frame_size);
	if (bus_dmamap_load(sc->chain_frame_tag, sc->chain_frame_dmamap,
	    sc->chain_frame_mem, chain_frame_size, mrsas_addr_cb,
	    &sc->chain_frame_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load chain frame memory\n");
		return (ENOMEM);
	}

	/*
	 * Allocate Reply Descriptor Array
	 */
	reply_desc_size = sc->reply_alloc_sz;
	if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
	    16, 0,                  // algnmnt, boundary
	    BUS_SPACE_MAXADDR_32BIT,// lowaddr
	    BUS_SPACE_MAXADDR,      // highaddr
	    NULL, NULL,             // filter, filterarg
	    reply_desc_size,        // maxsize
	    1,                      // msegments
	    reply_desc_size,        // maxsegsize
	    BUS_DMA_ALLOCNOW,       // flags
	    &sc->reply_desc_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create reply descriptor tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->reply_desc_tag, (void **)&sc->reply_desc_mem,
	    BUS_DMA_NOWAIT, &sc->reply_desc_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc reply descriptor memory\n");
		return (ENOMEM);
	}
	/* NOTE(review): unlike the other buffers this one is not bzero'd
	 * here - presumably the descriptors are initialized elsewhere
	 * before use; verify against the IOC init path. */
	if (bus_dmamap_load(sc->reply_desc_tag, sc->reply_desc_dmamap,
	    sc->reply_desc_mem, reply_desc_size, mrsas_addr_cb,
	    &sc->reply_desc_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load reply descriptor memory\n");
		return (ENOMEM);
	}

	/*
	 * Allocate Sense Buffer Array.  Keep in lower 4GB
	 */
	sense_size = sc->max_fw_cmds * MRSAS_SENSE_LEN;
	if (bus_dma_tag_create(sc->mrsas_parent_tag, // parent
	    64, 0,                  // algnmnt, boundary
	    BUS_SPACE_MAXADDR_32BIT,// lowaddr
	    BUS_SPACE_MAXADDR,      // highaddr
	    NULL, NULL,             // filter, filterarg
	    sense_size,             // maxsize
	    1,                      // nsegments
	    sense_size,             // maxsegsize
	    BUS_DMA_ALLOCNOW,       // flags
	    &sc->sense_tag)) {
		device_printf(sc->mrsas_dev, "Cannot allocate sense buf tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->sense_tag, (void **)&sc->sense_mem,
	    BUS_DMA_NOWAIT, &sc->sense_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot allocate sense buf memory\n");
		return (ENOMEM);
	}
	if (bus_dmamap_load(sc->sense_tag, sc->sense_dmamap,
	    sc->sense_mem, sense_size, mrsas_addr_cb, &sc->sense_phys_addr,
	    BUS_DMA_NOWAIT)){
		device_printf(sc->mrsas_dev, "Cannot load sense buf memory\n");
		return (ENOMEM);
	}

	/*
	 * Allocate for Event detail structure
	 */
	evt_detail_size = sizeof(struct mrsas_evt_detail);
	if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
	    1, 0,                   // algnmnt, boundary
	    BUS_SPACE_MAXADDR_32BIT,// lowaddr
	    BUS_SPACE_MAXADDR,      // highaddr
	    NULL, NULL,             // filter, filterarg
	    evt_detail_size,        // maxsize
	    1,                      // msegments
	    evt_detail_size,        // maxsegsize
	    BUS_DMA_ALLOCNOW,       // flags
	    &sc->evt_detail_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create Event detail tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->evt_detail_tag, (void **)&sc->evt_detail_mem,
	    BUS_DMA_NOWAIT, &sc->evt_detail_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc Event detail buffer memory\n");
		return (ENOMEM);
	}
	bzero(sc->evt_detail_mem, evt_detail_size);
	if (bus_dmamap_load(sc->evt_detail_tag, sc->evt_detail_dmamap,
	    sc->evt_detail_mem, evt_detail_size, mrsas_addr_cb,
	    &sc->evt_detail_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load Event detail buffer memory\n");
		return (ENOMEM);
	}


	/*
	 * Create a dma tag for data buffers; size will be the maximum
	 * possible I/O size (280kB).  Only the tag is created here;
	 * per-command maps are created by mrsas_alloc_mpt_cmds().
	 */
	if (bus_dma_tag_create(sc->mrsas_parent_tag, // parent
	    1,                  // alignment
	    0,                  // boundary
	    BUS_SPACE_MAXADDR,  // lowaddr
	    BUS_SPACE_MAXADDR,  // highaddr
	    NULL, NULL,         // filter, filterarg
	    MRSAS_MAX_IO_SIZE,  // maxsize
	    MRSAS_MAX_SGL,      // nsegments
	    MRSAS_MAX_IO_SIZE,  // maxsegsize
	    BUS_DMA_ALLOCNOW,   // flags
	    &sc->data_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create data dma tag\n");
		return(ENOMEM);
	}

	return(0);
}
1625
1626/*
1627 * mrsas_addr_cb: Callback function of bus_dmamap_load()
1628 * input: callback argument,
1629 * machine dependent type that describes DMA segments,
1630 * number of segments,
1631 * error code.
1632 *
1633 * This function is for the driver to receive mapping information resultant
1634 * of the bus_dmamap_load(). The information is actually not being used,
1635 * but the address is saved anyway.
1636 */
1637void
1638mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1639{
1640 bus_addr_t *addr;
1641
1642 addr = arg;
1643 *addr = segs[0].ds_addr;
1644}
1645
/*
 * mrsas_setup_raidmap: Set up RAID map.
 * input: Adapter instance soft state
 *
 * Allocate DMA memory for the two RAID map buffers and perform setup,
 * then pull the initial map from firmware.  On a partial failure the
 * resources already allocated are released later by mrsas_free_mem().
 */
static int mrsas_setup_raidmap(struct mrsas_softc *sc)
{
	/* Map covers MAX_LOGICAL_DRIVES span maps; the struct has one built in. */
	sc->map_sz = sizeof(MR_FW_RAID_MAP) +
	    (sizeof(MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1));

	for (int i=0; i < 2; i++)
	{
		if (bus_dma_tag_create(sc->mrsas_parent_tag, // parent
		    4, 0,                   // algnmnt, boundary
		    BUS_SPACE_MAXADDR_32BIT,// lowaddr
		    BUS_SPACE_MAXADDR,      // highaddr
		    NULL, NULL,             // filter, filterarg
		    sc->map_sz,             // maxsize
		    1,                      // nsegments
		    sc->map_sz,             // maxsegsize
		    BUS_DMA_ALLOCNOW,       // flags
		    &sc->raidmap_tag[i])) {
			device_printf(sc->mrsas_dev, "Cannot allocate raid map tag.\n");
			return (ENOMEM);
		}
		if (bus_dmamem_alloc(sc->raidmap_tag[i], (void **)&sc->raidmap_mem[i],
		    BUS_DMA_NOWAIT, &sc->raidmap_dmamap[i])) {
			device_printf(sc->mrsas_dev, "Cannot allocate raidmap memory.\n");
			return (ENOMEM);
		}
		if (bus_dmamap_load(sc->raidmap_tag[i], sc->raidmap_dmamap[i],
		    sc->raidmap_mem[i], sc->map_sz, mrsas_addr_cb, &sc->raidmap_phys_addr[i],
		    BUS_DMA_NOWAIT)){
			device_printf(sc->mrsas_dev, "Cannot load raidmap memory.\n");
			return (ENOMEM);
		}
		/* NOTE(review): redundant - a successful bus_dmamem_alloc()
		 * above already guarantees raidmap_mem[i] is non-NULL. */
		if (!sc->raidmap_mem[i]) {
			device_printf(sc->mrsas_dev, "Cannot allocate memory for raid map.\n");
			return (ENOMEM);
		}
	}

	/* Fetch the current map from FW; on success register for updates. */
	if (!mrsas_get_map_info(sc))
		mrsas_sync_map_info(sc);

	return (0);
}
1694
/**
 * mrsas_init_fw: Initialize Firmware
 * input: Adapter soft state
 *
 * Calls transition_to_ready() to make sure Firmware is in operational
 * state and calls mrsas_init_adapter() to send IOC_INIT command to
 * Firmware. It issues internal commands to get the controller info
 * after the IOC_INIT command response is received by Firmware.
 * Note: code relating to get_pdlist, get_ld_list and max_sectors
 * are currently not being used, it is left here as placeholder.
 */
static int mrsas_init_fw(struct mrsas_softc *sc)
{
	u_int32_t max_sectors_1;
	u_int32_t max_sectors_2;
	u_int32_t tmp_sectors;
	struct mrsas_ctrl_info *ctrl_info;

	int ret, ocr = 0;


	/* Make sure Firmware is ready */
	ret = mrsas_transition_to_ready(sc, ocr);
	if (ret != SUCCESS) {
		return(ret);
	}

	/* Get operational params, sge flags, send init cmd to ctlr */
	if (mrsas_init_adapter(sc) != SUCCESS){
		device_printf(sc->mrsas_dev, "Adapter initialize Fail.\n");
		return(1);
	}

	/* Allocate internal commands for pass-thru */
	if (mrsas_alloc_mfi_cmds(sc) != SUCCESS){
		device_printf(sc->mrsas_dev, "Allocate MFI cmd failed.\n");
		return(1);
	}

	if (mrsas_setup_raidmap(sc) != SUCCESS) {
		device_printf(sc->mrsas_dev, "Set up RAID map failed.\n");
		return(1);
	}

	/* For pass-thru, get PD/LD list and controller info */
	memset(sc->pd_list, 0, MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
	mrsas_get_pd_list(sc);

	memset(sc->ld_ids, 0xff, MRSAS_MAX_LD);
	mrsas_get_ld_list(sc);

	//memset(sc->log_to_span, 0, MRSAS_MAX_LD * sizeof(LD_SPAN_INFO));

	/* M_NOWAIT may return NULL; every use below is NULL-guarded. */
	ctrl_info = kmalloc(sizeof(struct mrsas_ctrl_info), M_MRSAS, M_NOWAIT);

	/*
	 * Compute the max allowed sectors per IO: The controller info has two
	 * limits on max sectors. Driver should use the minimum of these two.
	 *
	 * 1 << stripe_sz_ops.min = max sectors per strip
	 *
	 * Note that older firmwares ( < FW ver 30) didn't report information
	 * to calculate max_sectors_1. So the number ended up as zero always.
	 */
	tmp_sectors = 0;
	if (ctrl_info && !mrsas_get_ctrl_info(sc, ctrl_info)) {
		max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
		    ctrl_info->max_strips_per_io;
		max_sectors_2 = ctrl_info->max_request_size;
		tmp_sectors = min(max_sectors_1 , max_sectors_2);
		sc->disableOnlineCtrlReset =
		    ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
		sc->UnevenSpanSupport =
		    ctrl_info->adapterOperations2.supportUnevenSpans;
		if(sc->UnevenSpanSupport) {
			device_printf(sc->mrsas_dev, "FW supports: UnevenSpanSupport=%x\n",
			    sc->UnevenSpanSupport);
			/* Fast-path IO only if the RAID map validates. */
			if (MR_ValidateMapInfo(sc))
				sc->fast_path_io = 1;
			else
				sc->fast_path_io = 0;

		}
	}
	/* Default cap from SGE count; clamp to the FW limit when known. */
	sc->max_sectors_per_req = sc->max_num_sge * MRSAS_PAGE_SIZE / 512;

	if (tmp_sectors && (sc->max_sectors_per_req > tmp_sectors))
		sc->max_sectors_per_req = tmp_sectors;

	if (ctrl_info)
		kfree(ctrl_info, M_MRSAS);

	return(0);
}
1789
/**
 * mrsas_init_adapter: Initializes the adapter/controller
 * input: Adapter soft state
 *
 * Prepares for the issuing of the IOC Init cmd to FW for initializing the
 * ROC/controller. The FW register is read to determined the number of
 * commands that is supported. All memory allocations for IO is based on
 * max_cmd. Appropriate calculations are performed in this function.
 * Returns SUCCESS-or-error from the allocation/IOC-init helpers.
 */
int mrsas_init_adapter(struct mrsas_softc *sc)
{
	uint32_t status;
	u_int32_t max_cmd;
	int ret;

	/* Read FW status register */
	status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));

	/* Get operational params from status register */
	sc->max_fw_cmds = status & MRSAS_FWSTATE_MAXCMD_MASK;

	/* Decrement the max supported by 1, to correlate with FW */
	sc->max_fw_cmds = sc->max_fw_cmds-1;
	max_cmd = sc->max_fw_cmds;

	/* Determine allocation size of command frames */
	/* Reply queue depth: 2*max_cmd+1, rounded up to a multiple of 16. */
	sc->reply_q_depth = ((max_cmd *2 +1 +15)/16*16);
	sc->request_alloc_sz = sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * max_cmd;
	sc->reply_alloc_sz = sizeof(MPI2_REPLY_DESCRIPTORS_UNION) * (sc->reply_q_depth);
	sc->io_frames_alloc_sz = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE + (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * (max_cmd + 1));
	sc->chain_frames_alloc_sz = 1024 * max_cmd;
	/* SGEs that fit in the main frame after the fixed request header;
	 * each SGE/chain element is 16 bytes. */
	sc->max_sge_in_main_msg = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
	    offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL))/16;

	sc->max_sge_in_chain = MRSAS_MAX_SZ_CHAIN_FRAME / sizeof(MPI2_SGE_IO_UNION);
	sc->max_num_sge = sc->max_sge_in_main_msg + sc->max_sge_in_chain - 2;

	/* Used for pass thru MFI frame (DCMD) */
	sc->chain_offset_mfi_pthru = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL)/16;

	sc->chain_offset_io_request = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
	    sizeof(MPI2_SGE_IO_UNION))/16;

	sc->last_reply_idx = 0;

	ret = mrsas_alloc_mem(sc);
	if (ret != SUCCESS)
		return(ret);

	ret = mrsas_alloc_mpt_cmds(sc);
	if (ret != SUCCESS)
		return(ret);

	/* Send the IOC INIT request to firmware. */
	ret = mrsas_ioc_init(sc);
	if (ret != SUCCESS)
		return(ret);


	return(0);
}
1850
/**
 * mrsas_alloc_ioc_cmd: Allocates memory for IOC Init command
 * input: Adapter soft state
 *
 * Allocates for the IOC Init cmd to FW to initialize the ROC/controller.
 * The buffer is 1KB larger than the MPI2 request: mrsas_ioc_init()
 * places the MFI init frame at offset 0 and the MPI2 IOC INIT request
 * at offset 1024.  On failure the caller cleans up via
 * mrsas_free_ioc_cmd(), which guards each step.
 */
int mrsas_alloc_ioc_cmd(struct mrsas_softc *sc)
{
	int ioc_init_size;

	/* Allocate IOC INIT command */
	ioc_init_size = 1024 + sizeof(MPI2_IOC_INIT_REQUEST);
	if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
	    1, 0,                   // algnmnt, boundary
	    BUS_SPACE_MAXADDR_32BIT,// lowaddr
	    BUS_SPACE_MAXADDR,      // highaddr
	    NULL, NULL,             // filter, filterarg
	    ioc_init_size,          // maxsize
	    1,                      // msegments
	    ioc_init_size,          // maxsegsize
	    BUS_DMA_ALLOCNOW,       // flags
	    &sc->ioc_init_tag)) {
		device_printf(sc->mrsas_dev, "Cannot allocate ioc init tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->ioc_init_tag, (void **)&sc->ioc_init_mem,
	    BUS_DMA_NOWAIT, &sc->ioc_init_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot allocate ioc init cmd mem\n");
		return (ENOMEM);
	}
	bzero(sc->ioc_init_mem, ioc_init_size);
	/* mrsas_addr_cb() stores the physical address for firing the cmd. */
	if (bus_dmamap_load(sc->ioc_init_tag, sc->ioc_init_dmamap,
	    sc->ioc_init_mem, ioc_init_size, mrsas_addr_cb,
	    &sc->ioc_init_phys_mem, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load ioc init cmd mem\n");
		return (ENOMEM);
	}

	return (0);
}
1891
1892/**
1893 * mrsas_free_ioc_cmd: Allocates memory for IOC Init command
1894 * input: Adapter soft state
1895 *
1896 * Deallocates memory of the IOC Init cmd.
1897 */
1898void mrsas_free_ioc_cmd(struct mrsas_softc *sc)
1899{
1900 if (sc->ioc_init_phys_mem)
1901 bus_dmamap_unload(sc->ioc_init_tag, sc->ioc_init_dmamap);
1902 if (sc->ioc_init_mem != NULL)
1903 bus_dmamem_free(sc->ioc_init_tag, sc->ioc_init_mem, sc->ioc_init_dmamap);
1904 if (sc->ioc_init_tag != NULL)
1905 bus_dma_tag_destroy(sc->ioc_init_tag);
1906}
1907
/**
 * mrsas_ioc_init: Sends IOC Init command to FW
 * input: Adapter soft state
 *
 * Issues the IOC Init cmd to FW to initialize the ROC/controller.
 * The command is laid out as an MFI init frame at offset 0 of the DMA
 * buffer with the MPI2 IOC INIT request at offset 1024.  It is fired
 * with interrupts disabled and the frame's cmd_status byte is polled
 * in 1 ms steps for up to MRSAS_IOC_INIT_WAIT_TIME seconds.
 * Returns 0 on success, 1 on timeout or firmware error.
 */
int mrsas_ioc_init(struct mrsas_softc *sc)
{
	struct mrsas_init_frame *init_frame;
	pMpi2IOCInitRequest_t IOCInitMsg;
	MRSAS_REQUEST_DESCRIPTOR_UNION req_desc;
	u_int8_t max_wait = MRSAS_IOC_INIT_WAIT_TIME;
	bus_addr_t phys_addr;
	int i, retcode = 0;

	/* Allocate memory for the IOC INIT command */
	if (mrsas_alloc_ioc_cmd(sc)) {
		device_printf(sc->mrsas_dev, "Cannot allocate IOC command.\n");
		return(1);
	}

	/* MPI2 IOC INIT request lives at offset 1024 of the buffer. */
	IOCInitMsg = (pMpi2IOCInitRequest_t)(((char *)sc->ioc_init_mem) +1024);
	IOCInitMsg->Function = MPI2_FUNCTION_IOC_INIT;
	IOCInitMsg->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
	IOCInitMsg->MsgVersion = MPI2_VERSION;
	IOCInitMsg->HeaderVersion = MPI2_HEADER_VERSION;
	IOCInitMsg->SystemRequestFrameSize = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4;
	IOCInitMsg->ReplyDescriptorPostQueueDepth = sc->reply_q_depth;
	IOCInitMsg->ReplyDescriptorPostQueueAddress = sc->reply_desc_phys_addr;
	IOCInitMsg->SystemRequestFrameBaseAddress = sc->io_request_phys_addr;

	/* MFI wrapper frame at offset 0; 0xFF marks "no response yet". */
	init_frame = (struct mrsas_init_frame *)sc->ioc_init_mem;
	init_frame->cmd = MFI_CMD_INIT;
	init_frame->cmd_status = 0xFF;
	init_frame->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	/* Report the driver version string to firmware when available. */
	if (sc->verbuf_mem) {
		ksnprintf((char *)sc->verbuf_mem, strlen(MRSAS_VERSION)+2,"%s\n",
		    MRSAS_VERSION);
		init_frame->driver_ver_lo = (bus_addr_t)sc->verbuf_phys_addr;
		init_frame->driver_ver_hi = 0;
	}

	phys_addr = (bus_addr_t)sc->ioc_init_phys_mem + 1024;
	init_frame->queue_info_new_phys_addr_lo = phys_addr;
	init_frame->data_xfer_len = sizeof(Mpi2IOCInitRequest_t);

	req_desc.addr.Words = (bus_addr_t)sc->ioc_init_phys_mem;
	req_desc.MFAIo.RequestFlags =
	    (MRSAS_REQ_DESCRIPT_FLAGS_MFA << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

	mrsas_disable_intr(sc);
	mrsas_dprint(sc, MRSAS_OCR, "Issuing IOC INIT command to FW.\n");
	mrsas_fire_cmd(sc, req_desc.addr.u.low, req_desc.addr.u.high);

	/*
	 * Poll response timer to wait for Firmware response. While this
	 * timer with the DELAY call could block CPU, the time interval for
	 * this is only 1 millisecond.
	 */
	if (init_frame->cmd_status == 0xFF) {
		for (i=0; i < (max_wait * 1000); i++){
			if (init_frame->cmd_status == 0xFF)
				DELAY(1000);
			else
				break;
		}
	}

	if (init_frame->cmd_status == 0)
		mrsas_dprint(sc, MRSAS_OCR,
		    "IOC INIT response received from FW.\n");
	else
	{
		/* Still 0xFF: the FW never answered; anything else is a
		 * firmware-reported failure code. */
		if (init_frame->cmd_status == 0xFF)
			device_printf(sc->mrsas_dev, "IOC Init timed out after %d seconds.\n", max_wait);
		else
			device_printf(sc->mrsas_dev, "IOC Init failed, status = 0x%x\n", init_frame->cmd_status);
		retcode = 1;
	}

	/* The init buffer is one-shot; release it regardless of outcome. */
	mrsas_free_ioc_cmd(sc);
	return (retcode);
}
1994
1995/**
1996 * mrsas_alloc_mpt_cmds: Allocates the command packets
1997 * input: Adapter instance soft state
1998 *
1999 * This function allocates the internal commands for IOs. Each command that is
2000 * issued to FW is wrapped in a local data structure called mrsas_mpt_cmd.
2001 * An array is allocated with mrsas_mpt_cmd context. The free commands are
2002 * maintained in a linked list (cmd pool). SMID value range is from 1 to
2003 * max_fw_cmds.
2004 */
int mrsas_alloc_mpt_cmds(struct mrsas_softc *sc)
{
	int i, j;
	u_int32_t max_cmd;
	struct mrsas_mpt_cmd *cmd;
	pMpi2ReplyDescriptorsUnion_t reply_desc;
	u_int32_t offset, chain_offset, sense_offset;
	bus_addr_t io_req_base_phys, chain_frame_base_phys, sense_base_phys;
	u_int8_t *io_req_base, *chain_frame_base, *sense_base;

	max_cmd = sc->max_fw_cmds;

	/* One request descriptor slot per FW command. */
	sc->req_desc = kmalloc(sc->request_alloc_sz, M_MRSAS, M_NOWAIT);
	if (!sc->req_desc) {
		device_printf(sc->mrsas_dev, "Out of memory, cannot alloc req desc\n");
		return(ENOMEM);
	}
	memset(sc->req_desc, 0, sc->request_alloc_sz);

	/*
	 * sc->mpt_cmd_list is an array of struct mrsas_mpt_cmd pointers. Allocate the
	 * dynamic array first and then allocate individual commands.
	 */
	sc->mpt_cmd_list = kmalloc(sizeof(struct mrsas_mpt_cmd*)*max_cmd, M_MRSAS, M_NOWAIT);
	if (!sc->mpt_cmd_list) {
		device_printf(sc->mrsas_dev, "Cannot alloc memory for mpt_cmd_list.\n");
		return(ENOMEM);
	}
	memset(sc->mpt_cmd_list, 0, sizeof(struct mrsas_mpt_cmd *)*max_cmd);
	for (i = 0; i < max_cmd; i++) {
		sc->mpt_cmd_list[i] = kmalloc(sizeof(struct mrsas_mpt_cmd),
		    M_MRSAS, M_NOWAIT);
		if (!sc->mpt_cmd_list[i]) {
			/*
			 * Unwind the entries allocated so far and NULL the
			 * list so later teardown does not walk freed memory.
			 */
			for (j = 0; j < i; j++)
				kfree(sc->mpt_cmd_list[j],M_MRSAS);
			kfree(sc->mpt_cmd_list, M_MRSAS);
			sc->mpt_cmd_list = NULL;
			return(ENOMEM);
		}
	}

	/*
	 * Carve the pre-allocated DMA regions into per-command slices:
	 * one IO request frame (the first frame is reserved, hence the
	 * +FRAME_SIZE offset), one 1KB chain frame and one sense buffer
	 * per command.
	 */
	io_req_base = (u_int8_t*)sc->io_request_mem + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
	io_req_base_phys = (bus_addr_t)sc->io_request_phys_addr + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
	chain_frame_base = (u_int8_t*)sc->chain_frame_mem;
	chain_frame_base_phys = (bus_addr_t)sc->chain_frame_phys_addr;
	sense_base = (u_int8_t*)sc->sense_mem;
	sense_base_phys = (bus_addr_t)sc->sense_phys_addr;
	for (i = 0; i < max_cmd; i++) {
		cmd = sc->mpt_cmd_list[i];
		offset = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
		chain_offset = 1024 * i;
		sense_offset = MRSAS_SENSE_LEN * i;
		memset(cmd, 0, sizeof(struct mrsas_mpt_cmd));
		/* SMIDs are 1-based; index 0 is reserved. */
		cmd->index = i + 1;
		cmd->ccb_ptr = NULL;
		callout_init(&cmd->cm_callout);
		/* MRSAS_ULONG_MAX marks "not a sync (MFI passthru) command". */
		cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
		cmd->sc = sc;
		cmd->io_request = (MRSAS_RAID_SCSI_IO_REQUEST *) (io_req_base + offset);
		memset(cmd->io_request, 0, sizeof(MRSAS_RAID_SCSI_IO_REQUEST));
		cmd->io_request_phys_addr = io_req_base_phys + offset;
		cmd->chain_frame = (MPI2_SGE_IO_UNION *) (chain_frame_base + chain_offset);
		cmd->chain_frame_phys_addr = chain_frame_base_phys + chain_offset;
		cmd->sense = sense_base + sense_offset;
		cmd->sense_phys_addr = sense_base_phys + sense_offset;
		if (bus_dmamap_create(sc->data_tag, 0, &cmd->data_dmamap)) {
			/*
			 * NOTE(review): this path frees nothing (req_desc,
			 * cmd list, earlier dmamaps all leak here) —
			 * presumably the caller's attach-failure teardown
			 * reclaims them; confirm against the caller.
			 */
			return(FAIL);
		}
		TAILQ_INSERT_TAIL(&(sc->mrsas_mpt_cmd_list_head), cmd, next);
	}

	/* Initialize reply descriptor array to 0xFFFFFFFF */
	reply_desc = sc->reply_desc_mem;
	for (i = 0; i < sc->reply_q_depth; i++, reply_desc++) {
		reply_desc->Words = MRSAS_ULONG_MAX;
	}
	return(0);
}
2083
2084/**
2085 * mrsas_fire_cmd: Sends command to FW
2086 * input: Adapter soft state
2087 * request descriptor address low
2088 * request descriptor address high
2089 *
2090 * This functions fires the command to Firmware by writing to the
2091 * inbound_low_queue_port and inbound_high_queue_port.
2092 */
void mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
    u_int32_t req_desc_hi)
{
	/*
	 * The controller consumes the descriptor as a low/high register
	 * pair; pci_lock prevents concurrent submitters from interleaving
	 * their writes to the two queue-port registers.
	 */
	lockmgr(&sc->pci_lock, LK_EXCLUSIVE);
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_low_queue_port),
	    req_desc_lo);
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_high_queue_port),
	    req_desc_hi);
	lockmgr(&sc->pci_lock, LK_RELEASE);
}
2103
2104/**
2105 * mrsas_transition_to_ready: Move FW to Ready state
2106 * input: Adapter instance soft state
2107 *
 * During initialization, the FW can potentially be in any one of
 * several possible states. If the FW is in the operational or
 * waiting-for-handshake state, the driver must take steps to bring it
 * to the ready state. Otherwise, it has to wait for the ready state.
2112 */
int mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr)
{
	int i;
	u_int8_t max_wait;
	u_int32_t val, fw_state;
	u_int32_t cur_state;
	u_int32_t abs_state, curr_abs_state;

	/* FW state lives in the low bits of the outbound scratch pad. */
	val = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
	fw_state = val & MFI_STATE_MASK;
	max_wait = MRSAS_RESET_WAIT_TIME;

	if (fw_state != MFI_STATE_READY)
		device_printf(sc->mrsas_dev, "Waiting for FW to come to ready state\n");

	while (fw_state != MFI_STATE_READY) {
		/* Full (unmasked) register value, used below to detect change. */
		abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
		switch (fw_state) {
		case MFI_STATE_FAULT:
			device_printf(sc->mrsas_dev, "FW is in FAULT state!!\n");
			/* During OCR a FAULTed FW is expected; otherwise give up. */
			if (ocr) {
				cur_state = MFI_STATE_FAULT;
				break;
			}
			else
				return -ENODEV;
		case MFI_STATE_WAIT_HANDSHAKE:
			/* Set the CLR bit in inbound doorbell */
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
			    MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG);
			cur_state = MFI_STATE_WAIT_HANDSHAKE;
			break;
		case MFI_STATE_BOOT_MESSAGE_PENDING:
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
			    MFI_INIT_HOTPLUG);
			cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
			break;
		case MFI_STATE_OPERATIONAL:
			/* Bring it to READY state; assuming max wait 10 secs */
			mrsas_disable_intr(sc);
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), MFI_RESET_FLAGS);
			/* Wait for the doorbell's busy bit (bit 0) to clear. */
			for (i=0; i < max_wait * 1000; i++) {
				if (mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell)) & 1)
					DELAY(1000);
				else
					break;
			}
			cur_state = MFI_STATE_OPERATIONAL;
			break;
		case MFI_STATE_UNDEFINED:
			/* This state should not last for more than 2 seconds */
			cur_state = MFI_STATE_UNDEFINED;
			break;
		/* The remaining states are transient boot stages: just wait. */
		case MFI_STATE_BB_INIT:
			cur_state = MFI_STATE_BB_INIT;
			break;
		case MFI_STATE_FW_INIT:
			cur_state = MFI_STATE_FW_INIT;
			break;
		case MFI_STATE_FW_INIT_2:
			cur_state = MFI_STATE_FW_INIT_2;
			break;
		case MFI_STATE_DEVICE_SCAN:
			cur_state = MFI_STATE_DEVICE_SCAN;
			break;
		case MFI_STATE_FLUSH_CACHE:
			cur_state = MFI_STATE_FLUSH_CACHE;
			break;
		default:
			device_printf(sc->mrsas_dev, "Unknown state 0x%x\n", fw_state);
			return -ENODEV;
		}

		/*
		 * The cur_state should not last for more than max_wait secs
		 */
		for (i = 0; i < (max_wait * 1000); i++) {
			fw_state = (mrsas_read_reg(sc, offsetof(mrsas_reg_set,
			    outbound_scratch_pad))& MFI_STATE_MASK);
			curr_abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
			    outbound_scratch_pad));
			if (abs_state == curr_abs_state)
				DELAY(1000);
			else
				break;
		}

		/*
		 * Return error if fw_state hasn't changed after max_wait
		 */
		if (curr_abs_state == abs_state) {
			device_printf(sc->mrsas_dev, "FW state [%d] hasn't changed "
			    "in %d secs\n", fw_state, max_wait);
			return -ENODEV;
		}
	}
	mrsas_dprint(sc, MRSAS_OCR, "FW now in Ready state\n");
	return 0;
}
2213
2214/**
2215 * mrsas_get_mfi_cmd: Get a cmd from free command pool
2216 * input: Adapter soft state
2217 *
2218 * This function removes an MFI command from the command list.
2219 */
2220struct mrsas_mfi_cmd* mrsas_get_mfi_cmd(struct mrsas_softc *sc)
2221{
2222 struct mrsas_mfi_cmd *cmd = NULL;
2223
2224 lockmgr(&sc->mfi_cmd_pool_lock, LK_EXCLUSIVE);
2225 if (!TAILQ_EMPTY(&sc->mrsas_mfi_cmd_list_head)){
2226 cmd = TAILQ_FIRST(&sc->mrsas_mfi_cmd_list_head);
2227 TAILQ_REMOVE(&sc->mrsas_mfi_cmd_list_head, cmd, next);
2228 }
2229 lockmgr(&sc->mfi_cmd_pool_lock, LK_RELEASE);
2230
2231 return cmd;
2232}
2233
2234/**
2235 * mrsas_ocr_thread Thread to handle OCR/Kill Adapter.
2236 * input: Adapter Context.
2237 *
 * This function checks the FW status register and the
 * do_timedout_reset flag. It performs an OCR or kills the adapter if
 * the FW is in the fault state or an IO timeout has triggered a reset.
2241 */
static void
mrsas_ocr_thread(void *arg)
{
	struct mrsas_softc *sc;
	u_int32_t fw_status, fw_state;

	sc = (struct mrsas_softc *)arg;

	mrsas_dprint(sc, MRSAS_TRACE, "%s\n", __func__);

	/* Flag checked at detach time so teardown can wait for this thread. */
	sc->ocr_thread_active = 1;
	lockmgr(&sc->sim_lock, LK_EXCLUSIVE);
	for (;;) {
		/* Sleep for 1 second and check the queue status*/
		lksleep(&sc->ocr_chan, &sc->sim_lock, 0,
		    "mrsas_ocr", sc->mrsas_fw_fault_check_delay * hz);
		if (sc->remove_in_progress) {
			mrsas_dprint(sc, MRSAS_OCR,
			    "Exit due to shutdown from %s\n", __func__);
			break;
		}
		/* Read the FW state from the outbound scratch pad register. */
		fw_status = mrsas_read_reg(sc,
		    offsetof(mrsas_reg_set, outbound_scratch_pad));
		fw_state = fw_status & MFI_STATE_MASK;
		/* Reset on FW fault, or when an IO timeout requested it. */
		if (fw_state == MFI_STATE_FAULT || sc->do_timedout_reset) {
			device_printf(sc->mrsas_dev, "OCR started due to %s!\n",
			    sc->do_timedout_reset?"IO Timeout":
			    "FW fault detected");
			/* ioctl_lock guards the reset bookkeeping fields. */
			spin_lock(&sc->ioctl_lock);
			sc->reset_in_progress = 1;
			sc->reset_count++;
			spin_unlock(&sc->ioctl_lock);
			/* Freeze CAM traffic for the duration of the reset. */
			mrsas_xpt_freeze(sc);
			mrsas_reset_ctrl(sc);
			mrsas_xpt_release(sc);
			sc->reset_in_progress = 0;
			sc->do_timedout_reset = 0;
		}
	}
	lockmgr(&sc->sim_lock, LK_RELEASE);
	sc->ocr_thread_active = 0;
	kthread_exit();
}
2285
2286/**
2287 * mrsas_reset_reply_desc Reset Reply descriptor as part of OCR.
2288 * input: Adapter Context.
2289 *
 * This function clears the reply descriptors so that, after an OCR,
 * the driver and FW discard the old history.
2292 */
2293void mrsas_reset_reply_desc(struct mrsas_softc *sc)
2294{
2295 int i;
2296 pMpi2ReplyDescriptorsUnion_t reply_desc;
2297
2298 sc->last_reply_idx = 0;
2299 reply_desc = sc->reply_desc_mem;
2300 for (i = 0; i < sc->reply_q_depth; i++, reply_desc++) {
2301 reply_desc->Words = MRSAS_ULONG_MAX;
2302 }
2303}
2304
2305/**
2306 * mrsas_reset_ctrl Core function to OCR/Kill adapter.
2307 * input: Adapter Context.
2308 *
2309 * This function will run from thread context so that it can sleep.
2310 * 1. Do not handle OCR if FW is in HW critical error.
2311 * 2. Wait for outstanding command to complete for 180 seconds.
2312 * 3. If #2 does not find any outstanding command Controller is in working
2313 * state, so skip OCR.
2314 * Otherwise, do OCR/kill Adapter based on flag disableOnlineCtrlReset.
2315 * 4. Start of the OCR, return all SCSI command back to CAM layer which has
2316 * ccb_ptr.
 * 5. Post OCR, re-fire management commands and move the controller to
 *    the operational state.
2319 */
int mrsas_reset_ctrl(struct mrsas_softc *sc)
{
	int retval = SUCCESS, i, j, retry = 0;
	u_int32_t host_diag, abs_state, status_reg, reset_adapter;
	union ccb *ccb;
	struct mrsas_mfi_cmd *mfi_cmd;
	struct mrsas_mpt_cmd *mpt_cmd;
	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;

	/* 1. Never attempt OCR once the HBA has been declared dead. */
	if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) {
		device_printf(sc->mrsas_dev,
		    "mrsas: Hardware critical error, returning FAIL.\n");
		return FAIL;
	}

	set_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
	sc->adprecovery = MRSAS_ADPRESET_SM_INFAULT;
	mrsas_disable_intr(sc);
	DELAY(1000 * 1000);

	/* First try waiting for commands to complete */
	if (mrsas_wait_for_outstanding(sc)) {
		mrsas_dprint(sc, MRSAS_OCR,
		    "resetting adapter from %s.\n",
		    __func__);
		/* Now return commands back to the CAM layer */
		for (i = 0 ; i < sc->max_fw_cmds; i++) {
			mpt_cmd = sc->mpt_cmd_list[i];
			if (mpt_cmd->ccb_ptr) {
				ccb = (union ccb *)(mpt_cmd->ccb_ptr);
				ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
				mrsas_cmd_done(sc, mpt_cmd);
				atomic_dec(&sc->fw_outstanding);
			}
		}

		/*
		 * A FAULTed FW without the RESET_ADAPTER bit set cannot be
		 * reset; same if the controller config disabled online reset.
		 */
		status_reg = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
		    outbound_scratch_pad));
		abs_state = status_reg & MFI_STATE_MASK;
		reset_adapter = status_reg & MFI_RESET_ADAPTER;
		if (sc->disableOnlineCtrlReset ||
		    (abs_state == MFI_STATE_FAULT && !reset_adapter)) {
			/* Reset not supported, kill adapter */
			mrsas_dprint(sc, MRSAS_OCR,"Reset not supported, killing adapter.\n");
			mrsas_kill_hba(sc);
			sc->adprecovery = MRSAS_HW_CRITICAL_ERROR;
			retval = FAIL;
			goto out;
		}

		/* Now try to reset the chip */
		for (i = 0; i < MRSAS_FUSION_MAX_RESET_TRIES; i++) {
			/*
			 * Magic 6-key write sequence that unlocks the fusion
			 * diagnostic register.
			 */
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
			    MPI2_WRSEQ_FLUSH_KEY_VALUE);
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
			    MPI2_WRSEQ_1ST_KEY_VALUE);
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
			    MPI2_WRSEQ_2ND_KEY_VALUE);
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
			    MPI2_WRSEQ_3RD_KEY_VALUE);
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
			    MPI2_WRSEQ_4TH_KEY_VALUE);
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
			    MPI2_WRSEQ_5TH_KEY_VALUE);
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
			    MPI2_WRSEQ_6TH_KEY_VALUE);

			/* Check that the diag write enable (DRWE) bit is on */
			host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
			    fusion_host_diag));
			retry = 0;
			/* Poll up to ~10s (100 * 100ms) for DRWE. */
			while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) {
				DELAY(100 * 1000);
				host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
				    fusion_host_diag));
				if (retry++ == 100) {
					mrsas_dprint(sc, MRSAS_OCR,
					    "Host diag unlock failed!\n");
					break;
				}
			}
			if (!(host_diag & HOST_DIAG_WRITE_ENABLE))
				continue;

			/* Send chip reset command */
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_host_diag),
			    host_diag | HOST_DIAG_RESET_ADAPTER);
			DELAY(3000 * 1000);

			/* Make sure reset adapter bit is cleared */
			host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
			    fusion_host_diag));
			retry = 0;
			/* Poll up to ~100s (1000 * 100ms) for the bit to drop. */
			while (host_diag & HOST_DIAG_RESET_ADAPTER) {
				DELAY(100 * 1000);
				host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
				    fusion_host_diag));
				if (retry++ == 1000) {
					mrsas_dprint(sc, MRSAS_OCR,
					    "Diag reset adapter never cleared!\n");
					break;
				}
			}
			if (host_diag & HOST_DIAG_RESET_ADAPTER)
				continue;

			/* Wait for the FW to get past its earliest boot stage. */
			abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
			    outbound_scratch_pad)) & MFI_STATE_MASK;
			retry = 0;

			while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
				DELAY(100 * 1000);
				abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
				    outbound_scratch_pad)) & MFI_STATE_MASK;
			}
			if (abs_state <= MFI_STATE_FW_INIT) {
				mrsas_dprint(sc, MRSAS_OCR, "firmware state < MFI_STATE_FW_INIT,"
				    " state = 0x%x\n", abs_state);
				continue;
			}

			/* Wait for FW to become ready */
			if (mrsas_transition_to_ready(sc, 1)) {
				mrsas_dprint(sc, MRSAS_OCR,
				    "mrsas: Failed to transition controller to ready.\n");
				continue;
			}

			/* Re-initialize: clear reply ring, then IOC INIT. */
			mrsas_reset_reply_desc(sc);
			if (mrsas_ioc_init(sc)) {
				mrsas_dprint(sc, MRSAS_OCR, "mrsas_ioc_init() failed!\n");
				continue;
			}

			clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
			mrsas_enable_intr(sc);
			sc->adprecovery = MRSAS_HBA_OPERATIONAL;

			/* Re-fire management commands */
			for (j = 0 ; j < sc->max_fw_cmds; j++) {
				mpt_cmd = sc->mpt_cmd_list[j];
				/* sync_cmd_idx != ULONG_MAX means an MFI passthru was pending. */
				if (mpt_cmd->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
					mfi_cmd = sc->mfi_cmd_list[mpt_cmd->sync_cmd_idx];
					/* Map-get requests are stale after reset; just release. */
					if (mfi_cmd->frame->dcmd.opcode ==
					    MR_DCMD_LD_MAP_GET_INFO) {
						mrsas_release_mfi_cmd(mfi_cmd);
						mrsas_release_mpt_cmd(mpt_cmd);
					} else {
						req_desc = mrsas_get_request_desc(sc,
						    mfi_cmd->cmd_id.context.smid - 1);
						mrsas_dprint(sc, MRSAS_OCR,
						    "Re-fire command DCMD opcode 0x%x index %d\n ",
						    mfi_cmd->frame->dcmd.opcode, j);
						if (!req_desc)
							device_printf(sc->mrsas_dev,
							    "Cannot build MPT cmd.\n");
						else
							mrsas_fire_cmd(sc, req_desc->addr.u.low,
							    req_desc->addr.u.high);
					}
				}
			}

			/* Reset load balance info */
			memset(sc->load_balance_info, 0,
			    sizeof(LD_LOAD_BALANCE_INFO) * MAX_LOGICAL_DRIVES);

			/* Refresh the RAID map and tell the FW about it. */
			if (!mrsas_get_map_info(sc))
				mrsas_sync_map_info(sc);

			/* Adapter reset completed successfully */
			device_printf(sc->mrsas_dev, "Reset successful\n");
			retval = SUCCESS;
			goto out;
		}
		/* Reset failed, kill the adapter */
		device_printf(sc->mrsas_dev, "Reset failed, killing adapter.\n");
		mrsas_kill_hba(sc);
		retval = FAIL;
	} else {
		/* All IOs drained on their own; no chip reset needed. */
		clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
		mrsas_enable_intr(sc);
		sc->adprecovery = MRSAS_HBA_OPERATIONAL;
	}
out:
	clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
	mrsas_dprint(sc, MRSAS_OCR,
	    "Reset Exit with %d.\n", retval);
	return retval;
}
2510
2511/**
2512 * mrsas_kill_hba Kill HBA when OCR is not supported.
2513 * input: Adapter Context.
2514 *
2515 * This function will kill HBA when OCR is not supported.
2516 */
void mrsas_kill_hba (struct mrsas_softc *sc)
{
	mrsas_dprint(sc, MRSAS_OCR, "%s\n", __func__);
	/* Tell the firmware to stop; the adapter is unusable afterwards. */
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
	    MFI_STOP_ADP);
	/* Flush */
	mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell));
}
2525
2526/**
2527 * mrsas_wait_for_outstanding Wait for outstanding commands
2528 * input: Adapter Context.
2529 *
2530 * This function will wait for 180 seconds for outstanding
2531 * commands to be completed.
2532 */
2533int mrsas_wait_for_outstanding(struct mrsas_softc *sc)
2534{
2535 int i, outstanding, retval = 0;
2536 u_int32_t fw_state;
2537
2538 for (i = 0; i < MRSAS_RESET_WAIT_TIME; i++) {
2539 if (sc->remove_in_progress) {
2540 mrsas_dprint(sc, MRSAS_OCR,
2541 "Driver remove or shutdown called.\n");
2542 retval = 1;
2543 goto out;
2544 }
2545 /* Check if firmware is in fault state */
2546 fw_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
2547 outbound_scratch_pad)) & MFI_STATE_MASK;
2548 if (fw_state == MFI_STATE_FAULT) {
2549 mrsas_dprint(sc, MRSAS_OCR,
2550 "Found FW in FAULT state, will reset adapter.\n");
2551 retval = 1;
2552 goto out;
2553 }
2554 outstanding = atomic_read(&sc->fw_outstanding);
2555 if (!outstanding)
2556 goto out;
2557
2558 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
2559 mrsas_dprint(sc, MRSAS_OCR, "[%2d]waiting for %d "
2560 "commands to complete\n",i,outstanding);
2561 mrsas_complete_cmd(sc);
2562 }
2563 DELAY(1000 * 1000);
2564 }
2565
2566 if (atomic_read(&sc->fw_outstanding)) {
2567 mrsas_dprint(sc, MRSAS_OCR,
2568 " pending commands remain after waiting,"
2569 " will reset adapter.\n");
2570 retval = 1;
2571 }
2572out:
2573 return retval;
2574}
2575
2576/**
2577 * mrsas_release_mfi_cmd: Return a cmd to free command pool
2578 * input: Command packet for return to free cmd pool
2579 *
2580 * This function returns the MFI command to the command list.
2581 */
2582void mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd)
2583{
2584 struct mrsas_softc *sc = cmd->sc;
2585
2586 lockmgr(&sc->mfi_cmd_pool_lock, LK_EXCLUSIVE);
2587 cmd->ccb_ptr = NULL;
2588 cmd->cmd_id.frame_count = 0;
2589 TAILQ_INSERT_TAIL(&(sc->mrsas_mfi_cmd_list_head), cmd, next);
2590 lockmgr(&sc->mfi_cmd_pool_lock, LK_RELEASE);
2591
2592 return;
2593}
2594
2595/**
2596 * mrsas_get_controller_info - Returns FW's controller structure
2597 * input: Adapter soft state
2598 * Controller information structure
2599 *
2600 * Issues an internal command (DCMD) to get the FW's controller structure.
2601 * This information is mainly used to find out the maximum IO transfer per
2602 * command supported by the FW.
2603 */
2604static int mrsas_get_ctrl_info(struct mrsas_softc *sc,
2605 struct mrsas_ctrl_info *ctrl_info)
2606{
2607 int retcode = 0;
2608 struct mrsas_mfi_cmd *cmd;
2609 struct mrsas_dcmd_frame *dcmd;
2610
2611 cmd = mrsas_get_mfi_cmd(sc);
2612
2613 if (!cmd) {
2614 device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
2615 return -ENOMEM;
2616 }
2617 dcmd = &cmd->frame->dcmd;
2618
2619 if (mrsas_alloc_ctlr_info_cmd(sc) != SUCCESS) {
2620 device_printf(sc->mrsas_dev, "Cannot allocate get ctlr info cmd\n");
2621 mrsas_release_mfi_cmd(cmd);
2622 return -ENOMEM;
2623 }
2624 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2625
2626 dcmd->cmd = MFI_CMD_DCMD;
2627 dcmd->cmd_status = 0xFF;
2628 dcmd->sge_count = 1;
2629 dcmd->flags = MFI_FRAME_DIR_READ;
2630 dcmd->timeout = 0;
2631 dcmd->pad_0 = 0;
2632 dcmd->data_xfer_len = sizeof(struct mrsas_ctrl_info);
2633 dcmd->opcode = MR_DCMD_CTRL_GET_INFO;
2634 dcmd->sgl.sge32[0].phys_addr = sc->ctlr_info_phys_addr;
2635 dcmd->sgl.sge32[0].length = sizeof(struct mrsas_ctrl_info);
2636
2637 if (!mrsas_issue_polled(sc, cmd))
2638 memcpy(ctrl_info, sc->ctlr_info_mem, sizeof(struct mrsas_ctrl_info));
2639 else
2640 retcode = 1;
2641
2642 mrsas_free_ctlr_info_cmd(sc);
2643 mrsas_release_mfi_cmd(cmd);
2644 return(retcode);
2645}
2646
2647/**
2648 * mrsas_alloc_ctlr_info_cmd: Allocates memory for controller info command
2649 * input: Adapter soft state
2650 *
2651 * Allocates DMAable memory for the controller info internal command.
2652 */
2653int mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc)
2654{
2655 int ctlr_info_size;
2656
2657 /* Allocate get controller info command */
2658 ctlr_info_size = sizeof(struct mrsas_ctrl_info);
2659 if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
2660 1, 0, // algnmnt, boundary
2661 BUS_SPACE_MAXADDR_32BIT,// lowaddr
2662 BUS_SPACE_MAXADDR, // highaddr
2663 NULL, NULL, // filter, filterarg
2664 ctlr_info_size, // maxsize
2665 1, // msegments
2666 ctlr_info_size, // maxsegsize
2667 BUS_DMA_ALLOCNOW, // flags
2668 &sc->ctlr_info_tag)) {
2669 device_printf(sc->mrsas_dev, "Cannot allocate ctlr info tag\n");
2670 return (ENOMEM);
2671 }
2672 if (bus_dmamem_alloc(sc->ctlr_info_tag, (void **)&sc->ctlr_info_mem,
2673 BUS_DMA_NOWAIT, &sc->ctlr_info_dmamap)) {
2674 device_printf(sc->mrsas_dev, "Cannot allocate ctlr info cmd mem\n");
2675 return (ENOMEM);
2676 }
2677 if (bus_dmamap_load(sc->ctlr_info_tag, sc->ctlr_info_dmamap,
2678 sc->ctlr_info_mem, ctlr_info_size, mrsas_addr_cb,
2679 &sc->ctlr_info_phys_addr, BUS_DMA_NOWAIT)) {
2680 device_printf(sc->mrsas_dev, "Cannot load ctlr info cmd mem\n");
2681 return (ENOMEM);
2682 }
2683
2684 memset(sc->ctlr_info_mem, 0, ctlr_info_size);
2685 return (0);
2686}
2687
2688/**
2689 * mrsas_free_ctlr_info_cmd: Free memory for controller info command
2690 * input: Adapter soft state
2691 *
2692 * Deallocates memory of the get controller info cmd.
2693 */
2694void mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc)
2695{
2696 if (sc->ctlr_info_phys_addr)
2697 bus_dmamap_unload(sc->ctlr_info_tag, sc->ctlr_info_dmamap);
2698 if (sc->ctlr_info_mem != NULL)
2699 bus_dmamem_free(sc->ctlr_info_tag, sc->ctlr_info_mem, sc->ctlr_info_dmamap);
2700 if (sc->ctlr_info_tag != NULL)
2701 bus_dma_tag_destroy(sc->ctlr_info_tag);
2702}
2703
2704/**
2705 * mrsas_issue_polled: Issues a polling command
2706 * inputs: Adapter soft state
2707 * Command packet to be issued
2708 *
2709 * This function is for posting of internal commands to Firmware. MFI
2710 * requires the cmd_status to be set to 0xFF before posting. The maximun
2711 * wait time of the poll response timer is 180 seconds.
2712 */
2713int mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
2714{
2715 struct mrsas_header *frame_hdr = &cmd->frame->hdr;
2716 u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
2717 int i, retcode = 0;
2718
2719 frame_hdr->cmd_status = 0xFF;
2720 frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2721
2722 /* Issue the frame using inbound queue port */
2723 if (mrsas_issue_dcmd(sc, cmd)) {
2724 device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
2725 return(1);
2726 }
2727
2728 /*
2729 * Poll response timer to wait for Firmware response. While this
2730 * timer with the DELAY call could block CPU, the time interval for
2731 * this is only 1 millisecond.
2732 */
2733 if (frame_hdr->cmd_status == 0xFF) {
2734 for (i=0; i < (max_wait * 1000); i++){
2735 if (frame_hdr->cmd_status == 0xFF)
2736 DELAY(1000);
2737 else
2738 break;
2739 }
2740 }
2741 if (frame_hdr->cmd_status != 0)
2742 {
2743 if (frame_hdr->cmd_status == 0xFF)
2744 device_printf(sc->mrsas_dev, "DCMD timed out after %d seconds.\n", max_wait);
2745 else
2746 device_printf(sc->mrsas_dev, "DCMD failed, status = 0x%x\n", frame_hdr->cmd_status);
2747 retcode = 1;
2748 }
2749 return(retcode);
2750}
2751
2752/**
2753 * mrsas_issue_dcmd - Issues a MFI Pass thru cmd
2754 * input: Adapter soft state
2755 * mfi cmd pointer
2756 *
2757 * This function is called by mrsas_issued_blocked_cmd() and
2758 * mrsas_issued_polled(), to build the MPT command and then fire the
2759 * command to Firmware.
2760 */
2761int
2762mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
2763{
2764 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
2765
2766 req_desc = mrsas_build_mpt_cmd(sc, cmd);
2767 if (!req_desc) {
2768 device_printf(sc->mrsas_dev, "Cannot build MPT cmd.\n");
2769 return(1);
2770 }
2771
2772 mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high);
2773
2774 return(0);
2775}
2776
2777/**
2778 * mrsas_build_mpt_cmd - Calls helper function to build Passthru cmd
2779 * input: Adapter soft state
2780 * mfi cmd to build
2781 *
2782 * This function is called by mrsas_issue_cmd() to build the MPT-MFI
2783 * passthru command and prepares the MPT command to send to Firmware.
2784 */
2785MRSAS_REQUEST_DESCRIPTOR_UNION *
2786mrsas_build_mpt_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
2787{
2788 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
2789 u_int16_t index;
2790
2791 if (mrsas_build_mptmfi_passthru(sc, cmd)) {
2792 device_printf(sc->mrsas_dev, "Cannot build MPT-MFI passthru cmd.\n");
2793 return NULL;
2794 }
2795
2796 index = cmd->cmd_id.context.smid;
2797
2798 req_desc = mrsas_get_request_desc(sc, index-1);
2799 if(!req_desc)
2800 return NULL;
2801
2802 req_desc->addr.Words = 0;
2803 req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2804
2805 req_desc->SCSIIO.SMID = index;
2806
2807 return(req_desc);
2808}
2809
2810/**
2811 * mrsas_build_mptmfi_passthru - Builds a MPT MFI Passthru command
2812 * input: Adapter soft state
2813 * mfi cmd pointer
2814 *
2815 * The MPT command and the io_request are setup as a passthru command.
2816 * The SGE chain address is set to frame_phys_addr of the MFI command.
2817 */
u_int8_t
mrsas_build_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *mfi_cmd)
{
	MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
	PTR_MRSAS_RAID_SCSI_IO_REQUEST io_req;
	struct mrsas_mpt_cmd *mpt_cmd;
	struct mrsas_header *frame_hdr = &mfi_cmd->frame->hdr;

	/* Returns 1 (failure) if the MPT command pool is exhausted. */
	mpt_cmd = mrsas_get_mpt_cmd(sc);
	if (!mpt_cmd)
		return(1);

	/* Save the smid. To be used for returning the cmd */
	mfi_cmd->cmd_id.context.smid = mpt_cmd->index;

	/* Cross-link the two commands so completion can find the MFI cmd. */
	mpt_cmd->sync_cmd_idx = mfi_cmd->index;

	/*
	 * For cmds where the flag is set, store the flag and check
	 * on completion. For cmds with this flag, don't call
	 * mrsas_complete_cmd.
	 */

	if (frame_hdr->flags & MFI_FRAME_DONT_POST_IN_REPLY_QUEUE)
		mpt_cmd->flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	io_req = mpt_cmd->io_request;

	/* Invader/Fury require the last inline SGE's flags cleared. */
	if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) {
		pMpi25IeeeSgeChain64_t sgl_ptr_end = (pMpi25IeeeSgeChain64_t) &io_req->SGL;
		sgl_ptr_end += sc->max_sge_in_main_msg - 1;
		sgl_ptr_end->Flags = 0;
	}

	mpi25_ieee_chain = (MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL.IeeeChain;

	io_req->Function = MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
	io_req->SGLOffset0 = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 4;
	io_req->ChainOffset = sc->chain_offset_mfi_pthru;

	/* Chain the SGL directly to the MFI frame's physical address. */
	mpi25_ieee_chain->Address = mfi_cmd->frame_phys_addr;

	mpi25_ieee_chain->Flags= IEEE_SGE_FLAGS_CHAIN_ELEMENT |
	    MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;

	mpi25_ieee_chain->Length = MRSAS_MAX_SZ_CHAIN_FRAME;

	return(0);
}
2867
2868/**
2869 * mrsas_issue_blocked_cmd - Synchronous wrapper around regular FW cmds
2870 * input: Adapter soft state
2871 * Command to be issued
2872 *
2873 * This function waits on an event for the command to be returned
2874 * from the ISR. Max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME secs.
2875 * Used for issuing internal and ioctl commands.
2876 */
int mrsas_issue_blocked_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
	u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
	unsigned long total_time = 0;
	int retcode = 0;

	/*
	 * Initialize cmd_status; the completion path overwrites it, which
	 * is what the wait loop below watches for.
	 */
	cmd->cmd_status = ECONNREFUSED;

	/* Build MPT-MFI command for issue to FW */
	if (mrsas_issue_dcmd(sc, cmd)){
		device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
		return(1);
	}

	/*
	 * NOTE(review): this stores the address of the local 'cmd' pointer
	 * variable, yet the tsleep below waits on &sc->chan itself, so
	 * sc->chan appears to serve only as an opaque wakeup token —
	 * confirm against the wakeup side before changing.
	 */
	sc->chan = (void*)&cmd;

	/* Sleep in 1-second (hz-tick) slices until completion or timeout. */
	while (1) {
		if (cmd->cmd_status == ECONNREFUSED){
			tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
		}
		else
			break;
		total_time++;
		if (total_time >= max_wait) {
			device_printf(sc->mrsas_dev, "Internal command timed out after %d seconds.\n", max_wait);
			retcode = 1;
			break;
		}
	}
	return(retcode);
}
2913
2914/**
2915 * mrsas_complete_mptmfi_passthru - Completes a command
2916 * input: sc: Adapter soft state
2917 * cmd: Command to be completed
2918 * status: cmd completion status
2919 *
2920 * This function is called from mrsas_complete_cmd() after an interrupt
2921 * is received from Firmware, and io_request->Function is
2922 * MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST.
2923 */
2924void
2925mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd,
2926 u_int8_t status)
2927{
2928 struct mrsas_header *hdr = &cmd->frame->hdr;
2929 u_int8_t cmd_status = cmd->frame->hdr.cmd_status;
2930
2931 /* Reset the retry counter for future re-tries */
2932 cmd->retry_for_fw_reset = 0;
2933
2934 if (cmd->ccb_ptr)
2935 cmd->ccb_ptr = NULL;
2936
2937 switch (hdr->cmd) {
2938 case MFI_CMD_INVALID:
2939 device_printf(sc->mrsas_dev, "MFI_CMD_INVALID command.\n");
2940 break;
2941 case MFI_CMD_PD_SCSI_IO:
2942 case MFI_CMD_LD_SCSI_IO:
2943 /*
2944 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
2945 * issued either through an IO path or an IOCTL path. If it
2946 * was via IOCTL, we will send it to internal completion.
2947 */
2948 if (cmd->sync_cmd) {
2949 cmd->sync_cmd = 0;
2950 mrsas_wakeup(sc, cmd);
2951 break;
2952 }
2953 case MFI_CMD_SMP:
2954 case MFI_CMD_STP:
2955 case MFI_CMD_DCMD:
2956 /* Check for LD map update */
2957 if ((cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO) &&
2958 (cmd->frame->dcmd.mbox.b[1] == 1)) {
2959 sc->fast_path_io = 0;
2960 lockmgr(&sc->raidmap_lock, LK_EXCLUSIVE);
2961 if (cmd_status != 0) {
2962 if (cmd_status != MFI_STAT_NOT_FOUND)
2963 device_printf(sc->mrsas_dev, "map sync failed, status=%x\n",cmd_status);
2964 else {
2965 mrsas_release_mfi_cmd(cmd);
2966 lockmgr(&sc->raidmap_lock, LK_RELEASE);
2967 break;
2968 }
2969 }
2970 else
2971 sc->map_id++;
2972 mrsas_release_mfi_cmd(cmd);
2973 if (MR_ValidateMapInfo(sc))
2974 sc->fast_path_io = 0;
2975 else
2976 sc->fast_path_io = 1;
2977 mrsas_sync_map_info(sc);
2978 lockmgr(&sc->raidmap_lock, LK_RELEASE);
2979 break;
2980 }
2981#if 0 //currently not supporting event handling, so commenting out
2982 if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
2983 cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) {
2984 mrsas_poll_wait_aen = 0;
2985 }
2986#endif
2987 /* See if got an event notification */
2988 if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT)
2989 mrsas_complete_aen(sc, cmd);
2990 else
2991 mrsas_wakeup(sc, cmd);
2992 break;
2993 case MFI_CMD_ABORT:
2994 /* Command issued to abort another cmd return */
2995 mrsas_complete_abort(sc, cmd);
2996 break;
2997 default:
2998 device_printf(sc->mrsas_dev,"Unknown command completed! [0x%X]\n", hdr->cmd);
2999 break;
3000 }
3001}
3002
3003/**
3004 * mrsas_wakeup - Completes an internal command
3005 * input: Adapter soft state
3006 * Command to be completed
3007 *
3008 * In mrsas_issue_blocked_cmd(), after a command is issued to Firmware,
3009 * a wait timer is started. This function is called from
3010 * mrsas_complete_mptmfi_passthru() as it completes the command,
3011 * to wake up from the command wait.
3012 */
3013void mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3014{
3015 cmd->cmd_status = cmd->frame->io.cmd_status;
3016
3017 if (cmd->cmd_status == ECONNREFUSED)
3018 cmd->cmd_status = 0;
3019
3020 /* For debug only ... */
3021 //device_printf(sc->mrsas_dev,"DCMD rec'd for wakeup, sc->chan=%p\n", sc->chan);
3022
3023 sc->chan = (void*)&cmd;
3024 wakeup_one((void *)&sc->chan);
3025 return;
3026}
3027
3028/**
3029 * mrsas_shutdown_ctlr: Instructs FW to shutdown the controller
3030 * input: Adapter soft state
3031 * Shutdown/Hibernate
3032 *
3033 * This function issues a DCMD internal command to Firmware to initiate
3034 * shutdown of the controller.
3035 */
3036static void mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode)
3037{
3038 struct mrsas_mfi_cmd *cmd;
3039 struct mrsas_dcmd_frame *dcmd;
3040
3041 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
3042 return;
3043
3044 cmd = mrsas_get_mfi_cmd(sc);
3045 if (!cmd) {
3046 device_printf(sc->mrsas_dev,"Cannot allocate for shutdown cmd.\n");
3047 return;
3048 }
3049
3050 if (sc->aen_cmd)
3051 mrsas_issue_blocked_abort_cmd(sc, sc->aen_cmd);
3052
3053 if (sc->map_update_cmd)
3054 mrsas_issue_blocked_abort_cmd(sc, sc->map_update_cmd);
3055
3056 dcmd = &cmd->frame->dcmd;
3057 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3058
3059 dcmd->cmd = MFI_CMD_DCMD;
3060 dcmd->cmd_status = 0x0;
3061 dcmd->sge_count = 0;
3062 dcmd->flags = MFI_FRAME_DIR_NONE;
3063 dcmd->timeout = 0;
3064 dcmd->pad_0 = 0;
3065 dcmd->data_xfer_len = 0;
3066 dcmd->opcode = opcode;
3067
3068 device_printf(sc->mrsas_dev,"Preparing to shut down controller.\n");
3069
3070 mrsas_issue_blocked_cmd(sc, cmd);
3071 mrsas_release_mfi_cmd(cmd);
3072
3073 return;
3074}
3075
3076/**
3077 * mrsas_flush_cache: Requests FW to flush all its caches
3078 * input: Adapter soft state
3079 *
3080 * This function is issues a DCMD internal command to Firmware to initiate
3081 * flushing of all caches.
3082 */
3083static void mrsas_flush_cache(struct mrsas_softc *sc)
3084{
3085 struct mrsas_mfi_cmd *cmd;
3086 struct mrsas_dcmd_frame *dcmd;
3087
3088 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
3089 return;
3090
3091 cmd = mrsas_get_mfi_cmd(sc);
3092 if (!cmd) {
3093 device_printf(sc->mrsas_dev,"Cannot allocate for flush cache cmd.\n");
3094 return;
3095 }
3096
3097 dcmd = &cmd->frame->dcmd;
3098 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3099
3100 dcmd->cmd = MFI_CMD_DCMD;
3101 dcmd->cmd_status = 0x0;
3102 dcmd->sge_count = 0;
3103 dcmd->flags = MFI_FRAME_DIR_NONE;
3104 dcmd->timeout = 0;
3105 dcmd->pad_0 = 0;
3106 dcmd->data_xfer_len = 0;
3107 dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH;
3108 dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
3109
3110 mrsas_issue_blocked_cmd(sc, cmd);
3111 mrsas_release_mfi_cmd(cmd);
3112
3113 return;
3114}
3115
3116/**
3117 * mrsas_get_map_info: Load and validate RAID map
3118 * input: Adapter instance soft state
3119 *
3120 * This function calls mrsas_get_ld_map_info() and MR_ValidateMapInfo()
3121 * to load and validate RAID map. It returns 0 if successful, 1 other-
3122 * wise.
3123 */
3124static int mrsas_get_map_info(struct mrsas_softc *sc)
3125{
3126 uint8_t retcode = 0;
3127
3128 sc->fast_path_io = 0;
3129 if (!mrsas_get_ld_map_info(sc)) {
3130 retcode = MR_ValidateMapInfo(sc);
3131 if (retcode == 0) {
3132 sc->fast_path_io = 1;
3133 return 0;
3134 }
3135 }
3136 return 1;
3137}
3138
3139/**
3140 * mrsas_get_ld_map_info: Get FW's ld_map structure
3141 * input: Adapter instance soft state
3142 *
3143 * Issues an internal command (DCMD) to get the FW's controller PD
3144 * list structure.
3145 */
3146static int mrsas_get_ld_map_info(struct mrsas_softc *sc)
3147{
3148 int retcode = 0;
3149 struct mrsas_mfi_cmd *cmd;
3150 struct mrsas_dcmd_frame *dcmd;
3151 MR_FW_RAID_MAP_ALL *map;
3152 bus_addr_t map_phys_addr = 0;
3153
3154 cmd = mrsas_get_mfi_cmd(sc);
3155 if (!cmd) {
3156 device_printf(sc->mrsas_dev, "Cannot alloc for ld map info cmd.\n");
3157 return 1;
3158 }
3159
3160 dcmd = &cmd->frame->dcmd;
3161
3162 map = sc->raidmap_mem[(sc->map_id & 1)];
3163 map_phys_addr = sc->raidmap_phys_addr[(sc->map_id & 1)];
3164 if (!map) {
3165 device_printf(sc->mrsas_dev, "Failed to alloc mem for ld map info.\n");
3166 mrsas_release_mfi_cmd(cmd);
3167 return (ENOMEM);
3168 }
3169 memset(map, 0, sizeof(*map));
3170 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3171
3172 dcmd->cmd = MFI_CMD_DCMD;
3173 dcmd->cmd_status = 0xFF;
3174 dcmd->sge_count = 1;
3175 dcmd->flags = MFI_FRAME_DIR_READ;
3176 dcmd->timeout = 0;
3177 dcmd->pad_0 = 0;
3178 dcmd->data_xfer_len = sc->map_sz;
3179 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
3180 dcmd->sgl.sge32[0].phys_addr = map_phys_addr;
3181 dcmd->sgl.sge32[0].length = sc->map_sz;
3182 if (!mrsas_issue_polled(sc, cmd))
3183 retcode = 0;
3184 else
3185 {
3186 device_printf(sc->mrsas_dev, "Fail to send get LD map info cmd.\n");
3187 retcode = 1;
3188 }
3189 mrsas_release_mfi_cmd(cmd);
3190 return(retcode);
3191}
3192
3193/**
3194 * mrsas_sync_map_info: Get FW's ld_map structure
3195 * input: Adapter instance soft state
3196 *
3197 * Issues an internal command (DCMD) to get the FW's controller PD
3198 * list structure.
3199 */
3200static int mrsas_sync_map_info(struct mrsas_softc *sc)
3201{
3202 int retcode = 0, i;
3203 struct mrsas_mfi_cmd *cmd;
3204 struct mrsas_dcmd_frame *dcmd;
3205 uint32_t size_sync_info, num_lds;
3206 MR_LD_TARGET_SYNC *target_map = NULL;
3207 MR_FW_RAID_MAP_ALL *map;
3208 MR_LD_RAID *raid;
3209 MR_LD_TARGET_SYNC *ld_sync;
3210 bus_addr_t map_phys_addr = 0;
3211
3212 cmd = mrsas_get_mfi_cmd(sc);
3213 if (!cmd) {
3214 device_printf(sc->mrsas_dev, "Cannot alloc for sync map info cmd\n");
3215 return 1;
3216 }
3217
3218 map = sc->raidmap_mem[sc->map_id & 1];
3219 num_lds = map->raidMap.ldCount;
3220
3221 dcmd = &cmd->frame->dcmd;
3222 size_sync_info = sizeof(MR_LD_TARGET_SYNC) * num_lds;
3223 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3224
3225 target_map = (MR_LD_TARGET_SYNC *)sc->raidmap_mem[(sc->map_id - 1) & 1];
3226 memset(target_map, 0, sizeof(MR_FW_RAID_MAP_ALL));
3227
3228 map_phys_addr = sc->raidmap_phys_addr[(sc->map_id - 1) & 1];
3229
3230 ld_sync = (MR_LD_TARGET_SYNC *)target_map;
3231
3232 for (i = 0; i < num_lds; i++, ld_sync++) {
3233 raid = MR_LdRaidGet(i, map);
3234 ld_sync->targetId = MR_GetLDTgtId(i, map);
3235 ld_sync->seqNum = raid->seqNum;
3236 }
3237
3238 dcmd->cmd = MFI_CMD_DCMD;
3239 dcmd->cmd_status = 0xFF;
3240 dcmd->sge_count = 1;
3241 dcmd->flags = MFI_FRAME_DIR_WRITE;
3242 dcmd->timeout = 0;
3243 dcmd->pad_0 = 0;
3244 dcmd->data_xfer_len = sc->map_sz;
3245 dcmd->mbox.b[0] = num_lds;
3246 dcmd->mbox.b[1] = MRSAS_DCMD_MBOX_PEND_FLAG;
3247 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
3248 dcmd->sgl.sge32[0].phys_addr = map_phys_addr;
3249 dcmd->sgl.sge32[0].length = sc->map_sz;
3250
3251 sc->map_update_cmd = cmd;
3252 if (mrsas_issue_dcmd(sc, cmd)) {
3253 device_printf(sc->mrsas_dev, "Fail to send sync map info command.\n");
3254 return(1);
3255 }
3256 return(retcode);
3257}
3258
3259/**
3260 * mrsas_get_pd_list: Returns FW's PD list structure
3261 * input: Adapter soft state
3262 *
3263 * Issues an internal command (DCMD) to get the FW's controller PD
3264 * list structure. This information is mainly used to find out about
3265 * system supported by Firmware.
3266 */
3267static int mrsas_get_pd_list(struct mrsas_softc *sc)
3268{
3269 int retcode = 0, pd_index = 0, pd_count=0, pd_list_size;
3270 struct mrsas_mfi_cmd *cmd;
3271 struct mrsas_dcmd_frame *dcmd;
3272 struct MR_PD_LIST *pd_list_mem;
3273 struct MR_PD_ADDRESS *pd_addr;
3274 bus_addr_t pd_list_phys_addr = 0;
3275 struct mrsas_tmp_dcmd *tcmd;
3276
3277 cmd = mrsas_get_mfi_cmd(sc);
3278 if (!cmd) {
3279 device_printf(sc->mrsas_dev, "Cannot alloc for get PD list cmd\n");
3280 return 1;
3281 }
3282
3283 dcmd = &cmd->frame->dcmd;
3284
3285 tcmd = kmalloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
3286 pd_list_size = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
3287 if (mrsas_alloc_tmp_dcmd(sc, tcmd, pd_list_size) != SUCCESS) {
3288 device_printf(sc->mrsas_dev, "Cannot alloc dmamap for get PD list cmd\n");
3289 mrsas_release_mfi_cmd(cmd);
3290 return(ENOMEM);
3291 }
3292 else {
3293 pd_list_mem = tcmd->tmp_dcmd_mem;
3294 pd_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
3295 }
3296 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3297
3298 dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
3299 dcmd->mbox.b[1] = 0;
3300 dcmd->cmd = MFI_CMD_DCMD;
3301 dcmd->cmd_status = 0xFF;
3302 dcmd->sge_count = 1;
3303 dcmd->flags = MFI_FRAME_DIR_READ;
3304 dcmd->timeout = 0;
3305 dcmd->pad_0 = 0;
3306 dcmd->data_xfer_len = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
3307 dcmd->opcode = MR_DCMD_PD_LIST_QUERY;
3308 dcmd->sgl.sge32[0].phys_addr = pd_list_phys_addr;
3309 dcmd->sgl.sge32[0].length = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
3310
3311 if (!mrsas_issue_polled(sc, cmd))
3312 retcode = 0;
3313 else
3314 retcode = 1;
3315
3316 /* Get the instance PD list */
3317 pd_count = MRSAS_MAX_PD;
3318 pd_addr = pd_list_mem->addr;
3319 if (retcode == 0 && pd_list_mem->count < pd_count) {
3320 memset(sc->local_pd_list, 0, MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
3321 for (pd_index = 0; pd_index < pd_list_mem->count; pd_index++) {
3322 sc->local_pd_list[pd_addr->deviceId].tid = pd_addr->deviceId;
3323 sc->local_pd_list[pd_addr->deviceId].driveType = pd_addr->scsiDevType;
3324 sc->local_pd_list[pd_addr->deviceId].driveState = MR_PD_STATE_SYSTEM;
3325 pd_addr++;
3326 }
3327 }
3328
3329 /* Use mutext/spinlock if pd_list component size increase more than 32 bit. */
3330 memcpy(sc->pd_list, sc->local_pd_list, sizeof(sc->local_pd_list));
3331 mrsas_free_tmp_dcmd(tcmd);
3332 mrsas_release_mfi_cmd(cmd);
3333 kfree(tcmd, M_MRSAS);
3334 return(retcode);
3335}
3336
3337/**
3338 * mrsas_get_ld_list: Returns FW's LD list structure
3339 * input: Adapter soft state
3340 *
3341 * Issues an internal command (DCMD) to get the FW's controller PD
3342 * list structure. This information is mainly used to find out about
3343 * supported by the FW.
3344 */
3345static int mrsas_get_ld_list(struct mrsas_softc *sc)
3346{
3347 int ld_list_size, retcode = 0, ld_index = 0, ids = 0;
3348 struct mrsas_mfi_cmd *cmd;
3349 struct mrsas_dcmd_frame *dcmd;
3350 struct MR_LD_LIST *ld_list_mem;
3351 bus_addr_t ld_list_phys_addr = 0;
3352 struct mrsas_tmp_dcmd *tcmd;
3353
3354 cmd = mrsas_get_mfi_cmd(sc);
3355 if (!cmd) {
3356 device_printf(sc->mrsas_dev, "Cannot alloc for get LD list cmd\n");
3357 return 1;
3358 }
3359
3360 dcmd = &cmd->frame->dcmd;
3361
3362 tcmd = kmalloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
3363 ld_list_size = sizeof(struct MR_LD_LIST);
3364 if (mrsas_alloc_tmp_dcmd(sc, tcmd, ld_list_size) != SUCCESS) {
3365 device_printf(sc->mrsas_dev, "Cannot alloc dmamap for get LD list cmd\n");
3366 mrsas_release_mfi_cmd(cmd);
3367 return(ENOMEM);
3368 }
3369 else {
3370 ld_list_mem = tcmd->tmp_dcmd_mem;
3371 ld_list_phys_addr = tcmd->tmp_dcmd_phys_addr;
3372 }
3373 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3374
3375 dcmd->cmd = MFI_CMD_DCMD;
3376 dcmd->cmd_status = 0xFF;
3377 dcmd->sge_count = 1;
3378 dcmd->flags = MFI_FRAME_DIR_READ;
3379 dcmd->timeout = 0;
3380 dcmd->data_xfer_len = sizeof(struct MR_LD_LIST);
3381 dcmd->opcode = MR_DCMD_LD_GET_LIST;
3382 dcmd->sgl.sge32[0].phys_addr = ld_list_phys_addr;
3383 dcmd->sgl.sge32[0].length = sizeof(struct MR_LD_LIST);
3384 dcmd->pad_0 = 0;
3385
3386 if (!mrsas_issue_polled(sc, cmd))
3387 retcode = 0;
3388 else
3389 retcode = 1;
3390
3391 /* Get the instance LD list */
3392 if ((retcode == 0) && (ld_list_mem->ldCount <= (MAX_LOGICAL_DRIVES))){
3393 sc->CurLdCount = ld_list_mem->ldCount;
3394 memset(sc->ld_ids, 0xff, MRSAS_MAX_LD);
3395 for (ld_index = 0; ld_index < ld_list_mem->ldCount; ld_index++) {
3396 if (ld_list_mem->ldList[ld_index].state != 0) {
3397 ids = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
3398 sc->ld_ids[ids] = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
3399 }
3400 }
3401 }
3402
3403 mrsas_free_tmp_dcmd(tcmd);
3404 mrsas_release_mfi_cmd(cmd);
3405 kfree(tcmd, M_MRSAS);
3406 return(retcode);
3407}
3408
3409/**
3410 * mrsas_alloc_tmp_dcmd: Allocates memory for temporary command
3411 * input: Adapter soft state
3412 * Temp command
3413 * Size of alloction
3414 *
3415 * Allocates DMAable memory for a temporary internal command. The allocated
3416 * memory is initialized to all zeros upon successful loading of the dma
3417 * mapped memory.
3418 */
3419int mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, struct mrsas_tmp_dcmd *tcmd,
3420 int size)
3421{
3422 if (bus_dma_tag_create( sc->mrsas_parent_tag, // parent
3423 1, 0, // algnmnt, boundary
3424 BUS_SPACE_MAXADDR_32BIT,// lowaddr
3425 BUS_SPACE_MAXADDR, // highaddr
3426 NULL, NULL, // filter, filterarg
3427 size, // maxsize
3428 1, // msegments
3429 size, // maxsegsize
3430 BUS_DMA_ALLOCNOW, // flags
3431 &tcmd->tmp_dcmd_tag)) {
3432 device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd tag\n");
3433 return (ENOMEM);
3434 }
3435 if (bus_dmamem_alloc(tcmd->tmp_dcmd_tag, (void **)&tcmd->tmp_dcmd_mem,
3436 BUS_DMA_NOWAIT, &tcmd->tmp_dcmd_dmamap)) {
3437 device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd mem\n");
3438 return (ENOMEM);
3439 }
3440 if (bus_dmamap_load(tcmd->tmp_dcmd_tag, tcmd->tmp_dcmd_dmamap,
3441 tcmd->tmp_dcmd_mem, size, mrsas_addr_cb,
3442 &tcmd->tmp_dcmd_phys_addr, BUS_DMA_NOWAIT)) {
3443 device_printf(sc->mrsas_dev, "Cannot load tmp dcmd mem\n");
3444 return (ENOMEM);
3445 }
3446
3447 memset(tcmd->tmp_dcmd_mem, 0, size);
3448 return (0);
3449}
3450
3451/**
3452 * mrsas_free_tmp_dcmd: Free memory for temporary command
3453 * input: temporary dcmd pointer
3454 *
3455 * Deallocates memory of the temporary command for use in the construction
3456 * of the internal DCMD.
3457 */
3458void mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp)
3459{
3460 if (tmp->tmp_dcmd_phys_addr)
3461 bus_dmamap_unload(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_dmamap);
3462 if (tmp->tmp_dcmd_mem != NULL)
3463 bus_dmamem_free(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_mem, tmp->tmp_dcmd_dmamap);
3464 if (tmp->tmp_dcmd_tag != NULL)
3465 bus_dma_tag_destroy(tmp->tmp_dcmd_tag);
3466}
3467
3468/**
3469 * mrsas_issue_blocked_abort_cmd: Aborts previously issued cmd
3470 * input: Adapter soft state
3471 * Previously issued cmd to be aborted
3472 *
3473 * This function is used to abort previously issued commands, such as AEN and
3474 * RAID map sync map commands. The abort command is sent as a DCMD internal
3475 * command and subsequently the driver will wait for a return status. The
3476 * max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME seconds.
3477 */
3478static int mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
3479 struct mrsas_mfi_cmd *cmd_to_abort)
3480{
3481 struct mrsas_mfi_cmd *cmd;
3482 struct mrsas_abort_frame *abort_fr;
3483 u_int8_t retcode = 0;
3484 unsigned long total_time = 0;
3485 u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
3486
3487 cmd = mrsas_get_mfi_cmd(sc);
3488 if (!cmd) {
3489 device_printf(sc->mrsas_dev, "Cannot alloc for abort cmd\n");
3490 return(1);
3491 }
3492
3493 abort_fr = &cmd->frame->abort;
3494
3495 /* Prepare and issue the abort frame */
3496 abort_fr->cmd = MFI_CMD_ABORT;
3497 abort_fr->cmd_status = 0xFF;
3498 abort_fr->flags = 0;
3499 abort_fr->abort_context = cmd_to_abort->index;
3500 abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr;
3501 abort_fr->abort_mfi_phys_addr_hi = 0;
3502
3503 cmd->sync_cmd = 1;
3504 cmd->cmd_status = 0xFF;
3505
3506 if (mrsas_issue_dcmd(sc, cmd)) {
3507 device_printf(sc->mrsas_dev, "Fail to send abort command.\n");
3508 return(1);
3509 }
3510
3511 /* Wait for this cmd to complete */
3512 sc->chan = (void*)&cmd;
3513 while (1) {
3514 if (cmd->cmd_status == 0xFF){
3515 tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
3516 }
3517 else
3518 break;
3519 total_time++;
3520 if (total_time >= max_wait) {
3521 device_printf(sc->mrsas_dev, "Abort cmd timed out after %d sec.\n", max_wait);
3522 retcode = 1;
3523 break;
3524 }
3525 }
3526
3527 cmd->sync_cmd = 0;
3528 mrsas_release_mfi_cmd(cmd);
3529 return(retcode);
3530}
3531
3532/**
3533 * mrsas_complete_abort: Completes aborting a command
3534 * input: Adapter soft state
3535 * Cmd that was issued to abort another cmd
3536 *
3537 * The mrsas_issue_blocked_abort_cmd() function waits for the command status
3538 * to change after sending the command. This function is called from
3539 * mrsas_complete_mptmfi_passthru() to wake up the sleep thread associated.
3540 */
3541void mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3542{
3543 if (cmd->sync_cmd) {
3544 cmd->sync_cmd = 0;
3545 cmd->cmd_status = 0;
3546 sc->chan = (void*)&cmd;
3547 wakeup_one((void *)&sc->chan);
3548 }
3549 return;
3550}
3551
3552/**
3553 * mrsas_aen_handler: Callback function for AEN processing from thread context.
3554 * input: Adapter soft state
3555 *
3556 */
3557void mrsas_aen_handler(struct mrsas_softc *sc)
3558{
3559 union mrsas_evt_class_locale class_locale;
3560 int doscan = 0;
3561 u_int32_t seq_num;
3562 int error;
3563
3564 if (!sc) {
3565 kprintf("invalid instance!\n");
3566 return;
3567 }
3568
3569 if (sc->evt_detail_mem) {
3570 switch (sc->evt_detail_mem->code) {
3571 case MR_EVT_PD_INSERTED:
3572 mrsas_get_pd_list(sc);
3573 mrsas_bus_scan_sim(sc, sc->sim_1);
3574 doscan = 0;
3575 break;
3576 case MR_EVT_PD_REMOVED:
3577 mrsas_get_pd_list(sc);
3578 mrsas_bus_scan_sim(sc, sc->sim_1);
3579 doscan = 0;
3580 break;
3581 case MR_EVT_LD_OFFLINE:
3582 case MR_EVT_CFG_CLEARED:
3583 case MR_EVT_LD_DELETED:
3584 mrsas_bus_scan_sim(sc, sc->sim_0);
3585 doscan = 0;
3586 break;
3587 case MR_EVT_LD_CREATED:
3588 mrsas_get_ld_list(sc);
3589 mrsas_bus_scan_sim(sc, sc->sim_0);
3590 doscan = 0;
3591 break;
3592 case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
3593 case MR_EVT_FOREIGN_CFG_IMPORTED:
3594 case MR_EVT_LD_STATE_CHANGE:
3595 doscan = 1;
3596 break;
3597 default:
3598 doscan = 0;
3599 break;
3600 }
3601 } else {
3602 device_printf(sc->mrsas_dev, "invalid evt_detail\n");
3603 return;
3604 }
3605 if (doscan) {
3606 mrsas_get_pd_list(sc);
3607 mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 1\n");
3608 mrsas_bus_scan_sim(sc, sc->sim_1);
3609 mrsas_get_ld_list(sc);
3610 mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 0\n");
3611 mrsas_bus_scan_sim(sc, sc->sim_0);
3612 }
3613
3614 seq_num = sc->evt_detail_mem->seq_num + 1;
3615
3616 // Register AEN with FW for latest sequence number plus 1
3617 class_locale.members.reserved = 0;
3618 class_locale.members.locale = MR_EVT_LOCALE_ALL;
3619 class_locale.members.class = MR_EVT_CLASS_DEBUG;
3620
3621 if (sc->aen_cmd != NULL )
3622 return ;
3623
3624 lockmgr(&sc->aen_lock, LK_EXCLUSIVE);
3625 error = mrsas_register_aen(sc, seq_num,
3626 class_locale.word);
3627 lockmgr(&sc->aen_lock, LK_RELEASE);
3628
3629 if (error)
3630 device_printf(sc->mrsas_dev, "register aen failed error %x\n", error);
3631
3632}
3633
3634
3635/**
3636 * mrsas_complete_aen: Completes AEN command
3637 * input: Adapter soft state
3638 * Cmd that was issued to abort another cmd
3639 *
3640 * This function will be called from ISR and will continue
3641 * event processing from thread context by enqueuing task
3642 * in ev_tq (callback function "mrsas_aen_handler").
3643 */
3644void mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
3645{
3646 /*
3647 * Don't signal app if it is just an aborted previously registered aen
3648 */
3649 if ((!cmd->abort_aen) && (sc->remove_in_progress == 0)) {
3650 /* TO DO (?) */
3651 }
3652 else
3653 cmd->abort_aen = 0;
3654
3655 sc->aen_cmd = NULL;
3656 mrsas_release_mfi_cmd(cmd);
3657
3658 if (!sc->remove_in_progress)
3659 taskqueue_enqueue(sc->ev_tq, &sc->ev_task);
3660
3661 return;
3662}
3663
/* newbus device interface method table for the mrsas PCI driver. */
static device_method_t mrsas_methods[] = {
	DEVMETHOD(device_probe,		mrsas_probe),
	DEVMETHOD(device_attach,	mrsas_attach),
	DEVMETHOD(device_detach,	mrsas_detach),
	DEVMETHOD(device_suspend,	mrsas_suspend),
	DEVMETHOD(device_resume,	mrsas_resume),
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),
	{ 0, 0 }	/* terminator entry */
};

static driver_t mrsas_driver = {
	"mrsas",
	mrsas_methods,
	sizeof(struct mrsas_softc)
};

/* Register the driver on the pci bus and declare module metadata. */
static devclass_t mrsas_devclass;
DRIVER_MODULE(mrsas, pci, mrsas_driver, mrsas_devclass, NULL, NULL);
MODULE_VERSION(mrsas, 1);
MODULE_DEPEND(mrsas, cam, 1, 1, 1);

3685