/*-
 * FreeBSD/CAM specific routines for LSI '909 FC adapters.
 * FreeBSD Version.
 *
 * Copyright (c) 2000, 2001 by Greg Ansley
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2002, 2006 by Matthew Jacob
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Support from Chris Ellsworth in order to make SAS adapters work
 * is gratefully acknowledged.
 *
 * Support from LSI-Logic has also gone a great deal toward making this a
 * workable subsystem and is gratefully acknowledged.
 */
/*-
 * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
 * Copyright (c) 2005, WHEEL Sp. z o.o.
 * Copyright (c) 2004, 2005 Justin T. Gibbs
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 * $FreeBSD: src/sys/dev/mpt/mpt_cam.c,v 1.68 2009/07/02 00:43:10 delphij Exp $
 */
#include <sys/cdefs.h>

#include <dev/disk/mpt/mpt.h>
#include <dev/disk/mpt/mpt_cam.h>
#include <dev/disk/mpt/mpt_raid.h>

#include "dev/disk/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
#include "dev/disk/mpt/mpilib/mpi_init.h"
#include "dev/disk/mpt/mpilib/mpi_targ.h"
#include "dev/disk/mpt/mpilib/mpi_fc.h"
#include "dev/disk/mpt/mpilib/mpi_sas.h"
#if __FreeBSD_version >= 500000
#include <sys/sysctl.h>
#endif
#include <sys/callout.h>
#include <sys/kthread.h>

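/*
 * FreeBSD 7 and later (and DragonFly) always provide the newer CAM
 * transport code, so make sure CAM_NEW_TRAN_CODE is defined here.
 */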
#if __FreeBSD_version >= 700025 || defined(__DragonFly__)
#ifndef CAM_NEW_TRAN_CODE
#define CAM_NEW_TRAN_CODE 1
#endif
#endif

static void mpt_poll(struct cam_sim *);
static timeout_t mpt_timeout;
static void mpt_action(struct cam_sim *, union ccb *);
static int
mpt_get_spi_settings(struct mpt_softc *, struct ccb_trans_settings *);
static void mpt_setwidth(struct mpt_softc *, int, int);
static void mpt_setsync(struct mpt_softc *, int, int, int);
static int mpt_update_spi_config(struct mpt_softc *, int);
static void mpt_calc_geometry(struct ccb_calc_geometry *ccg, int extended);

static mpt_reply_handler_t mpt_scsi_reply_handler;
static mpt_reply_handler_t mpt_scsi_tmf_reply_handler;
static mpt_reply_handler_t mpt_fc_els_reply_handler;
static int mpt_scsi_reply_frame_handler(struct mpt_softc *, request_t *,
    MSG_DEFAULT_REPLY *);
static int mpt_bus_reset(struct mpt_softc *, target_id_t, lun_id_t, int);
static int mpt_fc_reset_link(struct mpt_softc *, int);

static int mpt_spawn_recovery_thread(struct mpt_softc *mpt);
static void mpt_terminate_recovery_thread(struct mpt_softc *mpt);
static void mpt_recovery_thread(void *arg);
static void mpt_recover_commands(struct mpt_softc *mpt);

static int mpt_scsi_send_tmf(struct mpt_softc *, u_int, u_int, u_int,
    u_int, u_int, u_int, int);

static void mpt_fc_post_els(struct mpt_softc *mpt, request_t *, int);
static void mpt_post_target_command(struct mpt_softc *, request_t *, int);
static int mpt_add_els_buffers(struct mpt_softc *mpt);
static int mpt_add_target_commands(struct mpt_softc *mpt);
static int mpt_enable_lun(struct mpt_softc *, target_id_t, lun_id_t);
static int mpt_disable_lun(struct mpt_softc *, target_id_t, lun_id_t);
static void mpt_target_start_io(struct mpt_softc *, union ccb *);
static cam_status mpt_abort_target_ccb(struct mpt_softc *, union ccb *);
static int mpt_abort_target_cmd(struct mpt_softc *, request_t *);
static void mpt_scsi_tgt_status(struct mpt_softc *, union ccb *, request_t *,
    uint8_t, uint8_t const *);
static void
mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *, request_t *, mpt_task_mgmt_t,
    tgt_resource_t *, int);
static void mpt_tgt_dump_tgt_state(struct mpt_softc *, request_t *);
static void mpt_tgt_dump_req_state(struct mpt_softc *, request_t *);
static mpt_reply_handler_t mpt_scsi_tgt_reply_handler;
static mpt_reply_handler_t mpt_sata_pass_reply_handler;

static uint32_t scsi_io_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t scsi_tmf_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t fc_els_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t sata_pass_handler_id = MPT_HANDLER_ID_NONE;

static mpt_probe_handler_t mpt_cam_probe;
static mpt_attach_handler_t mpt_cam_attach;
static mpt_enable_handler_t mpt_cam_enable;
static mpt_ready_handler_t mpt_cam_ready;
static mpt_event_handler_t mpt_cam_event;
static mpt_reset_handler_t mpt_cam_ioc_reset;
static mpt_detach_handler_t mpt_cam_detach;

static struct mpt_personality mpt_cam_personality =
{
    .name = "mpt_cam",
    .probe = mpt_cam_probe,
    .attach = mpt_cam_attach,
    .enable = mpt_cam_enable,
    .ready = mpt_cam_ready,
    .event = mpt_cam_event,
    .reset = mpt_cam_ioc_reset,
    .detach = mpt_cam_detach,
};

DECLARE_MPT_PERSONALITY(mpt_cam, SI_ORDER_SECOND);
MODULE_DEPEND(mpt_cam, cam, 1, 1, 1);

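/*
 * Tunable controlling SATA write caching behind SAS controllers: -1 leaves
 * the drive's current setting alone, 0 disables the write cache, and any
 * other value enables it (see mpt_set_initial_config_sas() below).
 */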
int mpt_enable_sata_wc = -1;
TUNABLE_INT("hw.mpt.enable_sata_wc", &mpt_enable_sata_wc);

int
mpt_cam_probe(struct mpt_softc *mpt)
{
    int role;

    /*
     * Only attach to nodes that support the initiator or target role
     * (or want to) or have RAID physical devices that need CAM pass-thru
     * support.
     */
    if (mpt->do_cfg_role) {
        role = mpt->cfg_role;
    } else {
        role = mpt->role;
    }
    if ((role & (MPT_ROLE_TARGET|MPT_ROLE_INITIATOR)) != 0 ||
        (mpt->ioc_page2 != NULL && mpt->ioc_page2->MaxPhysDisks != 0)) {
        return (0);
    }
    return (ENODEV);
}

int
mpt_cam_attach(struct mpt_softc *mpt)
{
    struct cam_devq *devq;
    mpt_handler_t handler;
    int maxq;
    int error;

    MPT_LOCK(mpt);
    TAILQ_INIT(&mpt->request_timeout_list);
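    /*
     * The SIM queue depth is the number of requests the IOC will accept
     * (GlobalCredits), capped by the number of request structures this
     * driver allocates.
     */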
    maxq = (mpt->ioc_facts.GlobalCredits < MPT_MAX_REQUESTS(mpt))?
        mpt->ioc_facts.GlobalCredits : MPT_MAX_REQUESTS(mpt);

    handler.reply_handler = mpt_scsi_reply_handler;
    error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
        &scsi_io_handler_id);
    if (error != 0) {
        MPT_UNLOCK(mpt);
        goto cleanup;
    }

    handler.reply_handler = mpt_scsi_tmf_reply_handler;
    error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
        &scsi_tmf_handler_id);
    if (error != 0) {
        MPT_UNLOCK(mpt);
        goto cleanup;
    }

    /*
     * If we're fibre channel and could support target mode, we register
     * an ELS reply handler and give it resources.
     */
    if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
        handler.reply_handler = mpt_fc_els_reply_handler;
        error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
            &fc_els_handler_id);
        if (error != 0) {
            MPT_UNLOCK(mpt);
            goto cleanup;
        }
        if (mpt_add_els_buffers(mpt) == FALSE) {
            error = ENOMEM;
            MPT_UNLOCK(mpt);
            goto cleanup;
        }
        maxq -= mpt->els_cmds_allocated;
    }

    /*
     * If we support target mode, we register a reply handler for it,
     * but don't add command resources until we actually enable target
     * mode.
     */
    if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
        handler.reply_handler = mpt_scsi_tgt_reply_handler;
        error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
            &mpt->scsi_tgt_handler_id);
        if (error != 0) {
            MPT_UNLOCK(mpt);
            goto cleanup;
        }
    }

    if (mpt->is_sas) {
        handler.reply_handler = mpt_sata_pass_reply_handler;
        error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
            &sata_pass_handler_id);
        if (error != 0) {
            MPT_UNLOCK(mpt);
            goto cleanup;
        }
    }

    /*
     * We keep one request reserved for timeout TMF requests.
     */
    mpt->tmf_req = mpt_get_request(mpt, FALSE);
    if (mpt->tmf_req == NULL) {
        mpt_prt(mpt, "Unable to allocate dedicated TMF request!\n");
        error = ENOMEM;
        MPT_UNLOCK(mpt);
        goto cleanup;
    }

    /*
     * Mark the request as free even though not on the free list.
     * There is only one TMF request allowed to be outstanding at
     * a time and the TMF routines perform their own allocation
     * tracking using the standard state flags.
     */
    mpt->tmf_req->state = REQ_STATE_FREE;
    maxq--;

    /*
     * The rest of this is CAM foo, for which we need to drop our lock
     */
    MPT_UNLOCK(mpt);

    if (mpt_spawn_recovery_thread(mpt) != 0) {
        mpt_prt(mpt, "Unable to spawn recovery thread!\n");
        error = ENOMEM;
        goto cleanup;
    }

    /*
     * Create the device queue for our SIM(s).
     */
    devq = cam_simq_alloc(maxq);
    if (devq == NULL) {
        mpt_prt(mpt, "Unable to allocate CAM SIMQ!\n");
        error = ENOMEM;
        goto cleanup;
    }

    /*
     * Construct our SIM entry.
     */
    mpt->sim =
        mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
    if (mpt->sim == NULL) {
        mpt_prt(mpt, "Unable to allocate CAM SIM!\n");
        cam_devq_release(devq);
        error = ENOMEM;
        goto cleanup;
    }

    /*
     * Register exactly this bus.
     */
    MPT_LOCK(mpt);
    if (mpt_xpt_bus_register(mpt->sim, mpt->dev, 0) != CAM_SUCCESS) {
        mpt_prt(mpt, "Bus registration Failed!\n");
        error = ENOMEM;
        MPT_UNLOCK(mpt);
        goto cleanup;
    }

    if (xpt_create_path(&mpt->path, NULL, cam_sim_path(mpt->sim),
        CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
        mpt_prt(mpt, "Unable to allocate Path!\n");
        error = ENOMEM;
        MPT_UNLOCK(mpt);
        goto cleanup;
    }
    MPT_UNLOCK(mpt);

    /*
     * Only register a second bus for RAID physical
     * devices if the controller supports RAID.
     */
    if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
        return (0);
    }

    /*
     * Create a "bus" to export all hidden disks to CAM.
     */
    mpt->phydisk_sim =
        mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
    if (mpt->phydisk_sim == NULL) {
        mpt_prt(mpt, "Unable to allocate Physical Disk CAM SIM!\n");
        error = ENOMEM;
        goto cleanup;
    }

    /*
     * Register this bus.
     */
    MPT_LOCK(mpt);
    if (mpt_xpt_bus_register(mpt->phydisk_sim, mpt->dev, 1) !=
        CAM_SUCCESS) {
        mpt_prt(mpt, "Physical Disk Bus registration Failed!\n");
        error = ENOMEM;
        MPT_UNLOCK(mpt);
        goto cleanup;
    }

    if (xpt_create_path(&mpt->phydisk_path, NULL,
        cam_sim_path(mpt->phydisk_sim),
        CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
        mpt_prt(mpt, "Unable to allocate Physical Disk Path!\n");
        error = ENOMEM;
        MPT_UNLOCK(mpt);
        goto cleanup;
    }
    MPT_UNLOCK(mpt);
    mpt_lprt(mpt, MPT_PRT_DEBUG, "attached cam\n");
    return (0);

cleanup:
    mpt_cam_detach(mpt);
    return (error);
}

/*
 * Read FC configuration information
 */
static int
mpt_read_config_info_fc(struct mpt_softc *mpt)
{
    char *topology = NULL;
    int rv;

    rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 0,
        0, &mpt->mpt_fcport_page0.Header, FALSE, 5000);
    if (rv) {
        return (-1);
    }
    mpt_lprt(mpt, MPT_PRT_DEBUG, "FC Port Page 0 Header: %x %x %x %x\n",
        mpt->mpt_fcport_page0.Header.PageVersion,
        mpt->mpt_fcport_page0.Header.PageLength,
        mpt->mpt_fcport_page0.Header.PageNumber,
        mpt->mpt_fcport_page0.Header.PageType);

    rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_fcport_page0.Header,
        sizeof(mpt->mpt_fcport_page0), FALSE, 5000);
    if (rv) {
        mpt_prt(mpt, "failed to read FC Port Page 0\n");
        return (-1);
    }
    mpt2host_config_page_fc_port_0(&mpt->mpt_fcport_page0);

    mpt->mpt_fcport_speed = mpt->mpt_fcport_page0.CurrentSpeed;

    switch (mpt->mpt_fcport_page0.Flags &
        MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK) {
    case MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT:
        mpt->mpt_fcport_speed = 0;
        topology = "<NO LOOP>";
        break;
    case MPI_FCPORTPAGE0_FLAGS_ATTACH_POINT_TO_POINT:
        topology = "N-Port";
        break;
    case MPI_FCPORTPAGE0_FLAGS_ATTACH_PRIVATE_LOOP:
        topology = "NL-Port";
        break;
    case MPI_FCPORTPAGE0_FLAGS_ATTACH_FABRIC_DIRECT:
        topology = "F-Port";
        break;
    case MPI_FCPORTPAGE0_FLAGS_ATTACH_PUBLIC_LOOP:
        topology = "FL-Port";
        break;
    default:
        mpt->mpt_fcport_speed = 0;
        topology = "?";
        break;
    }

    mpt_lprt(mpt, MPT_PRT_INFO,
        "FC Port Page 0: Topology <%s> WWNN 0x%08x%08x WWPN 0x%08x%08x "
        "Speed %u-Gbit\n", topology,
        (unsigned)mpt->mpt_fcport_page0.WWNN.High,
        (unsigned)mpt->mpt_fcport_page0.WWNN.Low,
        (unsigned)mpt->mpt_fcport_page0.WWPN.High,
        (unsigned)mpt->mpt_fcport_page0.WWPN.Low,
        (unsigned)mpt->mpt_fcport_speed);
#if __FreeBSD_version >= 500000
    MPT_UNLOCK(mpt);
    {
        struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
        struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);

        snprintf(mpt->scinfo.fc.wwnn,
            sizeof (mpt->scinfo.fc.wwnn), "0x%08x%08x",
            mpt->mpt_fcport_page0.WWNN.High,
            mpt->mpt_fcport_page0.WWNN.Low);

        snprintf(mpt->scinfo.fc.wwpn,
            sizeof (mpt->scinfo.fc.wwpn), "0x%08x%08x",
            mpt->mpt_fcport_page0.WWPN.High,
            mpt->mpt_fcport_page0.WWPN.Low);

        SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
            "wwnn", CTLFLAG_RD, mpt->scinfo.fc.wwnn, 0,
            "World Wide Node Name");

        SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
            "wwpn", CTLFLAG_RD, mpt->scinfo.fc.wwpn, 0,
            "World Wide Port Name");

    }
    MPT_LOCK(mpt);
#endif
    return (0);
}

/*
 * Set FC configuration information.
 */
static int
mpt_set_initial_config_fc(struct mpt_softc *mpt)
{

    CONFIG_PAGE_FC_PORT_1 fc;
    U32 fl;
    int r, doit = 0;
    int role;

    r = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 1, 0,
        &fc.Header, FALSE, 5000);
    if (r) {
        mpt_prt(mpt, "failed to read FC page 1 header\n");
        return (mpt_fc_reset_link(mpt, 1));
    }

    r = mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_NVRAM, 0,
        &fc.Header, sizeof (fc), FALSE, 5000);
    if (r) {
        mpt_prt(mpt, "failed to read FC page 1\n");
        return (mpt_fc_reset_link(mpt, 1));
    }
    mpt2host_config_page_fc_port_1(&fc);

    /*
     * Check our flags to make sure we support the role we want.
     */
    doit = 0;
    role = 0;
    fl = fc.Flags;

    if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT) {
        role |= MPT_ROLE_INITIATOR;
    }
    if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
        role |= MPT_ROLE_TARGET;
    }

    fl &= ~MPI_FCPORTPAGE1_FLAGS_PROT_MASK;

    if (mpt->do_cfg_role == 0) {
        role = mpt->cfg_role;
    } else {
        mpt->do_cfg_role = 0;
    }

    if (role != mpt->cfg_role) {
        if (mpt->cfg_role & MPT_ROLE_INITIATOR) {
            if ((role & MPT_ROLE_INITIATOR) == 0) {
                mpt_prt(mpt, "adding initiator role\n");
                fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT;
                doit++;
            } else {
                mpt_prt(mpt, "keeping initiator role\n");
            }
        } else if (role & MPT_ROLE_INITIATOR) {
            mpt_prt(mpt, "removing initiator role\n");
            doit++;
        }
        if (mpt->cfg_role & MPT_ROLE_TARGET) {
            if ((role & MPT_ROLE_TARGET) == 0) {
                mpt_prt(mpt, "adding target role\n");
                fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG;
                doit++;
            } else {
                mpt_prt(mpt, "keeping target role\n");
            }
        } else if (role & MPT_ROLE_TARGET) {
            mpt_prt(mpt, "removing target role\n");
            doit++;
        }
        mpt->role = mpt->cfg_role;
    }

    if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
        if ((fl & MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID) == 0) {
            mpt_prt(mpt, "adding OXID option\n");
            fl |= MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID;
            doit++;
        }
    }

    if (doit) {
        fc.Flags = fl;
        host2mpt_config_page_fc_port_1(&fc);
        r = mpt_write_cfg_page(mpt,
            MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM, 0, &fc.Header,
            sizeof(fc), FALSE, 5000);
        if (r != 0) {
            mpt_prt(mpt, "failed to update NVRAM with changes\n");
            return (0);
        }
        mpt_prt(mpt, "NOTE: NVRAM changes will not take "
            "effect until next reboot or IOC reset\n");
    }
    return (0);
}

static int
mptsas_sas_io_unit_pg0(struct mpt_softc *mpt, struct mptsas_portinfo *portinfo)
{
    ConfigExtendedPageHeader_t hdr;
    struct mptsas_phyinfo *phyinfo;
    SasIOUnitPage0_t *buffer;
    int error, len, i;

    error = mpt_read_extcfg_header(mpt, MPI_SASIOUNITPAGE0_PAGEVERSION,
        0, 0, MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT,
        &hdr, 0, 10000);
    if (error)
        goto out;
    if (hdr.ExtPageLength == 0) {
        error = ENXIO;
        goto out;
    }

    len = hdr.ExtPageLength * 4;
    buffer = kmalloc(len, M_DEVBUF, M_NOWAIT|M_ZERO);
    if (buffer == NULL) {
        error = ENOMEM;
        goto out;
    }

    error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
        0, &hdr, buffer, len, 0, 10000);
    if (error) {
        kfree(buffer, M_DEVBUF);
        goto out;
    }

    portinfo->num_phys = buffer->NumPhys;
    portinfo->phy_info = kmalloc(sizeof(*portinfo->phy_info) *
        portinfo->num_phys, M_DEVBUF, M_NOWAIT|M_ZERO);
    if (portinfo->phy_info == NULL) {
        kfree(buffer, M_DEVBUF);
        error = ENOMEM;
        goto out;
    }

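    /*
     * Copy the per-PHY information we care about out of the page's
     * PhyData array.
     */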
    for (i = 0; i < portinfo->num_phys; i++) {
        phyinfo = &portinfo->phy_info[i];
        phyinfo->phy_num = i;
        phyinfo->port_id = buffer->PhyData[i].Port;
        phyinfo->negotiated_link_rate =
            buffer->PhyData[i].NegotiatedLinkRate;
        phyinfo->handle =
            le16toh(buffer->PhyData[i].ControllerDevHandle);
    }

    kfree(buffer, M_DEVBUF);
out:
    return (error);
}

static int
mptsas_sas_phy_pg0(struct mpt_softc *mpt, struct mptsas_phyinfo *phy_info,
    uint32_t form, uint32_t form_specific)
{
    ConfigExtendedPageHeader_t hdr;
    SasPhyPage0_t *buffer;
    int error;

    error = mpt_read_extcfg_header(mpt, MPI_SASPHY0_PAGEVERSION, 0, 0,
        MPI_CONFIG_EXTPAGETYPE_SAS_PHY, &hdr,
        0, 10000);
    if (error)
        goto out;
    if (hdr.ExtPageLength == 0) {
        error = ENXIO;
        goto out;
    }

    buffer = kmalloc(sizeof(SasPhyPage0_t), M_DEVBUF, M_NOWAIT|M_ZERO);
    if (buffer == NULL) {
        error = ENOMEM;
        goto out;
    }

    error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
        form + form_specific, &hdr, buffer,
        sizeof(SasPhyPage0_t), 0, 10000);
    if (error) {
        kfree(buffer, M_DEVBUF);
        goto out;
    }

    phy_info->hw_link_rate = buffer->HwLinkRate;
    phy_info->programmed_link_rate = buffer->ProgrammedLinkRate;
    phy_info->identify.dev_handle = le16toh(buffer->OwnerDevHandle);
    phy_info->attached.dev_handle = le16toh(buffer->AttachedDevHandle);

    kfree(buffer, M_DEVBUF);
out:
    return (error);
}

static int
mptsas_sas_device_pg0(struct mpt_softc *mpt, struct mptsas_devinfo *device_info,
    uint32_t form, uint32_t form_specific)
{
    ConfigExtendedPageHeader_t hdr;
    SasDevicePage0_t *buffer;
    uint64_t sas_address;
    int error = 0;

    bzero(device_info, sizeof(*device_info));
    error = mpt_read_extcfg_header(mpt, MPI_SASDEVICE0_PAGEVERSION, 0, 0,
        MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE,
        &hdr, 0, 10000);
    if (error)
        goto out;
    if (hdr.ExtPageLength == 0) {
        error = ENXIO;
        goto out;
    }

    buffer = kmalloc(sizeof(SasDevicePage0_t), M_DEVBUF, M_NOWAIT|M_ZERO);
    if (buffer == NULL) {
        error = ENOMEM;
        goto out;
    }

    error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
        form + form_specific, &hdr, buffer,
        sizeof(SasDevicePage0_t), 0, 10000);
    if (error) {
        kfree(buffer, M_DEVBUF);
        goto out;
    }

    device_info->dev_handle = le16toh(buffer->DevHandle);
    device_info->parent_dev_handle = le16toh(buffer->ParentDevHandle);
    device_info->enclosure_handle = le16toh(buffer->EnclosureHandle);
    device_info->slot = le16toh(buffer->Slot);
    device_info->phy_num = buffer->PhyNum;
    device_info->physical_port = buffer->PhysicalPort;
    device_info->target_id = buffer->TargetID;
    device_info->bus = buffer->Bus;
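    /*
     * Copy the SAS address out bytewise; the field in the reply buffer
     * may not be suitably aligned for a direct 64-bit load.
     */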
    bcopy(&buffer->SASAddress, &sas_address, sizeof(uint64_t));
    device_info->sas_address = le64toh(sas_address);
    device_info->device_info = le32toh(buffer->DeviceInfo);

    kfree(buffer, M_DEVBUF);
out:
    return (error);
}

/*
 * Read SAS configuration information.
 */
static int
mpt_read_config_info_sas(struct mpt_softc *mpt)
{
    struct mptsas_portinfo *portinfo;
    struct mptsas_phyinfo *phyinfo;
    int error, i;

    portinfo = kmalloc(sizeof(*portinfo), M_DEVBUF, M_NOWAIT|M_ZERO);
    if (portinfo == NULL)
        return (ENOMEM);

    error = mptsas_sas_io_unit_pg0(mpt, portinfo);
    if (error) {
        kfree(portinfo, M_DEVBUF);
        return (0);
    }

    for (i = 0; i < portinfo->num_phys; i++) {
        phyinfo = &portinfo->phy_info[i];
        error = mptsas_sas_phy_pg0(mpt, phyinfo,
            (MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER <<
            MPI_SAS_PHY_PGAD_FORM_SHIFT), i);
        if (error)
            break;
        error = mptsas_sas_device_pg0(mpt, &phyinfo->identify,
            (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
            MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
            phyinfo->handle);
        if (error)
            break;
        phyinfo->identify.phy_num = phyinfo->phy_num = i;
        if (phyinfo->attached.dev_handle)
            error = mptsas_sas_device_pg0(mpt,
                &phyinfo->attached,
                (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
                MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
                phyinfo->attached.dev_handle);
        if (error)
            break;
    }
    mpt->sas_portinfo = portinfo;
    return (0);
}

static void
mptsas_set_sata_wc(struct mpt_softc *mpt, struct mptsas_devinfo *devinfo,
    int enabled)
{
    SataPassthroughRequest_t *pass;
    request_t *req;
    int error, status;

    req = mpt_get_request(mpt, 0);
    if (req == NULL)
        return;

    pass = req->req_vbuf;
    bzero(pass, sizeof(SataPassthroughRequest_t));
    pass->Function = MPI_FUNCTION_SATA_PASSTHROUGH;
    pass->TargetID = devinfo->target_id;
    pass->Bus = devinfo->bus;
    pass->PassthroughFlags = 0;
    pass->ConnectionRate = MPI_SATA_PT_REQ_CONNECT_RATE_NEGOTIATED;
    pass->DataLength = 0;
    pass->MsgContext = htole32(req->index | sata_pass_handler_id);
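    /*
     * Hand-build a host-to-device register FIS: 0x27 is the FIS type,
     * 0x80 sets the Command bit, 0xef is the ATA SET FEATURES command,
     * and the features field selects subcommand 0x02 (enable write
     * cache) or 0x82 (disable write cache).
     */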
    pass->CommandFIS[0] = 0x27;
    pass->CommandFIS[1] = 0x80;
    pass->CommandFIS[2] = 0xef;
    pass->CommandFIS[3] = (enabled) ? 0x02 : 0x82;
    pass->CommandFIS[7] = 0x40;
    pass->CommandFIS[15] = 0x08;

    mpt_check_doorbell(mpt);
    mpt_send_cmd(mpt, req);
    error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, 0,
        10 * 1000);
    if (error) {
        mpt_free_request(mpt, req);
        kprintf("error %d sending passthrough\n", error);
        return;
    }

    status = le16toh(req->IOCStatus);
    if (status != MPI_IOCSTATUS_SUCCESS) {
        mpt_free_request(mpt, req);
        kprintf("IOCSTATUS %d\n", status);
        return;
    }

    mpt_free_request(mpt, req);
}

/*
 * Set SAS configuration information.
 */
static int
mpt_set_initial_config_sas(struct mpt_softc *mpt)
{
    struct mptsas_phyinfo *phyinfo;
    int i;

    if ((mpt_enable_sata_wc != -1) && (mpt->sas_portinfo != NULL)) {
        for (i = 0; i < mpt->sas_portinfo->num_phys; i++) {
            phyinfo = &mpt->sas_portinfo->phy_info[i];
            if (phyinfo->attached.dev_handle == 0)
                continue;
            if ((phyinfo->attached.device_info &
                MPI_SAS_DEVICE_INFO_SATA_DEVICE) == 0)
                continue;
            if (bootverbose)
                device_printf(mpt->dev,
                    "%sabling SATA WC on phy %d\n",
                    (mpt_enable_sata_wc) ? "En" : "Dis", i);
            mptsas_set_sata_wc(mpt, &phyinfo->attached,
                mpt_enable_sata_wc);
        }
    }

    return (0);
}

static int
mpt_sata_pass_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
    if (req != NULL) {

        if (reply_frame != NULL) {
            req->IOCStatus = le16toh(reply_frame->IOCStatus);
        }
        req->state &= ~REQ_STATE_QUEUED;
        req->state |= REQ_STATE_DONE;
        TAILQ_REMOVE(&mpt->request_pending_list, req, links);
        if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
            wakeup(req);
        } else if ((req->state & REQ_STATE_TIMEDOUT) != 0) {
            /*
             * Whew- we can free this request (late completion)
             */
            mpt_free_request(mpt, req);
        }
    }

    return (TRUE);
}

/*
 * Read SCSI configuration information
 */
static int
mpt_read_config_info_spi(struct mpt_softc *mpt)
{
    int rv, i;

    rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 0, 0,
        &mpt->mpt_port_page0.Header, FALSE, 5000);
    if (rv) {
        return (-1);
    }
    mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 0 Header: %x %x %x %x\n",
        mpt->mpt_port_page0.Header.PageVersion,
        mpt->mpt_port_page0.Header.PageLength,
        mpt->mpt_port_page0.Header.PageNumber,
        mpt->mpt_port_page0.Header.PageType);

    rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 1, 0,
        &mpt->mpt_port_page1.Header, FALSE, 5000);
    if (rv) {
        return (-1);
    }
    mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 1 Header: %x %x %x %x\n",
        mpt->mpt_port_page1.Header.PageVersion,
        mpt->mpt_port_page1.Header.PageLength,
        mpt->mpt_port_page1.Header.PageNumber,
        mpt->mpt_port_page1.Header.PageType);

    rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 2, 0,
        &mpt->mpt_port_page2.Header, FALSE, 5000);
    if (rv) {
        return (-1);
    }
    mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 2 Header: %x %x %x %x\n",
        mpt->mpt_port_page2.Header.PageVersion,
        mpt->mpt_port_page2.Header.PageLength,
        mpt->mpt_port_page2.Header.PageNumber,
        mpt->mpt_port_page2.Header.PageType);

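    /*
     * Read the device page headers for each of the 16 possible targets
     * on an SPI bus.
     */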
    for (i = 0; i < 16; i++) {
        rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
            0, i, &mpt->mpt_dev_page0[i].Header, FALSE, 5000);
        if (rv) {
            return (-1);
        }
        mpt_lprt(mpt, MPT_PRT_DEBUG,
            "SPI Target %d Device Page 0 Header: %x %x %x %x\n", i,
            mpt->mpt_dev_page0[i].Header.PageVersion,
            mpt->mpt_dev_page0[i].Header.PageLength,
            mpt->mpt_dev_page0[i].Header.PageNumber,
            mpt->mpt_dev_page0[i].Header.PageType);

        rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
            1, i, &mpt->mpt_dev_page1[i].Header, FALSE, 5000);
        if (rv) {
            return (-1);
        }
        mpt_lprt(mpt, MPT_PRT_DEBUG,
            "SPI Target %d Device Page 1 Header: %x %x %x %x\n", i,
            mpt->mpt_dev_page1[i].Header.PageVersion,
            mpt->mpt_dev_page1[i].Header.PageLength,
            mpt->mpt_dev_page1[i].Header.PageNumber,
            mpt->mpt_dev_page1[i].Header.PageType);
    }

    /*
     * At this point, we don't *have* to fail. As long as we have
     * valid config header information, we can (barely) lurch
     * along.
     */

    rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page0.Header,
        sizeof(mpt->mpt_port_page0), FALSE, 5000);
    if (rv) {
        mpt_prt(mpt, "failed to read SPI Port Page 0\n");
    } else {
        mpt2host_config_page_scsi_port_0(&mpt->mpt_port_page0);
        mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
            "SPI Port Page 0: Capabilities %x PhysicalInterface %x\n",
            (unsigned)mpt->mpt_port_page0.Capabilities,
            (unsigned)mpt->mpt_port_page0.PhysicalInterface);
    }

    rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page1.Header,
        sizeof(mpt->mpt_port_page1), FALSE, 5000);
    if (rv) {
        mpt_prt(mpt, "failed to read SPI Port Page 1\n");
    } else {
        mpt2host_config_page_scsi_port_1(&mpt->mpt_port_page1);
        mpt_lprt(mpt, MPT_PRT_DEBUG,
            "SPI Port Page 1: Configuration %x OnBusTimerValue %x\n",
            (unsigned)mpt->mpt_port_page1.Configuration,
            (unsigned)mpt->mpt_port_page1.OnBusTimerValue);
    }

    rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page2.Header,
        sizeof(mpt->mpt_port_page2), FALSE, 5000);
    if (rv) {
        mpt_prt(mpt, "failed to read SPI Port Page 2\n");
    } else {
        mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
            "Port Page 2: Flags %x Settings %x\n",
            (unsigned)mpt->mpt_port_page2.PortFlags,
            (unsigned)mpt->mpt_port_page2.PortSettings);
        mpt2host_config_page_scsi_port_2(&mpt->mpt_port_page2);
        for (i = 0; i < 16; i++) {
            mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
                " Port Page 2 Tgt %d: timo %x SF %x Flags %x\n",
                i, mpt->mpt_port_page2.DeviceSettings[i].Timeout,
                mpt->mpt_port_page2.DeviceSettings[i].SyncFactor,
                mpt->mpt_port_page2.DeviceSettings[i].DeviceFlags);
        }
    }

    for (i = 0; i < 16; i++) {
        rv = mpt_read_cur_cfg_page(mpt, i,
            &mpt->mpt_dev_page0[i].Header, sizeof(*mpt->mpt_dev_page0),
            FALSE, 5000);
        if (rv) {
            mpt_prt(mpt,
                "cannot read SPI Target %d Device Page 0\n", i);
            continue;
        }
        mpt2host_config_page_scsi_device_0(&mpt->mpt_dev_page0[i]);
        mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
            "target %d page 0: Negotiated Params %x Information %x\n",
            i,
            (unsigned)mpt->mpt_dev_page0[i].NegotiatedParameters,
            (unsigned)mpt->mpt_dev_page0[i].Information);

        rv = mpt_read_cur_cfg_page(mpt, i,
            &mpt->mpt_dev_page1[i].Header, sizeof(*mpt->mpt_dev_page1),
            FALSE, 5000);
        if (rv) {
            mpt_prt(mpt,
                "cannot read SPI Target %d Device Page 1\n", i);
            continue;
        }
        mpt2host_config_page_scsi_device_1(&mpt->mpt_dev_page1[i]);
        mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
            "target %d page 1: Requested Params %x Configuration %x\n",
            i,
            (unsigned)mpt->mpt_dev_page1[i].RequestedParameters,
            (unsigned)mpt->mpt_dev_page1[i].Configuration);
    }
    return (0);
}

/*
 * Validate SPI configuration information.
 *
 * In particular, validate SPI Port Page 1.
 */
static int
mpt_set_initial_config_spi(struct mpt_softc *mpt)
{
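    /*
     * SPI Port Page 1 should hold our initiator ID in the low 16 bits
     * of its Configuration word and the matching one-hot ID bit in the
     * high 16 bits; compute the value we expect to find there.
     */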
    int i, pp1val = ((1 << mpt->mpt_ini_id) << 16) | mpt->mpt_ini_id;
    int error;

    mpt->mpt_disc_enable = 0xff;
    mpt->mpt_tag_enable = 0;

    if (mpt->mpt_port_page1.Configuration != pp1val) {
        CONFIG_PAGE_SCSI_PORT_1 tmp;

        mpt_prt(mpt, "SPI Port Page 1 Config value bad (%x)- should "
1073 "be %x\n",
1074 (unsigned)mpt->mpt_port_page1.Configuration,
1075 (unsigned)pp1val);
        tmp = mpt->mpt_port_page1;
        tmp.Configuration = pp1val;
        host2mpt_config_page_scsi_port_1(&tmp);
        error = mpt_write_cur_cfg_page(mpt, 0,
            &tmp.Header, sizeof(tmp), FALSE, 5000);
        if (error) {
            return (-1);
        }
        error = mpt_read_cur_cfg_page(mpt, 0,
            &tmp.Header, sizeof(tmp), FALSE, 5000);
        if (error) {
            return (-1);
        }
        mpt2host_config_page_scsi_port_1(&tmp);
        if (tmp.Configuration != pp1val) {
            mpt_prt(mpt,
                "failed to reset SPI Port Page 1 Config value\n");
            return (-1);
        }
        mpt->mpt_port_page1 = tmp;
    }

    /*
     * The purpose of this exercise is to get
     * all targets back to async/narrow.
     *
     * We skip this step if the BIOS has already negotiated
     * speeds with the targets.
     */
    i = mpt->mpt_port_page2.PortSettings &
        MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
    if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS) {
        mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
            "honoring BIOS transfer negotiations\n");
    } else {
        for (i = 0; i < 16; i++) {
            mpt->mpt_dev_page1[i].RequestedParameters = 0;
            mpt->mpt_dev_page1[i].Configuration = 0;
            (void) mpt_update_spi_config(mpt, i);
        }
    }
    return (0);
}

int
mpt_cam_enable(struct mpt_softc *mpt)
{
    int error;

    MPT_LOCK(mpt);

    error = EIO;
    if (mpt->is_fc) {
        if (mpt_read_config_info_fc(mpt)) {
            goto out;
        }
        if (mpt_set_initial_config_fc(mpt)) {
            goto out;
        }
    } else if (mpt->is_sas) {
        if (mpt_read_config_info_sas(mpt)) {
            goto out;
        }
        if (mpt_set_initial_config_sas(mpt)) {
            goto out;
        }
    } else if (mpt->is_spi) {
        if (mpt_read_config_info_spi(mpt)) {
            goto out;
        }
        if (mpt_set_initial_config_spi(mpt)) {
            goto out;
        }
    }
    error = 0;

out:
    MPT_UNLOCK(mpt);
    return (error);
}

void
mpt_cam_ready(struct mpt_softc *mpt)
{
    /*
     * If we're in target mode, hang out resources now
     * so we don't cause the world to hang talking to us.
     */
    if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
        /*
         * Try to add some target command resources
         */
        MPT_LOCK(mpt);
        if (mpt_add_target_commands(mpt) == FALSE) {
            mpt_prt(mpt, "failed to add target commands\n");
        }
        MPT_UNLOCK(mpt);
    }
    mpt->ready = 1;
}

void
mpt_cam_detach(struct mpt_softc *mpt)
{
    mpt_handler_t handler;

    MPT_LOCK(mpt);
    mpt->ready = 0;
    mpt_terminate_recovery_thread(mpt);

    handler.reply_handler = mpt_scsi_reply_handler;
    mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
        scsi_io_handler_id);
    handler.reply_handler = mpt_scsi_tmf_reply_handler;
    mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
        scsi_tmf_handler_id);
    handler.reply_handler = mpt_fc_els_reply_handler;
    mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
        fc_els_handler_id);
    handler.reply_handler = mpt_scsi_tgt_reply_handler;
    mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
        mpt->scsi_tgt_handler_id);
    handler.reply_handler = mpt_sata_pass_reply_handler;
    mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
        sata_pass_handler_id);

    if (mpt->tmf_req != NULL) {
        mpt->tmf_req->state = REQ_STATE_ALLOCATED;
        mpt_free_request(mpt, mpt->tmf_req);
        mpt->tmf_req = NULL;
    }
    if (mpt->sas_portinfo != NULL) {
        kfree(mpt->sas_portinfo, M_DEVBUF);
        mpt->sas_portinfo = NULL;
    }
    MPT_UNLOCK(mpt);

    if (mpt->sim != NULL) {
        xpt_free_path(mpt->path);
        xpt_bus_deregister(cam_sim_path(mpt->sim));
        cam_sim_free(mpt->sim);
        mpt->sim = NULL;
    }

    if (mpt->phydisk_sim != NULL) {
        xpt_free_path(mpt->phydisk_path);
        xpt_bus_deregister(cam_sim_path(mpt->phydisk_sim));
        cam_sim_free(mpt->phydisk_sim);
        mpt->phydisk_sim = NULL;
    }
}

/*
 * This routine is used after a system crash to dump core onto the
 * swap device.
 */
static void
mpt_poll(struct cam_sim *sim)
{
    struct mpt_softc *mpt;

    mpt = (struct mpt_softc *)cam_sim_softc(sim);
    mpt_intr(mpt);
}

/*
 * Watchdog timeout routine for SCSI requests.
 */
static void
mpt_timeout(void *arg)
{
    union ccb *ccb;
    struct mpt_softc *mpt;
    request_t *req;

    ccb = (union ccb *)arg;
    mpt = ccb->ccb_h.ccb_mpt_ptr;

    MPT_LOCK(mpt);
    req = ccb->ccb_h.ccb_req_ptr;
    mpt_prt(mpt, "request %p:%u timed out for ccb %p (req->ccb %p)\n", req,
        req->serno, ccb, req->ccb);
/* XXX: WHAT ARE WE TRYING TO DO HERE? */
    if ((req->state & REQ_STATE_QUEUED) == REQ_STATE_QUEUED) {
        TAILQ_REMOVE(&mpt->request_pending_list, req, links);
        TAILQ_INSERT_TAIL(&mpt->request_timeout_list, req, links);
        req->state |= REQ_STATE_TIMEDOUT;
        mpt_wakeup_recovery_thread(mpt);
    }
    MPT_UNLOCK(mpt);
}

/*
 * Callback routine from "bus_dmamap_load" or, in simple cases, called directly.
 *
 * Takes a list of physical segments, builds the SGL for the SCSI IO command,
 * and forwards the command to the IOC after one last check that CAM has not
 * aborted the transaction.
 */
static void
mpt_execute_req_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
    request_t *req, *trq;
    char *mpt_off;
    union ccb *ccb;
    struct mpt_softc *mpt;
    int seg, first_lim;
    uint32_t flags, nxt_off;
    void *sglp = NULL;
    MSG_REQUEST_HEADER *hdrp;
    SGE_SIMPLE64 *se;
    SGE_CHAIN64 *ce;
    int istgt = 0;

    req = (request_t *)arg;
    ccb = req->ccb;

    mpt = ccb->ccb_h.ccb_mpt_ptr;
    req = ccb->ccb_h.ccb_req_ptr;

    hdrp = req->req_vbuf;
    mpt_off = req->req_vbuf;

    if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
        error = EFBIG;
    }

    if (error == 0) {
        switch (hdrp->Function) {
        case MPI_FUNCTION_SCSI_IO_REQUEST:
        case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
            istgt = 0;
            sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
            break;
        case MPI_FUNCTION_TARGET_ASSIST:
            istgt = 1;
            sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
            break;
        default:
            mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req_a64\n",
                hdrp->Function);
            error = EINVAL;
            break;
        }
    }

    if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
        error = EFBIG;
        mpt_prt(mpt, "segment count %d too large (max %u)\n",
            nseg, mpt->max_seg_cnt);
    }

bad:
    if (error != 0) {
        if (error != EFBIG && error != ENOMEM) {
            mpt_prt(mpt, "mpt_execute_req_a64: err %d\n", error);
        }
        if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
            cam_status status;
            mpt_freeze_ccb(ccb);
            if (error == EFBIG) {
                status = CAM_REQ_TOO_BIG;
            } else if (error == ENOMEM) {
                if (mpt->outofbeer == 0) {
                    mpt->outofbeer = 1;
                    xpt_freeze_simq(mpt->sim, 1);
                    mpt_lprt(mpt, MPT_PRT_DEBUG,
                        "FREEZEQ\n");
                }
                status = CAM_REQUEUE_REQ;
            } else {
                status = CAM_REQ_CMP_ERR;
            }
            mpt_set_ccb_status(ccb, status);
        }
        if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
            request_t *cmd_req =
                MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
            MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
            MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
            MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
        }
        ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
        KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
        xpt_done(ccb);
        CAMLOCK_2_MPTLOCK(mpt);
        mpt_free_request(mpt, req);
        MPTLOCK_2_CAMLOCK(mpt);
        return;
    }

    /*
     * No data to transfer?
     * Just make a single simple SGL with zero length.
     */

    if (mpt->verbose >= MPT_PRT_DEBUG) {
        int tidx = ((char *)sglp) - mpt_off;
        memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
    }

    if (nseg == 0) {
        SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
        MPI_pSGE_SET_FLAGS(se1,
            (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
            MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
        se1->FlagsLength = htole32(se1->FlagsLength);
        goto out;
    }

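    /*
     * MPI's HOST_TO_IOC flag marks data flowing out of host memory;
     * note that for target assist requests the CAM data direction is
     * reversed relative to initiator I/O.
     */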
    flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_64_BIT_ADDRESSING;
    if (istgt == 0) {
        if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
            flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
        }
    } else {
        if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
            flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
        }
    }

    if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
        bus_dmasync_op_t op;
        if (istgt == 0) {
            if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
                op = BUS_DMASYNC_PREREAD;
            } else {
                op = BUS_DMASYNC_PREWRITE;
            }
        } else {
            if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
                op = BUS_DMASYNC_PREWRITE;
            } else {
                op = BUS_DMASYNC_PREREAD;
            }
        }
        bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
    }

    /*
     * Okay, fill in what we can at the end of the command frame.
     * If we have up to MPT_NSGL_FIRST, we can fit them all into
     * the command frame.
     *
     * Otherwise, we fill up through MPT_NSGL_FIRST less one
     * SIMPLE64 pointers and start doing CHAIN64 entries after
     * that.
     */

    if (nseg < MPT_NSGL_FIRST(mpt)) {
        first_lim = nseg;
    } else {
        /*
         * Leave room for CHAIN element
         */
        first_lim = MPT_NSGL_FIRST(mpt) - 1;
    }

    se = (SGE_SIMPLE64 *) sglp;
    for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
        uint32_t tf;

        memset(se, 0, sizeof (*se));
        se->Address.Low = htole32(dm_segs->ds_addr & 0xffffffff);
        if (sizeof(bus_addr_t) > 4) {
            se->Address.High =
                htole32(((uint64_t)dm_segs->ds_addr) >> 32);
        }
        MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
        tf = flags;
        if (seg == first_lim - 1) {
            tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
        }
        if (seg == nseg - 1) {
            tf |= MPI_SGE_FLAGS_END_OF_LIST |
                MPI_SGE_FLAGS_END_OF_BUFFER;
        }
        MPI_pSGE_SET_FLAGS(se, tf);
        se->FlagsLength = htole32(se->FlagsLength);
    }

    if (seg == nseg) {
        goto out;
    }

    /*
     * Tell the IOC where to find the first chain element.
     */
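    /* Note that the ChainOffset field is expressed in 32-bit words. */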
    hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
    nxt_off = MPT_RQSL(mpt);
    trq = req;

    /*
     * Make up the rest of the data segments out of a chain element
     * (contained in the current request frame) which points to
     * SIMPLE64 elements in the next request frame, possibly ending
     * with *another* chain element (if there's more).
     */
    while (seg < nseg) {
        int this_seg_lim;
        uint32_t tf, cur_off;
        bus_addr_t chain_list_addr;

        /*
         * Point to the chain descriptor. Note that the chain
         * descriptor is at the end of the *previous* list (whether
         * chain or simple).
         */
        ce = (SGE_CHAIN64 *) se;

        /*
         * Before we change our current pointer, make sure we won't
         * overflow the request area with this frame. Note that we
         * test against 'greater than' here as it's okay in this case
         * to have next offset be just outside the request area.
         */
        if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
            nxt_off = MPT_REQUEST_AREA;
            goto next_chain;
        }

        /*
         * Set our SGE element pointer to the beginning of the chain
         * list and update our next chain list offset.
         */
        se = (SGE_SIMPLE64 *) &mpt_off[nxt_off];
        cur_off = nxt_off;
        nxt_off += MPT_RQSL(mpt);

        /*
         * Now initialize the chain descriptor.
         */
        memset(ce, 0, sizeof (*ce));

        /*
         * Get the physical address of the chain list.
         */
        chain_list_addr = trq->req_pbuf;
        chain_list_addr += cur_off;
        if (sizeof (bus_addr_t) > 4) {
            ce->Address.High =
                htole32(((uint64_t)chain_list_addr) >> 32);
        }
        ce->Address.Low = htole32(chain_list_addr & 0xffffffff);
        ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT |
            MPI_SGE_FLAGS_64_BIT_ADDRESSING;

        /*
         * If we have more than a frame's worth of segments left,
         * set up the chain list to have the last element be another
         * chain descriptor.
         */
        if ((nseg - seg) > MPT_NSGL(mpt)) {
            this_seg_lim = seg + MPT_NSGL(mpt) - 1;
            /*
             * The length of the chain is the length in bytes of the
             * number of segments plus the next chain element.
             *
             * The next chain descriptor offset is the length,
             * in words, of the number of segments.
             */
            ce->Length = (this_seg_lim - seg) *
                sizeof (SGE_SIMPLE64);
            ce->NextChainOffset = ce->Length >> 2;
            ce->Length += sizeof (SGE_CHAIN64);
        } else {
            this_seg_lim = nseg;
            ce->Length = (this_seg_lim - seg) *
                sizeof (SGE_SIMPLE64);
        }
        ce->Length = htole16(ce->Length);

        /*
         * Fill in the chain list SGE elements with our segment data.
         *
         * If we're the last element in this chain list, set the last
         * element flag. If we're the completely last element period,
         * set the end of list and end of buffer flags.
         */
        while (seg < this_seg_lim) {
            memset(se, 0, sizeof (*se));
            se->Address.Low = htole32(dm_segs->ds_addr &
                0xffffffff);
            if (sizeof (bus_addr_t) > 4) {
                se->Address.High =
                    htole32(((uint64_t)dm_segs->ds_addr) >> 32);
            }
            MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
            tf = flags;
            if (seg == this_seg_lim - 1) {
                tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
            }
            if (seg == nseg - 1) {
                tf |= MPI_SGE_FLAGS_END_OF_LIST |
                    MPI_SGE_FLAGS_END_OF_BUFFER;
            }
            MPI_pSGE_SET_FLAGS(se, tf);
            se->FlagsLength = htole32(se->FlagsLength);
            se++;
            seg++;
            dm_segs++;
        }

    next_chain:
        /*
         * If we have more segments to do and we've used up all of
         * the space in a request area, go allocate another one
         * and chain to that.
         */
        if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
            request_t *nrq;

            CAMLOCK_2_MPTLOCK(mpt);
            nrq = mpt_get_request(mpt, FALSE);
            MPTLOCK_2_CAMLOCK(mpt);

            if (nrq == NULL) {
                error = ENOMEM;
                goto bad;
            }

            /*
             * Append the new request area on the tail of our list.
             */
            if ((trq = req->chain) == NULL) {
                req->chain = nrq;
            } else {
                while (trq->chain != NULL) {
                    trq = trq->chain;
                }
                trq->chain = nrq;
            }
            trq = nrq;
            mpt_off = trq->req_vbuf;
            if (mpt->verbose >= MPT_PRT_DEBUG) {
                memset(mpt_off, 0xff, MPT_REQUEST_AREA);
            }
            nxt_off = 0;
        }
    }
out:

    /*
     * Last time we need to check if this CCB needs to be aborted.
     */
    if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
        if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
            request_t *cmd_req =
                MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
            MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
            MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
            MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
        }
        mpt_prt(mpt,
            "mpt_execute_req_a64: I/O cancelled (status 0x%x)\n",
            ccb->ccb_h.status & CAM_STATUS_MASK);
        if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
            bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
        }
        ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
        KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
        xpt_done(ccb);
        CAMLOCK_2_MPTLOCK(mpt);
        mpt_free_request(mpt, req);
        MPTLOCK_2_CAMLOCK(mpt);
        return;
    }

    ccb->ccb_h.status |= CAM_SIM_QUEUED;
    if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
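        /* CCB timeouts are expressed in milliseconds; convert to ticks. */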
        mpt_req_timeout(req, (ccb->ccb_h.timeout * hz) / 1000,
            mpt_timeout, ccb);
    }
    if (mpt->verbose > MPT_PRT_DEBUG) {
        int nc = 0;
        mpt_print_request(req->req_vbuf);
        for (trq = req->chain; trq; trq = trq->chain) {
            kprintf(" Additional Chain Area %d\n", nc++);
            mpt_dump_sgl(trq->req_vbuf, 0);
        }
    }

    if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
        request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
        mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
#ifdef WE_TRUST_AUTO_GOOD_STATUS
        if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
            ccb->csio.scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
1663 tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
1664 } else {
1665 tgt->state = TGT_STATE_MOVING_DATA;
1666 }
1667#else
1668 tgt->state = TGT_STATE_MOVING_DATA;
1669#endif
1670 }
1671 CAMLOCK_2_MPTLOCK(mpt);
1672 mpt_send_cmd(mpt, req);
1673 MPTLOCK_2_CAMLOCK(mpt);
1674}
1675
1676static void
1677mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1678{
1679 request_t *req, *trq;
1680 char *mpt_off;
1681 union ccb *ccb;
1682 struct mpt_softc *mpt;
1683 int seg, first_lim;
1684 uint32_t flags, nxt_off;
1685 void *sglp = NULL;
1686 MSG_REQUEST_HEADER *hdrp;
1687 SGE_SIMPLE32 *se;
1688 SGE_CHAIN32 *ce;
1689 int istgt = 0;
1690
1691 req = (request_t *)arg;
1692 ccb = req->ccb;
1693
1694 mpt = ccb->ccb_h.ccb_mpt_ptr;
1695 req = ccb->ccb_h.ccb_req_ptr;
1696
1697 hdrp = req->req_vbuf;
1698 mpt_off = req->req_vbuf;
1699
1700
1701 if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
1702 error = EFBIG;
1703 }
1704
1705 if (error == 0) {
1706 switch (hdrp->Function) {
1707 case MPI_FUNCTION_SCSI_IO_REQUEST:
1708 case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
1709 sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
1710 break;
1711 case MPI_FUNCTION_TARGET_ASSIST:
1712 istgt = 1;
1713 sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
1714 break;
1715 default:
1716 mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req\n",
1717 hdrp->Function);
1718 error = EINVAL;
1719 break;
1720 }
1721 }
1722
1723 if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
1724 error = EFBIG;
1725 mpt_prt(mpt, "segment count %d too large (max %u)\n",
1726 nseg, mpt->max_seg_cnt);
1727 }
1728
1729bad:
1730 if (error != 0) {
1731 if (error != EFBIG && error != ENOMEM) {
1732 mpt_prt(mpt, "mpt_execute_req: err %d\n", error);
1733 }
1734 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1735 cam_status status;
1736 mpt_freeze_ccb(ccb);
1737 if (error == EFBIG) {
1738 status = CAM_REQ_TOO_BIG;
1739 } else if (error == ENOMEM) {
1740 if (mpt->outofbeer == 0) {
1741 mpt->outofbeer = 1;
1742 xpt_freeze_simq(mpt->sim, 1);
1743 mpt_lprt(mpt, MPT_PRT_DEBUG,
1744 "FREEZEQ\n");
1745 }
1746 status = CAM_REQUEUE_REQ;
1747 } else {
1748 status = CAM_REQ_CMP_ERR;
1749 }
1750 mpt_set_ccb_status(ccb, status);
1751 }
1752 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1753 request_t *cmd_req =
1754 MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1755 MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1756 MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1757 MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1758 }
1759 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1760 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
1761 xpt_done(ccb);
1762 CAMLOCK_2_MPTLOCK(mpt);
1763 mpt_free_request(mpt, req);
1764 MPTLOCK_2_CAMLOCK(mpt);
1765 return;
1766 }
1767
1768 /*
1769 * No data to transfer?
1770 * Just make a single simple SGL with zero length.
1771 */
1772
1773 if (mpt->verbose >= MPT_PRT_DEBUG) {
1774 int tidx = ((char *)sglp) - mpt_off;
1775 memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
1776 }
1777
1778 if (nseg == 0) {
1779 SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
1780 MPI_pSGE_SET_FLAGS(se1,
1781 (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
1782 MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
1783 se1->FlagsLength = htole32(se1->FlagsLength);
1784 goto out;
1785 }
1786
1787
1788 flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
1789 if (istgt == 0) {
1790 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1791 flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1792 }
1793 } else {
1794 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1795 flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1796 }
1797 }
1798
1799 if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
1800 bus_dmasync_op_t op;
1801 if (istgt) {
1802 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1803 op = BUS_DMASYNC_PREREAD;
1804 } else {
1805 op = BUS_DMASYNC_PREWRITE;
1806 }
1807 } else {
1808 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1809 op = BUS_DMASYNC_PREWRITE;
1810 } else {
1811 op = BUS_DMASYNC_PREREAD;
1812 }
1813 }
1814 bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
1815 }
1816
1817 /*
1818 * Okay, fill in what we can at the end of the command frame.
1819 * If we have up to MPT_NSGL_FIRST, we can fit them all into
1820 * the command frame.
1821 *
1822 * Otherwise, we fill up through MPT_NSGL_FIRST less one
1823 * SIMPLE32 pointers and start doing CHAIN32 entries after
1824 * that.
1825 */
1826
1827 if (nseg < MPT_NSGL_FIRST(mpt)) {
1828 first_lim = nseg;
1829 } else {
1830 /*
1831 * Leave room for CHAIN element
1832 */
1833 first_lim = MPT_NSGL_FIRST(mpt) - 1;
1834 }
1835
1836 se = (SGE_SIMPLE32 *) sglp;
1837 for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
1838 uint32_t tf;
1839
1840 memset(se, 0,sizeof (*se));
1841 se->Address = htole32(dm_segs->ds_addr);
1842
1843
1844
1845 MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1846 tf = flags;
1847 if (seg == first_lim - 1) {
1848 tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
1849 }
1850 if (seg == nseg - 1) {
1851 tf |= MPI_SGE_FLAGS_END_OF_LIST |
1852 MPI_SGE_FLAGS_END_OF_BUFFER;
1853 }
1854 MPI_pSGE_SET_FLAGS(se, tf);
1855 se->FlagsLength = htole32(se->FlagsLength);
1856 }
1857
1858 if (seg == nseg) {
1859 goto out;
1860 }
1861
1862 /*
1863 * Tell the IOC where to find the first chain element.
1864 */
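/* Note: ChainOffset is expressed in 32-bit words, hence the >> 2. */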
1865 hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
1866 nxt_off = MPT_RQSL(mpt);
1867 trq = req;
1868
1869 /*
1870 * Make up the rest of the data segments out of a chain element
1871 * (contained in the current request frame) which points to
1872 * SIMPLE32 elements in the next request frame, possibly ending
1873 * with *another* chain element (if there's more).
1874 */
1875 while (seg < nseg) {
1876 int this_seg_lim;
1877 uint32_t tf, cur_off;
1878 bus_addr_t chain_list_addr;
1879
1880 /*
1881 * Point to the chain descriptor. Note that the chain
1882 * descriptor is at the end of the *previous* list (whether
1883 * chain or simple).
1884 */
1885 ce = (SGE_CHAIN32 *) se;
1886
1887 /*
1888 * Before we change our current pointer, make sure we won't
1889 * overflow the request area with this frame. Note that we
1890 * test against 'greater than' here as it's okay in this case
1891 * to have next offset be just outside the request area.
1892 */
1893 if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
1894 nxt_off = MPT_REQUEST_AREA;
1895 goto next_chain;
1896 }
1897
1898 /*
1899 * Set our SGE element pointer to the beginning of the chain
1900 * list and update our next chain list offset.
1901 */
1902 se = (SGE_SIMPLE32 *) &mpt_off[nxt_off];
1903 cur_off = nxt_off;
1904 nxt_off += MPT_RQSL(mpt);
1905
1906 /*
1907 * Now initialize the chain descriptor.
1908 */
1909 memset(ce, 0, sizeof (*ce));
1910
1911 /*
1912 * Get the physical address of the chain list.
1913 */
1914 chain_list_addr = trq->req_pbuf;
1915 chain_list_addr += cur_off;
1916
1917
1918
1919 ce->Address = htole32(chain_list_addr);
1920 ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
1921
1922
1923 /*
1924 * If we have more than a frame's worth of segments left,
1925 * set up the chain list to have the last element be another
1926 * chain descriptor.
1927 */
1928 if ((nseg - seg) > MPT_NSGL(mpt)) {
1929 this_seg_lim = seg + MPT_NSGL(mpt) - 1;
1930 /*
1931 * The length of the chain is the size, in bytes, of the
1932 * segments plus the trailing chain element.
1933 *
1934 * The next chain descriptor offset is the size,
1935 * in 32-bit words, of those segments.
1936 */
1937 ce->Length = (this_seg_lim - seg) *
1938 sizeof (SGE_SIMPLE32);
1939 ce->NextChainOffset = ce->Length >> 2;
1940 ce->Length += sizeof (SGE_CHAIN32);
1941 } else {
1942 this_seg_lim = nseg;
1943 ce->Length = (this_seg_lim - seg) *
1944 sizeof (SGE_SIMPLE32);
1945 }
1946 ce->Length = htole16(ce->Length);
1947
1948 /*
1949 * Fill in the chain list SGE elements with our segment data.
1950 *
1951 * If we're the last element in this chain list, set the last
1952 * element flag. If we're the completely last element period,
1953 * set the end of list and end of buffer flags.
1954 */
1955 while (seg < this_seg_lim) {
1956 memset(se, 0, sizeof (*se));
1957 se->Address = htole32(dm_segs->ds_addr);
1958
1959
1960
1961
1962 MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1963 tf = flags;
1964 if (seg == this_seg_lim - 1) {
1965 tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
1966 }
1967 if (seg == nseg - 1) {
1968 tf |= MPI_SGE_FLAGS_END_OF_LIST |
1969 MPI_SGE_FLAGS_END_OF_BUFFER;
1970 }
1971 MPI_pSGE_SET_FLAGS(se, tf);
1972 se->FlagsLength = htole32(se->FlagsLength);
1973 se++;
1974 seg++;
1975 dm_segs++;
1976 }
1977
1978 next_chain:
1979 /*
1980 * If we have more segments to do and we've used up all of
1981 * the space in a request area, go allocate another one
1982 * and chain to that.
1983 */
1984 if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
1985 request_t *nrq;
1986
1987 CAMLOCK_2_MPTLOCK(mpt);
1988 nrq = mpt_get_request(mpt, FALSE);
1989 MPTLOCK_2_CAMLOCK(mpt);
1990
1991 if (nrq == NULL) {
1992 error = ENOMEM;
1993 goto bad;
1994 }
1995
1996 /*
1997 * Append the new request area on the tail of our list.
1998 */
1999 if ((trq = req->chain) == NULL) {
2000 req->chain = nrq;
2001 } else {
2002 while (trq->chain != NULL) {
2003 trq = trq->chain;
2004 }
2005 trq->chain = nrq;
2006 }
2007 trq = nrq;
2008 mpt_off = trq->req_vbuf;
2009 if (mpt->verbose >= MPT_PRT_DEBUG) {
2010 memset(mpt_off, 0xff, MPT_REQUEST_AREA);
2011 }
2012 nxt_off = 0;
2013 }
2014 }
2015out:
2016
2017 /*
2018 * Check one last time whether this CCB needs to be aborted.
2019 */
2020 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
2021 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
2022 request_t *cmd_req =
2023 MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
2024 MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
2025 MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
2026 MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
2027 }
2028 mpt_prt(mpt,
2029 "mpt_execute_req: I/O cancelled (status 0x%x)\n",
2030 ccb->ccb_h.status & CAM_STATUS_MASK);
2031 if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
2032 bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
2033 }
2034 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2035 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
2036 xpt_done(ccb);
2037 CAMLOCK_2_MPTLOCK(mpt);
2038 mpt_free_request(mpt, req);
2039 MPTLOCK_2_CAMLOCK(mpt);
2040 return;
2041 }
2042
2043 ccb->ccb_h.status |= CAM_SIM_QUEUED;
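/* CAM timeouts are in milliseconds; convert to ticks for the callout. */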
2044 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
2045 mpt_req_timeout(req, (ccb->ccb_h.timeout * hz) / 1000,
2046 mpt_timeout, ccb);
2047 }
2048 if (mpt->verbose > MPT_PRT_DEBUG) {
2049 int nc = 0;
2050 mpt_print_request(req->req_vbuf);
2051 for (trq = req->chain; trq; trq = trq->chain) {
2052 kprintf(" Additional Chain Area %d\n", nc++);
2053 mpt_dump_sgl(trq->req_vbuf, 0);
2054 }
2055 }
2056
2057 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
2058 request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
2059 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
2060#ifdef WE_TRUST_AUTO_GOOD_STATUS
2061 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
2062 csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
2063 tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
2064 } else {
2065 tgt->state = TGT_STATE_MOVING_DATA;
2066 }
2067#else
2068 tgt->state = TGT_STATE_MOVING_DATA;
2069#endif
2070 }
2071 CAMLOCK_2_MPTLOCK(mpt);
2072 mpt_send_cmd(mpt, req);
2073 MPTLOCK_2_CAMLOCK(mpt);
2074}
2075
2076static void
2077mpt_start(struct cam_sim *sim, union ccb *ccb)
2078{
2079 request_t *req;
2080 struct mpt_softc *mpt;
2081 MSG_SCSI_IO_REQUEST *mpt_req;
2082 struct ccb_scsiio *csio = &ccb->csio;
2083 struct ccb_hdr *ccbh = &ccb->ccb_h;
2084 bus_dmamap_callback_t *cb;
2085 target_id_t tgt;
2086 int raid_passthru;
2087
2088 /* Get the pointer for the physical adapter */
2089 mpt = ccb->ccb_h.ccb_mpt_ptr;
2090 raid_passthru = (sim == mpt->phydisk_sim);
2091
2092 CAMLOCK_2_MPTLOCK(mpt);
2093 if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
2094 if (mpt->outofbeer == 0) {
2095 mpt->outofbeer = 1;
2096 xpt_freeze_simq(mpt->sim, 1);
2097 mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
2098 }
2099 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2100 mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
2101 MPTLOCK_2_CAMLOCK(mpt);
2102 xpt_done(ccb);
2103 return;
2104 }
2105#ifdef INVARIANTS
2106 mpt_req_not_spcl(mpt, req, "mpt_start", __LINE__);
2107#endif
2108 MPTLOCK_2_CAMLOCK(mpt);
2109
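/*
 * Pick the SGL construction callback based on the platform's DMA
 * address width; the test is a compile-time constant, so the unused
 * branch is optimized away.
 */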
2110 if (sizeof (bus_addr_t) > 4) {
2111 cb = mpt_execute_req_a64;
2112 } else {
2113 cb = mpt_execute_req;
2114 }
2115
2116 /*
2117 * Link the ccb and the request structure so we can find
2118 * the other knowing either the request or the ccb
2119 */
2120 req->ccb = ccb;
2121 ccb->ccb_h.ccb_req_ptr = req;
2122
2123 /* Now we build the command for the IOC */
2124 mpt_req = req->req_vbuf;
2125 memset(mpt_req, 0, sizeof (MSG_SCSI_IO_REQUEST));
2126
2127 mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
2128 if (raid_passthru) {
2129 mpt_req->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
2130 CAMLOCK_2_MPTLOCK(mpt);
2131 if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
2132 MPTLOCK_2_CAMLOCK(mpt);
2133 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2134 mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
2135 xpt_done(ccb);
2136 return;
2137 }
2138 MPTLOCK_2_CAMLOCK(mpt);
2139 mpt_req->Bus = 0; /* we never set bus here */
2140 } else {
2141 tgt = ccb->ccb_h.target_id;
2142 mpt_req->Bus = 0; /* XXX */
2143
2144 }
2145 mpt_req->SenseBufferLength =
2146 (csio->sense_len < MPT_SENSE_SIZE) ?
2147 csio->sense_len : MPT_SENSE_SIZE;
2148
2149 /*
2150 * We use the message context to find the request structure when we
2151 * get the command completion interrupt from the IOC.
2152 */
2153 mpt_req->MsgContext = htole32(req->index | scsi_io_handler_id);
2154
2155 /* Which physical device to do the I/O on */
2156 mpt_req->TargetID = tgt;
2157
2158 /* We assume a single level LUN type */
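/*
 * (LUNs of MPT_MAX_LUNS and up are presumably encoded with the SAM-2
 * flat-space format: byte 0 = 0x40 | lun<13:8>, byte 1 = lun<7:0>.)
 */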
2159 if (ccb->ccb_h.target_lun >= MPT_MAX_LUNS) {
2160 mpt_req->LUN[0] = 0x40 | ((ccb->ccb_h.target_lun >> 8) & 0x3f);
2161 mpt_req->LUN[1] = ccb->ccb_h.target_lun & 0xff;
2162 } else {
2163 mpt_req->LUN[1] = ccb->ccb_h.target_lun;
2164 }
2165
2166 /* Set the direction of the transfer */
2167 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2168 mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
2169 } else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
2170 mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
2171 } else {
2172 mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;
2173 }
2174
2175 if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
2176 switch(ccb->csio.tag_action) {
2177 case MSG_HEAD_OF_Q_TAG:
2178 mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
2179 break;
2180 case MSG_ACA_TASK:
2181 mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
2182 break;
2183 case MSG_ORDERED_Q_TAG:
2184 mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
2185 break;
2186 case MSG_SIMPLE_Q_TAG:
2187 default:
2188 mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
2189 break;
2190 }
2191 } else {
2192 if (mpt->is_fc || mpt->is_sas) {
2193 mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
2194 } else {
2195 /* XXX No such thing for a target doing packetized. */
2196 mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
2197 }
2198 }
2199
2200 if (mpt->is_spi) {
2201 if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
2202 mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;
2203 }
2204 }
2205 mpt_req->Control = htole32(mpt_req->Control);
2206
2207 /* Copy the scsi command block into place */
2208 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
2209 bcopy(csio->cdb_io.cdb_ptr, mpt_req->CDB, csio->cdb_len);
2210 } else {
2211 bcopy(csio->cdb_io.cdb_bytes, mpt_req->CDB, csio->cdb_len);
2212 }
2213
2214 mpt_req->CDBLength = csio->cdb_len;
2215 mpt_req->DataLength = htole32(csio->dxfer_len);
2216 mpt_req->SenseBufferLowAddr = htole32(req->sense_pbuf);
2217
2218 /*
2219 * Do a *short* print here if we're set to MPT_PRT_DEBUG
2220 */
2221 if (mpt->verbose == MPT_PRT_DEBUG) {
2222 U32 df;
2223 mpt_prt(mpt, "mpt_start: %s op 0x%x ",
2224 (mpt_req->Function == MPI_FUNCTION_SCSI_IO_REQUEST)?
2225 "SCSI_IO_REQUEST" : "SCSI_IO_PASSTHRU", mpt_req->CDB[0]);
2226 df = mpt_req->Control & MPI_SCSIIO_CONTROL_DATADIRECTION_MASK;
2227 if (df != MPI_SCSIIO_CONTROL_NODATATRANSFER) {
2228 mpt_prtc(mpt, "(%s %u byte%s ",
2229 (df == MPI_SCSIIO_CONTROL_READ)?
2230 "read" : "write", csio->dxfer_len,
2231 (csio->dxfer_len == 1)? ")" : "s)");
2232 }
2233 mpt_prtc(mpt, "tgt %u lun %u req %p:%u\n", tgt,
2234 ccb->ccb_h.target_lun, req, req->serno);
2235 }
2236
2237 /*
2238 * If we have any data to send with this command map it into bus space.
2239 */
2240 if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
2241 if ((ccbh->flags & CAM_SCATTER_VALID) == 0) {
2242 /*
2243 * We've been given a pointer to a single buffer.
2244 */
2245 if ((ccbh->flags & CAM_DATA_PHYS) == 0) {
2246 /*
2247 * Virtual address that needs to be translated into
2248 * one or more physical address ranges.
2249 */
2250 int error;
2251 error = bus_dmamap_load(mpt->buffer_dmat,
2252 req->dmap, csio->data_ptr, csio->dxfer_len,
2253 cb, req, 0);
2254 if (error == EINPROGRESS) {
2255 /*
2256 * So as to maintain ordering,
2257 * freeze the controller queue
2258 * until our mapping is
2259 * returned.
2260 */
2261 xpt_freeze_simq(mpt->sim, 1);
2262 ccbh->status |= CAM_RELEASE_SIMQ;
2263 }
2264 } else {
2265 /*
2266 * We have been given a pointer to single
2267 * physical buffer.
2268 */
2269 struct bus_dma_segment seg;
2270 seg.ds_addr =
2271 (bus_addr_t)(vm_offset_t)csio->data_ptr;
2272 seg.ds_len = csio->dxfer_len;
2273 (*cb)(req, &seg, 1, 0);
2274 }
2275 } else {
2276 /*
2277 * We have been given a list of addresses.
2278 * Virtual scatter/gather lists could easily be supported,
2279 * but they are not currently generated by the CAM subsystem,
2280 * so there is no point in wasting the time right now.
2281 */
2282 struct bus_dma_segment *segs;
2283 if ((ccbh->flags & CAM_SG_LIST_PHYS) == 0) {
2284 (*cb)(req, NULL, 0, EFAULT);
2285 } else {
2286 /* Just use the segments provided */
2287 segs = (struct bus_dma_segment *)csio->data_ptr;
2288 (*cb)(req, segs, csio->sglist_cnt, 0);
2289 }
2290 }
2291 } else {
2292 (*cb)(req, NULL, 0, 0);
2293 }
2294}
2295
2296static int
2297mpt_bus_reset(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun,
2298 int sleep_ok)
2299{
2300 int error;
2301 uint16_t status;
2302 uint8_t response;
2303
2304 error = mpt_scsi_send_tmf(mpt,
2305 (tgt != CAM_TARGET_WILDCARD || lun != CAM_LUN_WILDCARD) ?
2306 MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET :
2307 MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
2308 mpt->is_fc ? MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION : 0,
2309 0, /* XXX How do I get the channel ID? */
2310 tgt != CAM_TARGET_WILDCARD ? tgt : 0,
2311 lun != CAM_LUN_WILDCARD ? lun : 0,
2312 0, sleep_ok);
2313
2314 if (error != 0) {
2315 /*
2316 * mpt_scsi_send_tmf hard resets on failure, so no
2317 * need to do so here.
2318 */
2319 mpt_prt(mpt,
2320 "mpt_bus_reset: mpt_scsi_send_tmf returned %d\n", error);
2321 return (EIO);
2322 }
2323
2324 /* Wait for bus reset to be processed by the IOC. */
2325 error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
2326 REQ_STATE_DONE, sleep_ok, 5000);
2327
2328 status = le16toh(mpt->tmf_req->IOCStatus);
2329 response = mpt->tmf_req->ResponseCode;
2330 mpt->tmf_req->state = REQ_STATE_FREE;
2331
2332 if (error) {
2333 mpt_prt(mpt, "mpt_bus_reset: Reset timed-out. "
2334 "Resetting controller.\n");
2335 mpt_reset(mpt, TRUE);
2336 return (ETIMEDOUT);
2337 }
2338
2339 if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
2340 mpt_prt(mpt, "mpt_bus_reset: TMF IOC Status 0x%x. "
2341 "Resetting controller.\n", status);
2342 mpt_reset(mpt, TRUE);
2343 return (EIO);
2344 }
2345
2346 if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
2347 response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
2348 mpt_prt(mpt, "mpt_bus_reset: TMF Response 0x%x. "
2349 "Resetting controller.\n", response);
2350 mpt_reset(mpt, TRUE);
2351 return (EIO);
2352 }
2353 return (0);
2354}
2355
2356static int
2357mpt_fc_reset_link(struct mpt_softc *mpt, int dowait)
2358{
2359 int r = 0;
2360 request_t *req;
2361 PTR_MSG_FC_PRIMITIVE_SEND_REQUEST fc;
2362
2363 req = mpt_get_request(mpt, FALSE);
2364 if (req == NULL) {
2365 return (ENOMEM);
2366 }
2367 fc = req->req_vbuf;
2368 memset(fc, 0, sizeof(*fc));
2369 fc->SendFlags = MPI_FC_PRIM_SEND_FLAGS_RESET_LINK;
2370 fc->Function = MPI_FUNCTION_FC_PRIMITIVE_SEND;
2371 fc->MsgContext = htole32(req->index | fc_els_handler_id);
2372 mpt_send_cmd(mpt, req);
2373 if (dowait) {
2374 r = mpt_wait_req(mpt, req, REQ_STATE_DONE,
2375 REQ_STATE_DONE, FALSE, 60 * 1000);
2376 if (r == 0) {
2377 mpt_free_request(mpt, req);
2378 }
2379 }
2380 return (r);
2381}
2382
2383static int
2384mpt_cam_event(struct mpt_softc *mpt, request_t *req,
2385 MSG_EVENT_NOTIFY_REPLY *msg)
2386{
2387 uint32_t data0, data1;
2388
2389 data0 = le32toh(msg->Data[0]);
2390 data1 = le32toh(msg->Data[1]);
2391 switch(msg->Event & 0xFF) {
2392 case MPI_EVENT_UNIT_ATTENTION:
2393 mpt_prt(mpt, "UNIT ATTENTION: Bus: 0x%02x TargetID: 0x%02x\n",
2394 (data0 >> 8) & 0xff, data0 & 0xff);
2395 break;
2396
2397 case MPI_EVENT_IOC_BUS_RESET:
2398 /* We generated a bus reset */
2399 mpt_prt(mpt, "IOC Generated Bus Reset Port: %d\n",
2400 (data0 >> 8) & 0xff);
2401 xpt_async(AC_BUS_RESET, mpt->path, NULL);
2402 break;
2403
2404 case MPI_EVENT_EXT_BUS_RESET:
2405 /* Someone else generated a bus reset */
2406 mpt_prt(mpt, "External Bus Reset Detected\n");
2407 /*
2408 * These replies don't return EventData like the MPI
2409 * spec says they do
2410 */
2411 xpt_async(AC_BUS_RESET, mpt->path, NULL);
2412 break;
2413
2414 case MPI_EVENT_RESCAN:
2415#if __FreeBSD_version >= 600000
2416 {
2417 union ccb *ccb;
2418 uint32_t pathid;
2419 /*
2420 * In general this means a device has been added to the loop.
2421 */
2422 mpt_prt(mpt, "Rescan Port: %d\n", (data0 >> 8) & 0xff);
2423 if (mpt->ready == 0) {
2424 break;
2425 }
2426 if (mpt->phydisk_sim) {
2427 pathid = cam_sim_path(mpt->phydisk_sim);
2428 } else {
2429 pathid = cam_sim_path(mpt->sim);
2430 }
2431 MPTLOCK_2_CAMLOCK(mpt);
2432 /*
2433 * Allocate a CCB, create a wildcard path for this bus,
2434 * and schedule a rescan.
2435 */
2436 ccb = xpt_alloc_ccb_nowait();
2437 if (ccb == NULL) {
2438 mpt_prt(mpt, "unable to alloc CCB for rescan\n");
2439 CAMLOCK_2_MPTLOCK(mpt);
2440 break;
2441 }
2442
2443 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, pathid,
2444 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2445 CAMLOCK_2_MPTLOCK(mpt);
2446 mpt_prt(mpt, "unable to create path for rescan\n");
2447 xpt_free_ccb(ccb);
2448 break;
2449 }
2450 xpt_rescan(ccb);
2451 CAMLOCK_2_MPTLOCK(mpt);
2452 break;
2453 }
2454#else
2455 mpt_prt(mpt, "Rescan Port: %d\n", (data0 >> 8) & 0xff);
2456 break;
2457#endif
2458 case MPI_EVENT_LINK_STATUS_CHANGE:
2459 mpt_prt(mpt, "Port %d: LinkState: %s\n",
2460 (data1 >> 8) & 0xff,
2461 ((data0 & 0xff) == 0)? "Failed" : "Active");
2462 break;
2463
2464 case MPI_EVENT_LOOP_STATE_CHANGE:
2465 switch ((data0 >> 16) & 0xff) {
2466 case 0x01:
2467 mpt_prt(mpt,
2468 "Port 0x%x: FC LinkEvent: LIP(%02x,%02x) "
2469 "(Loop Initialization)\n",
2470 (data1 >> 8) & 0xff,
2471 (data0 >> 8) & 0xff,
2472 (data0 ) & 0xff);
2473 switch ((data0 >> 8) & 0xff) {
2474 case 0xF7:
2475 if ((data0 & 0xff) == 0xF7) {
2476 mpt_prt(mpt, "Device needs AL_PA\n");
2477 } else {
2478 mpt_prt(mpt, "Device %02x doesn't like "
2479 "FC performance\n",
2480 data0 & 0xFF);
2481 }
2482 break;
2483 case 0xF8:
2484 if ((data0 & 0xff) == 0xF7) {
2485 mpt_prt(mpt, "Device had loop failure "
2486 "at its receiver prior to acquiring"
2487 " AL_PA\n");
2488 } else {
2489 mpt_prt(mpt, "Device %02x detected loop"
2490 " failure at its receiver\n",
2491 data0 & 0xFF);
2492 }
2493 break;
2494 default:
2495 mpt_prt(mpt, "Device %02x requests that device "
2496 "%02x reset itself\n",
2497 data0 & 0xFF,
2498 (data0 >> 8) & 0xFF);
2499 break;
2500 }
2501 break;
2502 case 0x02:
2503 mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
2504 "LPE(%02x,%02x) (Loop Port Enable)\n",
2505 (data1 >> 8) & 0xff, /* Port */
2506 (data0 >> 8) & 0xff, /* Character 3 */
2507 (data0 ) & 0xff /* Character 4 */);
2508 break;
2509 case 0x03:
2510 mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
2511 "LPB(%02x,%02x) (Loop Port Bypass)\n",
2512 (data1 >> 8) & 0xff, /* Port */
2513 (data0 >> 8) & 0xff, /* Character 3 */
2514 (data0 ) & 0xff /* Character 4 */);
2515 break;
2516 default:
2517 mpt_prt(mpt, "Port 0x%x: FC LinkEvent: Unknown "
2518 "FC event (%02x %02x %02x)\n",
2519 (data1 >> 8) & 0xff, /* Port */
2520 (data0 >> 16) & 0xff, /* Event */
2521 (data0 >> 8) & 0xff, /* Character 3 */
2522 (data0 ) & 0xff /* Character 4 */);
2523 }
2524 break;
2525
2526 case MPI_EVENT_LOGOUT:
2527 mpt_prt(mpt, "FC Logout Port: %d N_PortID: %02x\n",
2528 (data1 >> 8) & 0xff, data0);
2529 break;
2530 case MPI_EVENT_QUEUE_FULL:
2531 {
2532 struct cam_sim *sim;
2533 struct cam_path *tmppath;
2534 struct ccb_relsim crs;
2535 PTR_EVENT_DATA_QUEUE_FULL pqf;
2536 lun_id_t lun_id;
2537
2538 pqf = (PTR_EVENT_DATA_QUEUE_FULL)msg->Data;
2539 pqf->CurrentDepth = le16toh(pqf->CurrentDepth);
2540 mpt_prt(mpt, "QUEUE FULL EVENT: Bus 0x%02x Target 0x%02x Depth "
2541 "%d\n", pqf->Bus, pqf->TargetID, pqf->CurrentDepth);
2542 if (mpt->phydisk_sim) {
2543 sim = mpt->phydisk_sim;
2544 } else {
2545 sim = mpt->sim;
2546 }
2547 MPTLOCK_2_CAMLOCK(mpt);
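/*
 * Walk every LUN on the reported target and ask CAM to trim its
 * opening count to one below the depth at which the queue-full
 * condition occurred.
 */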
2548 for (lun_id = 0; lun_id < MPT_MAX_LUNS; lun_id++) {
2549 if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim),
2550 pqf->TargetID, lun_id) != CAM_REQ_CMP) {
2551 mpt_prt(mpt, "unable to create a path to send "
2552 "XPT_REL_SIMQ");
2553 CAMLOCK_2_MPTLOCK(mpt);
2554 break;
2555 }
2556 xpt_setup_ccb(&crs.ccb_h, tmppath, 5);
2557 crs.ccb_h.func_code = XPT_REL_SIMQ;
2558 crs.release_flags = RELSIM_ADJUST_OPENINGS;
2559 crs.openings = pqf->CurrentDepth - 1;
2560 xpt_action((union ccb *)&crs);
2561 if (crs.ccb_h.status != CAM_REQ_CMP) {
2562 mpt_prt(mpt, "XPT_REL_SIMQ failed\n");
2563 }
2564 xpt_free_path(tmppath);
2565 }
2566 CAMLOCK_2_MPTLOCK(mpt);
2567 break;
2568 }
2569 case MPI_EVENT_EVENT_CHANGE:
2570 case MPI_EVENT_INTEGRATED_RAID:
2571 case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
2572 case MPI_EVENT_SAS_SES:
2573 break;
2574 default:
2575 mpt_lprt(mpt, MPT_PRT_WARN, "mpt_cam_event: 0x%x\n",
2576 (unsigned)msg->Event & 0xFF);
2577 return (0);
2578 }
2579 return (1);
2580}
2581
2582/*
2583 * Reply path for all SCSI I/O requests, called from our
2584 * interrupt handler by extracting our handler index from
2585 * the MsgContext field of the reply from the IOC.
2586 *
2587 * This routine is optimized for the common case of a
2588 * completion without error. All exception handling is
2589 * offloaded to non-inlined helper routines to minimize
2590 * cache footprint.
2591 */
2592static int
2593mpt_scsi_reply_handler(struct mpt_softc *mpt, request_t *req,
2594 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2595{
2596 MSG_SCSI_IO_REQUEST *scsi_req;
2597 union ccb *ccb;
2598
2599 if (req->state == REQ_STATE_FREE) {
2600 mpt_prt(mpt, "mpt_scsi_reply_handler: req already free\n");
2601 return (TRUE);
2602 }
2603
2604 scsi_req = (MSG_SCSI_IO_REQUEST *)req->req_vbuf;
2605 ccb = req->ccb;
2606 if (ccb == NULL) {
2607 mpt_prt(mpt, "mpt_scsi_reply_handler: req %p:%u with no ccb\n",
2608 req, req->serno);
2609 return (TRUE);
2610 }
2611
2612 mpt_req_untimeout(req, mpt_timeout, ccb);
2613 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2614
2615 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
2616 bus_dmasync_op_t op;
2617
2618 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
2619 op = BUS_DMASYNC_POSTREAD;
2620 else
2621 op = BUS_DMASYNC_POSTWRITE;
2622 bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
2623 bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
2624 }
2625
2626 if (reply_frame == NULL) {
2627 /*
2628 * Context only reply, completion without error status.
2629 */
2630 ccb->csio.resid = 0;
2631 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2632 ccb->csio.scsi_status = SCSI_STATUS_OK;
2633 } else {
2634 mpt_scsi_reply_frame_handler(mpt, req, reply_frame);
2635 }
2636
2637 if (mpt->outofbeer) {
2638 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2639 mpt->outofbeer = 0;
2640 mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
2641 }
2642 if (scsi_req->CDB[0] == INQUIRY && (scsi_req->CDB[1] & SI_EVPD) == 0) {
2643 struct scsi_inquiry_data *iq =
2644 (struct scsi_inquiry_data *)ccb->csio.data_ptr;
2645 if (scsi_req->Function ==
2646 MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
2647 /*
2648 * Fake out the device type so that only the
2649 * pass-thru device will attach.
2650 */
2651 iq->device &= ~0x1F;
2652 iq->device |= T_NODEVICE;
2653 }
2654 }
2655 if (mpt->verbose == MPT_PRT_DEBUG) {
2656 mpt_prt(mpt, "mpt_scsi_reply_handler: %p:%u complete\n",
2657 req, req->serno);
2658 }
2659 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
2660 MPTLOCK_2_CAMLOCK(mpt);
2661 xpt_done(ccb);
2662 CAMLOCK_2_MPTLOCK(mpt);
2663 if ((req->state & REQ_STATE_TIMEDOUT) == 0) {
2664 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2665 } else {
2666 mpt_prt(mpt, "completing timedout/aborted req %p:%u\n",
2667 req, req->serno);
2668 TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
2669 }
2670 KASSERT((req->state & REQ_STATE_NEED_WAKEUP) == 0,
2671 ("CCB req needed wakeup"));
2672#ifdef INVARIANTS
2673 mpt_req_not_spcl(mpt, req, "mpt_scsi_reply_handler", __LINE__);
2674#endif
2675 mpt_free_request(mpt, req);
2676 return (TRUE);
2677}
2678
2679static int
2680mpt_scsi_tmf_reply_handler(struct mpt_softc *mpt, request_t *req,
2681 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2682{
2683 MSG_SCSI_TASK_MGMT_REPLY *tmf_reply;
2684
2685 KASSERT(req == mpt->tmf_req, ("TMF Reply not using mpt->tmf_req"));
2686#ifdef INVARIANTS
2687 mpt_req_not_spcl(mpt, req, "mpt_scsi_tmf_reply_handler", __LINE__);
2688#endif
2689 tmf_reply = (MSG_SCSI_TASK_MGMT_REPLY *)reply_frame;
2690 /* Record IOC Status and Response Code of TMF for any waiters. */
2691 req->IOCStatus = le16toh(tmf_reply->IOCStatus);
2692 req->ResponseCode = tmf_reply->ResponseCode;
2693
2694 mpt_lprt(mpt, MPT_PRT_DEBUG, "TMF complete: req %p:%u status 0x%x\n",
2695 req, req->serno, le16toh(tmf_reply->IOCStatus));
2696 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2697 if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
2698 req->state |= REQ_STATE_DONE;
2699 wakeup(req);
2700 } else {
2701 mpt->tmf_req->state = REQ_STATE_FREE;
2702 }
2703 return (TRUE);
2704}
2705
2706/*
2707 * XXX: Move to definitions file
2708 */
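/*
 * The first group below are FC R_CTL routing values (extended link
 * service, FC-4 link service, and the ABTS/BA_ACC basic link service);
 * the second group are ELS command codes carried in the first byte of
 * the payload.
 */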
2709#define ELS 0x22
2710#define FC4LS 0x32
2711#define ABTS 0x81
2712#define BA_ACC 0x84
2713
2714#define LS_RJT 0x01
2715#define LS_ACC 0x02
2716#define PLOGI 0x03
2717#define LOGO 0x05
2718#define SRR 0x14
2719#define PRLI 0x20
2720#define PRLO 0x21
2721#define ADISC 0x52
2722#define RSCN 0x61
2723
2724static void
2725mpt_fc_els_send_response(struct mpt_softc *mpt, request_t *req,
2726 PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp, U8 length)
2727{
2728 uint32_t fl;
2729 MSG_LINK_SERVICE_RSP_REQUEST tmp;
2730 PTR_MSG_LINK_SERVICE_RSP_REQUEST rsp;
2731
2732 /*
2733 * We are going to reuse the ELS request to send this response back.
2734 */
2735 rsp = &tmp;
2736 memset(rsp, 0, sizeof(*rsp));
2737
2738#ifdef USE_IMMEDIATE_LINK_DATA
2739 /*
2740 * The IMMEDIATE stuff doesn't seem to work.
2741 */
2742 rsp->RspFlags = LINK_SERVICE_RSP_FLAGS_IMMEDIATE;
2743#endif
2744 rsp->RspLength = length;
2745 rsp->Function = MPI_FUNCTION_FC_LINK_SRVC_RSP;
2746 rsp->MsgContext = htole32(req->index | fc_els_handler_id);
2747
2748 /*
2749 * Copy over information from the original reply frame to
2750 * its correct place in the response.
2751 */
2752 memcpy((U8 *)rsp + 0x0c, (U8 *)rp + 0x1c, 24);
2753
2754 /*
2755 * And now copy back the temporary area to the original frame.
2756 */
2757 memcpy(req->req_vbuf, rsp, sizeof (MSG_LINK_SERVICE_RSP_REQUEST));
2758 rsp = req->req_vbuf;
2759
2760#ifdef USE_IMMEDIATE_LINK_DATA
2761 memcpy((U8 *)&rsp->SGL, &((U8 *)req->req_vbuf)[MPT_RQSL(mpt)], length);
2762#else
2763{
2764 PTR_SGE_SIMPLE32 se = (PTR_SGE_SIMPLE32) &rsp->SGL;
2765 bus_addr_t paddr = req->req_pbuf;
2766 paddr += MPT_RQSL(mpt);
2767
2768 fl =
2769 MPI_SGE_FLAGS_HOST_TO_IOC |
2770 MPI_SGE_FLAGS_SIMPLE_ELEMENT |
2771 MPI_SGE_FLAGS_LAST_ELEMENT |
2772 MPI_SGE_FLAGS_END_OF_LIST |
2773 MPI_SGE_FLAGS_END_OF_BUFFER;
2774 fl <<= MPI_SGE_FLAGS_SHIFT;
2775 fl |= (length);
2776 se->FlagsLength = htole32(fl);
2777 se->Address = htole32((uint32_t) paddr);
2778}
2779#endif
2780
2781 /*
2782 * Send it on...
2783 */
2784 mpt_send_cmd(mpt, req);
2785}
2786
2787static int
2788mpt_fc_els_reply_handler(struct mpt_softc *mpt, request_t *req,
2789 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2790{
2791 PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp =
2792 (PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY) reply_frame;
2793 U8 rctl;
2794 U8 type;
2795 U8 cmd;
2796 U16 status = le16toh(reply_frame->IOCStatus);
2797 U32 *elsbuf;
2798 int ioindex;
2799 int do_refresh = TRUE;
2800
2801#ifdef INVARIANTS
2802 KASSERT(mpt_req_on_free_list(mpt, req) == 0,
2803 ("fc_els_reply_handler: req %p:%u for function %x on freelist!",
2804 req, req->serno, rp->Function));
2805 if (rp->Function != MPI_FUNCTION_FC_PRIMITIVE_SEND) {
2806 mpt_req_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
2807 } else {
2808 mpt_req_not_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
2809 }
2810#endif
2811 mpt_lprt(mpt, MPT_PRT_DEBUG,
2812 "FC_ELS Complete: req %p:%u, reply %p function %x\n",
2813 req, req->serno, reply_frame, reply_frame->Function);
2814
2815 if (status != MPI_IOCSTATUS_SUCCESS) {
2816 mpt_prt(mpt, "ELS REPLY STATUS 0x%x for Function %x\n",
2817 status, reply_frame->Function);
2818 if (status == MPI_IOCSTATUS_INVALID_STATE) {
2819 /*
2820 * XXX: to get around shutdown issue
2821 */
2822 mpt->disabled = 1;
2823 return (TRUE);
2824 }
2825 return (TRUE);
2826 }
2827
2828 /*
2829 * If the function is a link service response, we recycle the
2830 * response to be a refresh for a new link service request.
2831 *
2832 * The request pointer is bogus in this case and we have to fetch
2833 * it based upon the TransactionContext.
2834 */
2835 if (rp->Function == MPI_FUNCTION_FC_LINK_SRVC_RSP) {
2836 /* Freddie Uncle Charlie Katie */
2837 /* We don't get the IOINDEX as part of the Link Svc Rsp */
2838 for (ioindex = 0; ioindex < mpt->els_cmds_allocated; ioindex++)
2839 if (mpt->els_cmd_ptrs[ioindex] == req) {
2840 break;
2841 }
2842
2843 KASSERT(ioindex < mpt->els_cmds_allocated,
2844 ("can't find my mommie!"));
2845
2846 /* remove from active list as we're going to re-post it */
2847 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2848 req->state &= ~REQ_STATE_QUEUED;
2849 req->state |= REQ_STATE_DONE;
2850 mpt_fc_post_els(mpt, req, ioindex);
2851 return (TRUE);
2852 }
2853
2854 if (rp->Function == MPI_FUNCTION_FC_PRIMITIVE_SEND) {
2855 /* remove from active list as we're done */
2856 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2857 req->state &= ~REQ_STATE_QUEUED;
2858 req->state |= REQ_STATE_DONE;
2859 if (req->state & REQ_STATE_TIMEDOUT) {
2860 mpt_lprt(mpt, MPT_PRT_DEBUG,
2861 "Sync Primitive Send Completed After Timeout\n");
2862 mpt_free_request(mpt, req);
2863 } else if ((req->state & REQ_STATE_NEED_WAKEUP) == 0) {
2864 mpt_lprt(mpt, MPT_PRT_DEBUG,
2865 "Async Primitive Send Complete\n");
2866 mpt_free_request(mpt, req);
2867 } else {
2868 mpt_lprt(mpt, MPT_PRT_DEBUG,
2869 "Sync Primitive Send Complete- Waking Waiter\n");
2870 wakeup(req);
2871 }
2872 return (TRUE);
2873 }
2874
2875 if (rp->Function != MPI_FUNCTION_FC_LINK_SRVC_BUF_POST) {
2876 mpt_prt(mpt, "unexpected ELS_REPLY: Function 0x%x Flags %x "
2877 "Length %d Message Flags %x\n", rp->Function, rp->Flags,
2878 rp->MsgLength, rp->MsgFlags);
2879 return (TRUE);
2880 }
2881
2882 if (rp->MsgLength <= 5) {
2883 /*
2884 * This is just an ack of an original ELS buffer post
2885 */
2886 mpt_lprt(mpt, MPT_PRT_DEBUG,
2887 "RECV'd ACK of FC_ELS buf post %p:%u\n", req, req->serno);
2888 return (TRUE);
2889 }
2890
2891
2892 rctl = (le32toh(rp->Rctl_Did) & MPI_FC_RCTL_MASK) >> MPI_FC_RCTL_SHIFT;
2893 type = (le32toh(rp->Type_Fctl) & MPI_FC_TYPE_MASK) >> MPI_FC_TYPE_SHIFT;
2894
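/* The ELS payload was posted into the second half of the request buffer. */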
2895 elsbuf = &((U32 *)req->req_vbuf)[MPT_RQSL(mpt)/sizeof (U32)];
2896 cmd = be32toh(elsbuf[0]) >> 24;
2897
2898 if (rp->Flags & MPI_LS_BUF_POST_REPLY_FLAG_NO_RSP_NEEDED) {
2899 mpt_lprt(mpt, MPT_PRT_ALWAYS, "ELS_REPLY: response unneeded\n");
2900 return (TRUE);
2901 }
2902
2903 ioindex = le32toh(rp->TransactionContext);
2904 req = mpt->els_cmd_ptrs[ioindex];
2905
2906 if (rctl == ELS && type == 1) {
2907 switch (cmd) {
2908 case PRLI:
2909 /*
2910 * Send back a PRLI ACC
2911 */
2912 mpt_prt(mpt, "PRLI from 0x%08x%08x\n",
2913 le32toh(rp->Wwn.PortNameHigh),
2914 le32toh(rp->Wwn.PortNameLow));
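/*
 * Build the PRLI accept in place. Word 0 appears to be the LS_ACC
 * header (command 0x02, page length 0x10, payload length 0x14);
 * word 4 carries the FCP service parameter bits advertising target
 * (0x10) and/or initiator (0x20) function, per our role.
 */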
2915 elsbuf[0] = htobe32(0x02100014);
2916 elsbuf[1] |= htobe32(0x00000100);
2917 elsbuf[4] = htobe32(0x00000002);
2918 if (mpt->role & MPT_ROLE_TARGET)
2919 elsbuf[4] |= htobe32(0x00000010);
2920 if (mpt->role & MPT_ROLE_INITIATOR)
2921 elsbuf[4] |= htobe32(0x00000020);
2922 /* remove from active list as we're done */
2923 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2924 req->state &= ~REQ_STATE_QUEUED;
2925 req->state |= REQ_STATE_DONE;
2926 mpt_fc_els_send_response(mpt, req, rp, 20);
2927 do_refresh = FALSE;
2928 break;
2929 case PRLO:
2930 memset(elsbuf, 0, 5 * (sizeof (U32)));
2931 elsbuf[0] = htobe32(0x02100014);
2932 elsbuf[1] = htobe32(0x08000100);
2933 mpt_prt(mpt, "PRLO from 0x%08x%08x\n",
2934 le32toh(rp->Wwn.PortNameHigh),
2935 le32toh(rp->Wwn.PortNameLow));
2936 /* remove from active list as we're done */
2937 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2938 req->state &= ~REQ_STATE_QUEUED;
2939 req->state |= REQ_STATE_DONE;
2940 mpt_fc_els_send_response(mpt, req, rp, 20);
2941 do_refresh = FALSE;
2942 break;
2943 default:
2944 mpt_prt(mpt, "ELS TYPE 1 COMMAND: %x\n", cmd);
2945 break;
2946 }
2947 } else if (rctl == ABTS && type == 0) {
2948 uint16_t rx_id = le16toh(rp->Rxid);
2949 uint16_t ox_id = le16toh(rp->Oxid);
2950 request_t *tgt_req = NULL;
2951
2952 mpt_prt(mpt,
2953 "ELS: ABTS OX_ID 0x%x RX_ID 0x%x from 0x%08x%08x\n",
2954 ox_id, rx_id, le32toh(rp->Wwn.PortNameHigh),
2955 le32toh(rp->Wwn.PortNameLow));
2956 if (rx_id >= mpt->mpt_max_tgtcmds) {
2957 mpt_prt(mpt, "Bad RX_ID 0x%x\n", rx_id);
2958 } else if (mpt->tgt_cmd_ptrs == NULL) {
2959 mpt_prt(mpt, "No TGT CMD PTRS\n");
2960 } else {
2961 tgt_req = mpt->tgt_cmd_ptrs[rx_id];
2962 }
2963 if (tgt_req) {
2964 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, tgt_req);
2965 union ccb *ccb = tgt->ccb;
2966 uint32_t ct_id;
2967
2968 /*
2969 * Check to make sure we have the correct command.
2970 * The reply descriptor in the target state should
2971 * contain an IoIndex that matches the RX_ID.
2973 *
2974 * It'd be nice to have OX_ID to crosscheck with
2975 * as well.
2976 */
2977 ct_id = GET_IO_INDEX(tgt->reply_desc);
2978
2979 if (ct_id != rx_id) {
2980 mpt_lprt(mpt, MPT_PRT_ERROR, "ABORT Mismatch: "
2981 "RX_ID received=0x%x; RX_ID in cmd=0x%x\n",
2982 rx_id, ct_id);
2983 goto skip;
2984 }
2985
2986 ccb = tgt->ccb;
2987 if (ccb) {
2988 mpt_prt(mpt,
2989 "CCB (%p): lun %u flags %x status %x\n",
2990 ccb, ccb->ccb_h.target_lun,
2991 ccb->ccb_h.flags, ccb->ccb_h.status);
2992 }
2993 mpt_prt(mpt, "target state 0x%x resid %u xfrd %u rpwrd "
2994 "%x nxfers %x\n", tgt->state,
2995 tgt->resid, tgt->bytes_xfered, tgt->reply_desc,
2996 tgt->nxfers);
2997 skip:
2998 if (mpt_abort_target_cmd(mpt, tgt_req)) {
2999 mpt_prt(mpt, "unable to start TargetAbort\n");
3000 }
3001 } else {
3002 mpt_prt(mpt, "no back pointer for RX_ID 0x%x\n", rx_id);
3003 }
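/*
 * Build what looks like a BA_ACC payload: echo the OX_ID/RX_ID pair
 * and report a low/high SEQ_CNT range of 0-0xffff. Below, the reply
 * frame's R_CTL is bumped from ABTS to BA_ACC so the response header
 * comes out right.
 */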
3004 memset(elsbuf, 0, 5 * (sizeof (U32)));
3005 elsbuf[0] = htobe32(0);
3006 elsbuf[1] = htobe32((ox_id << 16) | rx_id);
3007 elsbuf[2] = htobe32(0x000ffff);
3008 /*
3009 * Dork with the reply frame so that the response to it
3010 * will be correct.
3011 */
3012 rp->Rctl_Did += ((BA_ACC - ABTS) << MPI_FC_RCTL_SHIFT);
3013 /* remove from active list as we're done */
3014 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
3015 req->state &= ~REQ_STATE_QUEUED;
3016 req->state |= REQ_STATE_DONE;
3017 mpt_fc_els_send_response(mpt, req, rp, 12);
3018 do_refresh = FALSE;
3019 } else {
3020 mpt_prt(mpt, "ELS: RCTL %x TYPE %x CMD %x\n", rctl, type, cmd);
3021 }
3022 if (do_refresh == TRUE) {
3023 /* remove from active list as we're done */
3024 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
3025 req->state &= ~REQ_STATE_QUEUED;
3026 req->state |= REQ_STATE_DONE;
3027 mpt_fc_post_els(mpt, req, ioindex);
3028 }
3029 return (TRUE);
3030}
3031
3032/*
3033 * Clean up all SCSI Initiator personality state in response
3034 * to a controller reset.
3035 */
3036static void
3037mpt_cam_ioc_reset(struct mpt_softc *mpt, int type)
3038{
3039 /*
3040 * The pending list is already run down by
3041 * the generic handler. Perform the same
3042 * operation on the timed out request list.
3043 */
3044 mpt_complete_request_chain(mpt, &mpt->request_timeout_list,
3045 MPI_IOCSTATUS_INVALID_STATE);
3046
3047 /*
3048 * XXX: We need to repost ELS and Target Command Buffers?
3049 */
3050
3051 /*
3052 * Inform the XPT that a bus reset has occurred.
3053 */
3054 xpt_async(AC_BUS_RESET, mpt->path, NULL);
3055}
3056
3057/*
3058 * Parse additional completion information in the reply
3059 * frame for SCSI I/O requests.
3060 */
3061static int
3062mpt_scsi_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
3063 MSG_DEFAULT_REPLY *reply_frame)
3064{
3065 union ccb *ccb;
3066 MSG_SCSI_IO_REPLY *scsi_io_reply;
3067 u_int ioc_status;
3068 u_int sstate;
3069
3070 MPT_DUMP_REPLY_FRAME(mpt, reply_frame);
3071 KASSERT(reply_frame->Function == MPI_FUNCTION_SCSI_IO_REQUEST
3072 || reply_frame->Function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH,
3073 ("MPT SCSI I/O Handler called with incorrect reply type"));
3074 KASSERT((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0,
3075 ("MPT SCSI I/O Handler called with continuation reply"));
3076
3077 scsi_io_reply = (MSG_SCSI_IO_REPLY *)reply_frame;
3078 ioc_status = le16toh(scsi_io_reply->IOCStatus);
3079 ioc_status &= MPI_IOCSTATUS_MASK;
3080 sstate = scsi_io_reply->SCSIState;
3081
3082 ccb = req->ccb;
3083 ccb->csio.resid =
3084 ccb->csio.dxfer_len - le32toh(scsi_io_reply->TransferCount);
3085
3086 if ((sstate & MPI_SCSI_STATE_AUTOSENSE_VALID) != 0
3087 && (ccb->ccb_h.flags & (CAM_SENSE_PHYS | CAM_SENSE_PTR)) == 0) {
3088 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
3089 ccb->csio.sense_resid =
3090 ccb->csio.sense_len - le32toh(scsi_io_reply->SenseCount);
3091 bcopy(req->sense_vbuf, &ccb->csio.sense_data,
3092 min(ccb->csio.sense_len,
3093 le32toh(scsi_io_reply->SenseCount)));
3094 }
3095
3096 if ((sstate & MPI_SCSI_STATE_QUEUE_TAG_REJECTED) != 0) {
3097 /*
3098 * Tag messages rejected, but non-tagged retry
3099 * was successful.
3100 XXXX
3101 mpt_set_tags(mpt, devinfo, MPT_QUEUE_NONE);
3102 */
3103 }
3104
3105 switch(ioc_status) {
3106 case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
3107 /*
3108 * XXX
3109 * Linux driver indicates that a zero
3110 * transfer length with this error code
3111 * indicates a CRC error.
3112 *
3113 * No need to swap the bytes for checking
3114 * against zero.
3115 */
3116 if (scsi_io_reply->TransferCount == 0) {
3117 mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
3118 break;
3119 }
3120 /* FALLTHROUGH */
3121 case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
3122 case MPI_IOCSTATUS_SUCCESS:
3123 case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
3124 if ((sstate & MPI_SCSI_STATE_NO_SCSI_STATUS) != 0) {
3125 /*
3126 * Status was never returned for this transaction.
3127 */
3128 mpt_set_ccb_status(ccb, CAM_UNEXP_BUSFREE);
3129 } else if (scsi_io_reply->SCSIStatus != SCSI_STATUS_OK) {
3130 ccb->csio.scsi_status = scsi_io_reply->SCSIStatus;
3131 mpt_set_ccb_status(ccb, CAM_SCSI_STATUS_ERROR);
3132 if ((sstate & MPI_SCSI_STATE_AUTOSENSE_FAILED) != 0)
3133 mpt_set_ccb_status(ccb, CAM_AUTOSENSE_FAIL);
3134 } else if ((sstate & MPI_SCSI_STATE_RESPONSE_INFO_VALID) != 0) {
3135
3136 /* XXX Handle SPI-Packet and FCP-2 response info. */
3137 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3138 } else
3139 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3140 break;
3141 case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
3142 mpt_set_ccb_status(ccb, CAM_DATA_RUN_ERR);
3143 break;
3144 case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR:
3145 mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
3146 break;
3147 case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
3148 /*
3149 * Since selection timeouts and "device really not
3150 * there" are grouped into this error code, report
3151 * selection timeout. Selection timeouts are
3152 * typically retried before giving up on the device
3153 * whereas "device not there" errors are considered
3154 * unretryable.
3155 */
3156 mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
3157 break;
3158 case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:
3159 mpt_set_ccb_status(ccb, CAM_SEQUENCE_FAIL);
3160 break;
3161 case MPI_IOCSTATUS_SCSI_INVALID_BUS:
3162 mpt_set_ccb_status(ccb, CAM_PATH_INVALID);
3163 break;
3164 case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
3165 mpt_set_ccb_status(ccb, CAM_TID_INVALID);
3166 break;
3167 case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
3168 ccb->ccb_h.status = CAM_UA_TERMIO;
3169 break;
3170 case MPI_IOCSTATUS_INVALID_STATE:
3171 /*
3172 * The IOC has been reset. Emulate a bus reset.
3173 */
3174 /* FALLTHROUGH */
3175 case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
3176 ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
3177 break;
3178 case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
3179 case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
3180 /*
3181 * Don't clobber any timeout status that has
3182 * already been set for this transaction. We
3183 * want the SCSI layer to be able to differentiate
3184 * between the command we aborted due to timeout
3185 * and any innocent bystanders.
3186 */
3187 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG)
3188 break;
3189 mpt_set_ccb_status(ccb, CAM_REQ_TERMIO);
3190 break;
3191
3192 case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
3193 mpt_set_ccb_status(ccb, CAM_RESRC_UNAVAIL);
3194 break;
3195 case MPI_IOCSTATUS_BUSY:
3196 mpt_set_ccb_status(ccb, CAM_BUSY);
3197 break;
3198 case MPI_IOCSTATUS_INVALID_FUNCTION:
3199 case MPI_IOCSTATUS_INVALID_SGL:
3200 case MPI_IOCSTATUS_INTERNAL_ERROR:
3201 case MPI_IOCSTATUS_INVALID_FIELD:
3202 default:
3203 /* XXX
3204 * Some of the above may need to kick
3205 * off a recovery action!!!!
3206 */
3207 ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
3208 break;
3209 }
3210
3211 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
3212 mpt_freeze_ccb(ccb);
3213 }
3214
3215 return (TRUE);
3216}
3217
3218static void
3219mpt_action(struct cam_sim *sim, union ccb *ccb)
3220{
3221 struct mpt_softc *mpt;
3222 struct ccb_trans_settings *cts;
3223 target_id_t tgt;
3224 lun_id_t lun;
3225 int raid_passthru;
3226
3227 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mpt_action\n"));
3228
3229 mpt = (struct mpt_softc *)cam_sim_softc(sim);
3230 raid_passthru = (sim == mpt->phydisk_sim);
3231 MPT_LOCK_ASSERT(mpt);
3232
3233 tgt = ccb->ccb_h.target_id;
3234 lun = ccb->ccb_h.target_lun;
3235 if (raid_passthru &&
3236 ccb->ccb_h.func_code != XPT_PATH_INQ &&
3237 ccb->ccb_h.func_code != XPT_RESET_BUS &&
3238 ccb->ccb_h.func_code != XPT_RESET_DEV) {
3239 CAMLOCK_2_MPTLOCK(mpt);
3240 if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
3241 MPTLOCK_2_CAMLOCK(mpt);
3242 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3243 mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
3244 xpt_done(ccb);
3245 return;
3246 }
3247 MPTLOCK_2_CAMLOCK(mpt);
3248 }
3249 ccb->ccb_h.ccb_mpt_ptr = mpt;
3250
3251 switch (ccb->ccb_h.func_code) {
3252 case XPT_SCSI_IO: /* Execute the requested I/O operation */
3253 /*
3254 * Do a couple of preliminary checks...
3255 */
3256 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
3257 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
3258 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3259 mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3260 break;
3261 }
3262 }
3263 /* Max supported CDB length is 16 bytes */
3264 /* XXX Unless we implement the new 32-byte message type */
3265 if (ccb->csio.cdb_len >
3266 sizeof (((PTR_MSG_SCSI_IO_REQUEST)0)->CDB)) {
3267 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3268 mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3269 break;
3270 }
3271#ifdef MPT_TEST_MULTIPATH
3272 if (mpt->failure_id == ccb->ccb_h.target_id) {
3273 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3274 mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
3275 break;
3276 }
3277#endif
3278 ccb->csio.scsi_status = SCSI_STATUS_OK;
3279 mpt_start(sim, ccb);
3280 return;
3281
3282 case XPT_RESET_BUS:
3283 if (raid_passthru) {
3284 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3285 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3286 break;
3287 }
3288 case XPT_RESET_DEV:
3289 if (ccb->ccb_h.func_code == XPT_RESET_BUS) {
3290 if (bootverbose) {
3291 xpt_print(ccb->ccb_h.path, "reset bus\n");
3292 }
3293 } else {
3294 xpt_print(ccb->ccb_h.path, "reset device\n");
3295 }
3296 CAMLOCK_2_MPTLOCK(mpt);
3297 (void) mpt_bus_reset(mpt, tgt, lun, FALSE);
3298 MPTLOCK_2_CAMLOCK(mpt);
3299
3300 /*
3301 * mpt_bus_reset is always successful in that it
3302 * will fall back to a hard reset should a bus
3303 * reset attempt fail.
3304 */
3305 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3306 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3307 break;
3308
3309 case XPT_ABORT:
3310 {
3311 union ccb *accb = ccb->cab.abort_ccb;
3312 CAMLOCK_2_MPTLOCK(mpt);
3313 switch (accb->ccb_h.func_code) {
3314 case XPT_ACCEPT_TARGET_IO:
3315 case XPT_IMMED_NOTIFY:
3316 ccb->ccb_h.status = mpt_abort_target_ccb(mpt, ccb);
3317 break;
3318 case XPT_CONT_TARGET_IO:
3319 mpt_prt(mpt, "cannot abort active CTIOs yet\n");
3320 ccb->ccb_h.status = CAM_UA_ABORT;
3321 break;
3322 case XPT_SCSI_IO:
3323 ccb->ccb_h.status = CAM_UA_ABORT;
3324 break;
3325 default:
3326 ccb->ccb_h.status = CAM_REQ_INVALID;
3327 break;
3328 }
3329 MPTLOCK_2_CAMLOCK(mpt);
3330 break;
3331 }
3332
3333#ifdef CAM_NEW_TRAN_CODE
3334#define IS_CURRENT_SETTINGS(c) ((c)->type == CTS_TYPE_CURRENT_SETTINGS)
3335#else
3336#define IS_CURRENT_SETTINGS(c) ((c)->flags & CCB_TRANS_CURRENT_SETTINGS)
3337#endif
3338#define DP_DISC_ENABLE 0x1
3339#define DP_DISC_DISABL 0x2
3340#define DP_DISC (DP_DISC_ENABLE|DP_DISC_DISABL)
3341
3342#define DP_TQING_ENABLE 0x4
3343#define DP_TQING_DISABL 0x8
3344#define DP_TQING (DP_TQING_ENABLE|DP_TQING_DISABL)
3345
3346#define DP_WIDE 0x10
3347#define DP_NARROW 0x20
3348#define DP_WIDTH (DP_WIDE|DP_NARROW)
3349
3350#define DP_SYNC 0x40
3351
3352 case XPT_SET_TRAN_SETTINGS: /* Nexus Settings */
3353 {
3354#ifdef CAM_NEW_TRAN_CODE
3355 struct ccb_trans_settings_scsi *scsi;
3356 struct ccb_trans_settings_spi *spi;
3357#endif
3358 uint8_t dval;
3359 u_int period;
3360 u_int offset;
3361 int i, j;
3362
3363 cts = &ccb->cts;
3364
3365 if (mpt->is_fc || mpt->is_sas) {
3366 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3367 break;
3368 }
3369
3370#ifdef CAM_NEW_TRAN_CODE
3371 scsi = &cts->proto_specific.scsi;
3372 spi = &cts->xport_specific.spi;
3373
3374 /*
3375 * We can be called just to validate transport and proto versions.
3376 */
3377 if (scsi->valid == 0 && spi->valid == 0) {
3378 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3379 break;
3380 }
3381#endif
3382
3383 /*
3384 * Skip attempting settings on RAID volume disks.
3385 * Other devices on the bus get the normal treatment.
3386 */
3387 if (mpt->phydisk_sim && raid_passthru == 0 &&
3388 mpt_is_raid_volume(mpt, tgt) != 0) {
3389 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3390 "no transfer settings for RAID vols\n");
3391 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3392 break;
3393 }
3394
3395 i = mpt->mpt_port_page2.PortSettings &
3396 MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
3397 j = mpt->mpt_port_page2.PortFlags &
3398 MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK;
3399 if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS &&
3400 j == MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV) {
3401 mpt_lprt(mpt, MPT_PRT_ALWAYS,
3402 "honoring BIOS transfer negotiations\n");
3403 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3404 break;
3405 }
3406
3407 dval = 0;
3408 period = 0;
3409 offset = 0;
3410
3411#ifndef CAM_NEW_TRAN_CODE
3412 if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
3413 dval |= (cts->flags & CCB_TRANS_DISC_ENB) ?
3414 DP_DISC_ENABLE : DP_DISC_DISABL;
3415 }
3416
3417 if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
3418 dval |= (cts->flags & CCB_TRANS_TAG_ENB) ?
3419 DP_TQING_ENABLE : DP_TQING_DISABL;
3420 }
3421
3422 if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) {
3423 dval |= cts->bus_width ? DP_WIDE : DP_NARROW;
3424 }
3425
3426 if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) &&
3427 (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID)) {
3428 dval |= DP_SYNC;
3429 period = cts->sync_period;
3430 offset = cts->sync_offset;
3431 }
3432#else
3433 if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
3434 dval |= ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) ?
3435 DP_DISC_ENABLE : DP_DISC_DISABL;
3436 }
3437
3438 if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
3439 dval |= ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) ?
3440 DP_TQING_ENABLE : DP_TQING_DISABL;
3441 }
3442
3443 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
3444 dval |= (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) ?
3445 DP_WIDE : DP_NARROW;
3446 }
3447
3448 if (spi->valid & CTS_SPI_VALID_SYNC_OFFSET) {
3449 dval |= DP_SYNC;
3450 offset = spi->sync_offset;
3451 } else {
3452 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr =
3453 &mpt->mpt_dev_page1[tgt];
3454 offset = ptr->RequestedParameters;
3455 offset &= MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
3456 offset >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET;
3457 }
3458 if (spi->valid & CTS_SPI_VALID_SYNC_RATE) {
3459 dval |= DP_SYNC;
3460 period = spi->sync_period;
3461 } else {
3462 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr =
3463 &mpt->mpt_dev_page1[tgt];
3464 period = ptr->RequestedParameters;
3465 period &= MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
3466 period >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD;
3467 }
3468#endif
3469 CAMLOCK_2_MPTLOCK(mpt);
3470 if (dval & DP_DISC_ENABLE) {
3471 mpt->mpt_disc_enable |= (1 << tgt);
3472 } else if (dval & DP_DISC_DISABL) {
3473 mpt->mpt_disc_enable &= ~(1 << tgt);
3474 }
3475 if (dval & DP_TQING_ENABLE) {
3476 mpt->mpt_tag_enable |= (1 << tgt);
3477 } else if (dval & DP_TQING_DISABL) {
3478 mpt->mpt_tag_enable &= ~(1 << tgt);
3479 }
3480 if (dval & DP_WIDTH) {
3481 mpt_setwidth(mpt, tgt, 1);
3482 }
3483 if (dval & DP_SYNC) {
3484 mpt_setsync(mpt, tgt, period, offset);
3485 }
3486 if (dval == 0) {
3487 MPTLOCK_2_CAMLOCK(mpt);
3488 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3489 break;
3490 }
3491 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3492 "set [%d]: 0x%x period 0x%x offset %d\n",
3493 tgt, dval, period, offset);
3494 if (mpt_update_spi_config(mpt, tgt)) {
3495 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3496 } else {
3497 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3498 }
3499 MPTLOCK_2_CAMLOCK(mpt);
3500 break;
3501 }
3502 case XPT_GET_TRAN_SETTINGS:
3503 {
3504#ifdef CAM_NEW_TRAN_CODE
3505 struct ccb_trans_settings_scsi *scsi;
3506 cts = &ccb->cts;
3507 cts->protocol = PROTO_SCSI;
3508 if (mpt->is_fc) {
3509 struct ccb_trans_settings_fc *fc =
3510 &cts->xport_specific.fc;
3511 cts->protocol_version = SCSI_REV_SPC;
3512 cts->transport = XPORT_FC;
3513 cts->transport_version = 0;
3514 fc->valid = CTS_FC_VALID_SPEED;
3515 fc->bitrate = 100000;
3516 } else if (mpt->is_sas) {
3517 struct ccb_trans_settings_sas *sas =
3518 &cts->xport_specific.sas;
3519 cts->protocol_version = SCSI_REV_SPC2;
3520 cts->transport = XPORT_SAS;
3521 cts->transport_version = 0;
3522 sas->valid = CTS_SAS_VALID_SPEED;
3523 sas->bitrate = 300000;
3524 } else {
3525 cts->protocol_version = SCSI_REV_2;
3526 cts->transport = XPORT_SPI;
3527 cts->transport_version = 2;
3528 if (mpt_get_spi_settings(mpt, cts) != 0) {
3529 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3530 break;
3531 }
3532 }
3533 scsi = &cts->proto_specific.scsi;
3534 scsi->valid = CTS_SCSI_VALID_TQ;
3535 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
3536#else
3537 cts = &ccb->cts;
3538 if (mpt->is_fc) {
3539 cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
3540 cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
3541 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3542 } else if (mpt->is_sas) {
3543 cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
3544 cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
3545 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3546 } else if (mpt_get_spi_settings(mpt, cts) != 0) {
3547 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3548 break;
3549 }
3550#endif
3551 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3552 break;
3553 }
3554 case XPT_CALC_GEOMETRY:
3555 {
3556 struct ccb_calc_geometry *ccg;
3557
3558 ccg = &ccb->ccg;
3559 if (ccg->block_size == 0) {
3560 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3561 mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3562 break;
3563 }
3564 mpt_calc_geometry(ccg, /*extended*/1);
3565 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
3566 break;
3567 }
3568 case XPT_PATH_INQ: /* Path routing inquiry */
3569 {
3570 struct ccb_pathinq *cpi = &ccb->cpi;
3571
3572 cpi->version_num = 1;
3573 cpi->target_sprt = 0;
3574 cpi->hba_eng_cnt = 0;
3575 cpi->max_target = mpt->port_facts[0].MaxDevices - 1;
3576 /*
3577 * FC cards report MAX_DEVICES of 512, but
3578 * the MSG_SCSI_IO_REQUEST target id field
3579 * is only 8 bits. Until we fix the driver
3580 * to support 'channels' for bus overflow,
3581 * just limit it.
3582 */
3583 if (cpi->max_target > 255) {
3584 cpi->max_target = 255;
3585 }
3586
3587 /*
3588 * VMware ESX reports > 16 devices and then dies when we probe.
3589 */
3590 if (mpt->is_spi && cpi->max_target > 15) {
3591 cpi->max_target = 15;
3592 }
3593 if (mpt->is_spi)
3594 cpi->max_lun = 7;
3595 else
3596 cpi->max_lun = MPT_MAX_LUNS;
3597 cpi->initiator_id = mpt->mpt_ini_id;
3598 cpi->bus_id = cam_sim_bus(sim);
3599
3600 /*
3601 * The base speed is the speed of the underlying connection.
3602 */
3603#ifdef CAM_NEW_TRAN_CODE
3604 cpi->protocol = PROTO_SCSI;
3605 if (mpt->is_fc) {
3606 cpi->hba_misc = PIM_NOBUSRESET;
3607 cpi->base_transfer_speed = 100000;
3608 cpi->hba_inquiry = PI_TAG_ABLE;
3609 cpi->transport = XPORT_FC;
3610 cpi->transport_version = 0;
3611 cpi->protocol_version = SCSI_REV_SPC;
3612 } else if (mpt->is_sas) {
3613 cpi->hba_misc = PIM_NOBUSRESET;
3614 cpi->base_transfer_speed = 300000;
3615 cpi->hba_inquiry = PI_TAG_ABLE;
3616 cpi->transport = XPORT_SAS;
3617 cpi->transport_version = 0;
3618 cpi->protocol_version = SCSI_REV_SPC2;
3619 } else {
3620 cpi->hba_misc = PIM_SEQSCAN;
3621 cpi->base_transfer_speed = 3300;
3622 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
3623 cpi->transport = XPORT_SPI;
3624 cpi->transport_version = 2;
3625 cpi->protocol_version = SCSI_REV_2;
3626 }
3627#else
3628 if (mpt->is_fc) {
3629 cpi->hba_misc = PIM_NOBUSRESET;
3630 cpi->base_transfer_speed = 100000;
3631 cpi->hba_inquiry = PI_TAG_ABLE;
3632 } else if (mpt->is_sas) {
3633 cpi->hba_misc = PIM_NOBUSRESET;
3634 cpi->base_transfer_speed = 300000;
3635 cpi->hba_inquiry = PI_TAG_ABLE;
3636 } else {
3637 cpi->hba_misc = PIM_SEQSCAN;
3638 cpi->base_transfer_speed = 3300;
3639 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
3640 }
3641#endif
3642
3643 /*
3644 * We give our fake RAID passthru bus a width that is MaxVolumes
3645 * wide and restrict it to one lun.
3646 */
3647 if (raid_passthru) {
3648 cpi->max_target = mpt->ioc_page2->MaxPhysDisks - 1;
3649 cpi->initiator_id = cpi->max_target + 1;
3650 cpi->max_lun = 0;
3651 }
3652
3653 if ((mpt->role & MPT_ROLE_INITIATOR) == 0) {
3654 cpi->hba_misc |= PIM_NOINITIATOR;
3655 }
3656 if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
3657 cpi->target_sprt =
3658 PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
3659 } else {
3660 cpi->target_sprt = 0;
3661 }
3662 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
3663 strncpy(cpi->hba_vid, "LSI", HBA_IDLEN);
3664 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
3665 cpi->unit_number = cam_sim_unit(sim);
3666 cpi->ccb_h.status = CAM_REQ_CMP;
3667 break;
3668 }
3669 case XPT_EN_LUN: /* Enable LUN as a target */
3670 {
3671 int result;
3672
3673 CAMLOCK_2_MPTLOCK(mpt);
3674 if (ccb->cel.enable)
3675 result = mpt_enable_lun(mpt,
3676 ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
3677 else
3678 result = mpt_disable_lun(mpt,
3679 ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
3680 MPTLOCK_2_CAMLOCK(mpt);
3681 if (result == 0) {
3682 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3683 } else {
3684 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3685 }
3686 break;
3687 }
3688 case XPT_NOTIFY_ACK: /* recycle notify ack */
3689 case XPT_IMMED_NOTIFY: /* Add Immediate Notify Resource */
3690 case XPT_ACCEPT_TARGET_IO: /* Add Accept Target IO Resource */
3691 {
3692 tgt_resource_t *trtp;
3693 lun_id_t lun = ccb->ccb_h.target_lun;
3694 ccb->ccb_h.sim_priv.entries[0].field = 0;
3695 ccb->ccb_h.sim_priv.entries[1].ptr = mpt;
3696 ccb->ccb_h.flags = 0;
3697
3698 if (lun == CAM_LUN_WILDCARD) {
3699 if (ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
3700 mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3701 break;
3702 }
3703 trtp = &mpt->trt_wildcard;
3704 } else if (lun >= MPT_MAX_LUNS) {
3705 mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3706 break;
3707 } else {
3708 trtp = &mpt->trt[lun];
3709 }
3710 CAMLOCK_2_MPTLOCK(mpt);
3711 if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
3712 mpt_lprt(mpt, MPT_PRT_DEBUG1,
3713 "Put FREE ATIO %p lun %d\n", ccb, lun);
3714 STAILQ_INSERT_TAIL(&trtp->atios, &ccb->ccb_h,
3715 sim_links.stqe);
3716 } else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
3717 mpt_lprt(mpt, MPT_PRT_DEBUG1,
3718 "Put FREE INOT lun %d\n", lun);
3719 STAILQ_INSERT_TAIL(&trtp->inots, &ccb->ccb_h,
3720 sim_links.stqe);
3721 } else {
3722 mpt_lprt(mpt, MPT_PRT_ALWAYS, "Got Notify ACK\n");
3723 }
3724 mpt_set_ccb_status(ccb, CAM_REQ_INPROG);
3725 MPTLOCK_2_CAMLOCK(mpt);
3726 return;
3727 }
3728 case XPT_CONT_TARGET_IO:
3729 CAMLOCK_2_MPTLOCK(mpt);
3730 mpt_target_start_io(mpt, ccb);
3731 MPTLOCK_2_CAMLOCK(mpt);
3732 return;
3733
3734 default:
3735 ccb->ccb_h.status = CAM_REQ_INVALID;
3736 break;
3737 }
3738 xpt_done(ccb);
3739}
3740
3741static int
3742mpt_get_spi_settings(struct mpt_softc *mpt, struct ccb_trans_settings *cts)
3743{
3744#ifdef CAM_NEW_TRAN_CODE
3745 struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi;
3746 struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi;
3747#endif
3748 target_id_t tgt;
3749 uint32_t dval, pval, oval;
3750 int rv;
3751
3752 if (IS_CURRENT_SETTINGS(cts) == 0) {
3753 tgt = cts->ccb_h.target_id;
3754 } else if (xpt_path_sim(cts->ccb_h.path) == mpt->phydisk_sim) {
3755 if (mpt_map_physdisk(mpt, (union ccb *)cts, &tgt)) {
3756 return (-1);
3757 }
3758 } else {
3759 tgt = cts->ccb_h.target_id;
3760 }
3761
3762 /*
3763 * We aren't looking at Port Page 2 BIOS settings here;
3764 * sometimes these have been known to be bogus (XXX).
3765 *
3766 * For user settings, we pick the maximums from Port Page 0.
3767 *
3768 * For current settings, we read the current settings out of
3769 * Device Page 0 for that target.
3770 */
3771 if (IS_CURRENT_SETTINGS(cts)) {
3772 CONFIG_PAGE_SCSI_DEVICE_0 tmp;
3773 dval = 0;
3774
3775 CAMLOCK_2_MPTLOCK(mpt);
3776 tmp = mpt->mpt_dev_page0[tgt];
3777 rv = mpt_read_cur_cfg_page(mpt, tgt, &tmp.Header,
3778 sizeof(tmp), FALSE, 5000);
3779 if (rv) {
3780 MPTLOCK_2_CAMLOCK(mpt);
3781 mpt_prt(mpt, "can't get tgt %d config page 0\n", tgt);
3782 return (rv);
3783 }
3784 mpt2host_config_page_scsi_device_0(&tmp);
3785
3786 MPTLOCK_2_CAMLOCK(mpt);
3787 mpt_lprt(mpt, MPT_PRT_DEBUG,
3788 "mpt_get_spi_settings[%d]: current NP %x Info %x\n",
3789 tgt,
3790 (unsigned)tmp.NegotiatedParameters,
3791 (unsigned)tmp.Information);
3792 dval |= (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE) ?
3793 DP_WIDE : DP_NARROW;
3794 dval |= (mpt->mpt_disc_enable & (1 << tgt)) ?
3795 DP_DISC_ENABLE : DP_DISC_DISABL;
3796 dval |= (mpt->mpt_tag_enable & (1 << tgt)) ?
3797 DP_TQING_ENABLE : DP_TQING_DISABL;
3798 oval = tmp.NegotiatedParameters;
3799 oval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK;
3800 oval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_OFFSET;
3801 pval = tmp.NegotiatedParameters;
3802 pval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK;
3803 pval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_PERIOD;
3804 mpt->mpt_dev_page0[tgt] = tmp;
3805 } else {
3806 dval = DP_WIDE|DP_DISC_ENABLE|DP_TQING_ENABLE|DP_SYNC;
3807 oval = mpt->mpt_port_page0.Capabilities;
3808 oval = MPI_SCSIPORTPAGE0_CAP_GET_MAX_SYNC_OFFSET(oval);
3809 pval = mpt->mpt_port_page0.Capabilities;
3810 pval = MPI_SCSIPORTPAGE0_CAP_GET_MIN_SYNC_PERIOD(pval);
3811 }
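	/*
	 * Worked example, assuming the usual MPI SCSI Device Page 0 layout
	 * (sync offset in bits 8-15, sync period factor in bits 16-23, WIDE
	 * at bit 29): NegotiatedParameters == 0x20083f00 yields DP_WIDE, an
	 * offset (oval) of 0x3f and a period factor (pval) of 0x08, i.e. a
	 * wide Ultra320 negotiation.
	 */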
3812
3813#ifndef CAM_NEW_TRAN_CODE
3814 cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
3815 cts->valid = 0;
3816 cts->sync_period = pval;
3817 cts->sync_offset = oval;
3818 cts->valid |= CCB_TRANS_SYNC_RATE_VALID;
3819 cts->valid |= CCB_TRANS_SYNC_OFFSET_VALID;
3820 cts->valid |= CCB_TRANS_BUS_WIDTH_VALID;
3821 if (dval & DP_WIDE) {
3822 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
3823 } else {
3824 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3825 }
3826 if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
3827 cts->valid |= CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
3828 if (dval & DP_DISC_ENABLE) {
3829 cts->flags |= CCB_TRANS_DISC_ENB;
3830 }
3831 if (dval & DP_TQING_ENABLE) {
3832 cts->flags |= CCB_TRANS_TAG_ENB;
3833 }
3834 }
3835#else
3836 spi->valid = 0;
3837 scsi->valid = 0;
3838 spi->flags = 0;
3839 scsi->flags = 0;
3840 spi->sync_offset = oval;
3841 spi->sync_period = pval;
3842 spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
3843 spi->valid |= CTS_SPI_VALID_SYNC_RATE;
3844 spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
3845 if (dval & DP_WIDE) {
3846 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
3847 } else {
3848 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3849 }
3850 if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
3851 scsi->valid = CTS_SCSI_VALID_TQ;
3852 if (dval & DP_TQING_ENABLE) {
3853 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
3854 }
3855 spi->valid |= CTS_SPI_VALID_DISC;
3856 if (dval & DP_DISC_ENABLE) {
3857 spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
3858 }
3859 }
3860#endif
3861 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3862 "mpt_get_spi_settings[%d]: %s flags 0x%x per 0x%x off=%d\n", tgt,
3863 IS_CURRENT_SETTINGS(cts)? "ACTIVE" : "NVRAM ", dval, pval, oval);
3864 return (0);
3865}
3866
3867static void
3868mpt_setwidth(struct mpt_softc *mpt, int tgt, int onoff)
3869{
3870 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;
3871
3872 ptr = &mpt->mpt_dev_page1[tgt];
3873 if (onoff) {
3874 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
3875 } else {
3876 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;
3877 }
3878}
3879
3880static void
3881mpt_setsync(struct mpt_softc *mpt, int tgt, int period, int offset)
3882{
3883 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;
3884
3885 ptr = &mpt->mpt_dev_page1[tgt];
3886 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
3887 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
3888 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_DT;
3889 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_QAS;
3890 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_IU;
3891 if (period == 0) {
3892 return;
3893 }
3894 ptr->RequestedParameters |=
3895 period << MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD;
3896 ptr->RequestedParameters |=
3897 offset << MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET;
3898 if (period < 0xa) {
3899 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_DT;
3900 }
3901 if (period < 0x9) {
3902 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_QAS;
3903 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_IU;
3904 }
3905}
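/*
 * Usage sketch: mpt_setsync(mpt, tgt, 0x08, 63) requests a minimum
 * period factor of 0x08 with a maximum offset of 63 and, because
 * 0x08 < 0x9, also sets the DT, QAS and IU bits (Ultra320-class
 * negotiation); mpt_setsync(mpt, tgt, 0, 0) drops the target back
 * to async.
 */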
3906
3907static int
3908mpt_update_spi_config(struct mpt_softc *mpt, int tgt)
3909{
3910 CONFIG_PAGE_SCSI_DEVICE_1 tmp;
3911 int rv;
3912
3913 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3914 "mpt_update_spi_config[%d].page1: Requested Params 0x%08x\n",
3915 tgt,
3916 (unsigned)mpt->mpt_dev_page1[tgt].RequestedParameters);
3917 tmp = mpt->mpt_dev_page1[tgt];
3918 host2mpt_config_page_scsi_device_1(&tmp);
3919 rv = mpt_write_cur_cfg_page(mpt, tgt,
3920 &tmp.Header, sizeof(tmp), FALSE, 5000);
3921 if (rv) {
3922 mpt_prt(mpt, "mpt_update_spi_config: write cur page failed\n");
3923 return (-1);
3924 }
3925 return (0);
3926}
3927
3928static void
3929mpt_calc_geometry(struct ccb_calc_geometry *ccg, int extended)
3930{
3931#if __FreeBSD_version >= 500000
3932 cam_calc_geometry(ccg, extended);
3933#else
3934 uint32_t size_mb;
3935 uint32_t secs_per_cylinder;
3936
3937 if (ccg->block_size == 0) {
3938 ccg->ccb_h.status = CAM_REQ_INVALID;
3939 return;
3940 }
3941 size_mb = ccg->volume_size / ((1024L * 1024L) / ccg->block_size);
3942 if (size_mb > 1024 && extended) {
3943 ccg->heads = 255;
3944 ccg->secs_per_track = 63;
3945 } else {
3946 ccg->heads = 64;
3947 ccg->secs_per_track = 32;
3948 }
3949 secs_per_cylinder = ccg->heads * ccg->secs_per_track;
3950 ccg->cylinders = ccg->volume_size / secs_per_cylinder;
3951 ccg->ccb_h.status = CAM_REQ_CMP;
3952#endif
3953}
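/*
 * Example for the pre-5.x path above: an 8388608-sector volume with
 * 512-byte blocks is 4096MB; since 4096 > 1024 and extended is set,
 * the geometry becomes 255 heads, 63 sectors/track, and
 * 8388608 / (255 * 63) = 522 cylinders.
 */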
3954
3955/****************************** Timeout Recovery ******************************/
3956static int
3957mpt_spawn_recovery_thread(struct mpt_softc *mpt)
3958{
3959 int error;
3960
3961 error = mpt_kthread_create(mpt_recovery_thread, mpt,
3962 &mpt->recovery_thread, /*flags*/0,
3963 /*altstack*/0, "mpt_recovery%d", mpt->unit);
3964 return (error);
3965}
3966
3967static void
3968mpt_terminate_recovery_thread(struct mpt_softc *mpt)
3969{
3970 if (mpt->recovery_thread == NULL) {
3971 return;
3972 }
3973 mpt->shutdwn_recovery = 1;
3974 wakeup(mpt);
3975 /*
3976 * Sleep on a slightly different location
3977 * for this interlock just for added safety.
3978 */
3979 mpt_sleep(mpt, &mpt->recovery_thread, PUSER, "thtrm", 0);
3980}
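/*
 * This interlock pairs with the bottom of mpt_recovery_thread(): after
 * seeing shutdwn_recovery, the thread clears mpt->recovery_thread and
 * calls wakeup(&mpt->recovery_thread), releasing the mpt_sleep() above.
 */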
3981
3982static void
3983mpt_recovery_thread(void *arg)
3984{
3985 struct mpt_softc *mpt;
3986
3987 mpt = (struct mpt_softc *)arg;
3988 MPT_LOCK(mpt);
3989 for (;;) {
3990 if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
3991 if (mpt->shutdwn_recovery == 0) {
3992 mpt_sleep(mpt, mpt, PUSER, "idle", 0);
3993 }
3994 }
3995 if (mpt->shutdwn_recovery != 0) {
3996 break;
3997 }
3998 mpt_recover_commands(mpt);
3999 }
4000 mpt->recovery_thread = NULL;
4001 wakeup(&mpt->recovery_thread);
4002 MPT_UNLOCK(mpt);
4003 mpt_kthread_exit(0);
4004}
4005
4006static int
4007mpt_scsi_send_tmf(struct mpt_softc *mpt, u_int type, u_int flags,
4008 u_int channel, u_int target, u_int lun, u_int abort_ctx, int sleep_ok)
4009{
4010 MSG_SCSI_TASK_MGMT *tmf_req;
4011 int error;
4012
4013 /*
4014 * Wait for any current TMF request to complete.
4015 * We're only allowed to issue one TMF at a time.
4016 */
4017 error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_FREE, REQ_STATE_FREE,
4018 sleep_ok, MPT_TMF_MAX_TIMEOUT);
4019 if (error != 0) {
4020 mpt_reset(mpt, TRUE);
4021 return (ETIMEDOUT);
4022 }
4023
4024 mpt_assign_serno(mpt, mpt->tmf_req);
4025 mpt->tmf_req->state = REQ_STATE_ALLOCATED|REQ_STATE_QUEUED;
4026
4027 tmf_req = (MSG_SCSI_TASK_MGMT *)mpt->tmf_req->req_vbuf;
4028 memset(tmf_req, 0, sizeof(*tmf_req));
4029 tmf_req->TargetID = target;
4030 tmf_req->Bus = channel;
4031 tmf_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
4032 tmf_req->TaskType = type;
4033 tmf_req->MsgFlags = flags;
4034 tmf_req->MsgContext =
4035 htole32(mpt->tmf_req->index | scsi_tmf_handler_id);
4036 if (lun > MPT_MAX_LUNS) {
4037 tmf_req->LUN[0] = 0x40 | ((lun >> 8) & 0x3f);
4038 tmf_req->LUN[1] = lun & 0xff;
4039 } else {
4040 tmf_req->LUN[1] = lun;
4041 }
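	/*
	 * Example, assuming MPT_MAX_LUNS is 256 as elsewhere in this
	 * driver: lun 0x123 takes the flat-addressing branch above, giving
	 * LUN[0] = 0x40 | 0x01 = 0x41 and LUN[1] = 0x23; smaller luns go
	 * directly into LUN[1] with LUN[0] left zero from the memset.
	 */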
4042 tmf_req->TaskMsgContext = abort_ctx;
4043
4044 mpt_lprt(mpt, MPT_PRT_DEBUG,
4045 "Issuing TMF %p:%u with MsgContext of 0x%x\n",
4046 mpt->tmf_req,
4047 (unsigned)mpt->tmf_req->serno,
4048 (unsigned)tmf_req->MsgContext);
4049 if (mpt->verbose > MPT_PRT_DEBUG) {
4050 mpt_print_request(tmf_req);
4051 }
4052
4053 KASSERT(mpt_req_on_pending_list(mpt, mpt->tmf_req) == 0,
4054 ("mpt_scsi_send_tmf: tmf_req already on pending list"));
4055 TAILQ_INSERT_HEAD(&mpt->request_pending_list, mpt->tmf_req, links);
4056 error = mpt_send_handshake_cmd(mpt, sizeof(*tmf_req), tmf_req);
4057 if (error != MPT_OK) {
4058 TAILQ_REMOVE(&mpt->request_pending_list, mpt->tmf_req, links);
4059 mpt->tmf_req->state = REQ_STATE_FREE;
4060 mpt_reset(mpt, TRUE);
4061 }
4062 return (error);
4063}
4064
4065/*
4066 * When a command times out, it is placed on the request_timeout_list
4067 * and we wake our recovery thread. The MPT-Fusion architecture supports
4068 * only a single TMF operation at a time, so we serially abort/BDR, etc.,
4069 * the timed-out transactions. The next TMF is issued either by the
4070 * completion handler of the current TMF waking our recovery thread,
4071 * or by the TMF timeout handler causing a hard reset sequence.
4072 */
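/*
 * For instance, if commands to targets 2 and 5 both time out, the
 * recovery thread issues ABORT_TASK for the target 2 request first;
 * the abort for target 5 goes out only after the first TMF completes
 * (or its own timeout forces a hard reset).
 */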
4073static void
4074mpt_recover_commands(struct mpt_softc *mpt)
4075{
4076 request_t *req;
4077 union ccb *ccb;
4078 int error;
4079
4080 if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
4081 /*
4082 * No work to do; leave.
4083 */
4084 mpt_prt(mpt, "mpt_recover_commands: no requests.\n");
4085 return;
4086 }
4087
4088 /*
4089 * Flush any commands whose completion coincides with their timeout.
4090 */
4091 mpt_intr(mpt);
4092
4093 if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
4094 /*
4095 * The timed-out commands have already
4096 * completed. This typically means
4097 * that either the timeout value was on
4098 * the hairy edge of what the device
4099 * requires or, more likely, interrupts
4100 * are not happening.
4101 */
4102 mpt_prt(mpt, "Timedout requests already complete. "
4103 "Interrupts may not be functioning.\n");
4104 mpt_enable_ints(mpt);
4105 return;
4106 }
4107
4108 /*
4109 * We have no visibility into the current state of the
4110 * controller, so attempt to abort the commands in the
4111 * order they timed out. For initiator commands, we
4112 * depend on the reply handler pulling requests off
4113 * the timeout list.
4114 */
4115 while ((req = TAILQ_FIRST(&mpt->request_timeout_list)) != NULL) {
4116 uint16_t status;
4117 uint8_t response;
4118 MSG_REQUEST_HEADER *hdrp = req->req_vbuf;
4119
4120 mpt_prt(mpt, "attempting to abort req %p:%u function %x\n",
4121 req, req->serno, hdrp->Function);
4122 ccb = req->ccb;
4123 if (ccb == NULL) {
4124 mpt_prt(mpt, "null ccb in timed out request. "
4125 "Resetting Controller.\n");
4126 mpt_reset(mpt, TRUE);
4127 continue;
4128 }
4129 mpt_set_ccb_status(ccb, CAM_CMD_TIMEOUT);
4130
4131 /*
4132 * Check to see whether this is an initiator command; if
4133 * it is not, deal with it differently.
4134 */
4135 switch (hdrp->Function) {
4136 case MPI_FUNCTION_SCSI_IO_REQUEST:
4137 case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
4138 break;
4139 default:
4140 /*
4141 * XXX: FIX ME: need to abort target assists...
4142 */
4143 mpt_prt(mpt, "just putting it back on the pend q\n");
4144 TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
4145 TAILQ_INSERT_HEAD(&mpt->request_pending_list, req,
4146 links);
4147 continue;
4148 }
4149
4150 error = mpt_scsi_send_tmf(mpt,
4151 MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
4152 0, 0, ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
4153 htole32(req->index | scsi_io_handler_id), TRUE);
4154
4155 if (error != 0) {
4156 /*
4157 * mpt_scsi_send_tmf hard resets on failure, so no
4158 * need to do so here. Our queue should be emptied