/*-
 * FreeBSD/CAM specific routines for LSI '909 FC adapters.
 * FreeBSD Version.
 *
 * Copyright (c) 2000, 2001 by Greg Ansley
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2002, 2006 by Matthew Jacob
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Support from Chris Ellsworth in order to make SAS adapters work
 * is gratefully acknowledged.
 *
 * Support from LSI-Logic has also gone a great deal toward making this a
 * workable subsystem and is gratefully acknowledged.
 */
/*-
 * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
 * Copyright (c) 2005, WHEEL Sp. z o.o.
 * Copyright (c) 2004, 2005 Justin T. Gibbs
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/mpt/mpt_cam.c,v 1.84 2012/02/11 12:03:44 marius Exp $
 */

#include <bus/cam/cam.h>
#include <bus/cam/cam_ccb.h>
#include <bus/cam/cam_xpt.h>
#include <bus/cam/cam_xpt_periph.h>

#include <dev/disk/mpt/mpt.h>
#include <dev/disk/mpt/mpt_cam.h>
#include <dev/disk/mpt/mpt_raid.h>

#include "dev/disk/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
#include "dev/disk/mpt/mpilib/mpi_init.h"
#include "dev/disk/mpt/mpilib/mpi_targ.h"
#include "dev/disk/mpt/mpilib/mpi_fc.h"
#include "dev/disk/mpt/mpilib/mpi_sas.h"
#include <sys/callout.h>
#include <sys/kthread.h>
#include <sys/sysctl.h>

static void mpt_poll(struct cam_sim *);
static timeout_t mpt_timeout;
static void mpt_action(struct cam_sim *, union ccb *);
static int
mpt_get_spi_settings(struct mpt_softc *, struct ccb_trans_settings *);
static void mpt_setwidth(struct mpt_softc *, int, int);
static void mpt_setsync(struct mpt_softc *, int, int, int);
static int mpt_update_spi_config(struct mpt_softc *, int);

static mpt_reply_handler_t mpt_scsi_reply_handler;
static mpt_reply_handler_t mpt_scsi_tmf_reply_handler;
static mpt_reply_handler_t mpt_fc_els_reply_handler;
static int mpt_scsi_reply_frame_handler(struct mpt_softc *, request_t *,
    MSG_DEFAULT_REPLY *);
static int mpt_bus_reset(struct mpt_softc *, target_id_t, lun_id_t, int);
static int mpt_fc_reset_link(struct mpt_softc *, int);

static int mpt_spawn_recovery_thread(struct mpt_softc *mpt);
static void mpt_terminate_recovery_thread(struct mpt_softc *mpt);
static void mpt_recovery_thread(void *arg);
static void mpt_recover_commands(struct mpt_softc *mpt);

static int mpt_scsi_send_tmf(struct mpt_softc *, u_int, u_int, u_int,
    u_int, u_int, u_int, int);

static void mpt_fc_post_els(struct mpt_softc *mpt, request_t *, int);
static void mpt_post_target_command(struct mpt_softc *, request_t *, int);
static int mpt_add_els_buffers(struct mpt_softc *mpt);
static int mpt_add_target_commands(struct mpt_softc *mpt);
static int mpt_enable_lun(struct mpt_softc *, target_id_t, lun_id_t);
static int mpt_disable_lun(struct mpt_softc *, target_id_t, lun_id_t);
static void mpt_target_start_io(struct mpt_softc *, union ccb *);
static cam_status mpt_abort_target_ccb(struct mpt_softc *, union ccb *);
static int mpt_abort_target_cmd(struct mpt_softc *, request_t *);
static void mpt_scsi_tgt_status(struct mpt_softc *, union ccb *, request_t *,
    uint8_t, uint8_t const *);
static void
mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *, request_t *, mpt_task_mgmt_t,
    tgt_resource_t *, int);
static void mpt_tgt_dump_tgt_state(struct mpt_softc *, request_t *);
static void mpt_tgt_dump_req_state(struct mpt_softc *, request_t *);
static mpt_reply_handler_t mpt_scsi_tgt_reply_handler;
static mpt_reply_handler_t mpt_sata_pass_reply_handler;

static uint32_t scsi_io_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t scsi_tmf_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t fc_els_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t sata_pass_handler_id = MPT_HANDLER_ID_NONE;

static mpt_probe_handler_t mpt_cam_probe;
static mpt_attach_handler_t mpt_cam_attach;
static mpt_enable_handler_t mpt_cam_enable;
static mpt_ready_handler_t mpt_cam_ready;
static mpt_event_handler_t mpt_cam_event;
static mpt_reset_handler_t mpt_cam_ioc_reset;
static mpt_detach_handler_t mpt_cam_detach;

static struct mpt_personality mpt_cam_personality =
{
	.name = "mpt_cam",
	.probe = mpt_cam_probe,
	.attach = mpt_cam_attach,
	.enable = mpt_cam_enable,
	.ready = mpt_cam_ready,
	.event = mpt_cam_event,
	.reset = mpt_cam_ioc_reset,
	.detach = mpt_cam_detach,
};

DECLARE_MPT_PERSONALITY(mpt_cam, SI_ORDER_SECOND);
MODULE_DEPEND(mpt_cam, cam, 1, 1, 1);

int mpt_enable_sata_wc = -1;
TUNABLE_INT("hw.mpt.enable_sata_wc", &mpt_enable_sata_wc);
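/*
 * Semantics as used by mpt_set_initial_config_sas() below: the default of
 * -1 leaves each SATA drive's write cache alone, 0 disables it, and any
 * other value enables it via a SATA passthrough command at attach time.
 */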

static int
mpt_cam_probe(struct mpt_softc *mpt)
{
	int role;

	/*
	 * Only attach to nodes that support the initiator or target role
	 * (or want to) or have RAID physical devices that need CAM pass-thru
	 * support.
	 */
	if (mpt->do_cfg_role) {
		role = mpt->cfg_role;
	} else {
		role = mpt->role;
	}
	if ((role & (MPT_ROLE_TARGET|MPT_ROLE_INITIATOR)) != 0 ||
	    (mpt->ioc_page2 != NULL && mpt->ioc_page2->MaxPhysDisks != 0)) {
		return (0);
	}
	return (ENODEV);
}

static int
mpt_cam_attach(struct mpt_softc *mpt)
{
	struct cam_devq *devq;
	mpt_handler_t handler;
	int maxq;
	int error;

	MPT_LOCK(mpt);
	TAILQ_INIT(&mpt->request_timeout_list);
	maxq = (mpt->ioc_facts.GlobalCredits < MPT_MAX_REQUESTS(mpt))?
	    mpt->ioc_facts.GlobalCredits : MPT_MAX_REQUESTS(mpt);
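	/*
	 * maxq is the lesser of what the IOC advertises (GlobalCredits)
	 * and the request slots we allocated; the ELS and TMF reservations
	 * are carved out of it below before it sizes the CAM simq.
	 */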

	handler.reply_handler = mpt_scsi_reply_handler;
	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
	    &scsi_io_handler_id);
	if (error != 0) {
		MPT_UNLOCK(mpt);
		goto cleanup;
	}

	handler.reply_handler = mpt_scsi_tmf_reply_handler;
	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
	    &scsi_tmf_handler_id);
	if (error != 0) {
		MPT_UNLOCK(mpt);
		goto cleanup;
	}

	/*
	 * If we're fibre channel and could support target mode, we register
	 * an ELS reply handler and give it resources.
	 */
	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
		handler.reply_handler = mpt_fc_els_reply_handler;
		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
		    &fc_els_handler_id);
		if (error != 0) {
			MPT_UNLOCK(mpt);
			goto cleanup;
		}
		if (mpt_add_els_buffers(mpt) == FALSE) {
			error = ENOMEM;
			MPT_UNLOCK(mpt);
			goto cleanup;
		}
		maxq -= mpt->els_cmds_allocated;
	}

	/*
	 * If we support target mode, we register a reply handler for it,
	 * but don't add command resources until we actually enable target
	 * mode.
	 */
	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
		handler.reply_handler = mpt_scsi_tgt_reply_handler;
		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
		    &mpt->scsi_tgt_handler_id);
		if (error != 0) {
			MPT_UNLOCK(mpt);
			goto cleanup;
		}
	}

	if (mpt->is_sas) {
		handler.reply_handler = mpt_sata_pass_reply_handler;
		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
		    &sata_pass_handler_id);
		if (error != 0) {
			MPT_UNLOCK(mpt);
			goto cleanup;
		}
	}

	/*
	 * We keep one request reserved for timeout TMF requests.
	 */
	mpt->tmf_req = mpt_get_request(mpt, FALSE);
	if (mpt->tmf_req == NULL) {
		mpt_prt(mpt, "Unable to allocate dedicated TMF request!\n");
		error = ENOMEM;
		MPT_UNLOCK(mpt);
		goto cleanup;
	}

	/*
	 * Mark the request as free even though not on the free list.
	 * There is only one TMF request allowed to be outstanding at
	 * a time and the TMF routines perform their own allocation
	 * tracking using the standard state flags.
	 */
	mpt->tmf_req->state = REQ_STATE_FREE;
	maxq--;

	/*
	 * The rest of this is CAM foo, for which we need to drop our lock.
	 */
	MPT_UNLOCK(mpt);

	if (mpt_spawn_recovery_thread(mpt) != 0) {
		mpt_prt(mpt, "Unable to spawn recovery thread!\n");
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Create the device queue for our SIM(s).
	 */
	devq = cam_simq_alloc(maxq);
	if (devq == NULL) {
		mpt_prt(mpt, "Unable to allocate CAM SIMQ!\n");
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Construct our SIM entry.
	 */
	mpt->sim =
	    mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
	if (mpt->sim == NULL) {
		mpt_prt(mpt, "Unable to allocate CAM SIM!\n");
		cam_devq_release(devq);
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Register exactly this bus.
	 */
	MPT_LOCK(mpt);
	if (xpt_bus_register(mpt->sim, 0) != CAM_SUCCESS) {
		mpt_prt(mpt, "Bus registration Failed!\n");
		error = ENOMEM;
		MPT_UNLOCK(mpt);
		goto cleanup;
	}

	if (xpt_create_path(&mpt->path, NULL, cam_sim_path(mpt->sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		mpt_prt(mpt, "Unable to allocate Path!\n");
		error = ENOMEM;
		MPT_UNLOCK(mpt);
		goto cleanup;
	}
	MPT_UNLOCK(mpt);

	/*
	 * Only register a second bus for RAID physical
	 * devices if the controller supports RAID.
	 */
	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
		return (0);
	}

	/*
	 * Create a "bus" to export all hidden disks to CAM.
	 */
	mpt->phydisk_sim =
	    mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
	if (mpt->phydisk_sim == NULL) {
		mpt_prt(mpt, "Unable to allocate Physical Disk CAM SIM!\n");
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Register this bus.
	 */
	MPT_LOCK(mpt);
	if (xpt_bus_register(mpt->phydisk_sim, 1) !=
	    CAM_SUCCESS) {
		mpt_prt(mpt, "Physical Disk Bus registration Failed!\n");
		error = ENOMEM;
		MPT_UNLOCK(mpt);
		goto cleanup;
	}

	if (xpt_create_path(&mpt->phydisk_path, NULL,
	    cam_sim_path(mpt->phydisk_sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		mpt_prt(mpt, "Unable to allocate Physical Disk Path!\n");
		error = ENOMEM;
		MPT_UNLOCK(mpt);
		goto cleanup;
	}
	MPT_UNLOCK(mpt);
	mpt_lprt(mpt, MPT_PRT_DEBUG, "attached cam\n");
	return (0);

cleanup:
	mpt_cam_detach(mpt);
	return (error);
}

/*
 * Read FC configuration information
 */
static int
mpt_read_config_info_fc(struct mpt_softc *mpt)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	char *topology = NULL;
	int rv;

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 0,
	    0, &mpt->mpt_fcport_page0.Header, FALSE, 5000);
	if (rv) {
		return (-1);
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG, "FC Port Page 0 Header: %x %x %x %x\n",
	    mpt->mpt_fcport_page0.Header.PageVersion,
	    mpt->mpt_fcport_page0.Header.PageLength,
	    mpt->mpt_fcport_page0.Header.PageNumber,
	    mpt->mpt_fcport_page0.Header.PageType);

	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_fcport_page0.Header,
	    sizeof(mpt->mpt_fcport_page0), FALSE, 5000);
	if (rv) {
		mpt_prt(mpt, "failed to read FC Port Page 0\n");
		return (-1);
	}
	mpt2host_config_page_fc_port_0(&mpt->mpt_fcport_page0);

	mpt->mpt_fcport_speed = mpt->mpt_fcport_page0.CurrentSpeed;

	switch (mpt->mpt_fcport_page0.Flags &
	    MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK) {
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT:
		mpt->mpt_fcport_speed = 0;
		topology = "<NO LOOP>";
		break;
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_POINT_TO_POINT:
		topology = "N-Port";
		break;
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_PRIVATE_LOOP:
		topology = "NL-Port";
		break;
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_FABRIC_DIRECT:
		topology = "F-Port";
		break;
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_PUBLIC_LOOP:
		topology = "FL-Port";
		break;
	default:
		mpt->mpt_fcport_speed = 0;
		topology = "?";
		break;
	}

	mpt_lprt(mpt, MPT_PRT_INFO,
	    "FC Port Page 0: Topology <%s> WWNN 0x%08x%08x WWPN 0x%08x%08x "
	    "Speed %u-Gbit\n", topology,
	    mpt->mpt_fcport_page0.WWNN.High,
	    mpt->mpt_fcport_page0.WWNN.Low,
	    mpt->mpt_fcport_page0.WWPN.High,
	    mpt->mpt_fcport_page0.WWPN.Low,
	    mpt->mpt_fcport_speed);
	MPT_UNLOCK(mpt);
	ctx = device_get_sysctl_ctx(mpt->dev);
	tree = device_get_sysctl_tree(mpt->dev);

	ksnprintf(mpt->scinfo.fc.wwnn,
	    sizeof (mpt->scinfo.fc.wwnn), "0x%08x%08x",
	    mpt->mpt_fcport_page0.WWNN.High,
	    mpt->mpt_fcport_page0.WWNN.Low);

	ksnprintf(mpt->scinfo.fc.wwpn,
	    sizeof (mpt->scinfo.fc.wwpn), "0x%08x%08x",
	    mpt->mpt_fcport_page0.WWPN.High,
	    mpt->mpt_fcport_page0.WWPN.Low);

	SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "wwnn", CTLFLAG_RD, mpt->scinfo.fc.wwnn, 0,
	    "World Wide Node Name");

	SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "wwpn", CTLFLAG_RD, mpt->scinfo.fc.wwpn, 0,
	    "World Wide Port Name");

	MPT_LOCK(mpt);
	return (0);
}

/*
 * Set FC configuration information.
 */
static int
mpt_set_initial_config_fc(struct mpt_softc *mpt)
{
	CONFIG_PAGE_FC_PORT_1 fc;
	U32 fl;
	int r, doit = 0;
	int role;

	r = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 1, 0,
	    &fc.Header, FALSE, 5000);
	if (r) {
		mpt_prt(mpt, "failed to read FC page 1 header\n");
		return (mpt_fc_reset_link(mpt, 1));
	}

	r = mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_NVRAM, 0,
	    &fc.Header, sizeof (fc), FALSE, 5000);
	if (r) {
		mpt_prt(mpt, "failed to read FC page 1\n");
		return (mpt_fc_reset_link(mpt, 1));
	}
	mpt2host_config_page_fc_port_1(&fc);

	/*
	 * Check our flags to make sure we support the role we want.
	 */
	doit = 0;
	role = 0;
	fl = fc.Flags;

	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT) {
		role |= MPT_ROLE_INITIATOR;
	}
	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
		role |= MPT_ROLE_TARGET;
	}

	fl &= ~MPI_FCPORTPAGE1_FLAGS_PROT_MASK;

	if (mpt->do_cfg_role == 0) {
		role = mpt->cfg_role;
	} else {
		mpt->do_cfg_role = 0;
	}

	if (role != mpt->cfg_role) {
		if (mpt->cfg_role & MPT_ROLE_INITIATOR) {
			if ((role & MPT_ROLE_INITIATOR) == 0) {
				mpt_prt(mpt, "adding initiator role\n");
				fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT;
				doit++;
			} else {
				mpt_prt(mpt, "keeping initiator role\n");
			}
		} else if (role & MPT_ROLE_INITIATOR) {
			mpt_prt(mpt, "removing initiator role\n");
			doit++;
		}
		if (mpt->cfg_role & MPT_ROLE_TARGET) {
			if ((role & MPT_ROLE_TARGET) == 0) {
				mpt_prt(mpt, "adding target role\n");
				fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG;
				doit++;
			} else {
				mpt_prt(mpt, "keeping target role\n");
			}
		} else if (role & MPT_ROLE_TARGET) {
			mpt_prt(mpt, "removing target role\n");
			doit++;
		}
		mpt->role = mpt->cfg_role;
	}

	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
		if ((fl & MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID) == 0) {
			mpt_prt(mpt, "adding OXID option\n");
			fl |= MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID;
			doit++;
		}
	}

	if (doit) {
		fc.Flags = fl;
		host2mpt_config_page_fc_port_1(&fc);
		r = mpt_write_cfg_page(mpt,
		    MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM, 0, &fc.Header,
		    sizeof(fc), FALSE, 5000);
		if (r != 0) {
			mpt_prt(mpt, "failed to update NVRAM with changes\n");
			return (0);
		}
		mpt_prt(mpt, "NOTE: NVRAM changes will not take "
		    "effect until next reboot or IOC reset\n");
	}
	return (0);
}

static int
mptsas_sas_io_unit_pg0(struct mpt_softc *mpt, struct mptsas_portinfo *portinfo)
{
	ConfigExtendedPageHeader_t hdr;
	struct mptsas_phyinfo *phyinfo;
	SasIOUnitPage0_t *buffer;
	int error, len, i;

	error = mpt_read_extcfg_header(mpt, MPI_SASIOUNITPAGE0_PAGEVERSION,
	    0, 0, MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT,
	    &hdr, 0, 10000);
	if (error)
		goto out;
	if (hdr.ExtPageLength == 0) {
		error = ENXIO;
		goto out;
	}

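	/* hdr.ExtPageLength counts 32-bit words, hence the *4 for bytes. */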
	len = hdr.ExtPageLength * 4;
	buffer = kmalloc(len, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (buffer == NULL) {
		error = ENOMEM;
		goto out;
	}

	error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
	    0, &hdr, buffer, len, 0, 10000);
	if (error) {
		kfree(buffer, M_DEVBUF);
		goto out;
	}

	portinfo->num_phys = buffer->NumPhys;
	portinfo->phy_info = kmalloc(sizeof(*portinfo->phy_info) *
	    portinfo->num_phys, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (portinfo->phy_info == NULL) {
		kfree(buffer, M_DEVBUF);
		error = ENOMEM;
		goto out;
	}

	for (i = 0; i < portinfo->num_phys; i++) {
		phyinfo = &portinfo->phy_info[i];
		phyinfo->phy_num = i;
		phyinfo->port_id = buffer->PhyData[i].Port;
		phyinfo->negotiated_link_rate =
		    buffer->PhyData[i].NegotiatedLinkRate;
		phyinfo->handle =
		    le16toh(buffer->PhyData[i].ControllerDevHandle);
	}

	kfree(buffer, M_DEVBUF);
out:
	return (error);
}

static int
mptsas_sas_phy_pg0(struct mpt_softc *mpt, struct mptsas_phyinfo *phy_info,
    uint32_t form, uint32_t form_specific)
{
	ConfigExtendedPageHeader_t hdr;
	SasPhyPage0_t *buffer;
	int error;

	error = mpt_read_extcfg_header(mpt, MPI_SASPHY0_PAGEVERSION, 0, 0,
	    MPI_CONFIG_EXTPAGETYPE_SAS_PHY, &hdr,
	    0, 10000);
	if (error)
		goto out;
	if (hdr.ExtPageLength == 0) {
		error = ENXIO;
		goto out;
	}

	buffer = kmalloc(sizeof(SasPhyPage0_t), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (buffer == NULL) {
		error = ENOMEM;
		goto out;
	}

	error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
	    form + form_specific, &hdr, buffer,
	    sizeof(SasPhyPage0_t), 0, 10000);
	if (error) {
		kfree(buffer, M_DEVBUF);
		goto out;
	}

	phy_info->hw_link_rate = buffer->HwLinkRate;
	phy_info->programmed_link_rate = buffer->ProgrammedLinkRate;
	phy_info->identify.dev_handle = le16toh(buffer->OwnerDevHandle);
	phy_info->attached.dev_handle = le16toh(buffer->AttachedDevHandle);

	kfree(buffer, M_DEVBUF);
out:
	return (error);
}

static int
mptsas_sas_device_pg0(struct mpt_softc *mpt, struct mptsas_devinfo *device_info,
    uint32_t form, uint32_t form_specific)
{
	ConfigExtendedPageHeader_t hdr;
	SasDevicePage0_t *buffer;
	uint64_t sas_address;
	int error = 0;

	bzero(device_info, sizeof(*device_info));
	error = mpt_read_extcfg_header(mpt, MPI_SASDEVICE0_PAGEVERSION, 0, 0,
	    MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE,
	    &hdr, 0, 10000);
	if (error)
		goto out;
	if (hdr.ExtPageLength == 0) {
		error = ENXIO;
		goto out;
	}

	buffer = kmalloc(sizeof(SasDevicePage0_t), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (buffer == NULL) {
		error = ENOMEM;
		goto out;
	}

	error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
	    form + form_specific, &hdr, buffer,
	    sizeof(SasDevicePage0_t), 0, 10000);
	if (error) {
		kfree(buffer, M_DEVBUF);
		goto out;
	}

	device_info->dev_handle = le16toh(buffer->DevHandle);
	device_info->parent_dev_handle = le16toh(buffer->ParentDevHandle);
	device_info->enclosure_handle = le16toh(buffer->EnclosureHandle);
	device_info->slot = le16toh(buffer->Slot);
	device_info->phy_num = buffer->PhyNum;
	device_info->physical_port = buffer->PhysicalPort;
	device_info->target_id = buffer->TargetID;
	device_info->bus = buffer->Bus;
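	/*
	 * The 64-bit SAS address is copied out with bcopy before the byte
	 * swap, presumably because the field need not be naturally aligned
	 * in the reply buffer (our inference).
	 */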
	bcopy(&buffer->SASAddress, &sas_address, sizeof(uint64_t));
	device_info->sas_address = le64toh(sas_address);
	device_info->device_info = le32toh(buffer->DeviceInfo);

	kfree(buffer, M_DEVBUF);
out:
	return (error);
}

/*
 * Read SAS configuration information.
 */
static int
mpt_read_config_info_sas(struct mpt_softc *mpt)
{
	struct mptsas_portinfo *portinfo;
	struct mptsas_phyinfo *phyinfo;
	int error, i;

	portinfo = kmalloc(sizeof(*portinfo), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (portinfo == NULL)
		return (ENOMEM);

	error = mptsas_sas_io_unit_pg0(mpt, portinfo);
	if (error) {
		kfree(portinfo, M_DEVBUF);
		return (0);
	}

	for (i = 0; i < portinfo->num_phys; i++) {
		phyinfo = &portinfo->phy_info[i];
		error = mptsas_sas_phy_pg0(mpt, phyinfo,
		    (MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER <<
		    MPI_SAS_PHY_PGAD_FORM_SHIFT), i);
		if (error)
			break;
		error = mptsas_sas_device_pg0(mpt, &phyinfo->identify,
		    (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
		    MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
		    phyinfo->handle);
		if (error)
			break;
		phyinfo->identify.phy_num = phyinfo->phy_num = i;
		if (phyinfo->attached.dev_handle)
			error = mptsas_sas_device_pg0(mpt,
			    &phyinfo->attached,
			    (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
			    MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
			    phyinfo->attached.dev_handle);
		if (error)
			break;
	}
	mpt->sas_portinfo = portinfo;
	return (0);
}

static void
mptsas_set_sata_wc(struct mpt_softc *mpt, struct mptsas_devinfo *devinfo,
    int enabled)
{
	SataPassthroughRequest_t *pass;
	request_t *req;
	int error, status;

	req = mpt_get_request(mpt, 0);
	if (req == NULL)
		return;

	pass = req->req_vbuf;
	bzero(pass, sizeof(SataPassthroughRequest_t));
	pass->Function = MPI_FUNCTION_SATA_PASSTHROUGH;
	pass->TargetID = devinfo->target_id;
	pass->Bus = devinfo->bus;
	pass->PassthroughFlags = 0;
	pass->ConnectionRate = MPI_SATA_PT_REQ_CONNECT_RATE_NEGOTIATED;
	pass->DataLength = 0;
	pass->MsgContext = htole32(req->index | sata_pass_handler_id);
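	/*
	 * Build a Register Host-to-Device FIS carrying an ATA SET FEATURES
	 * command: byte 0 is the FIS type (0x27), byte 1 sets the command
	 * bit, byte 2 is SET FEATURES (0xef), and byte 3 selects the
	 * subcommand, 0x02 to enable or 0x82 to disable the drive write
	 * cache.  (The byte meanings are our reading of the SATA and ATA
	 * specs, noted here for reference.)
	 */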
	pass->CommandFIS[0] = 0x27;
	pass->CommandFIS[1] = 0x80;
	pass->CommandFIS[2] = 0xef;
	pass->CommandFIS[3] = (enabled) ? 0x02 : 0x82;
	pass->CommandFIS[7] = 0x40;
	pass->CommandFIS[15] = 0x08;

	mpt_check_doorbell(mpt);
	mpt_send_cmd(mpt, req);
	error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, 0,
	    10 * 1000);
	if (error) {
		mpt_free_request(mpt, req);
		kprintf("error %d sending passthrough\n", error);
		return;
	}

	status = le16toh(req->IOCStatus);
	if (status != MPI_IOCSTATUS_SUCCESS) {
		mpt_free_request(mpt, req);
		kprintf("IOCSTATUS %d\n", status);
		return;
	}

	mpt_free_request(mpt, req);
}

/*
 * Set SAS configuration information.
 */
static int
mpt_set_initial_config_sas(struct mpt_softc *mpt)
{
	struct mptsas_phyinfo *phyinfo;
	int i;

	if ((mpt_enable_sata_wc != -1) && (mpt->sas_portinfo != NULL)) {
		for (i = 0; i < mpt->sas_portinfo->num_phys; i++) {
			phyinfo = &mpt->sas_portinfo->phy_info[i];
			if (phyinfo->attached.dev_handle == 0)
				continue;
			if ((phyinfo->attached.device_info &
			    MPI_SAS_DEVICE_INFO_SATA_DEVICE) == 0)
				continue;
			if (bootverbose)
				device_printf(mpt->dev,
				    "%sabling SATA WC on phy %d\n",
				    (mpt_enable_sata_wc) ? "En" : "Dis", i);
			mptsas_set_sata_wc(mpt, &phyinfo->attached,
			    mpt_enable_sata_wc);
		}
	}

	return (0);
}

static int
mpt_sata_pass_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{

	if (req != NULL) {
		if (reply_frame != NULL) {
			req->IOCStatus = le16toh(reply_frame->IOCStatus);
		}
		req->state &= ~REQ_STATE_QUEUED;
		req->state |= REQ_STATE_DONE;
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
			wakeup(req);
		} else if ((req->state & REQ_STATE_TIMEDOUT) != 0) {
			/*
			 * Whew- we can free this request (late completion)
			 */
			mpt_free_request(mpt, req);
		}
	}

	return (TRUE);
}

/*
 * Read SCSI configuration information
 */
static int
mpt_read_config_info_spi(struct mpt_softc *mpt)
{
	int rv, i;

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 0, 0,
	    &mpt->mpt_port_page0.Header, FALSE, 5000);
	if (rv) {
		return (-1);
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 0 Header: %x %x %x %x\n",
	    mpt->mpt_port_page0.Header.PageVersion,
	    mpt->mpt_port_page0.Header.PageLength,
	    mpt->mpt_port_page0.Header.PageNumber,
	    mpt->mpt_port_page0.Header.PageType);

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 1, 0,
	    &mpt->mpt_port_page1.Header, FALSE, 5000);
	if (rv) {
		return (-1);
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 1 Header: %x %x %x %x\n",
	    mpt->mpt_port_page1.Header.PageVersion,
	    mpt->mpt_port_page1.Header.PageLength,
	    mpt->mpt_port_page1.Header.PageNumber,
	    mpt->mpt_port_page1.Header.PageType);

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 2, 0,
	    &mpt->mpt_port_page2.Header, FALSE, 5000);
	if (rv) {
		return (-1);
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 2 Header: %x %x %x %x\n",
	    mpt->mpt_port_page2.Header.PageVersion,
	    mpt->mpt_port_page2.Header.PageLength,
	    mpt->mpt_port_page2.Header.PageNumber,
	    mpt->mpt_port_page2.Header.PageType);

	for (i = 0; i < 16; i++) {
		rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
		    0, i, &mpt->mpt_dev_page0[i].Header, FALSE, 5000);
		if (rv) {
			return (-1);
		}
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "SPI Target %d Device Page 0 Header: %x %x %x %x\n", i,
		    mpt->mpt_dev_page0[i].Header.PageVersion,
		    mpt->mpt_dev_page0[i].Header.PageLength,
		    mpt->mpt_dev_page0[i].Header.PageNumber,
		    mpt->mpt_dev_page0[i].Header.PageType);

		rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
		    1, i, &mpt->mpt_dev_page1[i].Header, FALSE, 5000);
		if (rv) {
			return (-1);
		}
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "SPI Target %d Device Page 1 Header: %x %x %x %x\n", i,
		    mpt->mpt_dev_page1[i].Header.PageVersion,
		    mpt->mpt_dev_page1[i].Header.PageLength,
		    mpt->mpt_dev_page1[i].Header.PageNumber,
		    mpt->mpt_dev_page1[i].Header.PageType);
	}

	/*
	 * At this point, we don't *have* to fail. As long as we have
	 * valid config header information, we can (barely) lurch
	 * along.
	 */

	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page0.Header,
	    sizeof(mpt->mpt_port_page0), FALSE, 5000);
	if (rv) {
		mpt_prt(mpt, "failed to read SPI Port Page 0\n");
	} else {
		mpt2host_config_page_scsi_port_0(&mpt->mpt_port_page0);
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "SPI Port Page 0: Capabilities %x PhysicalInterface %x\n",
		    mpt->mpt_port_page0.Capabilities,
		    mpt->mpt_port_page0.PhysicalInterface);
	}

	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page1.Header,
	    sizeof(mpt->mpt_port_page1), FALSE, 5000);
	if (rv) {
		mpt_prt(mpt, "failed to read SPI Port Page 1\n");
	} else {
		mpt2host_config_page_scsi_port_1(&mpt->mpt_port_page1);
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "SPI Port Page 1: Configuration %x OnBusTimerValue %x\n",
		    mpt->mpt_port_page1.Configuration,
		    mpt->mpt_port_page1.OnBusTimerValue);
	}

	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page2.Header,
	    sizeof(mpt->mpt_port_page2), FALSE, 5000);
	if (rv) {
		mpt_prt(mpt, "failed to read SPI Port Page 2\n");
	} else {
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "Port Page 2: Flags %x Settings %x\n",
		    mpt->mpt_port_page2.PortFlags,
		    mpt->mpt_port_page2.PortSettings);
		mpt2host_config_page_scsi_port_2(&mpt->mpt_port_page2);
		for (i = 0; i < 16; i++) {
			mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
			    " Port Page 2 Tgt %d: timo %x SF %x Flags %x\n",
			    i, mpt->mpt_port_page2.DeviceSettings[i].Timeout,
			    mpt->mpt_port_page2.DeviceSettings[i].SyncFactor,
			    mpt->mpt_port_page2.DeviceSettings[i].DeviceFlags);
		}
	}

	for (i = 0; i < 16; i++) {
		rv = mpt_read_cur_cfg_page(mpt, i,
		    &mpt->mpt_dev_page0[i].Header, sizeof(*mpt->mpt_dev_page0),
		    FALSE, 5000);
		if (rv) {
			mpt_prt(mpt,
			    "cannot read SPI Target %d Device Page 0\n", i);
			continue;
		}
		mpt2host_config_page_scsi_device_0(&mpt->mpt_dev_page0[i]);
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "target %d page 0: Negotiated Params %x Information %x\n",
		    i, mpt->mpt_dev_page0[i].NegotiatedParameters,
		    mpt->mpt_dev_page0[i].Information);

		rv = mpt_read_cur_cfg_page(mpt, i,
		    &mpt->mpt_dev_page1[i].Header, sizeof(*mpt->mpt_dev_page1),
		    FALSE, 5000);
		if (rv) {
			mpt_prt(mpt,
			    "cannot read SPI Target %d Device Page 1\n", i);
			continue;
		}
		mpt2host_config_page_scsi_device_1(&mpt->mpt_dev_page1[i]);
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "target %d page 1: Requested Params %x Configuration %x\n",
		    i, mpt->mpt_dev_page1[i].RequestedParameters,
		    mpt->mpt_dev_page1[i].Configuration);
	}
	return (0);
}

/*
 * Validate SPI configuration information.
 *
 * In particular, validate SPI Port Page 1.
 */
static int
mpt_set_initial_config_spi(struct mpt_softc *mpt)
{
	int error, i, pp1val;

	mpt->mpt_disc_enable = 0xff;
	mpt->mpt_tag_enable = 0;

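	/*
	 * SPI Port Page 1 Configuration packs the port's SCSI ID into the
	 * low byte and a one-hot "response ID" bitmask into the upper word
	 * (our reading of the MPI spec); both fields must name our
	 * initiator ID, which is what pp1val encodes.
	 */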
	pp1val = ((1 << mpt->mpt_ini_id) <<
	    MPI_SCSIPORTPAGE1_CFG_SHIFT_PORT_RESPONSE_ID) | mpt->mpt_ini_id;
	if (mpt->mpt_port_page1.Configuration != pp1val) {
		CONFIG_PAGE_SCSI_PORT_1 tmp;

		mpt_prt(mpt, "SPI Port Page 1 Config value bad (%x)- should "
		    "be %x\n", mpt->mpt_port_page1.Configuration, pp1val);
		tmp = mpt->mpt_port_page1;
		tmp.Configuration = pp1val;
		host2mpt_config_page_scsi_port_1(&tmp);
		error = mpt_write_cur_cfg_page(mpt, 0,
		    &tmp.Header, sizeof(tmp), FALSE, 5000);
		if (error) {
			return (-1);
		}
		error = mpt_read_cur_cfg_page(mpt, 0,
		    &tmp.Header, sizeof(tmp), FALSE, 5000);
		if (error) {
			return (-1);
		}
		mpt2host_config_page_scsi_port_1(&tmp);
		if (tmp.Configuration != pp1val) {
			mpt_prt(mpt,
			    "failed to reset SPI Port Page 1 Config value\n");
			return (-1);
		}
		mpt->mpt_port_page1 = tmp;
	}

	/*
	 * The purpose of this exercise is to get
	 * all targets back to async/narrow.
	 *
	 * We skip this step if the BIOS has already negotiated
	 * speeds with the targets.
	 */
	i = mpt->mpt_port_page2.PortSettings &
	    MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
	if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS) {
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "honoring BIOS transfer negotiations\n");
	} else {
		for (i = 0; i < 16; i++) {
			mpt->mpt_dev_page1[i].RequestedParameters = 0;
			mpt->mpt_dev_page1[i].Configuration = 0;
			(void) mpt_update_spi_config(mpt, i);
		}
	}
	return (0);
}

static int
mpt_cam_enable(struct mpt_softc *mpt)
{
	int error;

	MPT_LOCK(mpt);

	error = EIO;
	if (mpt->is_fc) {
		if (mpt_read_config_info_fc(mpt)) {
			goto out;
		}
		if (mpt_set_initial_config_fc(mpt)) {
			goto out;
		}
	} else if (mpt->is_sas) {
		if (mpt_read_config_info_sas(mpt)) {
			goto out;
		}
		if (mpt_set_initial_config_sas(mpt)) {
			goto out;
		}
	} else if (mpt->is_spi) {
		if (mpt_read_config_info_spi(mpt)) {
			goto out;
		}
		if (mpt_set_initial_config_spi(mpt)) {
			goto out;
		}
	}
	error = 0;

out:
	MPT_UNLOCK(mpt);
	return (error);
}

static void
mpt_cam_ready(struct mpt_softc *mpt)
{

	/*
	 * If we're in target mode, hang out resources now
	 * so we don't cause the world to hang talking to us.
	 */
	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
		/*
		 * Try to add some target command resources
		 */
		MPT_LOCK(mpt);
		if (mpt_add_target_commands(mpt) == FALSE) {
			mpt_prt(mpt, "failed to add target commands\n");
		}
		MPT_UNLOCK(mpt);
	}
	mpt->ready = 1;
}

static void
mpt_cam_detach(struct mpt_softc *mpt)
{
	mpt_handler_t handler;

	MPT_LOCK(mpt);
	mpt->ready = 0;
	mpt_terminate_recovery_thread(mpt);

	handler.reply_handler = mpt_scsi_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
	    scsi_io_handler_id);
	handler.reply_handler = mpt_scsi_tmf_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
	    scsi_tmf_handler_id);
	handler.reply_handler = mpt_fc_els_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
	    fc_els_handler_id);
	handler.reply_handler = mpt_scsi_tgt_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
	    mpt->scsi_tgt_handler_id);
	handler.reply_handler = mpt_sata_pass_reply_handler;
	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
	    sata_pass_handler_id);

	if (mpt->tmf_req != NULL) {
		mpt->tmf_req->state = REQ_STATE_ALLOCATED;
		mpt_free_request(mpt, mpt->tmf_req);
		mpt->tmf_req = NULL;
	}
	if (mpt->sas_portinfo != NULL) {
		kfree(mpt->sas_portinfo, M_DEVBUF);
		mpt->sas_portinfo = NULL;
	}

	if (mpt->sim != NULL) {
		xpt_free_path(mpt->path);
		xpt_bus_deregister(cam_sim_path(mpt->sim));
		cam_sim_free(mpt->sim);
		mpt->sim = NULL;
	}

	if (mpt->phydisk_sim != NULL) {
		xpt_free_path(mpt->phydisk_path);
		xpt_bus_deregister(cam_sim_path(mpt->phydisk_sim));
		cam_sim_free(mpt->phydisk_sim);
		mpt->phydisk_sim = NULL;
	}
	MPT_UNLOCK(mpt);
}

/*
 * This routine is used after a system crash to dump core onto the swap
 * device.
 */
static void
mpt_poll(struct cam_sim *sim)
{
	struct mpt_softc *mpt;

	mpt = (struct mpt_softc *)cam_sim_softc(sim);
	mpt_intr(mpt);
}

/*
 * Watchdog timeout routine for SCSI requests.
 */
static void
mpt_timeout(void *arg)
{
	union ccb *ccb;
	struct mpt_softc *mpt;
	request_t *req;

	ccb = (union ccb *)arg;
	mpt = ccb->ccb_h.ccb_mpt_ptr;

	MPT_LOCK(mpt);
	req = ccb->ccb_h.ccb_req_ptr;
	mpt_prt(mpt, "request %p:%u timed out for ccb %p (req->ccb %p)\n", req,
	    req->serno, ccb, req->ccb);
/* XXX: WHAT ARE WE TRYING TO DO HERE? */
	if ((req->state & REQ_STATE_QUEUED) == REQ_STATE_QUEUED) {
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		TAILQ_INSERT_TAIL(&mpt->request_timeout_list, req, links);
		req->state |= REQ_STATE_TIMEDOUT;
		mpt_wakeup_recovery_thread(mpt);
	}
	MPT_UNLOCK(mpt);
}

/*
 * Callback routine from "bus_dmamap_load" or, in simple cases, called directly.
 *
 * Takes a list of physical segments and builds the SGL for a SCSI IO command
 * and forwards the command to the IOC after one last check that CAM has not
 * aborted the transaction.
 */
static void
mpt_execute_req_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	request_t *req, *trq;
	char *mpt_off;
	union ccb *ccb;
	struct mpt_softc *mpt;
	bus_addr_t chain_list_addr;
	int first_lim, seg, this_seg_lim;
	uint32_t addr, cur_off, flags, nxt_off, tf;
	void *sglp = NULL;
	MSG_REQUEST_HEADER *hdrp;
	SGE_SIMPLE64 *se;
	SGE_CHAIN64 *ce;
	int istgt = 0;

	req = (request_t *)arg;
	ccb = req->ccb;

	mpt = ccb->ccb_h.ccb_mpt_ptr;
	req = ccb->ccb_h.ccb_req_ptr;

	hdrp = req->req_vbuf;
	mpt_off = req->req_vbuf;

	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
		error = EFBIG;
	}

	if (error == 0) {
		switch (hdrp->Function) {
		case MPI_FUNCTION_SCSI_IO_REQUEST:
		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
			istgt = 0;
			sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
			break;
		case MPI_FUNCTION_TARGET_ASSIST:
			istgt = 1;
			sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
			break;
		default:
			mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req_a64\n",
			    hdrp->Function);
			error = EINVAL;
			break;
		}
	}

	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
		error = EFBIG;
		mpt_prt(mpt, "segment count %d too large (max %u)\n",
		    nseg, mpt->max_seg_cnt);
	}

bad:
	if (error != 0) {
		if (error != EFBIG && error != ENOMEM) {
			mpt_prt(mpt, "mpt_execute_req_a64: err %d\n", error);
		}
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
			cam_status status;
			mpt_freeze_ccb(ccb);
			if (error == EFBIG) {
				status = CAM_REQ_TOO_BIG;
			} else if (error == ENOMEM) {
				if (mpt->outofbeer == 0) {
					mpt->outofbeer = 1;
					xpt_freeze_simq(mpt->sim, 1);
					mpt_lprt(mpt, MPT_PRT_DEBUG,
					    "FREEZEQ\n");
				}
				status = CAM_REQUEUE_REQ;
			} else {
				status = CAM_REQ_CMP_ERR;
			}
			mpt_set_ccb_status(ccb, status);
		}
		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
			request_t *cmd_req =
			    MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
		xpt_done(ccb);
		mpt_free_request(mpt, req);
		return;
	}

	/*
	 * No data to transfer?
	 * Just make a single simple SGL with zero length.
	 */

	if (mpt->verbose >= MPT_PRT_DEBUG) {
		int tidx = ((char *)sglp) - mpt_off;
		memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
	}

	if (nseg == 0) {
		SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
		MPI_pSGE_SET_FLAGS(se1,
		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
		    MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
		se1->FlagsLength = htole32(se1->FlagsLength);
		goto out;
	}

	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_64_BIT_ADDRESSING;
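	/*
	 * Set the direction flag: data flows host->IOC on initiator writes
	 * and, in target mode, on CAM_DIR_IN requests (data we are sending
	 * back to the initiator), as the two cases below encode.
	 */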
	if (istgt == 0) {
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
		}
	} else {
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
		}
	}

	if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
		bus_dmasync_op_t op;
		if (istgt == 0) {
			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
				op = BUS_DMASYNC_PREREAD;
			} else {
				op = BUS_DMASYNC_PREWRITE;
			}
		} else {
			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
				op = BUS_DMASYNC_PREWRITE;
			} else {
				op = BUS_DMASYNC_PREREAD;
			}
		}
		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
	}

	/*
	 * Okay, fill in what we can at the end of the command frame.
	 * If we have up to MPT_NSGL_FIRST, we can fit them all into
	 * the command frame.
	 *
	 * Otherwise, we fill up through MPT_NSGL_FIRST less one
	 * SIMPLE64 pointers and start doing CHAIN64 entries after
	 * that.
	 */

	if (nseg < MPT_NSGL_FIRST(mpt)) {
		first_lim = nseg;
	} else {
		/*
		 * Leave room for CHAIN element
		 */
		first_lim = MPT_NSGL_FIRST(mpt) - 1;
	}

	se = (SGE_SIMPLE64 *) sglp;
	for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
		tf = flags;
		memset(se, 0, sizeof (*se));
		MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
		se->Address.Low = htole32(dm_segs->ds_addr & 0xffffffff);
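		/*
		 * The SAS1078 workaround below appears to remap any segment
		 * whose end address lands in the 36-39GB window (high dword
		 * equal to 9) into a "local" address region by setting bit
		 * 31; this is our reading of the chip erratum being worked
		 * around.
		 */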
		if (sizeof(bus_addr_t) > 4) {
			addr = ((uint64_t)dm_segs->ds_addr) >> 32;
			/* SAS1078 36GB limitation WAR */
			if (mpt->is_1078 && (((uint64_t)dm_segs->ds_addr +
			    MPI_SGE_LENGTH(se->FlagsLength)) >> 32) == 9) {
				addr |= (1U << 31);
				tf |= MPI_SGE_FLAGS_LOCAL_ADDRESS;
			}
			se->Address.High = htole32(addr);
		}
		if (seg == first_lim - 1) {
			tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
		}
		if (seg == nseg - 1) {
			tf |= MPI_SGE_FLAGS_END_OF_LIST |
			    MPI_SGE_FLAGS_END_OF_BUFFER;
		}
		MPI_pSGE_SET_FLAGS(se, tf);
		se->FlagsLength = htole32(se->FlagsLength);
	}

	if (seg == nseg) {
		goto out;
	}

	/*
	 * Tell the IOC where to find the first chain element.
	 */
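	/* (ChainOffset is counted in 32-bit words, hence the shift by 2.) */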
	hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
	nxt_off = MPT_RQSL(mpt);
	trq = req;

	/*
	 * Make up the rest of the data segments out of a chain element
	 * (contained in the current request frame) which points to
	 * SIMPLE64 elements in the next request frame, possibly ending
	 * with *another* chain element (if there's more).
	 */
	while (seg < nseg) {
		/*
		 * Point to the chain descriptor. Note that the chain
		 * descriptor is at the end of the *previous* list (whether
		 * chain or simple).
		 */
		ce = (SGE_CHAIN64 *) se;

		/*
		 * Before we change our current pointer, make sure we won't
		 * overflow the request area with this frame. Note that we
		 * test against 'greater than' here as it's okay in this case
		 * to have next offset be just outside the request area.
		 */
		if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
			nxt_off = MPT_REQUEST_AREA;
			goto next_chain;
		}

		/*
		 * Set our SGE element pointer to the beginning of the chain
		 * list and update our next chain list offset.
		 */
		se = (SGE_SIMPLE64 *) &mpt_off[nxt_off];
		cur_off = nxt_off;
		nxt_off += MPT_RQSL(mpt);

		/*
		 * Now initialize the chain descriptor.
		 */
		memset(ce, 0, sizeof (*ce));

		/*
		 * Get the physical address of the chain list.
		 */
		chain_list_addr = trq->req_pbuf;
		chain_list_addr += cur_off;
		if (sizeof (bus_addr_t) > 4) {
			ce->Address.High =
			    htole32(((uint64_t)chain_list_addr) >> 32);
		}
		ce->Address.Low = htole32(chain_list_addr & 0xffffffff);
		ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT |
		    MPI_SGE_FLAGS_64_BIT_ADDRESSING;

		/*
		 * If we have more than a frame's worth of segments left,
		 * set up the chain list to have the last element be another
		 * chain descriptor.
		 */
		if ((nseg - seg) > MPT_NSGL(mpt)) {
			this_seg_lim = seg + MPT_NSGL(mpt) - 1;
			/*
			 * The length of the chain is the length in bytes of the
			 * number of segments plus the next chain element.
			 *
			 * The next chain descriptor offset is the length,
			 * in words, of the number of segments.
			 */
			ce->Length = (this_seg_lim - seg) *
			    sizeof (SGE_SIMPLE64);
			ce->NextChainOffset = ce->Length >> 2;
			ce->Length += sizeof (SGE_CHAIN64);
		} else {
			this_seg_lim = nseg;
			ce->Length = (this_seg_lim - seg) *
			    sizeof (SGE_SIMPLE64);
		}
		ce->Length = htole16(ce->Length);

		/*
		 * Fill in the chain list SGE elements with our segment data.
		 *
		 * If we're the last element in this chain list, set the last
		 * element flag. If we're the completely last element period,
		 * set the end of list and end of buffer flags.
		 */
		while (seg < this_seg_lim) {
			tf = flags;
			memset(se, 0, sizeof (*se));
			MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
			se->Address.Low = htole32(dm_segs->ds_addr &
			    0xffffffff);
			if (sizeof (bus_addr_t) > 4) {
				addr = ((uint64_t)dm_segs->ds_addr) >> 32;
				/* SAS1078 36GB limitation WAR */
				if (mpt->is_1078 &&
				    (((uint64_t)dm_segs->ds_addr +
				    MPI_SGE_LENGTH(se->FlagsLength)) >>
				    32) == 9) {
					addr |= (1U << 31);
					tf |= MPI_SGE_FLAGS_LOCAL_ADDRESS;
				}
				se->Address.High = htole32(addr);
			}
			if (seg == this_seg_lim - 1) {
				tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
			}
			if (seg == nseg - 1) {
				tf |= MPI_SGE_FLAGS_END_OF_LIST |
				    MPI_SGE_FLAGS_END_OF_BUFFER;
			}
			MPI_pSGE_SET_FLAGS(se, tf);
			se->FlagsLength = htole32(se->FlagsLength);
			se++;
			seg++;
			dm_segs++;
		}

 next_chain:
		/*
		 * If we have more segments to do and we've used up all of
		 * the space in a request area, go allocate another one
		 * and chain to that.
		 */
		if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
			request_t *nrq;

			nrq = mpt_get_request(mpt, FALSE);

			if (nrq == NULL) {
				error = ENOMEM;
				goto bad;
			}

			/*
			 * Append the new request area on the tail of our list.
			 */
			if ((trq = req->chain) == NULL) {
				req->chain = nrq;
			} else {
				while (trq->chain != NULL) {
					trq = trq->chain;
				}
				trq->chain = nrq;
			}
			trq = nrq;
			mpt_off = trq->req_vbuf;
			if (mpt->verbose >= MPT_PRT_DEBUG) {
				memset(mpt_off, 0xff, MPT_REQUEST_AREA);
			}
			nxt_off = 0;
		}
	}
out:

	/*
	 * Last time we need to check if this CCB needs to be aborted.
	 */
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
			request_t *cmd_req =
			    MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
		}
		mpt_prt(mpt,
		    "mpt_execute_req_a64: I/O cancelled (status 0x%x)\n",
		    ccb->ccb_h.status & CAM_STATUS_MASK);
		if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
			bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
		xpt_done(ccb);
		mpt_free_request(mpt, req);
		return;
	}

	ccb->ccb_h.status |= CAM_SIM_QUEUED;
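	/* CAM timeouts are in milliseconds; convert to ticks for the callout. */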
	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
		mpt_req_timeout(req, (ccb->ccb_h.timeout * hz) / 1000,
		    mpt_timeout, ccb);
	}
	if (mpt->verbose > MPT_PRT_DEBUG) {
		int nc = 0;
		mpt_print_request(req->req_vbuf);
		for (trq = req->chain; trq; trq = trq->chain) {
			kprintf(" Additional Chain Area %d\n", nc++);
			mpt_dump_sgl(trq->req_vbuf, 0);
		}
	}

	if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
		request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
#ifdef WE_TRUST_AUTO_GOOD_STATUS
		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
		    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
			tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
		} else {
			tgt->state = TGT_STATE_MOVING_DATA;
		}
#else
		tgt->state = TGT_STATE_MOVING_DATA;
#endif
	}
	mpt_send_cmd(mpt, req);
}

static void
mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	request_t *req, *trq;
	char *mpt_off;
	union ccb *ccb;
	struct mpt_softc *mpt;
	int seg, first_lim;
	uint32_t flags, nxt_off;
	void *sglp = NULL;
	MSG_REQUEST_HEADER *hdrp;
	SGE_SIMPLE32 *se;
	SGE_CHAIN32 *ce;
	int istgt = 0;

	req = (request_t *)arg;
	ccb = req->ccb;

	mpt = ccb->ccb_h.ccb_mpt_ptr;
	req = ccb->ccb_h.ccb_req_ptr;

	hdrp = req->req_vbuf;
	mpt_off = req->req_vbuf;

	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
		error = EFBIG;
	}

	if (error == 0) {
		switch (hdrp->Function) {
		case MPI_FUNCTION_SCSI_IO_REQUEST:
		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
			sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
			break;
		case MPI_FUNCTION_TARGET_ASSIST:
			istgt = 1;
			sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
			break;
		default:
			mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req\n",
			    hdrp->Function);
			error = EINVAL;
			break;
		}
	}

	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
		error = EFBIG;
		mpt_prt(mpt, "segment count %d too large (max %u)\n",
		    nseg, mpt->max_seg_cnt);
	}

bad:
	if (error != 0) {
		if (error != EFBIG && error != ENOMEM) {
			mpt_prt(mpt, "mpt_execute_req: err %d\n", error);
		}
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
			cam_status status;
			mpt_freeze_ccb(ccb);
			if (error == EFBIG) {
				status = CAM_REQ_TOO_BIG;
			} else if (error == ENOMEM) {
				if (mpt->outofbeer == 0) {
					mpt->outofbeer = 1;
					xpt_freeze_simq(mpt->sim, 1);
					mpt_lprt(mpt, MPT_PRT_DEBUG,
					    "FREEZEQ\n");
				}
				status = CAM_REQUEUE_REQ;
			} else {
				status = CAM_REQ_CMP_ERR;
			}
			mpt_set_ccb_status(ccb, status);
		}
		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
			request_t *cmd_req =
			    MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
		}
		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
		xpt_done(ccb);
		mpt_free_request(mpt, req);
		return;
	}

	/*
	 * No data to transfer?
	 * Just make a single simple SGL with zero length.
	 */

	if (mpt->verbose >= MPT_PRT_DEBUG) {
		int tidx = ((char *)sglp) - mpt_off;
		memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
	}

	if (nseg == 0) {
		SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
		MPI_pSGE_SET_FLAGS(se1,
		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
		    MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
		se1->FlagsLength = htole32(se1->FlagsLength);
		goto out;
	}

	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
	if (istgt == 0) {
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
		}
	} else {
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
		}
	}

	if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
		bus_dmasync_op_t op;
		if (istgt) {
			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
				op = BUS_DMASYNC_PREWRITE;
			} else {
				op = BUS_DMASYNC_PREREAD;
			}
		} else {
			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
				op = BUS_DMASYNC_PREREAD;
			} else {
				op = BUS_DMASYNC_PREWRITE;
			}
		}
		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
	}
1806
1807 /*
1808 * Okay, fill in what we can at the end of the command frame.
1809 * If we have up to MPT_NSGL_FIRST, we can fit them all into
1810 * the command frame.
1811 *
1812 * Otherwise, we fill up through MPT_NSGL_FIRST less one
1813 * SIMPLE32 pointers and start doing CHAIN32 entries after
1814 * that.
1815 */
1816
1817 if (nseg < MPT_NSGL_FIRST(mpt)) {
1818 first_lim = nseg;
1819 } else {
1820 /*
1821 * Leave room for CHAIN element
1822 */
1823 first_lim = MPT_NSGL_FIRST(mpt) - 1;
1824 }
1825
1826 se = (SGE_SIMPLE32 *) sglp;
1827 for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
1828 uint32_t tf;
1829
1830 memset(se, 0, sizeof (*se));
1831 se->Address = htole32(dm_segs->ds_addr);
1832
1833 MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1834 tf = flags;
1835 if (seg == first_lim - 1) {
1836 tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
1837 }
1838 if (seg == nseg - 1) {
1839 tf |= MPI_SGE_FLAGS_END_OF_LIST |
1840 MPI_SGE_FLAGS_END_OF_BUFFER;
1841 }
1842 MPI_pSGE_SET_FLAGS(se, tf);
1843 se->FlagsLength = htole32(se->FlagsLength);
1844 }
1845
1846 if (seg == nseg) {
1847 goto out;
1848 }
1849
1850 /*
1851 * Tell the IOC where to find the first chain element.
1852 */
1853 hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
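/*
 * ChainOffset is expressed in 32-bit words.  For instance, if the
 * chain element sits 48 bytes into the frame, ChainOffset = 48 >> 2
 * = 12.
 */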
1854 nxt_off = MPT_RQSL(mpt);
1855 trq = req;
1856
1857 /*
1858 * Make up the rest of the data segments out of a chain element
4c42baf4 1859 * (contained in the current request frame) which points to
1860 * SIMPLE32 elements in the next request frame, possibly ending
1861 * with *another* chain element (if there's more).
1862 */
1863 while (seg < nseg) {
1864 int this_seg_lim;
1865 uint32_t tf, cur_off;
1866 bus_addr_t chain_list_addr;
1867
1868 /*
1869 * Point to the chain descriptor. Note that the chain
1870 * descriptor is at the end of the *previous* list (whether
1871 * chain or simple).
1872 */
1873 ce = (SGE_CHAIN32 *) se;
1874
1875 /*
1876 * Before we change our current pointer, make sure we won't
1877 * overflow the request area with this frame. Note that we
1878 * test against 'greater than' here as it's okay in this case
1879 * to have next offset be just outside the request area.
1880 */
1881 if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
1882 nxt_off = MPT_REQUEST_AREA;
1883 goto next_chain;
1884 }
1885
1886 /*
1887 * Set our SGE element pointer to the beginning of the chain
1888 * list and update our next chain list offset.
1889 */
1890 se = (SGE_SIMPLE32 *) &mpt_off[nxt_off];
1891 cur_off = nxt_off;
1892 nxt_off += MPT_RQSL(mpt);
1893
1894 /*
4c42baf4 1895 * Now initialize the chain descriptor.
1896 */
1897 memset(ce, 0, sizeof (*ce));
1898
1899 /*
1900 * Get the physical address of the chain list.
1901 */
1902 chain_list_addr = trq->req_pbuf;
1903 chain_list_addr += cur_off;
1904
1905
1906
1907 ce->Address = htole32(chain_list_addr);
1908 ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
1909
1910
1911 /*
1912 * If we have more than a frame's worth of segments left,
1913 * set up the chain list to have the last element be another
1914 * chain descriptor.
1915 */
1916 if ((nseg - seg) > MPT_NSGL(mpt)) {
1917 this_seg_lim = seg + MPT_NSGL(mpt) - 1;
1918 /*
1919 * The length of the chain is the length in bytes of the
1920 * number of segments plus the next chain element.
1921 *
1922 * The next chain descriptor offset is the length,
1923 * in words, of the number of segments.
1924 */
1925 ce->Length = (this_seg_lim - seg) *
1926 sizeof (SGE_SIMPLE32);
1927 ce->NextChainOffset = ce->Length >> 2;
1928 ce->Length += sizeof (SGE_CHAIN32);
1929 } else {
1930 this_seg_lim = nseg;
1931 ce->Length = (this_seg_lim - seg) *
1932 sizeof (SGE_SIMPLE32);
1933 }
1934 ce->Length = htole16(ce->Length);
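/*
 * Worked example under assumed values: if MPT_NSGL(mpt) were 16 with
 * 20 segments left, this_seg_lim = seg + 15, so Length starts as
 * 15 * 8 = 120 bytes, NextChainOffset = 120 >> 2 = 30 words, and the
 * trailing chain element brings Length to 128 bytes.
 */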
1935
1936 /*
1937 * Fill in the chain list SGE elements with our segment data.
1938 *
1939 * If we're the last element in this chain list, set the last
1940 * element flag. If we're the completely last element period,
1941 * set the end of list and end of buffer flags.
1942 */
1943 while (seg < this_seg_lim) {
1944 memset(se, 0, sizeof (*se));
1945 se->Address = htole32(dm_segs->ds_addr);
1946
1947 MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1948 tf = flags;
4c42baf4 1949 if (seg == this_seg_lim - 1) {
1950 tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
1951 }
1952 if (seg == nseg - 1) {
1953 tf |= MPI_SGE_FLAGS_END_OF_LIST |
1954 MPI_SGE_FLAGS_END_OF_BUFFER;
1955 }
1956 MPI_pSGE_SET_FLAGS(se, tf);
1957 se->FlagsLength = htole32(se->FlagsLength);
1958 se++;
1959 seg++;
1960 dm_segs++;
1961 }
1962
1963 next_chain:
1964 /*
1965 * If we have more segments to do and we've used up all of
1966 * the space in a request area, go allocate another one
1967 * and chain to that.
1968 */
1969 if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
1970 request_t *nrq;
1971
2545bca0 1972 nrq = mpt_get_request(mpt, FALSE);
1973
1974 if (nrq == NULL) {
1975 error = ENOMEM;
1976 goto bad;
1977 }
1978
1979 /*
1980 * Append the new request area on the tail of our list.
1981 */
1982 if ((trq = req->chain) == NULL) {
1983 req->chain = nrq;
1984 } else {
1985 while (trq->chain != NULL) {
1986 trq = trq->chain;
1987 }
1988 trq->chain = nrq;
1989 }
1990 trq = nrq;
1991 mpt_off = trq->req_vbuf;
1992 if (mpt->verbose >= MPT_PRT_DEBUG) {
1993 memset(mpt_off, 0xff, MPT_REQUEST_AREA);
1994 }
1995 nxt_off = 0;
1996 }
1997 }
1998out:
1999
2000 /*
2001 * Last time we need to check if this CCB needs to be aborted.
2002 */
2003 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
2004 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
2005 request_t *cmd_req =
2006 MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
2007 MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
2008 MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
2009 MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
2010 }
2011 mpt_prt(mpt,
2012 "mpt_execute_req: I/O cancelled (status 0x%x)\n",
2013 ccb->ccb_h.status & CAM_STATUS_MASK);
2014 if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
2015 bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
2016 }
2017 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4c42baf4 2018 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
2545bca0 2019 xpt_done(ccb);
2545bca0 2020 mpt_free_request(mpt, req);
2021 return;
2022 }
2023
2024 ccb->ccb_h.status |= CAM_SIM_QUEUED;
2025 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
2026 mpt_req_timeout(req, (ccb->ccb_h.timeout * hz) / 1000,
2027 mpt_timeout, ccb);
2028 }
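/*
 * The CCB timeout is in milliseconds while mpt_req_timeout() wants
 * system ticks, hence the (timeout * hz) / 1000 scaling above; e.g.
 * a 30000ms CCB timeout at hz = 100 arms a 3000-tick timer.
 */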
2029 if (mpt->verbose > MPT_PRT_DEBUG) {
2030 int nc = 0;
2031 mpt_print_request(req->req_vbuf);
2032 for (trq = req->chain; trq; trq = trq->chain) {
2033 kprintf(" Additional Chain Area %d\n", nc++);
2034 mpt_dump_sgl(trq->req_vbuf, 0);
2035 }
2036 }
2037
2038 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
2039 request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
2040 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
2041#ifdef WE_TRUST_AUTO_GOOD_STATUS
2042 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
2043 csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
2044 tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
2045 } else {
2046 tgt->state = TGT_STATE_MOVING_DATA;
2047 }
2048#else
2049 tgt->state = TGT_STATE_MOVING_DATA;
2050#endif
2051 }
2545bca0 2052 mpt_send_cmd(mpt, req);
2053}
2054
2055static void
2056mpt_start(struct cam_sim *sim, union ccb *ccb)
2057{
2058 request_t *req;
2059 struct mpt_softc *mpt;
2060 MSG_SCSI_IO_REQUEST *mpt_req;
2061 struct ccb_scsiio *csio = &ccb->csio;
2062 struct ccb_hdr *ccbh = &ccb->ccb_h;
2063 bus_dmamap_callback_t *cb;
2064 target_id_t tgt;
2065 int raid_passthru;
2066
2067 /* Get the pointer for the physical adapter */
2068 mpt = ccb->ccb_h.ccb_mpt_ptr;
2069 raid_passthru = (sim == mpt->phydisk_sim);
2070
2071 if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
2072 if (mpt->outofbeer == 0) {
2073 mpt->outofbeer = 1;
2074 xpt_freeze_simq(mpt->sim, 1);
2075 mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
2076 }
2077 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2078 mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
2079 xpt_done(ccb);
2080 return;
2081 }
2082#ifdef INVARIANTS
2083 mpt_req_not_spcl(mpt, req, "mpt_start", __LINE__);
2084#endif
2085
2086 if (sizeof (bus_addr_t) > 4) {
2087 cb = mpt_execute_req_a64;
2088 } else {
2089 cb = mpt_execute_req;
2090 }
2091
2092 /*
2093 * Link the ccb and the request structure so we can find
2094 * the other knowing either the request or the ccb
2095 */
2096 req->ccb = ccb;
2097 ccb->ccb_h.ccb_req_ptr = req;
2098
2099 /* Now we build the command for the IOC */
2100 mpt_req = req->req_vbuf;
2101 memset(mpt_req, 0, sizeof (MSG_SCSI_IO_REQUEST));
2102
2103 mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
2104 if (raid_passthru) {
2105 mpt_req->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
2545bca0 2106 if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
2107 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2108 mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
2109 xpt_done(ccb);
2110 return;
2111 }
2112 mpt_req->Bus = 0; /* we never set bus here */
2113 } else {
2114 tgt = ccb->ccb_h.target_id;
2115 mpt_req->Bus = 0; /* XXX */
2116
2117 }
2118 mpt_req->SenseBufferLength =
2119 (csio->sense_len < MPT_SENSE_SIZE) ?
2120 csio->sense_len : MPT_SENSE_SIZE;
2121
2122 /*
2123 * We use the message context to find the request structure when we
2124 * get the command completion interrupt from the IOC.
2125 */
2126 mpt_req->MsgContext = htole32(req->index | scsi_io_handler_id);
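/*
 * Completion-side sketch (the exact mask macros live in the mpt
 * headers, not shown here): the interrupt path reads MsgContext back
 * out of the reply, uses the handler-id bits to dispatch to
 * mpt_scsi_reply_handler() and the remaining index bits to look the
 * request_t back up.
 */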
2127
2128 /* Which physical device to do the I/O on */
2129 mpt_req->TargetID = tgt;
2130
2131 /* We assume a single level LUN type */
2132 if (ccb->ccb_h.target_lun >= MPT_MAX_LUNS) {
2133 mpt_req->LUN[0] = 0x40 | ((ccb->ccb_h.target_lun >> 8) & 0x3f);
2134 mpt_req->LUN[1] = ccb->ccb_h.target_lun & 0xff;
2135 } else {
2136 mpt_req->LUN[1] = ccb->ccb_h.target_lun;
2137 }
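/*
 * This follows the SAM flat LUN addressing format: for example, LUN
 * 300 (0x12c) encodes as LUN[0] = 0x40 | 0x01 = 0x41 and LUN[1] =
 * 0x2c, while small LUNs use peripheral addressing with just LUN[1].
 */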
2138
2139 /* Set the direction of the transfer */
2140 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2141 mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
2142 } else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
2143 mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
2144 } else {
2145 mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;
2146 }
2147
2148 if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
2149 switch(ccb->csio.tag_action) {
2150 case MSG_HEAD_OF_Q_TAG:
2151 mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
2152 break;
2153 case MSG_ACA_TASK:
2154 mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
2155 break;
2156 case MSG_ORDERED_Q_TAG:
2157 mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
2158 break;
2159 case MSG_SIMPLE_Q_TAG:
2160 default:
2161 mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
2162 break;
2163 }
2164 } else {
2165 if (mpt->is_fc || mpt->is_sas) {
2166 mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
2167 } else {
2168 /* XXX No such thing for a target doing packetized. */
2169 mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
2170 }
2171 }
2172
2173 if (mpt->is_spi) {
2174 if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
2175 mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;
2176 }
2177 }
2178 mpt_req->Control = htole32(mpt_req->Control);
2179
2180 /* Copy the scsi command block into place */
2181 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
2182 bcopy(csio->cdb_io.cdb_ptr, mpt_req->CDB, csio->cdb_len);
2183 } else {
2184 bcopy(csio->cdb_io.cdb_bytes, mpt_req->CDB, csio->cdb_len);
2185 }
2186
2187 mpt_req->CDBLength = csio->cdb_len;
2188 mpt_req->DataLength = htole32(csio->dxfer_len);
2189 mpt_req->SenseBufferLowAddr = htole32(req->sense_pbuf);
2190
2191 /*
2192 * Do a *short* print here if we're set to MPT_PRT_DEBUG
2193 */
2194 if (mpt->verbose == MPT_PRT_DEBUG) {
2195 U32 df;
2196 mpt_prt(mpt, "mpt_start: %s op 0x%x ",
2197 (mpt_req->Function == MPI_FUNCTION_SCSI_IO_REQUEST)?
2198 "SCSI_IO_REQUEST" : "SCSI_IO_PASSTHRU", mpt_req->CDB[0]);
2199 df = mpt_req->Control & MPI_SCSIIO_CONTROL_DATADIRECTION_MASK;
2200 if (df != MPI_SCSIIO_CONTROL_NODATATRANSFER) {
2201 mpt_prtc(mpt, "(%s %u byte%s ",
2202 (df == MPI_SCSIIO_CONTROL_READ)?
2203 "read" : "write", csio->dxfer_len,
2204 (csio->dxfer_len == 1)? ")" : "s)");
2205 }
2206 mpt_prtc(mpt, "tgt %u lun %u req %p:%u\n", tgt,
2207 ccb->ccb_h.target_lun, req, req->serno);
2208 }
2209
2210 /*
2211 * If we have any data to send with this command map it into bus space.
2212 */
2213 if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
2214 if ((ccbh->flags & CAM_SCATTER_VALID) == 0) {
2215 /*
2216 * We've been given a pointer to a single buffer.
2217 */
2218 if ((ccbh->flags & CAM_DATA_PHYS) == 0) {
2219 /*
2220 * Virtual address that needs to be translated into
2221 * one or more physical address ranges.
2222 */
2223 int error;
6d259fc1 2224 crit_enter();
2225 error = bus_dmamap_load(mpt->buffer_dmat,
2226 req->dmap, csio->data_ptr, csio->dxfer_len,
2227 cb, req, 0);
6d259fc1 2228 crit_exit();
2229 if (error == EINPROGRESS) {
2230 /*
2231 * So as to maintain ordering,
2232 * freeze the controller queue
2233 * until our mapping is
2234 * returned.
2235 */
2236 xpt_freeze_simq(mpt->sim, 1);
2237 ccbh->status |= CAM_RELEASE_SIMQ;
2238 }
2239 } else {
2240 /*
2241 * We have been given a pointer to a single
2242 * physical buffer.
2243 */
2244 struct bus_dma_segment seg;
2245 seg.ds_addr =
2246 (bus_addr_t)(vm_offset_t)csio->data_ptr;
2247 seg.ds_len = csio->dxfer_len;
2248 (*cb)(req, &seg, 1, 0);
2249 }
2250 } else {
2251 /*
2252 * We have been given a list of addresses.
2253 * This case could be easily supported, but such lists are
2254 * not currently generated by the CAM subsystem, so there
2255 * is no point in wasting the time right now.
2256 */
2257 struct bus_dma_segment *segs;
2258 if ((ccbh->flags & CAM_SG_LIST_PHYS) == 0) {
2259 (*cb)(req, NULL, 0, EFAULT);
2260 } else {
2261 /* Just use the segments provided */
2262 segs = (struct bus_dma_segment *)csio->data_ptr;
2263 (*cb)(req, segs, csio->sglist_cnt, 0);
2264 }
2265 }
2266 } else {
2267 (*cb)(req, NULL, 0, 0);
2268 }
2269}
2270
2271static int
2272mpt_bus_reset(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun,
2273 int sleep_ok)
2274{
2275 int error;
2276 uint16_t status;
2277 uint8_t response;
2278
2279 error = mpt_scsi_send_tmf(mpt,
2280 (tgt != CAM_TARGET_WILDCARD || lun != CAM_LUN_WILDCARD) ?
2281 MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET :
2282 MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
2283 mpt->is_fc ? MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION : 0,
2284 0, /* XXX How do I get the channel ID? */
2285 tgt != CAM_TARGET_WILDCARD ? tgt : 0,
2286 lun != CAM_LUN_WILDCARD ? lun : 0,
2287 0, sleep_ok);
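/*
 * Note the task-type selection above: only the fully wildcarded case
 * (both target and lun) becomes a bus reset; e.g. tgt 3 with a
 * wildcard lun goes out as a TARGET_RESET aimed at target 3.
 */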
2288
2289 if (error != 0) {
2290 /*
2291 * mpt_scsi_send_tmf hard resets on failure, so no
2292 * need to do so here.
2293 */
2294 mpt_prt(mpt,
2295 "mpt_bus_reset: mpt_scsi_send_tmf returned %d\n", error);
2296 return (EIO);
2297 }
2298
2299 /* Wait for bus reset to be processed by the IOC. */
2300 error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
2301 REQ_STATE_DONE, sleep_ok, 5000);
2302
2303 status = le16toh(mpt->tmf_req->IOCStatus);
2304 response = mpt->tmf_req->ResponseCode;
2305 mpt->tmf_req->state = REQ_STATE_FREE;
2306
2307 if (error) {
2308 mpt_prt(mpt, "mpt_bus_reset: Reset timed-out. "
2309 "Resetting controller.\n");
2310 mpt_reset(mpt, TRUE);
2311 return (ETIMEDOUT);
2312 }
2313
2314 if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
2315 mpt_prt(mpt, "mpt_bus_reset: TMF IOC Status 0x%x. "
2316 "Resetting controller.\n", status);
2317 mpt_reset(mpt, TRUE);
2318 return (EIO);
2319 }
2320
2321 if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
2322 response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
2323 mpt_prt(mpt, "mpt_bus_reset: TMF Response 0x%x. "
2324 "Resetting controller.\n", response);
2325 mpt_reset(mpt, TRUE);
2326 return (EIO);
2327 }
2328 return (0);
2329}
2330
2331static int
2332mpt_fc_reset_link(struct mpt_softc *mpt, int dowait)
2333{
2334 int r = 0;
2335 request_t *req;
2336 PTR_MSG_FC_PRIMITIVE_SEND_REQUEST fc;
2337
2338 req = mpt_get_request(mpt, FALSE);
2339 if (req == NULL) {
2340 return (ENOMEM);
2341 }
2342 fc = req->req_vbuf;
2343 memset(fc, 0, sizeof(*fc));
2344 fc->SendFlags = MPI_FC_PRIM_SEND_FLAGS_RESET_LINK;
2345 fc->Function = MPI_FUNCTION_FC_PRIMITIVE_SEND;
2346 fc->MsgContext = htole32(req->index | fc_els_handler_id);
2347 mpt_send_cmd(mpt, req);
2348 if (dowait) {
2349 r = mpt_wait_req(mpt, req, REQ_STATE_DONE,
2350 REQ_STATE_DONE, FALSE, 60 * 1000);
2351 if (r == 0) {
2352 mpt_free_request(mpt, req);
2353 }
2354 }
2355 return (r);
2356}
2357
2358static void
2359mpt_cam_rescan_callback(struct cam_periph *periph, union ccb *ccb)
2360{
2361 xpt_free_path(ccb->ccb_h.path);
cec957e9 2362 xpt_free_ccb(&ccb->ccb_h);
2363}
2364
2365static int
2366mpt_cam_event(struct mpt_softc *mpt, request_t *req,
2367 MSG_EVENT_NOTIFY_REPLY *msg)
2368{
2369 uint32_t data0, data1;
2370
2371 data0 = le32toh(msg->Data[0]);
2372 data1 = le32toh(msg->Data[1]);
2373 switch(msg->Event & 0xFF) {
2374 case MPI_EVENT_UNIT_ATTENTION:
2375 mpt_prt(mpt, "UNIT ATTENTION: Bus: 0x%02x TargetID: 0x%02x\n",
2376 (data0 >> 8) & 0xff, data0 & 0xff);
2377 break;
2378
2379 case MPI_EVENT_IOC_BUS_RESET:
2380 /* We generated a bus reset */
2381 mpt_prt(mpt, "IOC Generated Bus Reset Port: %d\n",
2382 (data0 >> 8) & 0xff);
2383 xpt_async(AC_BUS_RESET, mpt->path, NULL);
2384 break;
2385
2386 case MPI_EVENT_EXT_BUS_RESET:
2387 /* Someone else generated a bus reset */
2388 mpt_prt(mpt, "External Bus Reset Detected\n");
2389 /*
2390 * These replies don't return EventData like the MPI
2391 * spec says they do
2392 */
2393 xpt_async(AC_BUS_RESET, mpt->path, NULL);
2394 break;
2395
2396 case MPI_EVENT_RESCAN:
2397 {
2398 union ccb *ccb;
2399 uint32_t pathid;
2400 /*
2401 * In general this means a device has been added to the loop.
2402 */
2403 mpt_prt(mpt, "Rescan Port: %d\n", (data0 >> 8) & 0xff);
2404 if (mpt->ready == 0) {
2405 break;
2406 }
2407 if (mpt->phydisk_sim) {
2408 pathid = cam_sim_path(mpt->phydisk_sim);
2409 } else {
2410 pathid = cam_sim_path(mpt->sim);
2411 }
2412 /*
2413 * Allocate a CCB, create a wildcard path for this bus,
2414 * and schedule a rescan.
2415 */
cec957e9 2416 ccb = xpt_alloc_ccb();
2417
2418 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, pathid,
2419 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2545bca0 2420 mpt_prt(mpt, "unable to create path for rescan\n");
cec957e9 2421 xpt_free_ccb(&ccb->ccb_h);
2422 break;
2423 }
6d259fc1 2424
cec957e9 2425 xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, /*lowpri*/5);
2426 ccb->ccb_h.func_code = XPT_SCAN_BUS;
2427 ccb->ccb_h.cbfcnp = mpt_cam_rescan_callback;
2428 ccb->crcn.flags = CAM_FLAG_NONE;
2429 xpt_action(ccb);
2430 /* scan is now in progress */
2431
2432 break;
2433 }
2434 case MPI_EVENT_LINK_STATUS_CHANGE:
2435 mpt_prt(mpt, "Port %d: LinkState: %s\n",
2436 (data1 >> 8) & 0xff,
2437 ((data0 & 0xff) == 0)? "Failed" : "Active");
2438 break;
2439
2440 case MPI_EVENT_LOOP_STATE_CHANGE:
2441 switch ((data0 >> 16) & 0xff) {
2442 case 0x01:
2443 mpt_prt(mpt,
2444 "Port 0x%x: FC LinkEvent: LIP(%02x,%02x) "
2445 "(Loop Initialization)\n",
2446 (data1 >> 8) & 0xff,
2447 (data0 >> 8) & 0xff,
2448 (data0 ) & 0xff);
2449 switch ((data0 >> 8) & 0xff) {
2450 case 0xF7:
2451 if ((data0 & 0xff) == 0xF7) {
2452 mpt_prt(mpt, "Device needs AL_PA\n");
2453 } else {
2454 mpt_prt(mpt, "Device %02x doesn't like "
2455 "FC performance\n",
2456 data0 & 0xFF);
2457 }
2458 break;
2459 case 0xF8:
2460 if ((data0 & 0xff) == 0xF7) {
2461 mpt_prt(mpt, "Device had loop failure "
2462 "at its receiver prior to acquiring"
2463 " AL_PA\n");
2464 } else {
2465 mpt_prt(mpt, "Device %02x detected loop"
2466 " failure at its receiver\n",
2467 data0 & 0xFF);
2468 }
2469 break;
2470 default:
2471 mpt_prt(mpt, "Device %02x requests that device "
2472 "%02x reset itself\n",
2473 data0 & 0xFF,
2474 (data0 >> 8) & 0xFF);
2475 break;
2476 }
2477 break;
2478 case 0x02:
2479 mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
2480 "LPE(%02x,%02x) (Loop Port Enable)\n",
2481 (data1 >> 8) & 0xff, /* Port */
2482 (data0 >> 8) & 0xff, /* Character 3 */
2483 (data0 ) & 0xff /* Character 4 */);
2484 break;
2485 case 0x03:
2486 mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
2487 "LPB(%02x,%02x) (Loop Port Bypass)\n",
2488 (data1 >> 8) & 0xff, /* Port */
2489 (data0 >> 8) & 0xff, /* Character 3 */
2490 (data0 ) & 0xff /* Character 4 */);
2491 break;
2492 default:
2493 mpt_prt(mpt, "Port 0x%x: FC LinkEvent: Unknown "
2494 "FC event (%02x %02x %02x)\n",
2495 (data1 >> 8) & 0xff, /* Port */
2496 (data0 >> 16) & 0xff, /* Event */
2497 (data0 >> 8) & 0xff, /* Character 3 */
2498 (data0 ) & 0xff /* Character 4 */);
2499 }
2500 break;
2501
2502 case MPI_EVENT_LOGOUT:
2503 mpt_prt(mpt, "FC Logout Port: %d N_PortID: %02x\n",
2504 (data1 >> 8) & 0xff, data0);
2505 break;
2506 case MPI_EVENT_QUEUE_FULL:
2507 {
2508 struct cam_sim *sim;
2509 struct cam_path *tmppath;
cec957e9 2510 struct ccb_relsim *crs;
2511 PTR_EVENT_DATA_QUEUE_FULL pqf;
2512 lun_id_t lun_id;
2513
2514 pqf = (PTR_EVENT_DATA_QUEUE_FULL)msg->Data;
2515 pqf->CurrentDepth = le16toh(pqf->CurrentDepth);
2516 mpt_prt(mpt, "QUEUE FULL EVENT: Bus 0x%02x Target 0x%02x Depth "
2517 "%d\n", pqf->Bus, pqf->TargetID, pqf->CurrentDepth);
2518 if (mpt->phydisk_sim && mpt_is_raid_member(mpt,
2519 pqf->TargetID) != 0) {
2520 sim = mpt->phydisk_sim;
2521 } else {
2522 sim = mpt->sim;
2523 }
2524 for (lun_id = 0; lun_id < MPT_MAX_LUNS; lun_id++) {
2525 if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim),
2526 pqf->TargetID, lun_id) != CAM_REQ_CMP) {
2527 mpt_prt(mpt, "unable to create a path to send "
2528 "XPT_REL_SIMQ");
2529 break;
2530 }
2531 crs = &xpt_alloc_ccb()->crs;
2532 xpt_setup_ccb(&crs->ccb_h, tmppath, 5);
2533 crs->ccb_h.func_code = XPT_REL_SIMQ;
2534 crs->ccb_h.flags = CAM_DEV_QFREEZE;
2535 crs->release_flags = RELSIM_ADJUST_OPENINGS;
2536 crs->openings = pqf->CurrentDepth - 1;
2537 xpt_action((union ccb *)crs);
2538 if (crs->ccb_h.status != CAM_REQ_CMP) {
2539 mpt_prt(mpt, "XPT_REL_SIMQ failed\n");
2540 }
2541 xpt_free_path(tmppath);
cec957e9 2542 xpt_free_ccb(&crs->ccb_h);
2545bca0 2543 }
2544 break;
2545 }
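/*
 * For example, a QUEUE FULL event reporting CurrentDepth 32 asks
 * CAM, via RELSIM_ADJUST_OPENINGS, to cap each LUN on that target at
 * 31 concurrent commands.
 */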
2546 case MPI_EVENT_IR_RESYNC_UPDATE:
2547 mpt_prt(mpt, "IR resync update %d completed\n",
2548 (data0 >> 16) & 0xff);
2549 break;
2550 case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
2551 {
2552 union ccb *ccb;
2553 struct cam_sim *sim;
2554 struct cam_path *tmppath;
2555 PTR_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE psdsc;
2556
2557 psdsc = (PTR_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE)msg->Data;
2558 if (mpt->phydisk_sim && mpt_is_raid_member(mpt,
2559 psdsc->TargetID) != 0)
2560 sim = mpt->phydisk_sim;
2561 else
2562 sim = mpt->sim;
2563 switch(psdsc->ReasonCode) {
2564 case MPI_EVENT_SAS_DEV_STAT_RC_ADDED:
cec957e9 2565 ccb = xpt_alloc_ccb();
2566 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
2567 cam_sim_path(sim), psdsc->TargetID,
2568 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2569 mpt_prt(mpt,
2570 "unable to create path for rescan\n");
cec957e9 2571 xpt_free_ccb(&ccb->ccb_h);
2572 break;
2573 }
cec957e9 2574 xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, /*lopri*/5);
2575 ccb->ccb_h.func_code = XPT_SCAN_BUS;
2576 ccb->ccb_h.cbfcnp = mpt_cam_rescan_callback;
2577 ccb->crcn.flags = CAM_FLAG_NONE;
2578 xpt_action(ccb);
cec957e9 2579 /* scan now in progress */
2580 break;
2581 case MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING:
2582 if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim),
2583 psdsc->TargetID, CAM_LUN_WILDCARD) !=
2584 CAM_REQ_CMP) {
2585 mpt_prt(mpt,
2586 "unable to create path for async event");
2587 break;
2588 }
2589 xpt_async(AC_LOST_DEVICE, tmppath, NULL);
2590 xpt_free_path(tmppath);
2591 break;
2592 case MPI_EVENT_SAS_DEV_STAT_RC_CMPL_INTERNAL_DEV_RESET:
2593 case MPI_EVENT_SAS_DEV_STAT_RC_CMPL_TASK_ABORT_INTERNAL:
2594 case MPI_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
2595 break;
2596 default:
2597 mpt_lprt(mpt, MPT_PRT_WARN,
2598 "SAS device status change: Bus: 0x%02x TargetID: "
2599 "0x%02x ReasonCode: 0x%02x\n", psdsc->Bus,
2600 psdsc->TargetID, psdsc->ReasonCode);
2601 break;
2602 }
2603 break;
2604 }
2605 case MPI_EVENT_SAS_DISCOVERY_ERROR:
2606 {
2607 PTR_EVENT_DATA_DISCOVERY_ERROR pde;
2608
2609 pde = (PTR_EVENT_DATA_DISCOVERY_ERROR)msg->Data;
2610 pde->DiscoveryStatus = le32toh(pde->DiscoveryStatus);
2611 mpt_lprt(mpt, MPT_PRT_WARN,
2612 "SAS discovery error: Port: 0x%02x Status: 0x%08x\n",
2613 pde->Port, pde->DiscoveryStatus);
2614 break;
2615 }
2616 case MPI_EVENT_EVENT_CHANGE:
2617 case MPI_EVENT_INTEGRATED_RAID:
2618 case MPI_EVENT_IR2:
2619 case MPI_EVENT_LOG_ENTRY_ADDED:
2620 case MPI_EVENT_SAS_DISCOVERY:
2621 case MPI_EVENT_SAS_PHY_LINK_STATUS:
2622 case MPI_EVENT_SAS_SES:
2623 break;
2624 default:
2625 mpt_lprt(mpt, MPT_PRT_WARN, "mpt_cam_event: 0x%x\n",
6d259fc1 2626 msg->Event & 0xFF);
2627 return (0);
2628 }
2629 return (1);
2630}
2631
2632/*
2633 * Reply path for all SCSI I/O requests, called from our
2634 * interrupt handler by extracting our handler index from
2635 * the MsgContext field of the reply from the IOC.
2636 *
2637 * This routine is optimized for the common case of a
2638 * completion without error. All exception handling is
2639 * offloaded to non-inlined helper routines to minimize
2640 * cache footprint.
2641 */
2642static int
2643mpt_scsi_reply_handler(struct mpt_softc *mpt, request_t *req,
2644 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2645{
2646 MSG_SCSI_IO_REQUEST *scsi_req;
2647 union ccb *ccb;
2648
2649 if (req->state == REQ_STATE_FREE) {
2650 mpt_prt(mpt, "mpt_scsi_reply_handler: req already free\n");
2651 return (TRUE);
2652 }
2653
2654 scsi_req = (MSG_SCSI_IO_REQUEST *)req->req_vbuf;
2655 ccb = req->ccb;
2656 if (ccb == NULL) {
2657 mpt_prt(mpt, "mpt_scsi_reply_handler: req %p:%u with no ccb\n",
2658 req, req->serno);
2659 return (TRUE);
2660 }
2661
2662 mpt_req_untimeout(req, mpt_timeout, ccb);
2663 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2664
2665 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
2666 bus_dmasync_op_t op;
2667
2668 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
2669 op = BUS_DMASYNC_POSTREAD;
2670 else
2671 op = BUS_DMASYNC_POSTWRITE;
2672 bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
2673 bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
2674 }
2675
2676 if (reply_frame == NULL) {
2677 /*
2678 * Context only reply, completion without error status.
2679 */
2680 ccb->csio.resid = 0;
2681 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2682 ccb->csio.scsi_status = SCSI_STATUS_OK;
2683 } else {
2684 mpt_scsi_reply_frame_handler(mpt, req, reply_frame);
2685 }
2686
2687 if (mpt->outofbeer) {
2688 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2689 mpt->outofbeer = 0;
2690 mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
2691 }
2692 if (scsi_req->CDB[0] == INQUIRY && (scsi_req->CDB[1] & SI_EVPD) == 0) {
2693 struct scsi_inquiry_data *iq =
2694 (struct scsi_inquiry_data *)ccb->csio.data_ptr;
2695 if (scsi_req->Function ==
2696 MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
2697 /*
2698 * Fake out the device type so that only the
2699 * pass-thru device will attach.
2700 */
2701 iq->device &= ~0x1F;
2702 iq->device |= T_NODEVICE;
2703 }
2704 }
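/*
 * The low five bits of INQUIRY byte 0 carry the peripheral device
 * type; substituting T_NODEVICE there keeps the normal peripheral
 * drivers from also attaching to a disk that is reachable through
 * the RAID passthru bus.
 */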
2705 if (mpt->verbose == MPT_PRT_DEBUG) {
2706 mpt_prt(mpt, "mpt_scsi_reply_handler: %p:%u complete\n",
2707 req, req->serno);
2708 }
4c42baf4 2709 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
2545bca0 2710 xpt_done(ccb);
2711 if ((req->state & REQ_STATE_TIMEDOUT) == 0) {
2712 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2713 } else {
2714 mpt_prt(mpt, "completing timedout/aborted req %p:%u\n",
2715 req, req->serno);
2716 TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
2717 }
2718 KASSERT((req->state & REQ_STATE_NEED_WAKEUP) == 0,
2719 ("CCB req needed wakeup"));
2720#ifdef INVARIANTS
2721 mpt_req_not_spcl(mpt, req, "mpt_scsi_reply_handler", __LINE__);
2722#endif
2723 mpt_free_request(mpt, req);
2724 return (TRUE);
2725}
2726
2727static int
2728mpt_scsi_tmf_reply_handler(struct mpt_softc *mpt, request_t *req,
2729 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2730{
2731 MSG_SCSI_TASK_MGMT_REPLY *tmf_reply;
2732
2733 KASSERT(req == mpt->tmf_req, ("TMF Reply not using mpt->tmf_req"));
2734#ifdef INVARIANTS
2735 mpt_req_not_spcl(mpt, req, "mpt_scsi_tmf_reply_handler", __LINE__);
2736#endif
2737 tmf_reply = (MSG_SCSI_TASK_MGMT_REPLY *)reply_frame;
2738 /* Record IOC Status and Response Code of TMF for any waiters. */
2739 req->IOCStatus = le16toh(tmf_reply->IOCStatus);
2740 req->ResponseCode = tmf_reply->ResponseCode;
2741
2742 mpt_lprt(mpt, MPT_PRT_DEBUG, "TMF complete: req %p:%u status 0x%x\n",
2743 req, req->serno, le16toh(tmf_reply->IOCStatus));
2744 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2745 if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
2746 req->state |= REQ_STATE_DONE;
2747 wakeup(req);
2748 } else {
2749 mpt->tmf_req->state = REQ_STATE_FREE;
2750 }
2751 return (TRUE);
2752}
2753
2754/*
2755 * XXX: Move to definitions file
2756 */
2757#define ELS 0x22
2758#define FC4LS 0x32
2759#define ABTS 0x81
2760#define BA_ACC 0x84
2761
2762#define LS_RJT 0x01
2763#define LS_ACC 0x02
2764#define PLOGI 0x03
2765#define LOGO 0x05
2766#define SRR 0x14
2767#define PRLI 0x20
2768#define PRLO 0x21
2769#define ADISC 0x52
2770#define RSCN 0x61
2771
2772static void
2773mpt_fc_els_send_response(struct mpt_softc *mpt, request_t *req,
2774 PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp, U8 length)
2775{
2776 uint32_t fl;
2777 MSG_LINK_SERVICE_RSP_REQUEST tmp;
2778 PTR_MSG_LINK_SERVICE_RSP_REQUEST rsp;
2779
2780 /*
2781 * We are going to reuse the ELS request to send this response back.
2782 */
2783 rsp = &tmp;
2784 memset(rsp, 0, sizeof(*rsp));
2785
2786#ifdef USE_IMMEDIATE_LINK_DATA
2787 /*
2788 * Apparently the IMMEDIATE stuff doesn't seem to work.
2789 */
2790 rsp->RspFlags = LINK_SERVICE_RSP_FLAGS_IMMEDIATE;
2791#endif
2792 rsp->RspLength = length;
2793 rsp->Function = MPI_FUNCTION_FC_LINK_SRVC_RSP;
2794 rsp->MsgContext = htole32(req->index | fc_els_handler_id);
2795
2796 /*
2797 * Copy over information from the original reply frame to
2798 * its correct place in the response.
2799 */
2800 memcpy((U8 *)rsp + 0x0c, (U8 *)rp + 0x1c, 24);
2801
2802 /*
2803 * And now copy back the temporary area to the original frame.
2804 */
2805 memcpy(req->req_vbuf, rsp, sizeof (MSG_LINK_SERVICE_RSP_REQUEST));
2806 rsp = req->req_vbuf;
2807
2808#ifdef USE_IMMEDIATE_LINK_DATA
2809 memcpy((U8 *)&rsp->SGL, &((U8 *)req->req_vbuf)[MPT_RQSL(mpt)], length);
2810#else
2811{
2812 PTR_SGE_SIMPLE32 se = (PTR_SGE_SIMPLE32) &rsp->SGL;
2813 bus_addr_t paddr = req->req_pbuf;
2814 paddr += MPT_RQSL(mpt);
2815
2816 fl =
2817 MPI_SGE_FLAGS_HOST_TO_IOC |
2818 MPI_SGE_FLAGS_SIMPLE_ELEMENT |
2819 MPI_SGE_FLAGS_LAST_ELEMENT |
2820 MPI_SGE_FLAGS_END_OF_LIST |
2821 MPI_SGE_FLAGS_END_OF_BUFFER;
2822 fl <<= MPI_SGE_FLAGS_SHIFT;
2823 fl |= (length);
2824 se->FlagsLength = htole32(fl);
2825 se->Address = htole32((uint32_t) paddr);
2826}
2827#endif
2828
2829 /*
2830 * Send it on...
2831 */
2832 mpt_send_cmd(mpt, req);
2833}
2834
2835static int
2836mpt_fc_els_reply_handler(struct mpt_softc *mpt, request_t *req,
2837 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2838{
2839 PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp =
2840 (PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY) reply_frame;
2841 U8 rctl;
2842 U8 type;
2843 U8 cmd;
2844 U16 status = le16toh(reply_frame->IOCStatus);
2845 U32 *elsbuf;
2846 int ioindex;
2847 int do_refresh = TRUE;
2848
2849#ifdef INVARIANTS
2850 KASSERT(mpt_req_on_free_list(mpt, req) == 0,
2851 ("fc_els_reply_handler: req %p:%u for function %x on freelist!",
2852 req, req->serno, rp->Function));
2853 if (rp->Function != MPI_FUNCTION_FC_PRIMITIVE_SEND) {
2854 mpt_req_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
2855 } else {
2856 mpt_req_not_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
2857 }
2858#endif
2859 mpt_lprt(mpt, MPT_PRT_DEBUG,
2860 "FC_ELS Complete: req %p:%u, reply %p function %x\n",
2861 req, req->serno, reply_frame, reply_frame->Function);
2862
2863 if (status != MPI_IOCSTATUS_SUCCESS) {
2864 mpt_prt(mpt, "ELS REPLY STATUS 0x%x for Function %x\n",
2865 status, reply_frame->Function);
2866 if (status == MPI_IOCSTATUS_INVALID_STATE) {
2867 /*
2868 * XXX: to get around shutdown issue
2869 */
2870 mpt->disabled = 1;
2871 return (TRUE);
2872 }
2873 return (TRUE);
2874 }
2875
2876 /*
2877 * If this is the completion of a link service response, we recycle the
2878 * response to be a refresh for a new link service request.
2879 *
2880 * The request pointer is bogus in this case and we have to fetch
2881 * it based upon the TransactionContext.
2882 */
2883 if (rp->Function == MPI_FUNCTION_FC_LINK_SRVC_RSP) {
2884 /* Freddie Uncle Charlie Katie */
2885 /* We don't get the IOINDEX as part of the Link Svc Rsp */
2886 for (ioindex = 0; ioindex < mpt->els_cmds_allocated; ioindex++)
2887 if (mpt->els_cmd_ptrs[ioindex] == req) {
2888 break;
2889 }
2890
2891 KASSERT(ioindex < mpt->els_cmds_allocated,
2892 ("can't find my mommie!"));
2893
2894 /* remove from active list as we're going to re-post it */
2895 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2896 req->state &= ~REQ_STATE_QUEUED;
2897 req->state |= REQ_STATE_DONE;
2898 mpt_fc_post_els(mpt, req, ioindex);
2899 return (TRUE);
2900 }
2901
2902 if (rp->Function == MPI_FUNCTION_FC_PRIMITIVE_SEND) {
2903 /* remove from active list as we're done */
2904 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2905 req->state &= ~REQ_STATE_QUEUED;
2906 req->state |= REQ_STATE_DONE;
2907 if (req->state & REQ_STATE_TIMEDOUT) {
2908 mpt_lprt(mpt, MPT_PRT_DEBUG,
2909 "Sync Primitive Send Completed After Timeout\n");
2910 mpt_free_request(mpt, req);
2911 } else if ((req->state & REQ_STATE_NEED_WAKEUP) == 0) {
2912 mpt_lprt(mpt, MPT_PRT_DEBUG,
2913 "Async Primitive Send Complete\n");
2914 mpt_free_request(mpt, req);
2915 } else {
2916 mpt_lprt(mpt, MPT_PRT_DEBUG,
2917 "Sync Primitive Send Complete- Waking Waiter\n");
2918 wakeup(req);
2919 }
2920 return (TRUE);
2921 }
2922
2923 if (rp->Function != MPI_FUNCTION_FC_LINK_SRVC_BUF_POST) {
2924 mpt_prt(mpt, "unexpected ELS_REPLY: Function 0x%x Flags %x "
2925 "Length %d Message Flags %x\n", rp->Function, rp->Flags,
2926 rp->MsgLength, rp->MsgFlags);
2927 return (TRUE);
2928 }
2929
2930 if (rp->MsgLength <= 5) {
2931 /*
2932 * This is just an ack of an original ELS buffer post
2933 */
2934 mpt_lprt(mpt, MPT_PRT_DEBUG,
2935 "RECV'd ACK of FC_ELS buf post %p:%u\n", req, req->serno);
2936 return (TRUE);
2937 }
2938
2939
2940 rctl = (le32toh(rp->Rctl_Did) & MPI_FC_RCTL_MASK) >> MPI_FC_RCTL_SHIFT;
2941 type = (le32toh(rp->Type_Fctl) & MPI_FC_TYPE_MASK) >> MPI_FC_TYPE_SHIFT;
2942
2943 elsbuf = &((U32 *)req->req_vbuf)[MPT_RQSL(mpt)/sizeof (U32)];
2944 cmd = be32toh(elsbuf[0]) >> 24;
2945
2946 if (rp->Flags & MPI_LS_BUF_POST_REPLY_FLAG_NO_RSP_NEEDED) {
2947 mpt_lprt(mpt, MPT_PRT_ALWAYS, "ELS_REPLY: response unneeded\n");
2948 return (TRUE);
2949 }
2950
2951 ioindex = le32toh(rp->TransactionContext);
2952 req = mpt->els_cmd_ptrs[ioindex];
2953
2954 if (rctl == ELS && type == 1) {
2955 switch (cmd) {
2956 case PRLI:
2957 /*
2958 * Send back a PRLI ACC
2959 */
2960 mpt_prt(mpt, "PRLI from 0x%08x%08x\n",
2961 le32toh(rp->Wwn.PortNameHigh),
2962 le32toh(rp->Wwn.PortNameLow));
2963 elsbuf[0] = htobe32(0x02100014);
2964 elsbuf[1] |= htobe32(0x00000100);
2965 elsbuf[4] = htobe32(0x00000002);
2966 if (mpt->role & MPT_ROLE_TARGET)
2967 elsbuf[4] |= htobe32(0x00000010);
2968 if (mpt->role & MPT_ROLE_INITIATOR)
2969 elsbuf[4] |= htobe32(0x00000020);
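/*
 * Decoding the constants (an aid, not taken from this file): the
 * 0x02 in the top byte of word 0 is the LS_ACC command code, and the
 * 0x10 and 0x20 bits OR'd into word 4 advertise target and initiator
 * function, respectively, in the PRLI service parameter page.
 */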
2970 /* remove from active list as we're done */
2971 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2972 req->state &= ~REQ_STATE_QUEUED;
2973 req->state |= REQ_STATE_DONE;
2974 mpt_fc_els_send_response(mpt, req, rp, 20);
2975 do_refresh = FALSE;
2976 break;
2977 case PRLO:
2978 memset(elsbuf, 0, 5 * (sizeof (U32)));
2979 elsbuf[0] = htobe32(0x02100014);
2980 elsbuf[1] = htobe32(0x08000100);
2981 mpt_prt(mpt, "PRLO from 0x%08x%08x\n",
2982 le32toh(rp->Wwn.PortNameHigh),
2983 le32toh(rp->Wwn.PortNameLow));
2984 /* remove from active list as we're done */
2985 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2986 req->state &= ~REQ_STATE_QUEUED;
2987 req->state |= REQ_STATE_DONE;
2988 mpt_fc_els_send_response(mpt, req, rp, 20);
2989 do_refresh = FALSE;
2990 break;
2991 default:
2992 mpt_prt(mpt, "ELS TYPE 1 COMMAND: %x\n", cmd);
2993 break;
2994 }
2995 } else if (rctl == ABTS && type == 0) {
2996 uint16_t rx_id = le16toh(rp->Rxid);
2997 uint16_t ox_id = le16toh(rp->Oxid);
2998 request_t *tgt_req = NULL;
2999
3000 mpt_prt(mpt,
3001 "ELS: ABTS OX_ID 0x%x RX_ID 0x%x from 0x%08x%08x\n",
3002 ox_id, rx_id, le32toh(rp->Wwn.PortNameHigh),
3003 le32toh(rp->Wwn.PortNameLow));
3004 if (rx_id >= mpt->mpt_max_tgtcmds) {
3005 mpt_prt(mpt, "Bad RX_ID 0x%x\n", rx_id);
3006 } else if (mpt->tgt_cmd_ptrs == NULL) {
3007 mpt_prt(mpt, "No TGT CMD PTRS\n");
3008 } else {
3009 tgt_req = mpt->tgt_cmd_ptrs[rx_id];
3010 }
3011 if (tgt_req) {
3012 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, tgt_req);
6d259fc1 3013 union ccb *ccb;
3014 uint32_t ct_id;
3015
3016 /*
3017 * Check to make sure we have the correct command.
3018 * The reply descriptor in the target state
3019 * should contain an IoIndex that matches the
3020 * RX_ID.
3021 *
3022 * It'd be nice to have OX_ID to crosscheck with
3023 * as well.
3024 */
3025 ct_id = GET_IO_INDEX(tgt->reply_desc);
3026
3027 if (ct_id != rx_id) {
3028 mpt_lprt(mpt, MPT_PRT_ERROR, "ABORT Mismatch: "
3029 "RX_ID received=0x%x; RX_ID in cmd=0x%x\n",
3030 rx_id, ct_id);
3031 goto skip;
3032 }
3033
3034 ccb = tgt->ccb;
3035 if (ccb) {
3036 mpt_prt(mpt,
3037 "CCB (%p): lun %u flags %x status %x\n",
3038 ccb, ccb->ccb_h.target_lun,
3039 ccb->ccb_h.flags, ccb->ccb_h.status);
3040 }
3041 mpt_prt(mpt, "target state 0x%x resid %u xfrd %u rpwrd "
3042 "%x nxfers %x\n", tgt->state,
3043 tgt->resid, tgt->bytes_xfered, tgt->reply_desc,
3044 tgt->nxfers);
3045 skip:
3046 if (mpt_abort_target_cmd(mpt, tgt_req)) {
3047 mpt_prt(mpt, "unable to start TargetAbort\n");
3048 }
3049 } else {
3050 mpt_prt(mpt, "no back pointer for RX_ID 0x%x\n", rx_id);
3051 }
3052 memset(elsbuf, 0, 5 * (sizeof (U32)));
3053 elsbuf[0] = htobe32(0);
3054 elsbuf[1] = htobe32((ox_id << 16) | rx_id);
3055 elsbuf[2] = htobe32(0x000ffff);
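/*
 * Read of the payload being built (per the standard BA_ACC layout,
 * stated as an aid rather than taken from this file): word 1 packs
 * OX_ID in the upper and RX_ID in the lower 16 bits, and word 2
 * gives the valid SEQ_CNT range 0x0000-0xffff.
 */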
3056 /*
6d259fc1 3057 * Dork with the reply frame so that the response to it
3058 * will be correct.
3059 */
3060 rp->Rctl_Did += ((BA_ACC - ABTS) << MPI_FC_RCTL_SHIFT);
3061 /* remove from active list as we're done */
3062 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
3063 req->state &= ~REQ_STATE_QUEUED;
3064 req->state |= REQ_STATE_DONE;
3065 mpt_fc_els_send_response(mpt, req, rp, 12);
3066 do_refresh = FALSE;
3067 } else {
3068 mpt_prt(mpt, "ELS: RCTL %x TYPE %x CMD %x\n", rctl, type, cmd);
3069 }
3070 if (do_refresh == TRUE) {
3071 /* remove from active list as we're done */
3072 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
3073 req->state &= ~REQ_STATE_QUEUED;
3074 req->state |= REQ_STATE_DONE;
3075 mpt_fc_post_els(mpt, req, ioindex);
3076 }
3077 return (TRUE);
3078}
3079
3080/*
3081 * Clean up all SCSI Initiator personality state in response
3082 * to a controller reset.
3083 */
3084static void
3085mpt_cam_ioc_reset(struct mpt_softc *mpt, int type)
3086{
4c42baf4 3087
3088 /*
3089 * The pending list is already run down by
3090 * the generic handler. Perform the same
3091 * operation on the timed out request list.
3092 */
3093 mpt_complete_request_chain(mpt, &mpt->request_timeout_list,
3094 MPI_IOCSTATUS_INVALID_STATE);
3095
3096 /*
3097 * XXX: We need to repost ELS and Target Command Buffers?
3098 */
3099
3100 /*
3101 * Inform the XPT that a bus reset has occurred.
3102 */
3103 xpt_async(AC_BUS_RESET, mpt->path, NULL);
3104}
3105
3106/*
3107 * Parse additional completion information in the reply
3108 * frame for SCSI I/O requests.
3109 */
3110static int
3111mpt_scsi_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
3112 MSG_DEFAULT_REPLY *reply_frame)
3113{
3114 union ccb *ccb;
3115 MSG_SCSI_IO_REPLY *scsi_io_reply;
3116 u_int ioc_status;
3117 u_int sstate;
3118
3119 MPT_DUMP_REPLY_FRAME(mpt, reply_frame);
3120 KASSERT(reply_frame->Function == MPI_FUNCTION_SCSI_IO_REQUEST
3121 || reply_frame->Function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH,
3122 ("MPT SCSI I/O Handler called with incorrect reply type"));
3123 KASSERT((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0,
3124 ("MPT SCSI I/O Handler called with continuation reply"));
3125
3126 scsi_io_reply = (MSG_SCSI_IO_REPLY *)reply_frame;
3127 ioc_status = le16toh(scsi_io_reply->IOCStatus);
3128 ioc_status &= MPI_IOCSTATUS_MASK;
3129 sstate = scsi_io_reply->SCSIState;
3130
3131 ccb = req->ccb;
3132 ccb->csio.resid =
3133 ccb->csio.dxfer_len - le32toh(scsi_io_reply->TransferCount);
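/*
 * E.g. a 65536-byte request for which the IOC reports TransferCount
 * 0xe000 leaves resid = 0x10000 - 0xe000 = 8192 bytes.
 */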
3134
3135 if ((sstate & MPI_SCSI_STATE_AUTOSENSE_VALID) != 0
3136 && (ccb->ccb_h.flags & (CAM_SENSE_PHYS | CAM_SENSE_PTR)) == 0) {
3137 uint32_t sense_returned;
3138
2545bca0 3139 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
3140
3141 sense_returned = le32toh(scsi_io_reply->SenseCount);
3142 if (sense_returned < ccb->csio.sense_len)
3143 ccb->csio.sense_resid = ccb->csio.sense_len -
3144 sense_returned;
3145 else
3146 ccb->csio.sense_resid = 0;
3147
bc14747b 3148 bzero(&ccb->csio.sense_data, sizeof(ccb->csio.sense_data));
2545bca0 3149 bcopy(req->sense_vbuf, &ccb->csio.sense_data,
4c42baf4 3150 min(ccb->csio.sense_len, sense_returned));
3151 }
3152
3153 if ((sstate & MPI_SCSI_STATE_QUEUE_TAG_REJECTED) != 0) {
3154 /*
3155 * Tag messages rejected, but non-tagged retry
3156 * was successful.
3157XXXX
3158 mpt_set_tags(mpt, devinfo, MPT_QUEUE_NONE);
3159 */
3160 }
3161
3162 switch(ioc_status) {
3163 case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
3164 /*
3165 * XXX
3166 * Linux driver indicates that a zero
3167 * transfer length with this error code
3168 * indicates a CRC error.
3169 *
3170 * No need to swap the bytes for checking
3171 * against zero.
3172 */
3173 if (scsi_io_reply->TransferCount == 0) {
3174 mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
3175 break;
3176 }
3177 /* FALLTHROUGH */
3178 case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
3179 case MPI_IOCSTATUS_SUCCESS:
3180 case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
3181 if ((sstate & MPI_SCSI_STATE_NO_SCSI_STATUS) != 0) {
3182 /*
3183 * Status was never returned for this transaction.
3184 */
3185 mpt_set_ccb_status(ccb, CAM_UNEXP_BUSFREE);
3186 } else if (scsi_io_reply->SCSIStatus != SCSI_STATUS_OK) {
3187 ccb->csio.scsi_status = scsi_io_reply->SCSIStatus;
3188 mpt_set_ccb_status(ccb, CAM_SCSI_STATUS_ERROR);
3189 if ((sstate & MPI_SCSI_STATE_AUTOSENSE_FAILED) != 0)
3190 mpt_set_ccb_status(ccb, CAM_AUTOSENSE_FAIL);
3191 } else if ((sstate & MPI_SCSI_STATE_RESPONSE_INFO_VALID) != 0) {
3192
6d259fc1 3193 /* XXX Handle SPI-Packet and FCP-2 response info. */
3194 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3195 } else
3196 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3197 break;
3198 case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
3199 mpt_set_ccb_status(ccb, CAM_DATA_RUN_ERR);
3200 break;
3201 case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR:
3202 mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
3203 break;
3204 case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
3205 /*
3206 * Since selection timeouts and "device really not
3207 * there" are grouped into this error code, report
3208 * selection timeout. Selection timeouts are
3209 * typically retried before giving up on the device
3210 * whereas "device not there" errors are considered
3211 * unretryable.
3212 */
3213 mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
3214 break;
3215 case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:
3216 mpt_set_ccb_status(ccb, CAM_SEQUENCE_FAIL);
3217 break;
3218 case MPI_IOCSTATUS_SCSI_INVALID_BUS:
3219 mpt_set_ccb_status(ccb, CAM_PATH_INVALID);
3220 break;
3221 case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
3222 mpt_set_ccb_status(ccb, CAM_TID_INVALID);
3223 break;
3224 case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
3225 ccb->ccb_h.status = CAM_UA_TERMIO;
3226 break;
3227 case MPI_IOCSTATUS_INVALID_STATE:
3228 /*
3229 * The IOC has been reset. Emulate a bus reset.
3230 */
3231 /* FALLTHROUGH */
3232 case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
3233 ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
3234 break;
3235 case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
3236 case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
3237 /*
3238 * Don't clobber any timeout status that has
3239 * already been set for this transaction. We
3240 * want the SCSI layer to be able to differentiate
3241 * between the command we aborted due to timeout
3242 * and any innocent bystanders.
3243 */
3244 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG)
3245 break;
3246 mpt_set_ccb_status(ccb, CAM_REQ_TERMIO);
3247 break;
3248
3249 case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
3250 mpt_set_ccb_status(ccb, CAM_RESRC_UNAVAIL);
3251 break;
3252 case MPI_IOCSTATUS_BUSY:
3253 mpt_set_ccb_status(ccb, CAM_BUSY);
3254 break;
3255 case MPI_IOCSTATUS_INVALID_FUNCTION:
3256 case MPI_IOCSTATUS_INVALID_SGL:
3257 case MPI_IOCSTATUS_INTERNAL_ERROR:
3258 case MPI_IOCSTATUS_INVALID_FIELD:
3259 default:
3260 /* XXX
3261 * Some of the above may need to kick
3262 * off a recovery action!!!!
3263 */
3264 ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
3265 break;
3266 }
3267
3268 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
3269 mpt_freeze_ccb(ccb);
3270 }
3271
3272 return (TRUE);
3273}
3274
3275static void
3276mpt_action(struct cam_sim *sim, union ccb *ccb)
3277{
3278 struct mpt_softc *mpt;
3279 struct ccb_trans_settings *cts;
3280 target_id_t tgt;
3281 lun_id_t lun;
3282 int raid_passthru;
3283
3284 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mpt_action\n"));
3285
3286 mpt = (struct mpt_softc *)cam_sim_softc(sim);
3287 raid_passthru = (sim == mpt->phydisk_sim);
3288 MPT_LOCK_ASSERT(mpt);
3289
3290 tgt = ccb->ccb_h.target_id;
3291 lun = ccb->ccb_h.target_lun;
3292 if (raid_passthru &&
3293 ccb->ccb_h.func_code != XPT_PATH_INQ &&
3294 ccb->ccb_h.func_code != XPT_RESET_BUS &&
3295 ccb->ccb_h.func_code != XPT_RESET_DEV) {
2545bca0 3296 if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
3297 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3298 mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
3299 xpt_done(ccb);
3300 return;
3301 }
3302 }
3303 ccb->ccb_h.ccb_mpt_ptr = mpt;
3304
3305 switch (ccb->ccb_h.func_code) {
3306 case XPT_SCSI_IO: /* Execute the requested I/O operation */
3307 /*
3308 * Do a couple of preliminary checks...
3309 */
3310 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
3311 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
3312 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3313 mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3314 break;
3315 }
3316 }
3317 /* Max supported CDB length is 16 bytes */
3318 /* XXX Unless we implement the new 32byte message type */
3319 if (ccb->csio.cdb_len >
3320 sizeof (((PTR_MSG_SCSI_IO_REQUEST)0)->CDB)) {
3321 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3322 mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3323 break;
3324 }
3325#ifdef MPT_TEST_MULTIPATH
3326 if (mpt->failure_id == ccb->ccb_h.target_id) {
3327 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3328 mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
3329 break;
3330 }
3331#endif
3332 ccb->csio.scsi_status = SCSI_STATUS_OK;
3333 mpt_start(sim, ccb);
3334 return;
3335
3336 case XPT_RESET_BUS:
3337 if (raid_passthru) {
3338 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3339 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3340 break;
3341 }
3342 case XPT_RESET_DEV:
3343 if (ccb->ccb_h.func_code == XPT_RESET_BUS) {
3344 if (bootverbose) {
3345 xpt_print(ccb->ccb_h.path, "reset bus\n");
3346 }
3347 } else {
3348 xpt_print(ccb->ccb_h.path, "reset device\n");
3349 }
2545bca0 3350 (void) mpt_bus_reset(mpt, tgt, lun, FALSE);
2545bca0
MD
3351
3352 /*
3353 * mpt_bus_reset is always successful in that it
3354 * will fall back to a hard reset should a bus
3355 * reset attempt fail.
3356 */
3357 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3358 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3359 break;
3360
3361 case XPT_ABORT:
3362 {
3363 union ccb *accb = ccb->cab.abort_ccb;
3364 switch (accb->ccb_h.func_code) {
3365 case XPT_ACCEPT_TARGET_IO:
3366 case XPT_IMMED_NOTIFY:
3367 ccb->ccb_h.status = mpt_abort_target_ccb(mpt, ccb);
3368 break;
3369 case XPT_CONT_TARGET_IO:
3370 mpt_prt(mpt, "cannot abort active CTIOs yet\n");
3371 ccb->ccb_h.status = CAM_UA_ABORT;
3372 break;
3373 case XPT_SCSI_IO:
3374 ccb->ccb_h.status = CAM_UA_ABORT;
3375 break;
3376 default:
3377 ccb->ccb_h.status = CAM_REQ_INVALID;
3378 break;
3379 }
3380 break;
3381 }
3382
2545bca0 3383#define IS_CURRENT_SETTINGS(c) ((c)->type == CTS_TYPE_CURRENT_SETTINGS)
3384#define DP_DISC_ENABLE 0x1
3385#define DP_DISC_DISABL 0x2
3386#define DP_DISC (DP_DISC_ENABLE|DP_DISC_DISABL)
3387
3388#define DP_TQING_ENABLE 0x4
3389#define DP_TQING_DISABL 0x8
3390#define DP_TQING (DP_TQING_ENABLE|DP_TQING_DISABL)
3391
3392#define DP_WIDE 0x10
3393#define DP_NARROW 0x20
3394#define DP_WIDTH (DP_WIDE|DP_NARROW)
3395
3396#define DP_SYNC 0x40
3397
3398 case XPT_SET_TRAN_SETTINGS: /* Nexus Settings */
3399 {
3400 struct ccb_trans_settings_scsi *scsi;
3401 struct ccb_trans_settings_spi *spi;
3402 uint8_t dval;
3403 u_int period;
3404 u_int offset;
3405 int i, j;
3406
3407 cts = &ccb->cts;
3408
3409 if (mpt->is_fc || mpt->is_sas) {
3410 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3411 break;
3412 }
3413
3414 scsi = &cts->proto_specific.scsi;
3415 spi = &cts->xport_specific.spi;
3416
3417 /*
3418 * We can be called just to validate transport and proto versions
3419 */
3420 if (scsi->valid == 0 && spi->valid == 0) {
3421 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3422 break;
3423 }
3424
3425 /*
3426 * Skip attempting settings on RAID volume disks.
3427 * Other devices on the bus get the normal treatment.
3428 */
3429 if (mpt->phydisk_sim && raid_passthru == 0 &&
3430 mpt_is_raid_volume(mpt, tgt) != 0) {
3431 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3432 "no transfer settings for RAID vols\n");
3433 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3434 break;
3435 }
3436
3437 i = mpt->mpt_port_page2.PortSettings &
3438 MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
3439 j = mpt->mpt_port_page2.PortFlags &
3440 MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK;
3441 if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS &&
3442 j == MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV) {
3443 mpt_lprt(mpt, MPT_PRT_ALWAYS,
3444 "honoring BIOS transfer negotiations\n");
3445 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3446 break;
3447 }
3448
3449 dval = 0;
3450 period = 0;
3451 offset = 0;
3452
3453 if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
3454 dval |= ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) ?
3455 DP_DISC_ENABLE : DP_DISC_DISABL;
3456 }
3457
3458 if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
3459 dval |= ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) ?
3460 DP_TQING_ENABLE : DP_TQING_DISABL;
3461 }
3462
3463 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
3464 dval |= (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) ?
3465 DP_WIDE : DP_NARROW;
3466 }
3467
3468 if (spi->valid & CTS_SPI_VALID_SYNC_OFFSET) {
3469 dval |= DP_SYNC;
3470 offset = spi->sync_offset;
3471 } else {
3472 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr =
3473 &mpt->mpt_dev_page1[tgt];
3474 offset = ptr->RequestedParameters;
3475 offset &= MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
3476 offset >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET;
3477 }
3478 if (spi->valid & CTS_SPI_VALID_SYNC_RATE) {
3479 dval |= DP_SYNC;
3480 period = spi->sync_period;
3481 } else {
3482 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr =
3483 &mpt->mpt_dev_page1[tgt];
3484 period = ptr->RequestedParameters;
3485 period &= MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
3486 period >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD;
3487 }
3488 if (dval & DP_DISC_ENABLE) {
3489 mpt->mpt_disc_enable |= (1 << tgt);
3490 } else if (dval & DP_DISC_DISABL) {
3491 mpt->mpt_disc_enable &= ~(1 << tgt);
3492 }
3493 if (dval & DP_TQING_ENABLE) {
3494 mpt->mpt_tag_enable |= (1 << tgt);
3495 } else if (dval & DP_TQING_DISABL) {
3496 mpt->mpt_tag_enable &= ~(1 << tgt);
3497 }
3498 if (dval & DP_WIDTH) {
3499 mpt_setwidth(mpt, tgt, 1);
3500 }
3501 if (dval & DP_SYNC) {
3502 mpt_setsync(mpt, tgt, period, offset);
3503 }
3504 if (dval == 0) {
3505 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3506 break;
3507 }
3508 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3509 "set [%d]: 0x%x period 0x%x offset %d\n",
3510 tgt, dval, period, offset);
3511 if (mpt_update_spi_config(mpt, tgt)) {
3512 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3513 } else {
3514 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3515 }
3516 break;
3517 }
3518 case XPT_GET_TRAN_SETTINGS:
3519 {
3520 struct ccb_trans_settings_scsi *scsi;
3521 cts = &ccb->cts;
3522 cts->protocol = PROTO_SCSI;
3523 if (mpt->is_fc) {
3524 struct ccb_trans_settings_fc *fc =
3525 &cts->xport_specific.fc;
3526 cts->protocol_version = SCSI_REV_SPC;
3527 cts->transport = XPORT_FC;
3528 cts->transport_version = 0;
3529 fc->valid = CTS_FC_VALID_SPEED;
3530 fc->bitrate = 100000;
3531 } else if (mpt->is_sas) {
3532 struct ccb_trans_settings_sas *sas =
3533 &cts->xport_specific.sas;
3534 cts->protocol_version = SCSI_REV_SPC2;
3535 cts->transport = XPORT_SAS;
3536 cts->transport_version = 0;
3537 sas->valid = CTS_SAS_VALID_SPEED;
3538 sas->bitrate = 300000;
3539 } else {
3540 cts->protocol_version = SCSI_REV_2;
3541 cts->transport = XPORT_SPI;
3542 cts->transport_version = 2;
3543 if (mpt_get_spi_settings(mpt, cts) != 0) {
3544 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3545 break;
3546 }
3547 }
3548 scsi = &cts->proto_specific.scsi;
3549 scsi->valid = CTS_SCSI_VALID_TQ;
3550 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
3551 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3552 break;
3553 }
3554 case XPT_CALC_GEOMETRY:
3555 {
3556 struct ccb_calc_geometry *ccg;
3557
3558 ccg = &ccb->ccg;
3559 if (ccg->block_size == 0) {
3560 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3561 mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3562 break;
3563 }
52001f09 3564 cam_calc_geometry(ccg, /*extended*/1);
4c42baf4 3565 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
3566 break;
3567 }
3568 case XPT_PATH_INQ: /* Path routing inquiry */
3569 {
3570 struct ccb_pathinq *cpi = &ccb->cpi;
3571
3572 cpi->version_num = 1;
3573 cpi->target_sprt = 0;
3574 cpi->hba_eng_cnt = 0;
3575 cpi->max_target = mpt->port_facts[0].MaxDevices - 1;
3576#if 0 /* XXX swildner */
3577 cpi->maxio = (mpt->max_cam_seg_cnt - 1) * PAGE_SIZE;
3578#endif
3579 /*
3580 * FC cards report MAX_DEVICES of 512, but
3581 * the MSG_SCSI_IO_REQUEST target id field
3582 * is only 8 bits. Until we fix the driver
3583 * to support 'channels' for bus overflow,
3584 * just limit it.
3585 */
3586 if (cpi->max_target > 255) {
3587 cpi->max_target = 255;
3588 }
3589
3590 /*
3591 * VMware ESX reports > 16 devices and then dies when we probe.
3592 */
3593 if (mpt->is_spi && cpi->max_target > 15) {
3594 cpi->max_target = 15;
3595 }
3596 if (mpt->is_spi)
3597 cpi->max_lun = 7;
3598 else
3599 cpi->max_lun = MPT_MAX_LUNS;
3600 cpi->initiator_id = mpt->mpt_ini_id;
3601 cpi->bus_id = cam_sim_bus(sim);
3602
3603 /*
3604 * The base speed is the speed of the underlying connection.
3605 */
3606 cpi->protocol = PROTO_SCSI;
3607 if (mpt->is_fc) {
3608 cpi->hba_misc = PIM_NOBUSRESET;
3609 cpi->base_transfer_speed = 100000;
3610 cpi->hba_inquiry = PI_TAG_ABLE;
3611 cpi->transport = XPORT_FC;
3612 cpi->transport_version = 0;
3613 cpi->protocol_version = SCSI_REV_SPC;
3614 } else if (mpt->is_sas) {
3615 cpi->hba_misc = PIM_NOBUSRESET;
3616 cpi->base_transfer_speed = 300000;
3617 cpi->hba_inquiry = PI_TAG_ABLE;
3618 cpi->transport = XPORT_SAS;
3619 cpi->transport_version = 0;
3620 cpi->protocol_version = SCSI_REV_SPC2;
3621 } else {
3622 cpi->hba_misc = PIM_SEQSCAN;
3623 cpi->base_transfer_speed = 3300;
3624 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
3625 cpi->transport = XPORT_SPI;
3626 cpi->transport_version = 2;
3627 cpi->protocol_version = SCSI_REV_2;
3628 }
3629
3630 /*
3631 * We give our fake RAID passthru bus a width that is MaxVolumes
3632 * wide and restrict it to one lun.
3633 */
3634 if (raid_passthru) {
3635 cpi->max_target = mpt->ioc_page2->MaxPhysDisks - 1;
3636 cpi->initiator_id = cpi->max_target + 1;
3637 cpi->max_lun = 0;
3638 }
3639
3640 if ((mpt->role & MPT_ROLE_INITIATOR) == 0) {
3641 cpi->hba_misc |= PIM_NOINITIATOR;
3642 }
3643 if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
3644 cpi->target_sprt =
3645 PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
3646 } else {
3647 cpi->target_sprt = 0;
3648 }
3649 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
3650 strncpy(cpi->hba_vid, "LSI", HBA_IDLEN);
3651 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
3652 cpi->unit_number = cam_sim_unit(sim);
3653 cpi->ccb_h.status = CAM_REQ_CMP;
3654 break;
3655 }
3656 case XPT_EN_LUN: /* Enable LUN as a target */
3657 {
3658 int result;
3659
3660 if (ccb->cel.enable)
3661 result = mpt_enable_lun(mpt,
3662 ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
3663 else
3664 result = mpt_disable_lun(mpt,
3665 ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
3666 if (result == 0) {
3667 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3668 } else {
3669 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3670 }
3671 break;
3672 }
3673 case XPT_NOTIFY_ACK: /* recycle notify ack */
3674 case XPT_IMMED_NOTIFY: /* Add Immediate Notify Resource */
3675 case XPT_ACCEPT_TARGET_IO: /* Add Accept Target IO Resource */
3676 {
3677 tgt_resource_t *trtp;
3678 lun_id_t lun = ccb->ccb_h.target_lun;
3679 ccb->ccb_h.sim_priv.entries[0].field = 0;
3680 ccb->ccb_h.sim_priv.entries[1].ptr = mpt;
3681 ccb->ccb_h.flags = 0;
3682
3683 if (lun == CAM_LUN_WILDCARD) {
3684 if (ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
3685 mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3686 break;
3687 }
3688 trtp = &mpt->trt_wildcard;
3689 } else if (lun >= MPT_MAX_LUNS) {
3690 mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3691 break;
3692 } else {
3693 trtp = &mpt->trt[lun];
3694 }
3695 if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
3696 mpt_lprt(mpt, MPT_PRT_DEBUG1,
3697 "Put FREE ATIO %p lun %d\n", ccb, lun);
3698 STAILQ_INSERT_TAIL(&trtp->atios, &ccb->ccb_h,
3699 sim_links.stqe);
3700 } else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
3701 mpt_lprt(mpt, MPT_PRT_DEBUG1,
3702 "Put FREE INOT lun %d\n", lun);
3703 STAILQ_INSERT_TAIL(&trtp->inots, &ccb->ccb_h,
3704 sim_links.stqe);
3705 } else {
3706 mpt_lprt(mpt, MPT_PRT_ALWAYS, "Got Notify ACK\n");
3707 }
3708 mpt_set_ccb_status(ccb, CAM_REQ_INPROG);
3709 return;
3710 }
3711 case XPT_CONT_TARGET_IO:
 3712 		mpt_target_start_io(mpt, ccb);
3713 return;
3714
3715 default:
3716 ccb->ccb_h.status = CAM_REQ_INVALID;
3717 break;
3718 }
3719 xpt_done(ccb);
3720}
3721
3722static int
3723mpt_get_spi_settings(struct mpt_softc *mpt, struct ccb_trans_settings *cts)
3724{
3725 struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi;
3726 struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi;
3727 target_id_t tgt;
3728 uint32_t dval, pval, oval;
3729 int rv;
3730
3731 if (IS_CURRENT_SETTINGS(cts) == 0) {
3732 tgt = cts->ccb_h.target_id;
3733 } else if (xpt_path_sim(cts->ccb_h.path) == mpt->phydisk_sim) {
3734 if (mpt_map_physdisk(mpt, (union ccb *)cts, &tgt)) {
3735 return (-1);
3736 }
3737 } else {
3738 tgt = cts->ccb_h.target_id;
3739 }
3740
3741 /*
 3742 	 * We aren't looking at Port Page 2 BIOS settings here;
 3743 	 * these have sometimes been known to be bogus. XXX
 3744 	 *
 3745 	 * For user settings, we pick the max from port page 0.
3746 *
3747 * For current settings we read the current settings out from
3748 * device page 0 for that target.
3749 */
3750 if (IS_CURRENT_SETTINGS(cts)) {
3751 CONFIG_PAGE_SCSI_DEVICE_0 tmp;
3752 dval = 0;
3753
3754 tmp = mpt->mpt_dev_page0[tgt];
3755 rv = mpt_read_cur_cfg_page(mpt, tgt, &tmp.Header,
3756 sizeof(tmp), FALSE, 5000);
3757 if (rv) {
3758 mpt_prt(mpt, "can't get tgt %d config page 0\n", tgt);
3759 return (rv);
3760 }
3761 mpt2host_config_page_scsi_device_0(&tmp);
3762
 3763 	mpt_lprt(mpt, MPT_PRT_DEBUG,
3764 "mpt_get_spi_settings[%d]: current NP %x Info %x\n", tgt,
3765 tmp.NegotiatedParameters, tmp.Information);
3766 dval |= (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE) ?
3767 DP_WIDE : DP_NARROW;
3768 dval |= (mpt->mpt_disc_enable & (1 << tgt)) ?
3769 DP_DISC_ENABLE : DP_DISC_DISABL;
3770 dval |= (mpt->mpt_tag_enable & (1 << tgt)) ?
3771 DP_TQING_ENABLE : DP_TQING_DISABL;
3772 oval = tmp.NegotiatedParameters;
3773 oval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK;
3774 oval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_OFFSET;
3775 pval = tmp.NegotiatedParameters;
3776 pval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK;
3777 pval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_PERIOD;
3778 mpt->mpt_dev_page0[tgt] = tmp;
3779 } else {
3780 dval = DP_WIDE|DP_DISC_ENABLE|DP_TQING_ENABLE|DP_SYNC;
3781 oval = mpt->mpt_port_page0.Capabilities;
3782 oval = MPI_SCSIPORTPAGE0_CAP_GET_MAX_SYNC_OFFSET(oval);
3783 pval = mpt->mpt_port_page0.Capabilities;
3784 pval = MPI_SCSIPORTPAGE0_CAP_GET_MIN_SYNC_PERIOD(pval);
3785 }
3786
3787 spi->valid = 0;
3788 scsi->valid = 0;
3789 spi->flags = 0;
3790 scsi->flags = 0;
3791 spi->sync_offset = oval;
3792 spi->sync_period = pval;
3793 spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
3794 spi->valid |= CTS_SPI_VALID_SYNC_RATE;
3795 spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
3796 if (dval & DP_WIDE) {
3797 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
3798 } else {
3799 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3800 }
3801 if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
3802 scsi->valid = CTS_SCSI_VALID_TQ;
3803 if (dval & DP_TQING_ENABLE) {
3804 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
3805 }
3806 spi->valid |= CTS_SPI_VALID_DISC;
3807 if (dval & DP_DISC_ENABLE) {
3808 spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
3809 }
3810 }
3811 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3812 "mpt_get_spi_settings[%d]: %s flags 0x%x per 0x%x off=%d\n", tgt,
 3813 	    IS_CURRENT_SETTINGS(cts) ? "ACTIVE" : "NVRAM ", dval, pval, oval);
3814 return (0);
3815}
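
/*
 * Illustrative sketch (not part of the driver; the helper name is
 * hypothetical): how the negotiated sync offset and period factor are
 * unpacked from a SCSI Device Page 0 NegotiatedParameters word,
 * mirroring the masking and shifting done in mpt_get_spi_settings()
 * above.
 */
#if 0
static void
example_unpack_np(uint32_t np, uint32_t *offset, uint32_t *period)
{
	/* sync offset field: mask it out, then shift it down */
	*offset = (np & MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK) >>
	    MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_OFFSET;
	/* sync period factor field: same pattern */
	*period = (np & MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK) >>
	    MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_PERIOD;
}
#endif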
3816
3817static void
3818mpt_setwidth(struct mpt_softc *mpt, int tgt, int onoff)
3819{
3820 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;
3821
3822 ptr = &mpt->mpt_dev_page1[tgt];
3823 if (onoff) {
3824 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
3825 } else {
3826 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;
3827 }
3828}
3829
3830static void
3831mpt_setsync(struct mpt_softc *mpt, int tgt, int period, int offset)
3832{
3833 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;
3834
3835 ptr = &mpt->mpt_dev_page1[tgt];
3836 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
3837 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
3838 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_DT;
3839 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_QAS;
3840 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_IU;
3841 if (period == 0) {
3842 return;
3843 }
3844 ptr->RequestedParameters |=
3845 period << MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD;
3846 ptr->RequestedParameters |=
3847 offset << MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET;
3848 if (period < 0xa) {
3849 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_DT;
3850 }
3851 if (period < 0x9) {
3852 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_QAS;
3853 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_IU;
3854 }
3855}
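
/*
 * Illustrative sketch (not part of the driver; the helper name is
 * hypothetical): the period-factor thresholds applied in mpt_setsync()
 * above.  A sync period factor of 0 means async (no options); a factor
 * below 0xa enables DT clocking; a factor below 0x9 additionally
 * enables QAS and IU transfers.
 */
#if 0
static uint32_t
example_rp_options(int period)
{
	uint32_t rp = 0;

	if (period == 0)	/* async: request no protocol options */
		return (0);
	if (period < 0xa)
		rp |= MPI_SCSIDEVPAGE1_RP_DT;
	if (period < 0x9)
		rp |= MPI_SCSIDEVPAGE1_RP_QAS | MPI_SCSIDEVPAGE1_RP_IU;
	return (rp);
}
#endif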
3856
3857static int
3858mpt_update_spi_config(struct mpt_softc *mpt, int tgt)
3859{
3860 CONFIG_PAGE_SCSI_DEVICE_1 tmp;
3861 int rv;
3862
3863 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3864 "mpt_update_spi_config[%d].page1: Requested Params 0x%08x\n",
 3865 	    tgt, mpt->mpt_dev_page1[tgt].RequestedParameters);
3866 tmp = mpt->mpt_dev_page1[tgt];
3867 host2mpt_config_page_scsi_device_1(&tmp);
3868 rv = mpt_write_cur_cfg_page(mpt, tgt,
3869 &tmp.Header, sizeof(tmp), FALSE, 5000);
3870 if (rv) {
3871 mpt_prt(mpt, "mpt_update_spi_config: write cur page failed\n");
3872 return (-1);
3873 }
3874 return (0);
3875}
3876
3877/****************************** Timeout Recovery ******************************/
3878static int
3879mpt_spawn_recovery_thread(struct mpt_softc *mpt)
3880{
3881 int error;
3882
3883 error = kthread_create(mpt_recovery_thread, mpt,
3884 &mpt->recovery_thread, "mpt_recovery%d", mpt->unit);
3885 return (error);
3886}
3887
3888static void
3889mpt_terminate_recovery_thread(struct mpt_softc *mpt)
3890{
 3891 
3892 if (mpt->recovery_thread == NULL) {
3893 return;
3894 }
3895 mpt->shutdwn_recovery = 1;
3896 wakeup(mpt);
3897 /*
3898 * Sleep on a slightly different location
3899 * for this interlock just for added safety.
3900 */
 3901 	mpt_sleep(mpt, &mpt->recovery_thread, 0, "thtrm", 0);
3902}
3903
3904static void
3905mpt_recovery_thread(void *arg)
3906{
3907 struct mpt_softc *mpt;
3908
3909 mpt = (struct mpt_softc *)arg;
3910 MPT_LOCK(mpt);
3911 for (;;) {
3912 if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
3913 if (mpt->shutdwn_recovery == 0) {
 3914 				mpt_sleep(mpt, mpt, 0, "idle", 0);
3915 }
3916 }
3917 if (mpt->shutdwn_recovery != 0) {
3918 break;
3919 }
3920 mpt_recover_commands(mpt);
3921 }
3922 mpt->recovery_thread = NULL;
3923 wakeup(&mpt->recovery_thread);
3924 MPT_UNLOCK(mpt);
 3925 	kthread_exit();
3926}
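
/*
 * Illustrative sketch (not part of the driver; struct and helper names
 * are hypothetical): the termination interlock used above.  The
 * terminator raises a flag, wakes the thread on its normal wait channel,
 * then sleeps on a second channel (&sc->worker) that the exiting thread
 * signals with its final wakeup().
 */
#if 0
struct example_softc {
	struct thread	*worker;	/* NULL once the thread has exited */
	int		 shutdown;	/* set to ask the thread to exit */
};

static void
example_terminate(struct example_softc *sc)
{
	if (sc->worker == NULL)
		return;
	sc->shutdown = 1;
	wakeup(sc);				/* kick the worker loop */
	tsleep(&sc->worker, 0, "exwait", 0);	/* wait for its exit wakeup */
}
#endif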
3927
3928static int
3929mpt_scsi_send_tmf(struct mpt_softc *mpt, u_int type, u_int flags,
3930 u_int channel, u_int target, u_int lun, u_int abort_ctx, int sleep_ok)
3931{
3932 MSG_SCSI_TASK_MGMT *tmf_req;
3933 int error;
3934
3935 /*
3936 * Wait for any current TMF request to complete.
3937 * We're only allowed to issue one TMF at a time.
3938 */
3939 error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_FREE, REQ_STATE_FREE,
3940 sleep_ok, MPT_TMF_MAX_TIMEOUT);
3941 if (error != 0) {
3942 mpt_reset(mpt, TRUE);
3943 return (ETIMEDOUT);
3944 }
3945
3946 mpt_assign_serno(mpt, mpt->tmf_req);
3947 mpt->tmf_req->state = REQ_STATE_ALLOCATED|REQ_STATE_QUEUED;
3948
3949 tmf_req = (MSG_SCSI_TASK_MGMT *)mpt->tmf_req->req_vbuf;
3950 memset(tmf_req, 0, sizeof(*tmf_req));
3951 tmf_req->TargetID = target;
3952 tmf_req->Bus = channel;
3953 tmf_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
3954 tmf_req->TaskType = type;
3955 tmf_req->MsgFlags = flags;
3956 tmf_req->MsgContext =
3957 htole32(mpt->tmf_req->index | scsi_tmf_handler_id);
3958 if (lun > MPT_MAX_LUNS) {
3959 tmf_req->LUN[0] = 0x40 | ((lun >> 8) & 0x3f);
3960 tmf_req->LUN[1] = lun & 0xff;
3961 } else {
3962 tmf_req->LUN[1] = lun;
3963 }
3964 tmf_req->TaskMsgContext = abort_ctx;
3965
3966 mpt_lprt(mpt, MPT_PRT_DEBUG,
6d259fc1
SW
3967 "Issuing TMF %p:%u with MsgContext of 0x%x\n", mpt->tmf_req,
3968 mpt->tmf_req->serno, tmf_req->MsgContext);
2545bca0
MD
3969 if (mpt->verbose > MPT_PRT_DEBUG) {
3970 mpt_print_request(tmf_req);
3971 }
3972
3973 KASSERT(mpt_req_on_pending_list(mpt, mpt->tmf_req) == 0,
3974 ("mpt_scsi_send_tmf: tmf_req already on pending list"));
3975 TAILQ_INSERT_HEAD(&mpt->request_pending_list, mpt->tmf_req, links);
3976 error = mpt_send_handshake_cmd(mpt, sizeof(*tmf_req), tmf_req);
3977 if (error != MPT_OK) {
3978 TAILQ_REMOVE(&mpt->request_pending_list, mpt->tmf_req, links);
3979 mpt->tmf_req->state = REQ_STATE_FREE;
3980 mpt_reset(mpt, TRUE);
3981 }
3982 return (error);
3983}
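
/*
 * Illustrative sketch (not part of the driver; the helper name is
 * hypothetical): the two-byte LUN encoding used when building the TMF
 * above.  LUNs above the driver's simple-addressing limit are written
 * in SAM flat addressing (01b in the top two bits of byte 0); otherwise
 * peripheral addressing is used.
 */
#if 0
static void
example_encode_lun(uint8_t lunbuf[2], u_int lun)
{
	if (lun > MPT_MAX_LUNS) {
		lunbuf[0] = 0x40 | ((lun >> 8) & 0x3f);	/* flat addressing */
		lunbuf[1] = lun & 0xff;
	} else {
		lunbuf[0] = 0;		/* peripheral addressing */
		lunbuf[1] = lun;
	}
}
#endif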
3984
3985/*
 3986 	 * When a command times out, it is placed on the request_timeout_list
3987 * and we wake our recovery thread. The MPT-Fusion architecture supports
3988 * only a single TMF operation at a time, so we serially abort/bdr, etc,
3989 * the timedout transactions. The next TMF is issued either by the
3990 * completion handler of the current TMF waking our recovery thread,
3991 * or the TMF timeout handler causing a hard reset sequence.
3992 */
3993static void
3994mpt_recover_commands(struct mpt_softc *mpt)
3995{
3996 request_t *req;
3997 union ccb *ccb;
3998 int error;
3999
4000 if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
4001 /*
4002 * No work to do- leave.
4003 */
4004 mpt_prt(mpt, "mpt_recover_commands: no requests.\n");
4005 return;
4006 }
4007
4008 /*
4009 * Flush any commands whose completion coincides with their timeout.
4010 */
4011 mpt_intr(mpt);
4012
4013 if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
4014 /*
4015 * The timedout commands have already
4016 * completed. This typically means
4017 * that either the timeout value was on
4018 * the hairy edge of what the device
4019 * requires or - more likely - interrupts
4020 * are not happening.
4021 */
4022 mpt_prt(mpt, "Timedout requests already complete. "
4023 "Interrupts may not be functioning.\n");
4024 mpt_enable_ints(mpt);
4025 return;
4026 }
4027
4028 /*
4029 * We have no visibility into the current state of the
4030 * controller, so attempt to abort the commands in the
4031 * order they timed-out. For initiator commands, we
4032 * depend on the reply handler pulling requests off
4033 * the timeout list.
4034 */
4035 while ((req = TAILQ_FIRST(&mpt->request_timeout_list)) != NULL) {
4036 uint16_t status;
4037 uint8_t response;
4038 MSG_REQUEST_HEADER *hdrp = req->req_vbuf;
4039
4040 mpt_prt(mpt, "attempting to abort req %p:%u function %x\n",
4041 req, req->serno, hdrp->Function);
4042 ccb = req->ccb;
4043 if (ccb == NULL) {
4044 mpt_prt(mpt, "null ccb in timed out request. "
4045 "Resetting Controller.\n");
4046 mpt_reset(mpt, TRUE);
4047 continue;
4048 }
4049 mpt_set_ccb_status(ccb, CAM_CMD_TIMEOUT);
4050
4051 /*
4052 * Check to see if this is not an initiator command and
4053 * deal with it differently if it is.
4054 */
4055 switch (hdrp->Function) {
4056 case MPI_FUNCTION_SCSI_IO_REQUEST:
4057 case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
4058 break;
4059 default:
4060 /*
4061 * XXX: FIX ME: need to abort target assists...
4062 */
4063 mpt_prt(mpt, "just putting it back on the pend q\n");
4064 TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
4065 TAILQ_INSERT_HEAD(&mpt->request_pending_list, req,
4066 links);
4067 continue;
4068 }
4069
4070 error = mpt_scsi_send_tmf(mpt,
4071 MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
4072 0, 0, ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
4073 htole32(req->index | scsi_io_handler_id), TRUE);
4074
4075 if (error != 0) {
4076 /*
4077 * mpt_scsi_send_tmf hard resets on failure, so no
4078 * need to do so here. Our queue should be emptied
4079 * by the hard reset.
4080 */
4081 continue;
4082 }
4083
4084 error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
4085 REQ_STATE_DONE, TRUE, 500);
4086
4087 status = le16toh(mpt->tmf_req->IOCStatus);
4088 response = mpt->tmf_req->ResponseCode;
4089 mpt->tmf_req->state = REQ_STATE_FREE;
4090
4091 if (error != 0) {
4092 /*
 4093 		 * If we've errored out, reset the controller.
4094 */
4095 mpt_prt(mpt, "mpt_recover_commands: abort timed-out. "
4096 "Resetting controller\n");
4097 mpt_reset(mpt, TRUE);
4098 continue;
4099 }
4100
4101 if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
4102 mpt_prt(mpt, "mpt_recover_commands: IOC Status 0x%x. "
4103 "Resetting controller.\n", status);
4104 mpt_reset(mpt, TRUE);
4105 continue;
4106 }
4107
4108 if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
4109 response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
4110 mpt_prt(mpt, "mpt_recover_commands: TMF Response 0x%x. "
4111 "Resetting controller.\n", response);
4112 mpt_reset(mpt, TRUE);
4113 continue;
4114 }
4115 mpt_prt(mpt, "abort of req %p:%u completed\n", req, req->serno);
4116 }
4117}
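
/*
 * Illustrative sketch (not part of the driver; the helper name is
 * hypothetical): how a TMF outcome is classified above.  Anything other
 * than an IOC SUCCESS status plus a "succeeded" or "complete" response
 * code is treated as fatal and answered with a controller reset.
 */
#if 0
static int
example_tmf_ok(uint16_t iocstatus, uint8_t response)
{
	if ((iocstatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS)
		return (0);
	return (response == MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED ||
	    response == MPI_SCSITASKMGMT_RSP_TM_COMPLETE);
}
#endif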
4118
4119/************************ Target Mode Support ****************************/
4120static void
4121mpt_fc_post_els(struct mpt_softc *mpt, request_t *req, int ioindex)
4122{
4123 MSG_LINK_SERVICE_BUFFER_POST_REQUEST *fc;
4124 PTR_SGE_TRANSACTION32 tep;
4125 PTR_SGE_SIMPLE32 se;
4126 bus_addr_t paddr;
4127 uint32_t fl;
4128
4129 paddr = req->req_pbuf;
4130 paddr += MPT_RQSL(mpt);
4131
4132 fc = req->req_vbuf;
4133 memset(fc, 0, MPT_REQUEST_AREA);
4134 fc->BufferCount = 1;
4135 fc->Function = MPI_FUNCTION_FC_LINK_SRVC_BUF_POST;
4136 fc->MsgContext = htole32(req->index | fc_els_handler_id);
4137
4138 /*
4139 * Okay, set up ELS buffer pointers. ELS buffer pointers
4140 * consist of a TE SGL element (with details length of zero)
 4141 	 * followed by a SIMPLE SGL element which holds the address
4142 * of the buffer.
4143 */
4144
4145 tep = (PTR_SGE_TRANSACTION32) &fc->SGL;
4146
4147 tep->ContextSize = 4;
4148 tep->Flags = 0;
4149 tep->TransactionContext[0] = htole32(ioindex);
4150
4151 se = (PTR_SGE_SIMPLE32) &tep->TransactionDetails[0];
4152 fl =
4153 MPI_SGE_FLAGS_HOST_TO_IOC |
4154 MPI_SGE_FLAGS_SIMPLE_ELEMENT |
4155 MPI_SGE_FLAGS_LAST_ELEMENT |
4156 MPI_SGE_FLAGS_END_OF_LIST |
4157 MPI_SGE_FLAGS_END_OF_BUFFER;
4158 fl <<= MPI_SGE_FLAGS_SHIFT;
4159 fl |= (MPT_NRFM(mpt) - MPT_RQSL(mpt));
4160 se->FlagsLength = htole32(fl);
4161 se->Address = htole32((uint32_t) paddr);
4162 mpt_lprt(mpt, MPT_PRT_DEBUG,
4163 "add ELS index %d ioindex %d for %p:%u\n",
4164 req->index, ioindex, req, req->serno);
4165 KASSERT(((req->state & REQ_STATE_LOCKED) != 0),
4166 ("mpt_fc_post_els: request not locked"));
4167 mpt_send_cmd(mpt, req);
4168}
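
/*
 * Illustrative sketch (not part of the driver; the helper name is
 * hypothetical): packing a 32-bit simple SGE FlagsLength word as done
 * above.  The flag bits are shifted into the top byte by
 * MPI_SGE_FLAGS_SHIFT and OR'd with the buffer length, which occupies
 * the low 24 bits.
 */
#if 0
static uint32_t
example_sge_flagslength(uint32_t flags, uint32_t len)
{
	/* flags occupy bits 31:24, length bits 23:0 */
	return ((flags << MPI_SGE_FLAGS_SHIFT) | (len & 0x00ffffff));
}
#endif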
4169
4170static void
4171mpt_post_target_command(struct mpt_softc *mpt, request_t *req, int ioindex)
4172{
4173 PTR_MSG_TARGET_CMD_BUFFER_POST_REQUEST fc;
4174 PTR_CMD_BUFFER_DESCRIPTOR cb;
4175 bus_addr_t paddr;
4176
4177 paddr = req->req_pbuf;
4178 paddr += MPT_RQSL(mpt);
4179 memset(req->req_vbuf, 0, MPT_REQUEST_AREA);
4180 MPT_TGT_STATE(mpt, req)->state = TGT_STATE_LOADING;
4181
4182 fc = req->req_vbuf;
4183 fc->BufferCount = 1;
4184 fc->Function = MPI_FUNCTION_TARGET_CMD_BUFFER_POST;
4185 fc->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4186
4187 cb = &fc->Buffer[0];
4188 cb->IoIndex = htole16(ioindex);
4189 cb->u.PhysicalAddress32 = htole32((U32) paddr);
4190
4191 mpt_check_doorbell(mpt);
4192 mpt_send_cmd(mpt, req);
4193}
4194
4195static int
4196mpt_add_els_buffers(struct mpt_softc *mpt)
4197{
4198 int i;
4199
4200 if (mpt->is_fc == 0) {
4201 return (TRUE);
4202 }
4203
4204 if (mpt->els_cmds_allocated) {
4205 return (TRUE);
4206 }
4207
4208 mpt->els_cmd_ptrs = kmalloc(MPT_MAX_ELS * sizeof (request_t *),
4209 M_DEVBUF, M_NOWAIT | M_ZERO);
4210
4211 if (mpt->els_cmd_ptrs == NULL) {
4212 return (FALSE);
4213 }
4214
4215 /*
4216 * Feed the chip some ELS buffer resources
4217 */
4218 for (i = 0; i < MPT_MAX_ELS; i++) {
4219 request_t *req = mpt_get_request(mpt, FALSE);
4220 if (req == NULL) {
4221 break;
4222 }
4223 req->state |= REQ_STATE_LOCKED;
4224 mpt->els_cmd_ptrs[i] = req;
4225 mpt_fc_post_els(mpt, req, i);
4226 }
4227
4228 if (i == 0) {
4229 mpt_prt(mpt, "unable to add ELS buffer resources\n");
4230 kfree(mpt->els_cmd_ptrs, M_DEVBUF);
4231 mpt->els_cmd_ptrs = NULL;
4232 return (FALSE);
4233 }
4234 if (i != MPT_MAX_ELS) {
4235 mpt_lprt(mpt, MPT_PRT_INFO,
4236 "only added %d of %d ELS buffers\n", i, MPT_MAX_ELS);
4237 }
4238 mpt->els_cmds_allocated = i;
4239 return(TRUE);
4240}
4241
4242static int
4243mpt_add_target_commands(struct mpt_softc *mpt)
4244{
4245 int i, max;
4246
4247 if (mpt->tgt_cmd_ptrs) {
4248 return (TRUE);
4249 }
4250
4251 max = MPT_MAX_REQUESTS(mpt) >> 1;
4252 if (max > mpt->mpt_max_tgtcmds) {
4253 max = mpt->mpt_max_tgtcmds;
4254 }
4255 mpt->tgt_cmd_ptrs =
4256 kmalloc(max * sizeof (request_t *), M_DEVBUF, M_NOWAIT | M_ZERO);
4257 if (mpt->tgt_cmd_ptrs == NULL) {
4258 mpt_prt(mpt,
4259 "mpt_add_target_commands: could not allocate cmd ptrs\n");
4260 return (FALSE);
4261 }
4262
4263 for (i = 0; i < max; i++) {
4264 request_t *req;
4265
4266 req = mpt_get_request(mpt, FALSE);
4267 if (req == NULL) {
4268 break;
4269 }
4270 req->state |= REQ_STATE_LOCKED;
4271 mpt->tgt_cmd_ptrs[i] = req;
4272 mpt_post_target_command(mpt, req, i);
4273 }
4274
4275
4276 if (i == 0) {
4277 mpt_lprt(mpt, MPT_PRT_ERROR, "could not add any target bufs\n");
4278 kfree(mpt->tgt_cmd_ptrs, M_DEVBUF);
4279 mpt->tgt_cmd_ptrs = NULL;
4280 return (FALSE);
4281 }
4282
4283 mpt->tgt_cmds_allocated = i;
4284
4285 if (i < max) {
4286 mpt_lprt(mpt, MPT_PRT_INFO,
4287 "added %d of %d target bufs\n", i, max);
4288 }
4289 return (i);
4290}
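
/*
 * Illustrative sketch (not part of the driver; the helper name is
 * hypothetical): the sizing rule used above.  At most half of the
 * adapter's request pool is handed to the chip as target command
 * buffers, further capped by mpt_max_tgtcmds, leaving the remainder
 * for initiator I/O.
 */
#if 0
static int
example_tgtcmd_budget(struct mpt_softc *mpt)
{
	int max = MPT_MAX_REQUESTS(mpt) >> 1;	/* keep half for initiator */

	if (max > mpt->mpt_max_tgtcmds)
		max = mpt->mpt_max_tgtcmds;
	return (max);
}
#endif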
4291
4292static int
4293mpt_enable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
4294{
 4295 
4296 if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
4297 mpt->twildcard = 1;
4298 } else if (lun >= MPT_MAX_LUNS) {
4299 return (EINVAL);
4300 } else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
4301 return (EINVAL);
4302 }
4303 if (mpt->tenabled == 0) {
4304 if (mpt->is_fc) {
4305 (void) mpt_fc_reset_link(mpt, 0);
4306 }
4307 mpt->tenabled = 1;
4308 }
4309 if (lun == CAM_LUN_WILDCARD) {
4310 mpt->trt_wildcard.enabled = 1;
4311 } else {
4312 mpt->trt[lun].enabled = 1;
4313 }
4314 return (0);
4315}
4316
4317static int
4318mpt_disable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
4319{
4320 int i;
 4321 
4322 if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
4323 mpt->twildcard = 0;
4324 } else if (lun >= MPT_MAX_LUNS) {
4325 return (EINVAL);
4326 } else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
4327 return (EINVAL);
4328 }
4329 if (lun == CAM_LUN_WILDCARD) {
4330 mpt->trt_wildcard.enabled = 0;
4331 } else {
4332 mpt->trt[lun].enabled = 0;
4333 }
4334 for (i = 0; i < MPT_MAX_LUNS; i++) {
 4335 		if (mpt->trt[i].enabled) {
4336 break;
4337 }
4338 }
4339 if (i == MPT_MAX_LUNS && mpt->twildcard == 0) {
4340 if (mpt->is_fc) {
4341 (void) mpt_fc_reset_link(mpt, 0);
4342 }
4343 mpt->tenabled = 0;
4344 }
4345 return (0);
4346}
4347
4348/*
4349 * Called with MPT lock held
4350 */
4351static void
4352mpt_target_start_io(struct mpt_softc *mpt, union ccb *ccb)
4353{
4354 struct ccb_scsiio *csio = &ccb->csio;
4355 request_t *cmd_req = MPT_TAG_2_REQ(mpt, csio->tag_id);
4356 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
4357
4358 switch (tgt->state) {
4359 case TGT_STATE_IN_CAM:
4360 break;
4361 case TGT_STATE_MOVING_DATA:
4362 mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
4363 xpt_freeze_simq(mpt->sim, 1);
4364 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4365 tgt->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
 4366 		xpt_done(ccb);
4367 return;
4368 default:
4369 mpt_prt(mpt, "ccb %p flags 0x%x tag 0x%08x had bad request "
4370 "starting I/O\n", ccb, csio->ccb_h.flags, csio->tag_id);
4371 mpt_tgt_dump_req_state(mpt, cmd_req);
4372 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
 4373 		xpt_done(ccb);
4374 return;
4375 }
4376
4377 if (csio->dxfer_len) {
4378 bus_dmamap_callback_t *cb;
4379 PTR_MSG_TARGET_ASSIST_REQUEST ta;
4380 request_t *req;
4381
4382 KASSERT((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE,
 4383 		    ("dxfer_len %u but direction is NONE", csio->dxfer_len));
4384
4385 if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
4386 if (mpt->outofbeer == 0) {
4387 mpt->outofbeer = 1;
4388 xpt_freeze_simq(mpt->sim, 1);
4389 mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
4390 }
4391 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4392 mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
 4393 			xpt_done(ccb);
4394 return;
4395 }
4396 ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
4397 if (sizeof (bus_addr_t) > 4) {
4398 cb = mpt_execute_req_a64;
4399 } else {
4400 cb = mpt_execute_req;
4401 }
4402
4403 req->ccb = ccb;
4404 ccb->ccb_h.ccb_req_ptr = req;
4405
4406 /*
4407 * Record the currently active ccb and the
4408 * request for it in our target state area.
4409 */
4410 tgt->ccb = ccb;
4411 tgt->req = req;
4412
4413 memset(req->req_vbuf, 0, MPT_RQSL(mpt));
4414 ta = req->req_vbuf;
4415
4416 if (mpt->is_sas) {
4417 PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
4418 cmd_req->req_vbuf;
4419 ta->QueueTag = ssp->InitiatorTag;
4420 } else if (mpt->is_spi) {
4421 PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
4422 cmd_req->req_vbuf;
4423 ta->QueueTag = sp->Tag;
4424 }
4425 ta->Function = MPI_FUNCTION_TARGET_ASSIST;
4426 ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4427 ta->ReplyWord = htole32(tgt->reply_desc);
4428 if (csio->ccb_h.target_lun > MPT_MAX_LUNS) {
4429 ta->LUN[0] =
4430 0x40 | ((csio->ccb_h.target_lun >> 8) & 0x3f);
4431 ta->LUN[1] = csio->ccb_h.target_lun & 0xff;
4432 } else {
4433 ta->LUN[1] = csio->ccb_h.target_lun;
4434 }
4435
4436 ta->RelativeOffset = tgt->bytes_xfered;
4437 ta->DataLength = ccb->csio.dxfer_len;
4438 if (ta->DataLength > tgt->resid) {
4439 ta->DataLength = tgt->resid;
4440 }
4441
4442 /*
4443 * XXX Should be done after data transfer completes?
4444 */
4445 tgt->resid -= csio->dxfer_len;
4446 tgt->bytes_xfered += csio->dxfer_len;
4447
4448 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
4449 ta->TargetAssistFlags |=
4450 TARGET_ASSIST_FLAGS_DATA_DIRECTION;
4451 }
4452
4453#ifdef WE_TRUST_AUTO_GOOD_STATUS
4454 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
4455 csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
4456 ta->TargetAssistFlags |=
4457 TARGET_ASSIST_FLAGS_AUTO_STATUS;
4458 }
4459#endif
4460 tgt->state = TGT_STATE_SETTING_UP_FOR_DATA;
4461
4462 mpt_lprt(mpt, MPT_PRT_DEBUG,
4463 "DATA_CCB %p tag %x %u bytes %u resid flg %x req %p:%u "
4464 "nxtstate=%d\n", csio, csio->tag_id, csio->dxfer_len,
4465 tgt->resid, ccb->ccb_h.flags, req, req->serno, tgt->state);
4466
4467 if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
4468 if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
4469 int error;
 4470 				crit_enter();
4471 error = bus_dmamap_load(mpt->buffer_dmat,
4472 req->dmap, csio->data_ptr, csio->dxfer_len,
4473 cb, req, 0);
 4474 				crit_exit();
4475 if (error == EINPROGRESS) {
4476 xpt_freeze_simq(mpt->sim, 1);
4477 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
4478 }
4479 } else {
4480 /*
4481 * We have been given a pointer to single
4482 * physical buffer.
4483 */
4484 struct bus_dma_segment seg;
4485 seg.ds_addr = (bus_addr_t)
4486 (vm_offset_t)csio->data_ptr;
4487 seg.ds_len = csio->dxfer_len;
4488 (*cb)(req, &seg, 1, 0);
4489 }
4490 } else {
4491 /*
4492 * We have been given a list of addresses.
 4493 			 * This case could easily be supported, but such lists
 4494 			 * are not currently generated by the CAM subsystem,
 4495 			 * so there is no point in wasting time on it now.
4496 */
4497 struct bus_dma_segment *sgs;
4498 if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
4499 (*cb)(req, NULL, 0, EFAULT);
4500 } else {
4501 /* Just use the segments provided */
4502 sgs = (struct bus_dma_segment *)csio->data_ptr;
4503 (*cb)(req, sgs, csio->sglist_cnt, 0);
4504 }
4505 }
4506 } else {
4507 uint8_t *sp = NULL, sense[MPT_SENSE_SIZE];
4508
4509 /*
4510 * XXX: I don't know why this seems to happen, but
4511 * XXX: completing the CCB seems to make things happy.
4512 * XXX: This seems to happen if the initiator requests
4513 * XXX: enough data that we have to do multiple CTIOs.
4514 */
4515 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
4516 mpt_lprt(mpt, MPT_PRT_DEBUG,
4517 "Meaningless STATUS CCB (%p): flags %x status %x "
4518 "resid %d bytes_xfered %u\n", ccb, ccb->ccb_h.flags,
4519 ccb->ccb_h.status, tgt->resid, tgt->bytes_xfered);
4520 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
4521 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
 4522 			xpt_done(ccb);
4523 return;
4524 }
4525 if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
4526 sp = sense;
4527 memcpy(sp, &csio->sense_data,
4528 min(csio->sense_len, MPT_SENSE_SIZE));
4529 }
4530 mpt_scsi_tgt_status(mpt, ccb, cmd_req, csio->scsi_status, sp);
4531 }
4532}
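
/*
 * Illustrative sketch (not part of the driver; the helper name is
 * hypothetical): the deferred-callback convention around
 * bus_dmamap_load() used above.  EINPROGRESS means the callback will
 * run later, so the SIM queue is frozen now and released when the
 * callback finally completes the CCB.
 */
#if 0
static void
example_load(struct mpt_softc *mpt, request_t *req, struct ccb_scsiio *csio,
    bus_dmamap_callback_t *cb)
{
	int error;

	error = bus_dmamap_load(mpt->buffer_dmat, req->dmap, csio->data_ptr,
	    csio->dxfer_len, cb, req, 0);
	if (error == EINPROGRESS) {
		/* callback deferred: hold the queue until it runs */
		xpt_freeze_simq(mpt->sim, 1);
		csio->ccb_h.status |= CAM_RELEASE_SIMQ;
	}
}
#endif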
4533
4534static void
4535mpt_scsi_tgt_local(struct mpt_softc *mpt, request_t *cmd_req,
4536 uint32_t lun, int send, uint8_t *data, size_t length)
4537{
4538 mpt_tgt_state_t *tgt;
4539 PTR_MSG_TARGET_ASSIST_REQUEST ta;
4540 SGE_SIMPLE32 *se;
4541 uint32_t flags;
4542 uint8_t *dptr;
4543 bus_addr_t pptr;
4544 request_t *req;
4545
4546 /*
4547 * We enter with resid set to the data load for the command.
4548 */
4549 tgt = MPT_TGT_STATE(mpt, cmd_req);
4550 if (length == 0 || tgt->resid == 0) {
4551 tgt->resid = 0;
4552 mpt_scsi_tgt_status(mpt, NULL, cmd_req, 0, NULL);
4553 return;
4554 }
4555
4556 if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
4557 mpt_prt(mpt, "out of resources- dropping local response\n");
4558 return;
4559 }
4560 tgt->is_local = 1;
4561
4562
4563 memset(req->req_vbuf, 0, MPT_RQSL(mpt));
4564 ta = req->req_vbuf;
4565
4566 if (mpt->is_sas) {
4567 PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = cmd_req->req_vbuf;
4568 ta->QueueTag = ssp->InitiatorTag;
4569 } else if (mpt->is_spi) {
4570 PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = cmd_req->req_vbuf;
4571 ta->QueueTag = sp->Tag;
4572 }
4573 ta->Function = MPI_FUNCTION_TARGET_ASSIST;
4574 ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4575 ta->ReplyWord = htole32(tgt->reply_desc);
4576 if (lun > MPT_MAX_LUNS) {
4577 ta->LUN[0] = 0x40 | ((lun >> 8) & 0x3f);
4578 ta->LUN[1] = lun & 0xff;
4579 } else {
4580 ta->LUN[1] = lun;
4581 }
4582 ta->RelativeOffset = 0;
4583 ta->DataLength = length;
4584
4585 dptr = req->req_vbuf;
4586 dptr += MPT_RQSL(mpt);
4587 pptr = req->req_pbuf;
4588 pptr += MPT_RQSL(mpt);
4589 memcpy(dptr, data, min(length, MPT_RQSL(mpt)));
4590
4591 se = (SGE_SIMPLE32 *) &ta->SGL[0];
4592 memset(se, 0,sizeof (*se));
4593
4594 flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
4595 if (send) {
4596 ta->TargetAssistFlags |= TARGET_ASSIST_FLAGS_DATA_DIRECTION;
4597 flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
4598 }
4599 se->Address = pptr;
4600 MPI_pSGE_SET_LENGTH(se, length);
4601 flags |= MPI_SGE_FLAGS_LAST_ELEMENT;
4602 flags |= MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER;
4603 MPI_pSGE_SET_FLAGS(se, flags);
4604
4605 tgt->ccb = NULL;
4606 tgt->req = req;
4607 tgt->resid -= length;
4608 tgt->bytes_xfered = length;
4609#ifdef WE_TRUST_AUTO_GOOD_STATUS
4610 tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
4611#else
4612 tgt->state = TGT_STATE_MOVING_DATA;
4613#endif
4614 mpt_send_cmd(mpt, req);
4615}
4616
4617/*
4618 * Abort queued up CCBs
4619 */
4620static cam_status
4621mpt_abort_target_ccb(struct mpt_softc *mpt, union ccb *ccb)
4622{
4623 struct mpt_hdr_stailq *lp;
4624 struct ccb_hdr *srch;
4625 int found = 0;
4626 union ccb *accb = ccb->cab.abort_ccb;
4627 tgt_resource_t *trtp;
4628
4629 mpt_lprt(mpt, MPT_PRT_DEBUG, "aborting ccb %p\n", accb);
4630
4631 if (ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) {
4632 trtp = &mpt->trt_wildcard;
4633 } else {
4634 trtp = &mpt->trt[ccb->ccb_h.target_lun];
4635 }
4636
4637 if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
4638 lp = &trtp->atios;
4639 } else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
4640 lp = &trtp->inots;
4641 } else {
4642 return (CAM_REQ_INVALID);
4643 }
4644
4645 STAILQ_FOREACH(srch, lp, sim_links.stqe) {
4646 if (srch == &accb->ccb_h) {
4647 found = 1;
4648 STAILQ_REMOVE(lp, srch, ccb_hdr, sim_links.stqe);
4649 break;
4650 }
4651 }
4652 if (found) {
4653 accb->ccb_h.status = CAM_REQ_ABORTED;
4654 xpt_done(accb);
4655 return (CAM_REQ_CMP);
4656 }
4657 mpt_prt(mpt, "mpt_abort_tgt_ccb: CCB %p not found\n", ccb);
4658 return (CAM_PATH_INVALID);
4659}
4660
4661/*
4662 * Ask the MPT to abort the current target command
4663 */
4664static int
4665mpt_abort_target_cmd(struct mpt_softc *mpt, request_t *cmd_req)
4666{
4667 int error;
4668 request_t *req;
4669 PTR_MSG_TARGET_MODE_ABORT abtp;
4670
4671 req = mpt_get_request(mpt, FALSE);
4672 if (req == NULL) {
4673 return (-1);
4674 }
4675 abtp = req->req_vbuf;
4676 memset(abtp, 0, sizeof (*abtp));
4677
4678 abtp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4679 abtp->AbortType = TARGET_MODE_ABORT_TYPE_EXACT_IO;
4680 abtp->Function = MPI_FUNCTION_TARGET_MODE_ABORT;
4681 abtp->ReplyWord = htole32(MPT_TGT_STATE(mpt, cmd_req)->reply_desc);
4682 error = 0;
4683 if (mpt->is_fc || mpt->is_sas) {
4684 mpt_send_cmd(mpt, req);
4685 } else {
4686 error = mpt_send_handshake_cmd(mpt, sizeof(*req), req);
4687 }
4688 return (error);
4689}
4690
4691/*
4692 * WE_TRUST_AUTO_GOOD_STATUS- I've found that setting
4693 * TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS leads the
4694 * FC929 to set bogus FC_RSP fields (nonzero residuals
4695 * but w/o RESID fields set). This causes QLogic initiators
4696 * to think maybe that a frame was lost.
4697 *
4698 * WE_CAN_USE_AUTO_REPOST- we can't use AUTO_REPOST because
4699 * we use allocated requests to do TARGET_ASSIST and we
4700 * need to know when to release them.
4701 */
4702
4703static void
4704mpt_scsi_tgt_status(struct mpt_softc *mpt, union ccb *ccb, request_t *cmd_req,
4705 uint8_t status, uint8_t const *sense_data)
4706{
4707 uint8_t *cmd_vbuf;
4708 mpt_tgt_state_t *tgt;
4709 PTR_MSG_TARGET_STATUS_SEND_REQUEST tp;
4710 request_t *req;
4711 bus_addr_t paddr;
4712 int resplen = 0;
4713 uint32_t fl;
4714
4715 cmd_vbuf = cmd_req->req_vbuf;
4716 cmd_vbuf += MPT_RQSL(mpt);
4717 tgt = MPT_TGT_STATE(mpt, cmd_req);
4718
4719 if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
4720 if (mpt->outofbeer == 0) {
4721 mpt->outofbeer = 1;
4722 xpt_freeze_simq(mpt->sim, 1);
4723 mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
4724 }
4725 if (ccb) {
4726 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4727 mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
 4728 			xpt_done(ccb);
4729 } else {
4730 mpt_prt(mpt,
4731 "could not allocate status request- dropping\n");
4732 }
4733 return;
4734 }
4735 req->ccb = ccb;
4736 if (ccb) {
4737 ccb->ccb_h.ccb_mpt_ptr = mpt;
4738 ccb->ccb_h.ccb_req_ptr = req;
4739 }
4740
4741 /*
4742 * Record the currently active ccb, if any, and the
4743 * request for it in our target state area.
4744 */
4745 tgt->ccb = ccb;
4746 tgt->req = req;
4747 tgt->state = TGT_STATE_SENDING_STATUS;
4748
4749 tp = req->req_vbuf;
4750 paddr = req->req_pbuf;
4751 paddr += MPT_RQSL(mpt);
4752
4753 memset(tp, 0, sizeof (*tp));
4754 tp->Function = MPI_FUNCTION_TARGET_STATUS_SEND;
4755 if (mpt->is_fc) {
4756 PTR_MPI_TARGET_FCP_CMD_BUFFER fc =
4757 (PTR_MPI_TARGET_FCP_CMD_BUFFER) cmd_vbuf;
4758 uint8_t *sts_vbuf;
4759 uint32_t *rsp;
4760
4761 sts_vbuf = req->req_vbuf;
4762 sts_vbuf += MPT_RQSL(mpt);
4763 rsp = (uint32_t *) sts_vbuf;
4764 memcpy(tp->LUN, fc->FcpLun, sizeof (tp->LUN));
4765
4766 /*
4767 * The MPI_TARGET_FCP_RSP_BUFFER define is unfortunate.
4768 * It has to be big-endian in memory and is organized
4769 * in 32 bit words, which are much easier to deal with
4770 * as words which are swizzled as needed.
4771 *
4772 * All we're filling here is the FC_RSP payload.
4773 * We may just have the chip synthesize it if
4774 * we have no residual and an OK status.
4775 *
4776 */
4777 memset(rsp, 0, sizeof (MPI_TARGET_FCP_RSP_BUFFER));
4778
4779 rsp[2] = status;
4780 if (tgt->resid) {
4781 rsp[2] |= 0x800; /* XXXX NEED MNEMONIC!!!! */
4782 rsp[3] = htobe32(tgt->resid);
4783#ifdef WE_TRUST_AUTO_GOOD_STATUS
4784 resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
4785#endif
4786 }
4787 if (status == SCSI_STATUS_CHECK_COND) {
4788 int i;
4789
4790 rsp[2] |= 0x200; /* XXXX NEED MNEMONIC!!!! */
4791 rsp[4] = htobe32(MPT_SENSE_SIZE);
4792 if (sense_data) {
4793 memcpy(&rsp[8], sense_data, MPT_SENSE_SIZE);
4794 } else {
4795 mpt_prt(mpt, "mpt_scsi_tgt_status: CHECK CONDI"
4796 "TION but no sense data?\n");
 4797 				memset(&rsp[8], 0, MPT_SENSE_SIZE);
4798 }
4799 for (i = 8; i < (8 + (MPT_SENSE_SIZE >> 2)); i++) {
4800 rsp[i] = htobe32(rsp[i]);
4801 }
4802#ifdef WE_TRUST_AUTO_GOOD_STATUS
4803 resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
4804#endif
4805 }
4806#ifndef WE_TRUST_AUTO_GOOD_STATUS
4807 resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
4808#endif
4809 rsp[2] = htobe32(rsp[2]);
4810 } else if (mpt->is_sas) {
4811 PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
4812 (PTR_MPI_TARGET_SSP_CMD_BUFFER) cmd_vbuf;
4813 memcpy(tp->LUN, ssp->LogicalUnitNumber, sizeof (tp->LUN));
4814 } else {
4815 PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
4816 (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) cmd_vbuf;
4817 tp->StatusCode = status;
4818 tp->QueueTag = htole16(sp->Tag);
4819 memcpy(tp->LUN, sp->LogicalUnitNumber, sizeof (tp->LUN));
4820 }
4821
4822 tp->ReplyWord = htole32(tgt->reply_desc);
4823 tp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4824
4825#ifdef WE_CAN_USE_AUTO_REPOST
4826 tp->MsgFlags = TARGET_STATUS_SEND_FLAGS_REPOST_CMD_BUFFER;
4827#endif
4828 if (status == SCSI_STATUS_OK && resplen == 0) {
4829 tp->MsgFlags |= TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS;
4830 } else {
4831 tp->StatusDataSGE.u.Address32 = htole32((uint32_t) paddr);
4832 fl =
4833 MPI_SGE_FLAGS_HOST_TO_IOC |
4834 MPI_SGE_FLAGS_SIMPLE_ELEMENT |
4835 MPI_SGE_FLAGS_LAST_ELEMENT |
4836 MPI_SGE_FLAGS_END_OF_LIST |
4837 MPI_SGE_FLAGS_END_OF_BUFFER;
4838 fl <<= MPI_SGE_FLAGS_SHIFT;
4839 fl |= resplen;
4840 tp->StatusDataSGE.FlagsLength = htole32(fl);
4841 }
4842
4843 mpt_lprt(mpt, MPT_PRT_DEBUG,
4844 "STATUS_CCB %p (wit%s sense) tag %x req %p:%u resid %u\n",
4845 ccb, sense_data?"h" : "hout", ccb? ccb->csio.tag_id : -1, req,
4846 req->serno, tgt->resid);
4847 if (ccb) {
4848 ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
4849 mpt_req_timeout(req, 60 * hz, mpt_timeout, ccb);
4850 }
4851 mpt_send_cmd(mpt, req);
4852}
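
/*
 * Illustrative sketch (not part of the driver; the helper name is
 * hypothetical): the FCP_RSP buffer above is built as host-order 32-bit
 * words and only byte-swapped to big-endian at the end.  0x800 is the
 * residual-present flag bit noted above as lacking a mnemonic; the
 * sense-present bit (0x200) is handled the same way in the driver.
 */
#if 0
static void
example_fcp_rsp_flags(uint32_t *rsp, uint8_t status, uint32_t resid)
{
	rsp[2] = status;
	if (resid) {
		rsp[2] |= 0x800;	/* residual present (no mnemonic) */
		rsp[3] = htobe32(resid);
	}
	rsp[2] = htobe32(rsp[2]);	/* wire format is big-endian */
}
#endif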
4853
4854static void
4855mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *mpt, request_t *req, mpt_task_mgmt_t fc,
4856 tgt_resource_t *trtp, int init_id)
4857{
4858 struct ccb_immed_notify *inot;
4859 mpt_tgt_state_t *tgt;
4860
4861 tgt = MPT_TGT_STATE(mpt, req);
4862 inot = (struct ccb_immed_notify *) STAILQ_FIRST(&trtp->inots);
4863 if (inot == NULL) {
4864 mpt_lprt(mpt, MPT_PRT_WARN, "no INOTSs- sending back BSY\n");
4865 mpt_scsi_tgt_status(mpt, NULL, req, SCSI_STATUS_BUSY, NULL);
4866 return;
4867 }
4868 STAILQ_REMOVE_HEAD(&trtp->inots, sim_links.stqe);
4869 mpt_lprt(mpt, MPT_PRT_DEBUG1,
4870 "Get FREE INOT %p lun %d\n", inot, inot->ccb_h.target_lun);
4871
4872 memset(&inot->sense_data, 0, sizeof (inot->sense_data));
4873 inot->sense_len = 0;
4874 memset(inot->message_args, 0, sizeof (inot->message_args));
4875 inot->initiator_id = init_id; /* XXX */
4876
4877 /*
4878 * This is a somewhat grotesque attempt to map from task management
4879 * to old style SCSI messages. God help us all.
4880 */
4881 switch (fc) {
4882 case MPT_ABORT_TASK_SET:
4883 inot->message_args[0] = MSG_ABORT_TAG;
4884 break;
4885 case MPT_CLEAR_TASK_SET:
4886 inot->message_args[0] = MSG_CLEAR_TASK_SET;
4887 break;
4888 case MPT_TARGET_RESET:
4889 inot->message_args[0] = MSG_TARGET_RESET;
4890 break;
4891 case MPT_CLEAR_ACA:
4892 inot->message_args[0] = MSG_CLEAR_ACA;
4893 break;
4894 case MPT_TERMINATE_TASK:
4895 inot->message_args[0] = MSG_ABORT_TAG;
4896 break;
4897 default:
4898 inot->message_args[0] = MSG_NOOP;
4899 break;
4900 }
4901 tgt->ccb = (union ccb *) inot;
4902 inot->ccb_h.status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
 4903 	xpt_done((union ccb *)inot);
4904}
4905
4906static void
4907mpt_scsi_tgt_atio(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc)
4908{
4909 static uint8_t null_iqd[SHORT_INQUIRY_LENGTH] = {
4910 0x7f, 0x00, 0x02, 0x02, 0x20, 0x00, 0x00, 0x32,
4911 'F', 'R', 'E', 'E', 'B', 'S', 'D', ' ',
4912 'L', 'S', 'I', '-', 'L', 'O', 'G', 'I',
4913 'C', ' ', 'N', 'U', 'L', 'D', 'E', 'V',
4914 '0', '0', '0', '1'
4915 };
4916 struct ccb_accept_tio *atiop;
4917 lun_id_t lun;
4918 int tag_action = 0;
4919 mpt_tgt_state_t *tgt;
4920 tgt_resource_t *trtp = NULL;
4921 U8 *lunptr;
4922 U8 *vbuf;
4923 U16 itag;
4924 U16 ioindex;
4925 mpt_task_mgmt_t fct = MPT_NIL_TMT_VALUE;
4926 uint8_t *cdbp;
4927
4928 /*
4929 * Stash info for the current command where we can get at it later.
4930 */
4931 vbuf = req->req_vbuf;
4932 vbuf += MPT_RQSL(mpt);
4933
4934 /*
4935 * Get our state pointer set up.
4936 */
4937 tgt = MPT_TGT_STATE(mpt, req);
4938 if (tgt->state != TGT_STATE_LOADED) {
4939 mpt_tgt_dump_req_state(mpt, req);
4940 panic("bad target state in mpt_scsi_tgt_atio");
4941 }
4942 memset(tgt, 0, sizeof (mpt_tgt_state_t));
4943 tgt->state = TGT_STATE_IN_CAM;
4944 tgt->reply_desc = reply_desc;
4945 ioindex = GET_IO_INDEX(reply_desc);
4946 if (mpt->verbose >= MPT_PRT_DEBUG) {
4947 mpt_dump_data(mpt, "mpt_scsi_tgt_atio response", vbuf,
4948 max(sizeof (MPI_TARGET_FCP_CMD_BUFFER),
4949 max(sizeof (MPI_TARGET_SSP_CMD_BUFFER),
4950 sizeof (MPI_TARGET_SCSI_SPI_CMD_BUFFER))));
4951 }
4952 if (mpt->is_fc) {
4953 PTR_MPI_TARGET_FCP_CMD_BUFFER fc;
4954 fc = (PTR_MPI_TARGET_FCP_CMD_BUFFER) vbuf;
4955 if (fc->FcpCntl[2]) {
4956 /*
4957 * Task Management Request
4958 */
4959 switch (fc->FcpCntl[2]) {
4960 case 0x2:
4961 fct = MPT_ABORT_TASK_SET;
4962 break;
4963 case 0x4:
4964 fct = MPT_CLEAR_TASK_SET;
4965 break;
4966 case 0x20:
4967 fct = MPT_TARGET_RESET;
4968 break;
4969 case 0x40:
4970 fct = MPT_CLEAR_ACA;
4971 break;
4972 case 0x80:
4973 fct = MPT_TERMINATE_TASK;
4974 break;
4975 default:
4976 mpt_prt(mpt, "CORRUPTED TASK MGMT BITS: 0x%x\n",
4977 fc->FcpCntl[2]);
4978 mpt_scsi_tgt_status(mpt, 0, req,
4979 SCSI_STATUS_OK, 0);
4980 return;
4981 }
4982 } else {
4983 switch (fc->FcpCntl[1]) {
4984 case 0:
4985 tag_action = MSG_SIMPLE_Q_TAG;
4986 break;
4987 case 1:
4988 tag_action = MSG_HEAD_OF_Q_TAG;
4989 break;
4990 case 2:
4991 tag_action = MSG_ORDERED_Q_TAG;
4992 break;
4993 default:
4994 /*
 4995 				 * Bah. Ignore Untagged Queuing and ACA
4996 */
4997 tag_action = MSG_SIMPLE_Q_TAG;
4998 break;
4999 }
5000 }
5001 tgt->resid = be32toh(fc->FcpDl);
5002 cdbp = fc->FcpCdb;
5003 lunptr = fc->FcpLun;
5004 itag = be16toh(fc->OptionalOxid);
5005 } else if (mpt->is_sas) {
5006 PTR_MPI_TARGET_SSP_CMD_BUFFER ssp;
5007 ssp = (PTR_MPI_TARGET_SSP_CMD_BUFFER) vbuf;
5008 cdbp = ssp->CDB;
5009 lunptr = ssp->LogicalUnitNumber;
5010 itag = ssp->InitiatorTag;
5011 } else {
5012 PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp;
5013 sp = (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) vbuf;
5014 cdbp = sp->CDB;
5015 lunptr = sp->LogicalUnitNumber;
5016 itag = sp->Tag;
5017 }
5018
5019 /*
5020 * Generate a simple lun
5021 */
5022 switch (lunptr[0] & 0xc0) {
5023 case 0x40:
5024 lun = ((lunptr[0] & 0x3f) << 8) | lunptr[1];
5025 break;
5026 case 0:
5027 lun = lunptr[1];
5028 break;
5029 default:
5030 mpt_lprt(mpt, MPT_PRT_ERROR, "cannot handle this type lun\n");
5031 lun = 0xffff;
5032 break;
5033 }
5034
5035 /*
5036 * Deal with non-enabled or bad luns here.
5037 */
5038 if (lun >= MPT_MAX_LUNS || mpt->tenabled == 0 ||
5039 mpt->trt[lun].enabled == 0) {
5040 if (mpt->twildcard) {
5041 trtp = &mpt->trt_wildcard;
5042 } else if (fct == MPT_NIL_TMT_VALUE) {
5043 /*
5044 * In this case, we haven't got an upstream listener
5045 * for either a specific lun or wildcard luns. We
5046 * have to make some sensible response. For regular
5047 * inquiry, just return some NOT HERE inquiry data.
5048 * For VPD inquiry, report illegal field in cdb.
5049 * For REQUEST SENSE, just return NO SENSE data.
5050 * REPORT LUNS gets illegal command.
5051 * All other commands get 'no such device'.
5052 */
5053 uint8_t *sp, cond, buf[MPT_SENSE_SIZE];
5054 size_t len;
5055
5056 memset(buf, 0, MPT_SENSE_SIZE);
5057 cond = SCSI_STATUS_CHECK_COND;
5058 buf[0] = 0xf0;
5059 buf[2] = 0x5;
5060 buf[7] = 0x8;
5061 sp = buf;
5062 tgt->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex);
5063
5064 switch (cdbp[0]) {
5065 case INQUIRY:
5066 {
5067 if (cdbp[1] != 0) {
5068 buf[12] = 0x26;
5069 buf[13] = 0x01;
5070 break;
5071 }
5072 len = min(tgt->resid, cdbp[4]);
5073 len = min(len, sizeof (null_iqd));
5074 mpt_lprt(mpt, MPT_PRT_DEBUG,
5075 "local inquiry %ld bytes\n", (long) len);
5076 mpt_scsi_tgt_local(mpt, req, lun, 1,
5077 null_iqd, len);
5078 return;
5079 }
5080 case REQUEST_SENSE:
5081 {
5082 buf[2] = 0x0;
5083 len = min(tgt->resid, cdbp[4]);
5084 len = min(len, sizeof (buf));
5085 mpt_lprt(mpt, MPT_PRT_DEBUG,
5086 "local reqsense %ld bytes\n", (long) len);
5087 mpt_scsi_tgt_local(mpt, req, lun, 1,
5088 buf, len);
5089 return;
5090 }
5091 case REPORT_LUNS:
5092 mpt_lprt(mpt, MPT_PRT_DEBUG, "REPORT LUNS\n");
5093 buf[12] = 0x26;
 5094 				break;
5095 default:
5096 mpt_lprt(mpt, MPT_PRT_DEBUG,
5097 "CMD 0x%x to unmanaged lun %u\n",
5098 cdbp[0], lun);
5099 buf[12] = 0x25;
5100 break;
5101 }
5102 mpt_scsi_tgt_status(mpt, NULL, req, cond, sp);
5103 return;
5104 }
5105 /* otherwise, leave trtp NULL */
5106 } else {
5107 trtp = &mpt->trt[lun];
5108 }
5109
5110 /*
5111 * Deal with any task management
5112 */
5113 if (fct != MPT_NIL_TMT_VALUE) {
5114 if (trtp == NULL) {
5115 mpt_prt(mpt, "task mgmt function %x but no listener\n",
5116 fct);
5117 mpt_scsi_tgt_status(mpt, 0, req,
5118 SCSI_STATUS_OK, 0);
5119 } else {
5120 mpt_scsi_tgt_tsk_mgmt(mpt, req, fct, trtp,
5121 GET_INITIATOR_INDEX(reply_desc));
5122 }
5123 return;
5124 }
5125
5126
5127 atiop = (struct ccb_accept_tio *) STAILQ_FIRST(&trtp->atios);
5128 if (atiop == NULL) {
5129 mpt_lprt(mpt, MPT_PRT_WARN,
5130 "no ATIOs for lun %u- sending back %s\n", lun,
5131 mpt->tenabled? "QUEUE FULL" : "BUSY");
5132 mpt_scsi_tgt_status(mpt, NULL, req,
5133 mpt->tenabled? SCSI_STATUS_QUEUE_FULL : SCSI_STATUS_BUSY,
5134 NULL);
5135 return;
5136 }
5137 STAILQ_REMOVE_HEAD(&trtp->atios, sim_links.stqe);
5138 mpt_lprt(mpt, MPT_PRT_DEBUG1,
5139 "Get FREE ATIO %p lun %d\n", atiop, atiop->ccb_h.target_lun);
5140 atiop->ccb_h.ccb_mpt_ptr = mpt;
5141 atiop->ccb_h.status = CAM_CDB_RECVD;
5142 atiop->ccb_h.target_lun = lun;
5143 atiop->sense_len = 0;
5144 atiop->init_id = GET_INITIATOR_INDEX(reply_desc);
5145 atiop->cdb_len = mpt_cdblen(cdbp[0], 16);
5146 memcpy(atiop->cdb_io.cdb_bytes, cdbp, atiop->cdb_len);
5147
5148 /*
5149 * The tag we construct here allows us to find the
5150 * original request that the command came in with.
5151 *
5152 * This way we don't have to depend on anything but the
5153 * tag to find things when CCBs show back up from CAM.
5154 */
5155 atiop->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex);
5156 tgt->tag_id = atiop->tag_id;
5157 if (tag_action) {
5158 atiop->tag_action = tag_action;
5159 atiop->ccb_h.flags = CAM_TAG_ACTION_VALID;
5160 }
5161 if (mpt->verbose >= MPT_PRT_DEBUG) {
5162 int i;
5163 mpt_prt(mpt, "START_CCB %p for lun %u CDB=<", atiop,
5164 atiop->ccb_h.target_lun);
5165 for (i = 0; i < atiop->cdb_len; i++) {
5166 mpt_prtc(mpt, "%02x%c", cdbp[i] & 0xff,
5167 (i == (atiop->cdb_len - 1))? '>' : ' ');
5168 }
5169 mpt_prtc(mpt, " itag %x tag %x rdesc %x dl=%u\n",
 5170 		    itag, atiop->tag_id, tgt->reply_desc, tgt->resid);
5171 }
5172
 5173 	xpt_done((union ccb *)atiop);
5174}
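
/*
 * Illustrative sketch (not part of the driver; the helper name is
 * hypothetical): decoding the two single-level LUN formats accepted
 * above - peripheral (00b) and flat (01b) addressing; multi-level LUNs
 * are rejected.
 */
#if 0
static lun_id_t
example_decode_lun(const uint8_t *lunptr)
{
	switch (lunptr[0] & 0xc0) {
	case 0x40:	/* flat addressing */
		return (((lunptr[0] & 0x3f) << 8) | lunptr[1]);
	case 0x00:	/* peripheral addressing */
		return (lunptr[1]);
	default:	/* multi-level LUN: unsupported */
		return (0xffff);
	}
}
#endif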
5175
5176static void
5177mpt_tgt_dump_tgt_state(struct mpt_softc *mpt, request_t *req)
5178{
5179 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);
5180
5181 mpt_prt(mpt, "req %p:%u tgt:rdesc 0x%x resid %u xfrd %u ccb %p treq %p "
5182 "nx %d tag 0x%08x state=%d\n", req, req->serno, tgt->reply_desc,
5183 tgt->resid, tgt->bytes_xfered, tgt->ccb, tgt->req, tgt->nxfers,
5184 tgt->tag_id, tgt->state);
5185}
5186
5187static void
5188mpt_tgt_dump_req_state(struct mpt_softc *mpt, request_t *req)
5189{
 5190 
5191 mpt_prt(mpt, "req %p:%u index %u (%x) state %x\n", req, req->serno,
5192 req->index, req->index, req->state);
5193 mpt_tgt_dump_tgt_state(mpt, req);
5194}
5195
5196static int
5197mpt_scsi_tgt_reply_handler(struct mpt_softc *mpt, request_t *req,
5198 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
5199{
5200 int dbg;
5201 union ccb *ccb;
5202 U16 status;
5203
5204 if (reply_frame == NULL) {
5205 /*
5206 * Figure out what the state of the command is.
5207 */
5208 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);
5209
5210#ifdef INVARIANTS
5211 mpt_req_spcl(mpt, req, "turbo scsi_tgt_reply", __LINE__);
5212 if (tgt->req) {
5213 mpt_req_not_spcl(mpt, tgt->req,
5214 "turbo scsi_tgt_reply associated req", __LINE__);
5215 }
5216#endif
5217 switch(tgt->state) {
5218 case TGT_STATE_LOADED:
5219 /*
5220 * This is a new command starting.
5221 */
5222 mpt_scsi_tgt_atio(mpt, req, reply_desc);
5223 break;
5224 case TGT_STATE_MOVING_DATA:
5225 {
5226 uint8_t *sp = NULL, sense[MPT_SENSE_SIZE];
5227
5228 ccb = tgt->ccb;
5229 if (tgt->req == NULL) {
5230 panic("mpt: turbo target reply with null "
5231 "associated request moving data");
5232 /* NOTREACHED */
5233 }
5234 if (ccb == NULL) {
5235 if (tgt->is_local == 0) {
5236 panic("mpt: turbo target reply with "
5237 "null associated ccb moving data");
5238 /* NOTREACHED */
5239 }
5240 mpt_lprt(mpt, MPT_PRT_DEBUG,
5241 "TARGET_ASSIST local done\n");
5242 TAILQ_REMOVE(&mpt->request_pending_list,
5243 tgt->req, links);
5244 mpt_free_request(mpt, tgt->req);
5245 tgt->req = NULL;
5246 mpt_scsi_tgt_status(mpt, NULL, req,
5247 0, NULL);
5248 return (TRUE);
5249 }
5250 tgt->ccb = NULL;
5251 tgt->nxfers++;
5252 mpt_req_untimeout(req, mpt_timeout, ccb);
5253 mpt_lprt(mpt, MPT_PRT_DEBUG,
5254 "TARGET_ASSIST %p (req %p:%u) done tag 0x%x\n",
5255 ccb, tgt->req, tgt->req->serno, ccb->csio.tag_id);
5256 /*
5257 * Free the Target Assist Request
5258 */
5259 KASSERT(tgt->req->ccb == ccb,
5260 ("tgt->req %p:%u tgt->req->ccb %p", tgt->req,
5261 tgt->req->serno, tgt->req->ccb));
5262 TAILQ_REMOVE(&mpt->request_pending_list,
5263 tgt->req, links);
5264 mpt_free_request(mpt, tgt->req);
5265 tgt->req = NULL;
5266
5267 /*
5268 * Do we need to send status now? That is, are
5269 * we done with all our data transfers?
5270 */
5271 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
5272 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
5273 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
5274 KASSERT(ccb->ccb_h.status,
 5275 				    ("zero ccb sts at %d", __LINE__));
5276 tgt->state = TGT_STATE_IN_CAM;
5277 if (mpt->outofbeer) {
5278 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
5279 mpt->outofbeer = 0;
5280 mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
5281 }
 5282 				xpt_done(ccb);
5283 break;
5284 }
5285 /*
5286 * Otherwise, send status (and sense)
5287 */
5288 if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
5289 sp = sense;
5290 memcpy(sp, &ccb->csio.sense_data,
5291 min(ccb->csio.sense_len, MPT_SENSE_SIZE));
5292 }
5293 mpt_scsi_tgt_status(mpt, ccb, req,
5294 ccb->csio.scsi_status, sp);
5295 break;
5296 }
5297 case TGT_STATE_SENDING_STATUS:
5298 case TGT_STATE_MOVING_DATA_AND_STATUS:
5299 {
5300 int ioindex;
5301 ccb = tgt->ccb;
5302
5303 if (tgt->req == NULL) {
5304 panic("mpt: turbo target reply with null "
5305 "associated request sending status");
5306 /* NOTREACHED */
5307 }
5308
5309 if (ccb) {
5310 tgt->ccb = NULL;
5311 if (tgt->state ==
5312 TGT_STATE_MOVING_DATA_AND_STATUS) {
5313 tgt->nxfers++;
5314 }
5315 mpt_req_untimeout(req, mpt_timeout, ccb);
5316 if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
5317 ccb->ccb_h.status |= CAM_SENT_SENSE;
5318 }
5319 mpt_lprt(mpt, MPT_PRT_DEBUG,
5320 "TARGET_STATUS tag %x sts %x flgs %x req "
5321 "%p\n", ccb->csio.tag_id, ccb->ccb_h.status,
5322 ccb->ccb_h.flags, tgt->req);
5323 /*
5324 * Free the Target Send Status Request
5325 */
5326 KASSERT(tgt->req->ccb == ccb,
5327 ("tgt->req %p:%u tgt->req->ccb %p",
5328 tgt->req, tgt->req->serno, tgt->req->ccb));
5329 /*
5330 * Notify CAM that we're done
5331 */
5332 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
5333 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
5334 KASSERT(ccb->ccb_h.status,
 5335 				    ("ZERO ccb sts at %d", __LINE__));
5336 tgt->ccb = NULL;
5337 } else {
5338 mpt_lprt(mpt, MPT_PRT_DEBUG,
5339 "TARGET_STATUS non-CAM for req %p:%u\n",
5340 tgt->req, tgt->req->serno);
5341 }
5342 TAILQ_REMOVE(&mpt->request_pending_list,
5343 tgt->req, links);
5344 mpt_free_request(mpt, tgt->req);
5345 tgt->req = NULL;
5346
5347 /*
5348 * And re-post the Command Buffer.
5349 * This will reset the state.
5350 */
5351 ioindex = GET_IO_INDEX(reply_desc);
5352 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
5353 tgt->is_local = 0;
5354 mpt_post_target_command(mpt, req, ioindex);
5355
5356 /*
5357 * And post a done for anyone who cares
5358 */
5359 if (ccb) {
5360 if (mpt->outofbeer) {
5361 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
5362 mpt->outofbeer = 0;
5363 mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
5364 }
 5365 				xpt_done(ccb);
5366 }
5367 break;
5368 }
5369 case TGT_STATE_NIL: /* XXX This Never Happens XXX */
5370 tgt->state = TGT_STATE_LOADED;
5371 break;
5372 default:
5373 mpt_prt(mpt, "Unknown Target State 0x%x in Context "
5374 "Reply Function\n", tgt->state);
5375 }
5376 return (TRUE);
5377 }
5378
5379 status = le16toh(reply_frame->IOCStatus);
5380 if (status != MPI_IOCSTATUS_SUCCESS) {
5381 dbg = MPT_PRT_ERROR;
5382 } else {
5383 dbg = MPT_PRT_DEBUG1;
5384 }
5385
5386 mpt_lprt(mpt, dbg,
5387 "SCSI_TGT REPLY: req=%p:%u reply=%p func=%x IOCstatus 0x%x\n",
5388 req, req->serno, reply_frame, reply_frame->Function, status);
5389
5390 switch (reply_frame->Function) {
5391 case MPI_FUNCTION_TARGET_CMD_BUFFER_POST:
5392 {
5393 mpt_tgt_state_t *tgt;
5394#ifdef INVARIANTS
5395 mpt_req_spcl(mpt, req, "tgt reply BUFFER POST", __LINE__);
5396#endif
5397 if (status != MPI_IOCSTATUS_SUCCESS) {
5398 /*
5399 * XXX What to do?
5400 */
5401 break;
5402 }
5403 tgt = MPT_TGT_STATE(mpt, req);
5404 KASSERT(tgt->state == TGT_STATE_LOADING,
 5405 		    ("bad state 0x%x on reply to buffer post", tgt->state));
5406 mpt_assign_serno(mpt, req);
5407 tgt->state = TGT_STATE_LOADED;
5408 break;
5409 }
5410 case MPI_FUNCTION_TARGET_ASSIST:
5411#ifdef INVARIANTS
5412 mpt_req_not_spcl(mpt, req, "tgt reply TARGET ASSIST", __LINE__);
5413#endif
5414 mpt_prt(mpt, "target assist completion\n");
5415 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
5416 mpt_free_request(mpt, req);
5417 break;
5418 case MPI_FUNCTION_TARGET_STATUS_SEND:
5419#ifdef INVARIANTS
5420 mpt_req_not_spcl(mpt, req, "tgt reply STATUS SEND", __LINE__);
5421#endif
5422 mpt_prt(mpt, "status send completion\n");
5423 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
5424 mpt_free_request(mpt, req);
5425 break;
5426 case MPI_FUNCTION_TARGET_MODE_ABORT:
5427 {
5428 PTR_MSG_TARGET_MODE_ABORT_REPLY abtrp =
5429 (PTR_MSG_TARGET_MODE_ABORT_REPLY) reply_frame;
5430 PTR_MSG_TARGET_MODE_ABORT abtp =
5431 (PTR_MSG_TARGET_MODE_ABORT) req->req_vbuf;
5432 uint32_t cc = GET_IO_INDEX(le32toh(abtp->ReplyWord));
5433#ifdef INVARIANTS
5434 mpt_req_not_spcl(mpt, req, "tgt reply TMODE ABORT", __LINE__);
5435#endif
5436 mpt_prt(mpt, "ABORT RX_ID 0x%x Complete; status 0x%x cnt %u\n",
5437 cc, le16toh(abtrp->IOCStatus), le32toh(abtrp->AbortCount));
5438 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
5439 mpt_free_request(mpt, req);
5440 break;
5441 }
5442 default:
5443 mpt_prt(mpt, "Unknown Target Address Reply Function code: "
5444 "0x%x\n", reply_frame->Function);
5445 break;
5446 }
5447 return (TRUE);
5448}