mpt(4): Fix a porting mistake I did in 6d259fc1.
[dragonfly.git] / sys / dev / disk / mpt / mpt_cam.c
1/*-
2 * FreeBSD/CAM specific routines for LSI '909 FC adapters.
3 * FreeBSD Version.
4 *
5 * Copyright (c) 2000, 2001 by Greg Ansley
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice immediately at the beginning of the file, without modification,
12 * this list of conditions, and the following disclaimer.
13 * 2. The name of the author may not be used to endorse or promote products
14 * derived from this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28/*-
29 * Copyright (c) 2002, 2006 by Matthew Jacob
30 * All rights reserved.
31 *
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions are
34 * met:
35 * 1. Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
38 * substantially similar to the "NO WARRANTY" disclaimer below
39 * ("Disclaimer") and any redistribution must be conditioned upon including
40 * a substantially similar Disclaimer requirement for further binary
41 * redistribution.
42 * 3. Neither the names of the above listed copyright holders nor the names
43 * of any contributors may be used to endorse or promote products derived
44 * from this software without specific prior written permission.
45 *
46 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
47 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
48 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
49 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
50 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
51 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
52 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
53 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
54 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
55 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
56 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
57 *
58 * Support from Chris Ellsworth in order to make SAS adapters work
59 * is gratefully acknowledged.
60 *
61 * Support from LSI-Logic has also gone a great deal toward making this a
62 * workable subsystem and is gratefully acknowledged.
63 */
64/*-
65 * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
66 * Copyright (c) 2005, WHEEL Sp. z o.o.
67 * Copyright (c) 2004, 2005 Justin T. Gibbs
68 * All rights reserved.
69 *
70 * Redistribution and use in source and binary forms, with or without
71 * modification, are permitted provided that the following conditions are
72 * met:
73 * 1. Redistributions of source code must retain the above copyright
74 * notice, this list of conditions and the following disclaimer.
75 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
76 * substantially similar to the "NO WARRANTY" disclaimer below
77 * ("Disclaimer") and any redistribution must be conditioned upon including
78 * a substantially similar Disclaimer requirement for further binary
79 * redistribution.
80 * 3. Neither the names of the above listed copyright holders nor the names
81 * of any contributors may be used to endorse or promote products derived
82 * from this software without specific prior written permission.
83 *
84 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
85 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
86 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
87 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
88 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
89 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
90 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
91 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
92 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
93 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
94 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 95 *
 96 * $FreeBSD: src/sys/dev/mpt/mpt_cam.c,v 1.77 2011/04/22 09:59:16 marius Exp $
 97 */
98
99#include <dev/disk/mpt/mpt.h>
100#include <dev/disk/mpt/mpt_cam.h>
101#include <dev/disk/mpt/mpt_raid.h>
102
103#include "dev/disk/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
104#include "dev/disk/mpt/mpilib/mpi_init.h"
105#include "dev/disk/mpt/mpilib/mpi_targ.h"
106#include "dev/disk/mpt/mpilib/mpi_fc.h"
107#include "dev/disk/mpt/mpilib/mpi_sas.h"
108#include <sys/sysctl.h>
109#include <sys/callout.h>
110#include <sys/kthread.h>
111
112#ifndef CAM_NEW_TRAN_CODE
113#define CAM_NEW_TRAN_CODE 1
114#endif
115
116static void mpt_poll(struct cam_sim *);
117static timeout_t mpt_timeout;
118static void mpt_action(struct cam_sim *, union ccb *);
119static int
120mpt_get_spi_settings(struct mpt_softc *, struct ccb_trans_settings *);
121static void mpt_setwidth(struct mpt_softc *, int, int);
122static void mpt_setsync(struct mpt_softc *, int, int, int);
123static int mpt_update_spi_config(struct mpt_softc *, int);
124static void mpt_calc_geometry(struct ccb_calc_geometry *ccg, int extended);
125
126static mpt_reply_handler_t mpt_scsi_reply_handler;
127static mpt_reply_handler_t mpt_scsi_tmf_reply_handler;
128static mpt_reply_handler_t mpt_fc_els_reply_handler;
129static int mpt_scsi_reply_frame_handler(struct mpt_softc *, request_t *,
130 MSG_DEFAULT_REPLY *);
131static int mpt_bus_reset(struct mpt_softc *, target_id_t, lun_id_t, int);
132static int mpt_fc_reset_link(struct mpt_softc *, int);
133
134static int mpt_spawn_recovery_thread(struct mpt_softc *mpt);
135static void mpt_terminate_recovery_thread(struct mpt_softc *mpt);
136static void mpt_recovery_thread(void *arg);
137static void mpt_recover_commands(struct mpt_softc *mpt);
138
139static int mpt_scsi_send_tmf(struct mpt_softc *, u_int, u_int, u_int,
140 u_int, u_int, u_int, int);
141
142static void mpt_fc_post_els(struct mpt_softc *mpt, request_t *, int);
143static void mpt_post_target_command(struct mpt_softc *, request_t *, int);
144static int mpt_add_els_buffers(struct mpt_softc *mpt);
145static int mpt_add_target_commands(struct mpt_softc *mpt);
146static int mpt_enable_lun(struct mpt_softc *, target_id_t, lun_id_t);
147static int mpt_disable_lun(struct mpt_softc *, target_id_t, lun_id_t);
148static void mpt_target_start_io(struct mpt_softc *, union ccb *);
149static cam_status mpt_abort_target_ccb(struct mpt_softc *, union ccb *);
150static int mpt_abort_target_cmd(struct mpt_softc *, request_t *);
151static void mpt_scsi_tgt_status(struct mpt_softc *, union ccb *, request_t *,
152 uint8_t, uint8_t const *);
153static void
154mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *, request_t *, mpt_task_mgmt_t,
155 tgt_resource_t *, int);
156static void mpt_tgt_dump_tgt_state(struct mpt_softc *, request_t *);
157static void mpt_tgt_dump_req_state(struct mpt_softc *, request_t *);
158static mpt_reply_handler_t mpt_scsi_tgt_reply_handler;
159static mpt_reply_handler_t mpt_sata_pass_reply_handler;
160
161static uint32_t scsi_io_handler_id = MPT_HANDLER_ID_NONE;
162static uint32_t scsi_tmf_handler_id = MPT_HANDLER_ID_NONE;
163static uint32_t fc_els_handler_id = MPT_HANDLER_ID_NONE;
164static uint32_t sata_pass_handler_id = MPT_HANDLER_ID_NONE;
165
166static mpt_probe_handler_t mpt_cam_probe;
167static mpt_attach_handler_t mpt_cam_attach;
168static mpt_enable_handler_t mpt_cam_enable;
169static mpt_ready_handler_t mpt_cam_ready;
170static mpt_event_handler_t mpt_cam_event;
171static mpt_reset_handler_t mpt_cam_ioc_reset;
172static mpt_detach_handler_t mpt_cam_detach;
173
174static struct mpt_personality mpt_cam_personality =
175{
176 .name = "mpt_cam",
177 .probe = mpt_cam_probe,
178 .attach = mpt_cam_attach,
179 .enable = mpt_cam_enable,
180 .ready = mpt_cam_ready,
181 .event = mpt_cam_event,
182 .reset = mpt_cam_ioc_reset,
183 .detach = mpt_cam_detach,
184};
185
186DECLARE_MPT_PERSONALITY(mpt_cam, SI_ORDER_SECOND);
187MODULE_DEPEND(mpt_cam, cam, 1, 1, 1);
188
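/*
 * SATA write-cache tunable: -1 (the default) leaves a drive's write
 * cache alone, 0 forces it off, and any other value forces it on; see
 * mpt_set_initial_config_sas() and mptsas_set_sata_wc() below.
 */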
189int mpt_enable_sata_wc = -1;
190TUNABLE_INT("hw.mpt.enable_sata_wc", &mpt_enable_sata_wc);
191
192int
193mpt_cam_probe(struct mpt_softc *mpt)
194{
195 int role;
196
197 /*
198 * Only attach to nodes that support the initiator or target role
199 * (or want to) or have RAID physical devices that need CAM pass-thru
200 * support.
201 */
202 if (mpt->do_cfg_role) {
203 role = mpt->cfg_role;
204 } else {
205 role = mpt->role;
206 }
207 if ((role & (MPT_ROLE_TARGET|MPT_ROLE_INITIATOR)) != 0 ||
208 (mpt->ioc_page2 != NULL && mpt->ioc_page2->MaxPhysDisks != 0)) {
209 return (0);
210 }
211 return (ENODEV);
212}
213
214int
215mpt_cam_attach(struct mpt_softc *mpt)
216{
217 struct cam_devq *devq;
218 mpt_handler_t handler;
219 int maxq;
220 int error;
221
222 MPT_LOCK(mpt);
223 TAILQ_INIT(&mpt->request_timeout_list);
224 maxq = (mpt->ioc_facts.GlobalCredits < MPT_MAX_REQUESTS(mpt))?
225 mpt->ioc_facts.GlobalCredits : MPT_MAX_REQUESTS(mpt);
226
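	/*
	 * Each reply handler is registered under an id that gets folded
	 * into a request's MsgContext (req->index | handler_id), which is
	 * how replies are routed back to the right handler at interrupt
	 * time.
	 */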
227 handler.reply_handler = mpt_scsi_reply_handler;
228 error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
229 &scsi_io_handler_id);
230 if (error != 0) {
231 MPT_UNLOCK(mpt);
232 goto cleanup;
233 }
234
235 handler.reply_handler = mpt_scsi_tmf_reply_handler;
236 error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
237 &scsi_tmf_handler_id);
238 if (error != 0) {
239 MPT_UNLOCK(mpt);
240 goto cleanup;
241 }
242
243 /*
244 * If we're fibre channel and could support target mode, we register
245 * an ELS reply handler and give it resources.
246 */
247 if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
248 handler.reply_handler = mpt_fc_els_reply_handler;
249 error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
250 &fc_els_handler_id);
251 if (error != 0) {
252 MPT_UNLOCK(mpt);
253 goto cleanup;
254 }
255 if (mpt_add_els_buffers(mpt) == FALSE) {
256 error = ENOMEM;
257 MPT_UNLOCK(mpt);
258 goto cleanup;
259 }
260 maxq -= mpt->els_cmds_allocated;
261 }
262
263 /*
264 * If we support target mode, we register a reply handler for it,
265 * but don't add command resources until we actually enable target
266 * mode.
267 */
268 if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
269 handler.reply_handler = mpt_scsi_tgt_reply_handler;
270 error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
271 &mpt->scsi_tgt_handler_id);
272 if (error != 0) {
273 MPT_UNLOCK(mpt);
274 goto cleanup;
275 }
276 }
277
278 if (mpt->is_sas) {
279 handler.reply_handler = mpt_sata_pass_reply_handler;
280 error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
281 &sata_pass_handler_id);
282 if (error != 0) {
283 MPT_UNLOCK(mpt);
284 goto cleanup;
285 }
286 }
287
288 /*
289 * We keep one request reserved for timeout TMF requests.
290 */
291 mpt->tmf_req = mpt_get_request(mpt, FALSE);
292 if (mpt->tmf_req == NULL) {
293 mpt_prt(mpt, "Unable to allocate dedicated TMF request!\n");
294 error = ENOMEM;
295 MPT_UNLOCK(mpt);
296 goto cleanup;
297 }
298
299 /*
300 * Mark the request as free even though not on the free list.
301 * There is only one TMF request allowed to be outstanding at
302 * a time and the TMF routines perform their own allocation
303 * tracking using the standard state flags.
304 */
305 mpt->tmf_req->state = REQ_STATE_FREE;
306 maxq--;
307
308 /*
309 * The rest of this is CAM foo, for which we need to drop our lock
310 */
311 MPT_UNLOCK(mpt);
312
313 if (mpt_spawn_recovery_thread(mpt) != 0) {
314 mpt_prt(mpt, "Unable to spawn recovery thread!\n");
315 error = ENOMEM;
316 goto cleanup;
317 }
318
319 /*
320 * Create the device queue for our SIM(s).
321 */
322 devq = cam_simq_alloc(maxq);
323 if (devq == NULL) {
324 mpt_prt(mpt, "Unable to allocate CAM SIMQ!\n");
325 error = ENOMEM;
326 goto cleanup;
327 }
328
329 /*
330 * Construct our SIM entry.
331 */
332 mpt->sim =
333 mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
334 if (mpt->sim == NULL) {
335 mpt_prt(mpt, "Unable to allocate CAM SIM!\n");
336 cam_devq_release(devq);
337 error = ENOMEM;
338 goto cleanup;
339 }
340
341 /*
342 * Register exactly this bus.
343 */
344 MPT_LOCK(mpt);
345 if (mpt_xpt_bus_register(mpt->sim, mpt->dev, 0) != CAM_SUCCESS) {
346 mpt_prt(mpt, "Bus registration Failed!\n");
347 error = ENOMEM;
348 MPT_UNLOCK(mpt);
349 goto cleanup;
350 }
351
352 if (xpt_create_path(&mpt->path, NULL, cam_sim_path(mpt->sim),
353 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
354 mpt_prt(mpt, "Unable to allocate Path!\n");
355 error = ENOMEM;
356 MPT_UNLOCK(mpt);
357 goto cleanup;
358 }
359 MPT_UNLOCK(mpt);
360
361 /*
362 * Only register a second bus for RAID physical
363 * devices if the controller supports RAID.
364 */
365 if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
366 return (0);
367 }
368
369 /*
370 * Create a "bus" to export all hidden disks to CAM.
371 */
372 mpt->phydisk_sim =
373 mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
374 if (mpt->phydisk_sim == NULL) {
375 mpt_prt(mpt, "Unable to allocate Physical Disk CAM SIM!\n");
376 error = ENOMEM;
377 goto cleanup;
378 }
379
380 /*
381 * Register this bus.
382 */
383 MPT_LOCK(mpt);
384 if (mpt_xpt_bus_register(mpt->phydisk_sim, mpt->dev, 1) !=
385 CAM_SUCCESS) {
386 mpt_prt(mpt, "Physical Disk Bus registration Failed!\n");
387 error = ENOMEM;
388 MPT_UNLOCK(mpt);
389 goto cleanup;
390 }
391
392 if (xpt_create_path(&mpt->phydisk_path, NULL,
393 cam_sim_path(mpt->phydisk_sim),
394 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
395 mpt_prt(mpt, "Unable to allocate Physical Disk Path!\n");
396 error = ENOMEM;
397 MPT_UNLOCK(mpt);
398 goto cleanup;
399 }
400 MPT_UNLOCK(mpt);
401 mpt_lprt(mpt, MPT_PRT_DEBUG, "attached cam\n");
402 return (0);
403
404cleanup:
405 mpt_cam_detach(mpt);
406 return (error);
407}
408
409/*
410 * Read FC configuration information
411 */
412static int
413mpt_read_config_info_fc(struct mpt_softc *mpt)
414{
415 char *topology = NULL;
416 int rv;
417
418 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 0,
419 0, &mpt->mpt_fcport_page0.Header, FALSE, 5000);
420 if (rv) {
421 return (-1);
422 }
423 mpt_lprt(mpt, MPT_PRT_DEBUG, "FC Port Page 0 Header: %x %x %x %x\n",
424 mpt->mpt_fcport_page0.Header.PageVersion,
425 mpt->mpt_fcport_page0.Header.PageLength,
426 mpt->mpt_fcport_page0.Header.PageNumber,
427 mpt->mpt_fcport_page0.Header.PageType);
428
429
430 rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_fcport_page0.Header,
431 sizeof(mpt->mpt_fcport_page0), FALSE, 5000);
432 if (rv) {
433 mpt_prt(mpt, "failed to read FC Port Page 0\n");
434 return (-1);
435 }
436 mpt2host_config_page_fc_port_0(&mpt->mpt_fcport_page0);
437
438 mpt->mpt_fcport_speed = mpt->mpt_fcport_page0.CurrentSpeed;
439
440 switch (mpt->mpt_fcport_page0.Flags &
441 MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK) {
442 case MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT:
443 mpt->mpt_fcport_speed = 0;
444 topology = "<NO LOOP>";
445 break;
446 case MPI_FCPORTPAGE0_FLAGS_ATTACH_POINT_TO_POINT:
447 topology = "N-Port";
448 break;
449 case MPI_FCPORTPAGE0_FLAGS_ATTACH_PRIVATE_LOOP:
450 topology = "NL-Port";
451 break;
452 case MPI_FCPORTPAGE0_FLAGS_ATTACH_FABRIC_DIRECT:
453 topology = "F-Port";
454 break;
455 case MPI_FCPORTPAGE0_FLAGS_ATTACH_PUBLIC_LOOP:
456 topology = "FL-Port";
457 break;
458 default:
459 mpt->mpt_fcport_speed = 0;
460 topology = "?";
461 break;
462 }
463
464 mpt_lprt(mpt, MPT_PRT_INFO,
465 "FC Port Page 0: Topology <%s> WWNN 0x%08x%08x WWPN 0x%08x%08x "
466 "Speed %u-Gbit\n", topology,
467 mpt->mpt_fcport_page0.WWNN.High,
468 mpt->mpt_fcport_page0.WWNN.Low,
469 mpt->mpt_fcport_page0.WWPN.High,
470 mpt->mpt_fcport_page0.WWPN.Low,
471 mpt->mpt_fcport_speed);
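	/*
	 * Drop the lock around the sysctl additions below: formatting and
	 * registering the read-only WWNN/WWPN nodes may allocate and sleep.
	 */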
472 MPT_UNLOCK(mpt);
473 {
474 ksnprintf(mpt->scinfo.fc.wwnn,
475 sizeof (mpt->scinfo.fc.wwnn), "0x%08x%08x",
476 mpt->mpt_fcport_page0.WWNN.High,
477 mpt->mpt_fcport_page0.WWNN.Low);
478
479 ksnprintf(mpt->scinfo.fc.wwpn,
480 sizeof (mpt->scinfo.fc.wwpn), "0x%08x%08x",
481 mpt->mpt_fcport_page0.WWPN.High,
482 mpt->mpt_fcport_page0.WWPN.Low);
483
484 SYSCTL_ADD_STRING(&mpt->mpt_sysctl_ctx,
485 SYSCTL_CHILDREN(mpt->mpt_sysctl_tree), OID_AUTO,
486 "wwnn", CTLFLAG_RD, mpt->scinfo.fc.wwnn, 0,
487 "World Wide Node Name");
488
489 SYSCTL_ADD_STRING(&mpt->mpt_sysctl_ctx,
490 SYSCTL_CHILDREN(mpt->mpt_sysctl_tree), OID_AUTO,
491 "wwpn", CTLFLAG_RD, mpt->scinfo.fc.wwpn, 0,
492 "World Wide Port Name");
493
494 }
495 MPT_LOCK(mpt);
496 return (0);
497}
498
499/*
500 * Set FC configuration information.
501 */
502static int
503mpt_set_initial_config_fc(struct mpt_softc *mpt)
504{
505
506 CONFIG_PAGE_FC_PORT_1 fc;
507 U32 fl;
508 int r, doit = 0;
509 int role;
510
511 r = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 1, 0,
512 &fc.Header, FALSE, 5000);
513 if (r) {
514 mpt_prt(mpt, "failed to read FC page 1 header\n");
515 return (mpt_fc_reset_link(mpt, 1));
516 }
517
518 r = mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_NVRAM, 0,
519 &fc.Header, sizeof (fc), FALSE, 5000);
520 if (r) {
521 mpt_prt(mpt, "failed to read FC page 1\n");
522 return (mpt_fc_reset_link(mpt, 1));
523 }
524 mpt2host_config_page_fc_port_1(&fc);
525
526 /*
527 * Check our flags to make sure we support the role we want.
528 */
529 doit = 0;
530 role = 0;
531 fl = fc.Flags;
532
533 if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT) {
534 role |= MPT_ROLE_INITIATOR;
535 }
536 if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
537 role |= MPT_ROLE_TARGET;
538 }
539
540 fl &= ~MPI_FCPORTPAGE1_FLAGS_PROT_MASK;
541
542 if (mpt->do_cfg_role == 0) {
543 role = mpt->cfg_role;
544 } else {
545 mpt->do_cfg_role = 0;
546 }
547
548 if (role != mpt->cfg_role) {
549 if (mpt->cfg_role & MPT_ROLE_INITIATOR) {
550 if ((role & MPT_ROLE_INITIATOR) == 0) {
551 mpt_prt(mpt, "adding initiator role\n");
552 fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT;
553 doit++;
554 } else {
555 mpt_prt(mpt, "keeping initiator role\n");
556 }
557 } else if (role & MPT_ROLE_INITIATOR) {
558 mpt_prt(mpt, "removing initiator role\n");
559 doit++;
560 }
561 if (mpt->cfg_role & MPT_ROLE_TARGET) {
562 if ((role & MPT_ROLE_TARGET) == 0) {
563 mpt_prt(mpt, "adding target role\n");
564 fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG;
565 doit++;
566 } else {
567 mpt_prt(mpt, "keeping target role\n");
568 }
569 } else if (role & MPT_ROLE_TARGET) {
570 mpt_prt(mpt, "removing target role\n");
571 doit++;
572 }
573 mpt->role = mpt->cfg_role;
574 }
575
576 if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
577 if ((fl & MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID) == 0) {
578 mpt_prt(mpt, "adding OXID option\n");
579 fl |= MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID;
580 doit++;
581 }
582 }
583
584 if (doit) {
585 fc.Flags = fl;
586 host2mpt_config_page_fc_port_1(&fc);
587 r = mpt_write_cfg_page(mpt,
588 MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM, 0, &fc.Header,
589 sizeof(fc), FALSE, 5000);
590 if (r != 0) {
591 mpt_prt(mpt, "failed to update NVRAM with changes\n");
592 return (0);
593 }
594 mpt_prt(mpt, "NOTE: NVRAM changes will not take "
595 "effect until next reboot or IOC reset\n");
596 }
597 return (0);
598}
599
600static int
601mptsas_sas_io_unit_pg0(struct mpt_softc *mpt, struct mptsas_portinfo *portinfo)
602{
603 ConfigExtendedPageHeader_t hdr;
604 struct mptsas_phyinfo *phyinfo;
605 SasIOUnitPage0_t *buffer;
606 int error, len, i;
607
608 error = mpt_read_extcfg_header(mpt, MPI_SASIOUNITPAGE0_PAGEVERSION,
609 0, 0, MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT,
610 &hdr, 0, 10000);
611 if (error)
612 goto out;
613 if (hdr.ExtPageLength == 0) {
614 error = ENXIO;
615 goto out;
616 }
617
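	/* ExtPageLength counts 32-bit words, so convert it to bytes. */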
618 len = hdr.ExtPageLength * 4;
619 buffer = kmalloc(len, M_DEVBUF, M_NOWAIT|M_ZERO);
620 if (buffer == NULL) {
621 error = ENOMEM;
622 goto out;
623 }
624
625 error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
626 0, &hdr, buffer, len, 0, 10000);
627 if (error) {
628 kfree(buffer, M_DEVBUF);
629 goto out;
630 }
631
632 portinfo->num_phys = buffer->NumPhys;
633 portinfo->phy_info = kmalloc(sizeof(*portinfo->phy_info) *
634 portinfo->num_phys, M_DEVBUF, M_NOWAIT|M_ZERO);
635 if (portinfo->phy_info == NULL) {
636 kfree(buffer, M_DEVBUF);
637 error = ENOMEM;
638 goto out;
639 }
640
641 for (i = 0; i < portinfo->num_phys; i++) {
642 phyinfo = &portinfo->phy_info[i];
643 phyinfo->phy_num = i;
644 phyinfo->port_id = buffer->PhyData[i].Port;
645 phyinfo->negotiated_link_rate =
646 buffer->PhyData[i].NegotiatedLinkRate;
647 phyinfo->handle =
648 le16toh(buffer->PhyData[i].ControllerDevHandle);
649 }
650
651 kfree(buffer, M_DEVBUF);
652out:
653 return (error);
654}
655
656static int
657mptsas_sas_phy_pg0(struct mpt_softc *mpt, struct mptsas_phyinfo *phy_info,
658 uint32_t form, uint32_t form_specific)
659{
660 ConfigExtendedPageHeader_t hdr;
661 SasPhyPage0_t *buffer;
662 int error;
663
664 error = mpt_read_extcfg_header(mpt, MPI_SASPHY0_PAGEVERSION, 0, 0,
665 MPI_CONFIG_EXTPAGETYPE_SAS_PHY, &hdr,
666 0, 10000);
667 if (error)
668 goto out;
669 if (hdr.ExtPageLength == 0) {
670 error = ENXIO;
671 goto out;
672 }
673
674 buffer = kmalloc(sizeof(SasPhyPage0_t), M_DEVBUF, M_NOWAIT|M_ZERO);
675 if (buffer == NULL) {
676 error = ENOMEM;
677 goto out;
678 }
679
680 error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
681 form + form_specific, &hdr, buffer,
682 sizeof(SasPhyPage0_t), 0, 10000);
683 if (error) {
684 kfree(buffer, M_DEVBUF);
685 goto out;
686 }
687
688 phy_info->hw_link_rate = buffer->HwLinkRate;
689 phy_info->programmed_link_rate = buffer->ProgrammedLinkRate;
690 phy_info->identify.dev_handle = le16toh(buffer->OwnerDevHandle);
691 phy_info->attached.dev_handle = le16toh(buffer->AttachedDevHandle);
692
693 kfree(buffer, M_DEVBUF);
694out:
695 return (error);
696}
697
698static int
699mptsas_sas_device_pg0(struct mpt_softc *mpt, struct mptsas_devinfo *device_info,
700 uint32_t form, uint32_t form_specific)
701{
702 ConfigExtendedPageHeader_t hdr;
703 SasDevicePage0_t *buffer;
704 uint64_t sas_address;
705 int error = 0;
706
707 bzero(device_info, sizeof(*device_info));
708 error = mpt_read_extcfg_header(mpt, MPI_SASDEVICE0_PAGEVERSION, 0, 0,
709 MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE,
710 &hdr, 0, 10000);
711 if (error)
712 goto out;
713 if (hdr.ExtPageLength == 0) {
714 error = ENXIO;
715 goto out;
716 }
717
718 buffer = kmalloc(sizeof(SasDevicePage0_t), M_DEVBUF, M_NOWAIT|M_ZERO);
719 if (buffer == NULL) {
720 error = ENOMEM;
721 goto out;
722 }
723
724 error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
725 form + form_specific, &hdr, buffer,
726 sizeof(SasDevicePage0_t), 0, 10000);
727 if (error) {
728 kfree(buffer, M_DEVBUF);
729 goto out;
730 }
731
732 device_info->dev_handle = le16toh(buffer->DevHandle);
733 device_info->parent_dev_handle = le16toh(buffer->ParentDevHandle);
734 device_info->enclosure_handle = le16toh(buffer->EnclosureHandle);
735 device_info->slot = le16toh(buffer->Slot);
736 device_info->phy_num = buffer->PhyNum;
737 device_info->physical_port = buffer->PhysicalPort;
738 device_info->target_id = buffer->TargetID;
739 device_info->bus = buffer->Bus;
740 bcopy(&buffer->SASAddress, &sas_address, sizeof(uint64_t));
741 device_info->sas_address = le64toh(sas_address);
742 device_info->device_info = le32toh(buffer->DeviceInfo);
743
744 kfree(buffer, M_DEVBUF);
745out:
746 return (error);
747}
748
749/*
750 * Read SAS configuration information.
751 */
752static int
753mpt_read_config_info_sas(struct mpt_softc *mpt)
754{
755 struct mptsas_portinfo *portinfo;
756 struct mptsas_phyinfo *phyinfo;
757 int error, i;
758
759 portinfo = kmalloc(sizeof(*portinfo), M_DEVBUF, M_NOWAIT|M_ZERO);
760 if (portinfo == NULL)
761 return (ENOMEM);
762
763 error = mptsas_sas_io_unit_pg0(mpt, portinfo);
764 if (error) {
765 kfree(portinfo, M_DEVBUF);
766 return (0);
767 }
768
769 for (i = 0; i < portinfo->num_phys; i++) {
770 phyinfo = &portinfo->phy_info[i];
771 error = mptsas_sas_phy_pg0(mpt, phyinfo,
772 (MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER <<
773 MPI_SAS_PHY_PGAD_FORM_SHIFT), i);
774 if (error)
775 break;
776 error = mptsas_sas_device_pg0(mpt, &phyinfo->identify,
777 (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
778 MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
779 phyinfo->handle);
780 if (error)
781 break;
782 phyinfo->identify.phy_num = phyinfo->phy_num = i;
783 if (phyinfo->attached.dev_handle)
784 error = mptsas_sas_device_pg0(mpt,
785 &phyinfo->attached,
786 (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
787 MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
788 phyinfo->attached.dev_handle);
789 if (error)
790 break;
791 }
792 mpt->sas_portinfo = portinfo;
793 return (0);
794}
795
796static void
797mptsas_set_sata_wc(struct mpt_softc *mpt, struct mptsas_devinfo *devinfo,
798 int enabled)
799{
800 SataPassthroughRequest_t *pass;
801 request_t *req;
802 int error, status;
803
804 req = mpt_get_request(mpt, 0);
805 if (req == NULL)
806 return;
807
808 pass = req->req_vbuf;
809 bzero(pass, sizeof(SataPassthroughRequest_t));
810 pass->Function = MPI_FUNCTION_SATA_PASSTHROUGH;
811 pass->TargetID = devinfo->target_id;
812 pass->Bus = devinfo->bus;
813 pass->PassthroughFlags = 0;
814 pass->ConnectionRate = MPI_SATA_PT_REQ_CONNECT_RATE_NEGOTIATED;
815 pass->DataLength = 0;
816 pass->MsgContext = htole32(req->index | sata_pass_handler_id);
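	/*
	 * Build a register host-to-device FIS by hand: byte 0 (0x27) is
	 * the FIS type, byte 1 (0x80) sets the C bit, byte 2 (0xef) is the
	 * ATA SET FEATURES command, and byte 3 selects subcommand 0x02
	 * (enable write cache) or 0x82 (disable it); 0x40 in the device
	 * register and 0x08 in the control field complete the frame.
	 */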
817 pass->CommandFIS[0] = 0x27;
818 pass->CommandFIS[1] = 0x80;
819 pass->CommandFIS[2] = 0xef;
820 pass->CommandFIS[3] = (enabled) ? 0x02 : 0x82;
821 pass->CommandFIS[7] = 0x40;
822 pass->CommandFIS[15] = 0x08;
823
824 mpt_check_doorbell(mpt);
825 mpt_send_cmd(mpt, req);
826 error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, 0,
827 10 * 1000);
828 if (error) {
829 mpt_free_request(mpt, req);
830 kprintf("error %d sending passthrough\n", error);
831 return;
832 }
833
834 status = le16toh(req->IOCStatus);
835 if (status != MPI_IOCSTATUS_SUCCESS) {
836 mpt_free_request(mpt, req);
837 kprintf("IOCSTATUS %d\n", status);
838 return;
839 }
840
841 mpt_free_request(mpt, req);
842}
843
844/*
845 * Set SAS configuration information (SATA write cache, if requested).
846 */
847static int
848mpt_set_initial_config_sas(struct mpt_softc *mpt)
849{
850 struct mptsas_phyinfo *phyinfo;
851 int i;
852
853 if ((mpt_enable_sata_wc != -1) && (mpt->sas_portinfo != NULL)) {
854 for (i = 0; i < mpt->sas_portinfo->num_phys; i++) {
855 phyinfo = &mpt->sas_portinfo->phy_info[i];
856 if (phyinfo->attached.dev_handle == 0)
857 continue;
858 if ((phyinfo->attached.device_info &
859 MPI_SAS_DEVICE_INFO_SATA_DEVICE) == 0)
860 continue;
861 if (bootverbose)
862 device_printf(mpt->dev,
863 "%sabling SATA WC on phy %d\n",
864 (mpt_enable_sata_wc) ? "En" : "Dis", i);
865 mptsas_set_sata_wc(mpt, &phyinfo->attached,
866 mpt_enable_sata_wc);
867 }
868 }
869
870 return (0);
871}
872
873static int
874mpt_sata_pass_reply_handler(struct mpt_softc *mpt, request_t *req,
875 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
876{
877 if (req != NULL) {
878
879 if (reply_frame != NULL) {
880 req->IOCStatus = le16toh(reply_frame->IOCStatus);
881 }
882 req->state &= ~REQ_STATE_QUEUED;
883 req->state |= REQ_STATE_DONE;
884 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
885 if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
886 wakeup(req);
887 } else if ((req->state & REQ_STATE_TIMEDOUT) != 0) {
888 /*
889 * Whew- we can free this request (late completion)
890 */
891 mpt_free_request(mpt, req);
892 }
893 }
894
895 return (TRUE);
896}
897
898/*
899 * Read SCSI configuration information
900 */
901static int
902mpt_read_config_info_spi(struct mpt_softc *mpt)
903{
904 int rv, i;
905
906 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 0, 0,
907 &mpt->mpt_port_page0.Header, FALSE, 5000);
908 if (rv) {
909 return (-1);
910 }
911 mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 0 Header: %x %x %x %x\n",
912 mpt->mpt_port_page0.Header.PageVersion,
913 mpt->mpt_port_page0.Header.PageLength,
914 mpt->mpt_port_page0.Header.PageNumber,
915 mpt->mpt_port_page0.Header.PageType);
916
917 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 1, 0,
918 &mpt->mpt_port_page1.Header, FALSE, 5000);
919 if (rv) {
920 return (-1);
921 }
922 mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 1 Header: %x %x %x %x\n",
923 mpt->mpt_port_page1.Header.PageVersion,
924 mpt->mpt_port_page1.Header.PageLength,
925 mpt->mpt_port_page1.Header.PageNumber,
926 mpt->mpt_port_page1.Header.PageType);
927
928 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 2, 0,
929 &mpt->mpt_port_page2.Header, FALSE, 5000);
930 if (rv) {
931 return (-1);
932 }
933 mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 2 Header: %x %x %x %x\n",
934 mpt->mpt_port_page2.Header.PageVersion,
935 mpt->mpt_port_page2.Header.PageLength,
936 mpt->mpt_port_page2.Header.PageNumber,
937 mpt->mpt_port_page2.Header.PageType);
938
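	/* A (wide) SPI bus addresses at most 16 targets, hence the bound. */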
939 for (i = 0; i < 16; i++) {
940 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
941 0, i, &mpt->mpt_dev_page0[i].Header, FALSE, 5000);
942 if (rv) {
943 return (-1);
944 }
945 mpt_lprt(mpt, MPT_PRT_DEBUG,
946 "SPI Target %d Device Page 0 Header: %x %x %x %x\n", i,
947 mpt->mpt_dev_page0[i].Header.PageVersion,
948 mpt->mpt_dev_page0[i].Header.PageLength,
949 mpt->mpt_dev_page0[i].Header.PageNumber,
950 mpt->mpt_dev_page0[i].Header.PageType);
951
952 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
953 1, i, &mpt->mpt_dev_page1[i].Header, FALSE, 5000);
954 if (rv) {
955 return (-1);
956 }
957 mpt_lprt(mpt, MPT_PRT_DEBUG,
958 "SPI Target %d Device Page 1 Header: %x %x %x %x\n", i,
959 mpt->mpt_dev_page1[i].Header.PageVersion,
960 mpt->mpt_dev_page1[i].Header.PageLength,
961 mpt->mpt_dev_page1[i].Header.PageNumber,
962 mpt->mpt_dev_page1[i].Header.PageType);
963 }
964
965 /*
966 * At this point, we don't *have* to fail. As long as we have
967 * valid config header information, we can (barely) lurch
968 * along.
969 */
970
971 rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page0.Header,
972 sizeof(mpt->mpt_port_page0), FALSE, 5000);
973 if (rv) {
974 mpt_prt(mpt, "failed to read SPI Port Page 0\n");
975 } else {
976 mpt2host_config_page_scsi_port_0(&mpt->mpt_port_page0);
977 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
978 "SPI Port Page 0: Capabilities %x PhysicalInterface %x\n",
979 mpt->mpt_port_page0.Capabilities,
980 mpt->mpt_port_page0.PhysicalInterface);
981 }
982
983 rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page1.Header,
984 sizeof(mpt->mpt_port_page1), FALSE, 5000);
985 if (rv) {
986 mpt_prt(mpt, "failed to read SPI Port Page 1\n");
987 } else {
988 mpt2host_config_page_scsi_port_1(&mpt->mpt_port_page1);
989 mpt_lprt(mpt, MPT_PRT_DEBUG,
990 "SPI Port Page 1: Configuration %x OnBusTimerValue %x\n",
991 mpt->mpt_port_page1.Configuration,
992 mpt->mpt_port_page1.OnBusTimerValue);
993 }
994
995 rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page2.Header,
996 sizeof(mpt->mpt_port_page2), FALSE, 5000);
997 if (rv) {
998 mpt_prt(mpt, "failed to read SPI Port Page 2\n");
999 } else {
1000 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
1001 "Port Page 2: Flags %x Settings %x\n",
1002 mpt->mpt_port_page2.PortFlags,
1003 mpt->mpt_port_page2.PortSettings);
1004 mpt2host_config_page_scsi_port_2(&mpt->mpt_port_page2);
1005 for (i = 0; i < 16; i++) {
1006 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
1007 " Port Page 2 Tgt %d: timo %x SF %x Flags %x\n",
1008 i, mpt->mpt_port_page2.DeviceSettings[i].Timeout,
1009 mpt->mpt_port_page2.DeviceSettings[i].SyncFactor,
1010 mpt->mpt_port_page2.DeviceSettings[i].DeviceFlags);
1011 }
1012 }
1013
1014 for (i = 0; i < 16; i++) {
1015 rv = mpt_read_cur_cfg_page(mpt, i,
1016 &mpt->mpt_dev_page0[i].Header, sizeof(*mpt->mpt_dev_page0),
1017 FALSE, 5000);
1018 if (rv) {
1019 mpt_prt(mpt,
1020 "cannot read SPI Target %d Device Page 0\n", i);
1021 continue;
1022 }
1023 mpt2host_config_page_scsi_device_0(&mpt->mpt_dev_page0[i]);
1024 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
1025 "target %d page 0: Negotiated Params %x Information %x\n",
1026 i, mpt->mpt_dev_page0[i].NegotiatedParameters,
1027 mpt->mpt_dev_page0[i].Information);
1028
1029 rv = mpt_read_cur_cfg_page(mpt, i,
1030 &mpt->mpt_dev_page1[i].Header, sizeof(*mpt->mpt_dev_page1),
1031 FALSE, 5000);
1032 if (rv) {
1033 mpt_prt(mpt,
1034 "cannot read SPI Target %d Device Page 1\n", i);
1035 continue;
1036 }
1037 mpt2host_config_page_scsi_device_1(&mpt->mpt_dev_page1[i]);
1038 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
1039 "target %d page 1: Requested Params %x Configuration %x\n",
1040 i, mpt->mpt_dev_page1[i].RequestedParameters,
1041 mpt->mpt_dev_page1[i].Configuration);
1042 }
1043 return (0);
1044}
1045
1046/*
1047 * Validate SPI configuration information.
1048 *
1049 * In particular, validate SPI Port Page 1.
1050 */
1051static int
1052mpt_set_initial_config_spi(struct mpt_softc *mpt)
1053{
1054 int error, i, pp1val;
1055
1056 mpt->mpt_disc_enable = 0xff;
1057 mpt->mpt_tag_enable = 0;
1058
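	/*
	 * SPI Port Page 1 puts our initiator ID in the low word and a
	 * one-hot mask of the IDs we respond to in the high word; e.g. for
	 * an initiator ID of 7 this works out to (0x0080 << 16) | 7 =
	 * 0x00800007.
	 */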
1059 pp1val = ((1 << mpt->mpt_ini_id) <<
1060 MPI_SCSIPORTPAGE1_CFG_SHIFT_PORT_RESPONSE_ID) | mpt->mpt_ini_id;
1061 if (mpt->mpt_port_page1.Configuration != pp1val) {
1062 CONFIG_PAGE_SCSI_PORT_1 tmp;
1063
1064 mpt_prt(mpt, "SPI Port Page 1 Config value bad (%x)- should "
1065 "be %x\n", mpt->mpt_port_page1.Configuration, pp1val);
1066 tmp = mpt->mpt_port_page1;
1067 tmp.Configuration = pp1val;
1068 host2mpt_config_page_scsi_port_1(&tmp);
1069 error = mpt_write_cur_cfg_page(mpt, 0,
1070 &tmp.Header, sizeof(tmp), FALSE, 5000);
1071 if (error) {
1072 return (-1);
1073 }
1074 error = mpt_read_cur_cfg_page(mpt, 0,
1075 &tmp.Header, sizeof(tmp), FALSE, 5000);
1076 if (error) {
1077 return (-1);
1078 }
1079 mpt2host_config_page_scsi_port_1(&tmp);
1080 if (tmp.Configuration != pp1val) {
1081 mpt_prt(mpt,
1082 "failed to reset SPI Port Page 1 Config value\n");
1083 return (-1);
1084 }
1085 mpt->mpt_port_page1 = tmp;
1086 }
1087
1088 /*
1089 * The purpose of this exercise is to get
1090 * all targets back to async/narrow.
1091 *
1092 * We skip this step if the BIOS has already negotiated
1093 * speeds with the targets.
1094 */
1095 i = mpt->mpt_port_page2.PortSettings &
1096 MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
1097 if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS) {
1098 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
1099 "honoring BIOS transfer negotiations\n");
1100 } else {
1101 for (i = 0; i < 16; i++) {
1102 mpt->mpt_dev_page1[i].RequestedParameters = 0;
1103 mpt->mpt_dev_page1[i].Configuration = 0;
1104 (void) mpt_update_spi_config(mpt, i);
1105 }
1106 }
1107 return (0);
1108}
1109
1110int
1111mpt_cam_enable(struct mpt_softc *mpt)
1112{
1113 int error;
1114
1115 MPT_LOCK(mpt);
1116
1117 error = EIO;
1118 if (mpt->is_fc) {
1119 if (mpt_read_config_info_fc(mpt)) {
1120 goto out;
1121 }
1122 if (mpt_set_initial_config_fc(mpt)) {
1123 goto out;
1124 }
1125 } else if (mpt->is_sas) {
1126 if (mpt_read_config_info_sas(mpt)) {
1127 goto out;
1128 }
1129 if (mpt_set_initial_config_sas(mpt)) {
1130 goto out;
1131 }
1132 } else if (mpt->is_spi) {
1133 if (mpt_read_config_info_spi(mpt)) {
1134 goto out;
1135 }
1136 if (mpt_set_initial_config_spi(mpt)) {
1137 goto out;
1138 }
1139 }
1140 error = 0;
1141
1142out:
1143 MPT_UNLOCK(mpt);
1144 return (error);
1145}
1146
1147void
1148mpt_cam_ready(struct mpt_softc *mpt)
1149{
1150 /*
1151 * If we're in target mode, hang out resources now
1152 * so we don't cause the world to hang talking to us.
1153 */
1154 if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
1155 /*
1156 * Try to add some target command resources
1157 */
1158 MPT_LOCK(mpt);
1159 if (mpt_add_target_commands(mpt) == FALSE) {
1160 mpt_prt(mpt, "failed to add target commands\n");
1161 }
1162 MPT_UNLOCK(mpt);
1163 }
1164 mpt->ready = 1;
1165}
1166
1167void
1168mpt_cam_detach(struct mpt_softc *mpt)
1169{
1170 mpt_handler_t handler;
1171
1172 MPT_LOCK(mpt);
1173 mpt->ready = 0;
1174 mpt_terminate_recovery_thread(mpt);
1175
1176 handler.reply_handler = mpt_scsi_reply_handler;
1177 mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
1178 scsi_io_handler_id);
1179 handler.reply_handler = mpt_scsi_tmf_reply_handler;
1180 mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
1181 scsi_tmf_handler_id);
1182 handler.reply_handler = mpt_fc_els_reply_handler;
1183 mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
1184 fc_els_handler_id);
1185 handler.reply_handler = mpt_scsi_tgt_reply_handler;
1186 mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
1187 mpt->scsi_tgt_handler_id);
1188 handler.reply_handler = mpt_sata_pass_reply_handler;
1189 mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
1190 sata_pass_handler_id);
1191
1192 if (mpt->tmf_req != NULL) {
1193 mpt->tmf_req->state = REQ_STATE_ALLOCATED;
1194 mpt_free_request(mpt, mpt->tmf_req);
1195 mpt->tmf_req = NULL;
1196 }
1197 if (mpt->sas_portinfo != NULL) {
1198 kfree(mpt->sas_portinfo, M_DEVBUF);
1199 mpt->sas_portinfo = NULL;
1200 }
1201
1202 if (mpt->sim != NULL) {
1203 xpt_free_path(mpt->path);
1204 xpt_bus_deregister(cam_sim_path(mpt->sim));
1205 cam_sim_free(mpt->sim);
1206 mpt->sim = NULL;
1207 }
1208
1209 if (mpt->phydisk_sim != NULL) {
1210 xpt_free_path(mpt->phydisk_path);
1211 xpt_bus_deregister(cam_sim_path(mpt->phydisk_sim));
1212 cam_sim_free(mpt->phydisk_sim);
1213 mpt->phydisk_sim = NULL;
1214 }
1215 MPT_UNLOCK(mpt);
1216}
1217
1218/* Polled-mode completion routine, used when interrupts are unavailable,
1219 * e.g. after a system crash while dumping core onto the swap device. */
1220static void
1221mpt_poll(struct cam_sim *sim)
1222{
1223 struct mpt_softc *mpt;
1224
1225 mpt = (struct mpt_softc *)cam_sim_softc(sim);
1226 mpt_intr(mpt);
1227}
1228
1229/*
1230 * Watchdog timeout routine for SCSI requests.
1231 */
1232static void
1233mpt_timeout(void *arg)
1234{
1235 union ccb *ccb;
1236 struct mpt_softc *mpt;
1237 request_t *req;
1238
1239 ccb = (union ccb *)arg;
1240 mpt = ccb->ccb_h.ccb_mpt_ptr;
1241
1242 MPT_LOCK(mpt);
1243 req = ccb->ccb_h.ccb_req_ptr;
1244 mpt_prt(mpt, "request %p:%u timed out for ccb %p (req->ccb %p)\n", req,
1245 req->serno, ccb, req->ccb);
1246/* XXX: WHAT ARE WE TRYING TO DO HERE? */
1247 if ((req->state & REQ_STATE_QUEUED) == REQ_STATE_QUEUED) {
1248 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
1249 TAILQ_INSERT_TAIL(&mpt->request_timeout_list, req, links);
1250 req->state |= REQ_STATE_TIMEDOUT;
1251 mpt_wakeup_recovery_thread(mpt);
1252 }
1253 MPT_UNLOCK(mpt);
1254}
1255
1256/*
1257 * Callback routine from "bus_dmamap_load" or, in simple cases, called directly.
1258 *
1259 * Takes a list of physical segments and builds the SGL for the SCSI IO command,
1260 * then forwards the command to the IOC after one last check that CAM has not
1261 * aborted the transaction.
1262 */
1263static void
1264mpt_execute_req_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1265{
1266 request_t *req, *trq;
1267 char *mpt_off;
1268 union ccb *ccb;
1269 struct mpt_softc *mpt;
1270 int seg, first_lim;
1271 uint32_t flags, nxt_off;
1272 void *sglp = NULL;
1273 MSG_REQUEST_HEADER *hdrp;
1274 SGE_SIMPLE64 *se;
1275 SGE_CHAIN64 *ce;
1276 int istgt = 0;
1277
1278 req = (request_t *)arg;
1279 ccb = req->ccb;
1280
1281 mpt = ccb->ccb_h.ccb_mpt_ptr;
1282 req = ccb->ccb_h.ccb_req_ptr;
1283
1284 hdrp = req->req_vbuf;
1285 mpt_off = req->req_vbuf;
1286
1287 if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
1288 error = EFBIG;
1289 }
1290
1291 if (error == 0) {
1292 switch (hdrp->Function) {
1293 case MPI_FUNCTION_SCSI_IO_REQUEST:
1294 case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
1295 istgt = 0;
1296 sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
1297 break;
1298 case MPI_FUNCTION_TARGET_ASSIST:
1299 istgt = 1;
1300 sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
1301 break;
1302 default:
1303 mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req_a64\n",
1304 hdrp->Function);
1305 error = EINVAL;
1306 break;
1307 }
1308 }
1309
1310 if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
1311 error = EFBIG;
1312 mpt_prt(mpt, "segment count %d too large (max %u)\n",
1313 nseg, mpt->max_seg_cnt);
1314 }
1315
1316bad:
1317 if (error != 0) {
1318 if (error != EFBIG && error != ENOMEM) {
1319 mpt_prt(mpt, "mpt_execute_req_a64: err %d\n", error);
1320 }
1321 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1322 cam_status status;
1323 mpt_freeze_ccb(ccb);
1324 if (error == EFBIG) {
1325 status = CAM_REQ_TOO_BIG;
1326 } else if (error == ENOMEM) {
1327 if (mpt->outofbeer == 0) {
1328 mpt->outofbeer = 1;
1329 xpt_freeze_simq(mpt->sim, 1);
1330 mpt_lprt(mpt, MPT_PRT_DEBUG,
1331 "FREEZEQ\n");
1332 }
1333 status = CAM_REQUEUE_REQ;
1334 } else {
1335 status = CAM_REQ_CMP_ERR;
1336 }
1337 mpt_set_ccb_status(ccb, status);
1338 }
1339 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1340 request_t *cmd_req =
1341 MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1342 MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1343 MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1344 MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1345 }
1346 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1347 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
1348 xpt_done(ccb);
1349 CAMLOCK_2_MPTLOCK(mpt);
1350 mpt_free_request(mpt, req);
1351 MPTLOCK_2_CAMLOCK(mpt);
1352 return;
1353 }
1354
1355 /*
1356 * No data to transfer?
1357 * Just make a single simple SGL with zero length.
1358 */
1359
1360 if (mpt->verbose >= MPT_PRT_DEBUG) {
1361 int tidx = ((char *)sglp) - mpt_off;
1362 memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
1363 }
1364
1365 if (nseg == 0) {
1366 SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
1367 MPI_pSGE_SET_FLAGS(se1,
1368 (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
1369 MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
1370 se1->FlagsLength = htole32(se1->FlagsLength);
1371 goto out;
1372 }
1373
1374
1375 flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_64_BIT_ADDRESSING;
1376 if (istgt == 0) {
1377 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1378 flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1379 }
1380 } else {
1381 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1382 flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1383 }
1384 }
1385
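	/*
	 * Note the target-mode inversion below: CAM's direction flags are
	 * from the initiator's point of view, so data the initiator will
	 * read is data we are about to supply (PREWRITE), and vice versa.
	 */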
1386 if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
1387 bus_dmasync_op_t op;
1388 if (istgt == 0) {
1389 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1390 op = BUS_DMASYNC_PREREAD;
1391 } else {
1392 op = BUS_DMASYNC_PREWRITE;
1393 }
1394 } else {
1395 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1396 op = BUS_DMASYNC_PREWRITE;
1397 } else {
1398 op = BUS_DMASYNC_PREREAD;
1399 }
1400 }
1401 bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
1402 }
1403
1404 /*
1405 * Okay, fill in what we can at the end of the command frame.
1406 * If we have up to MPT_NSGL_FIRST, we can fit them all into
1407 * the command frame.
1408 *
1409 * Otherwise, we fill up through MPT_NSGL_FIRST less one
1410 * SIMPLE64 pointers and start doing CHAIN64 entries after
1411 * that.
1412 */
1413
1414 if (nseg < MPT_NSGL_FIRST(mpt)) {
1415 first_lim = nseg;
1416 } else {
1417 /*
1418 * Leave room for CHAIN element
1419 */
1420 first_lim = MPT_NSGL_FIRST(mpt) - 1;
1421 }
1422
1423 se = (SGE_SIMPLE64 *) sglp;
1424 for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
1425 uint32_t tf;
1426
1427 memset(se, 0, sizeof (*se));
1428 se->Address.Low = htole32(dm_segs->ds_addr & 0xffffffff);
1429 if (sizeof(bus_addr_t) > 4) {
1430 se->Address.High =
1431 htole32(((uint64_t)dm_segs->ds_addr) >> 32);
1432 }
1433 MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1434 tf = flags;
1435 if (seg == first_lim - 1) {
1436 tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
1437 }
1438 if (seg == nseg - 1) {
1439 tf |= MPI_SGE_FLAGS_END_OF_LIST |
1440 MPI_SGE_FLAGS_END_OF_BUFFER;
1441 }
1442 MPI_pSGE_SET_FLAGS(se, tf);
1443 se->FlagsLength = htole32(se->FlagsLength);
1444 }
1445
1446 if (seg == nseg) {
1447 goto out;
1448 }
1449
1450 /*
1451 * Tell the IOC where to find the first chain element.
1452 */
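	/* ChainOffset is expressed in 32-bit words, hence the >> 2 below. */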
1453 hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
1454 nxt_off = MPT_RQSL(mpt);
1455 trq = req;
1456
1457 /*
1458 * Make up the rest of the data segments out of a chain element
1459 * (contained in the current request frame) which points to
1460 * SIMPLE64 elements in the next request frame, possibly ending
1461 * with *another* chain element (if there's more).
1462 */
1463 while (seg < nseg) {
1464 int this_seg_lim;
1465 uint32_t tf, cur_off;
1466 bus_addr_t chain_list_addr;
1467
1468 /*
1469 * Point to the chain descriptor. Note that the chain
1470 * descriptor is at the end of the *previous* list (whether
1471 * chain or simple).
1472 */
1473 ce = (SGE_CHAIN64 *) se;
1474
1475 /*
1476 * Before we change our current pointer, make sure we won't
1477 * overflow the request area with this frame. Note that we
1478 * test against 'greater than' here as it's okay in this case
1479 * to have next offset be just outside the request area.
1480 */
1481 if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
1482 nxt_off = MPT_REQUEST_AREA;
1483 goto next_chain;
1484 }
1485
1486 /*
1487 * Set our SGE element pointer to the beginning of the chain
1488 * list and update our next chain list offset.
1489 */
1490 se = (SGE_SIMPLE64 *) &mpt_off[nxt_off];
1491 cur_off = nxt_off;
1492 nxt_off += MPT_RQSL(mpt);
1493
1494 /*
1495 * Now initialize the chain descriptor.
1496 */
1497 memset(ce, 0, sizeof (*ce));
1498
1499 /*
1500 * Get the physical address of the chain list.
1501 */
1502 chain_list_addr = trq->req_pbuf;
1503 chain_list_addr += cur_off;
1504 if (sizeof (bus_addr_t) > 4) {
1505 ce->Address.High =
1506 htole32(((uint64_t)chain_list_addr) >> 32);
1507 }
1508 ce->Address.Low = htole32(chain_list_addr & 0xffffffff);
1509 ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT |
1510 MPI_SGE_FLAGS_64_BIT_ADDRESSING;
1511
1512 /*
1513 * If we have more than a frame's worth of segments left,
1514 * set up the chain list to have the last element be another
1515 * chain descriptor.
1516 */
1517 if ((nseg - seg) > MPT_NSGL(mpt)) {
1518 this_seg_lim = seg + MPT_NSGL(mpt) - 1;
1519 /*
1520 * The length of the chain is the length in bytes of the
1521 * number of segments plus the next chain element.
1522 *
1523 * The next chain descriptor offset is the length,
1524 * in words, of the number of segments.
1525 */
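			/*
			 * Worked example: SGE_SIMPLE64 and SGE_CHAIN64 are
			 * both 12 bytes, so if 8 simple elements fit in this
			 * chain list, Length first becomes 8 * 12 = 96,
			 * NextChainOffset becomes 96 >> 2 = 24 words, and
			 * Length is then bumped to 96 + 12 = 108 to cover
			 * the trailing chain element.
			 */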
1526 ce->Length = (this_seg_lim - seg) *
1527 sizeof (SGE_SIMPLE64);
1528 ce->NextChainOffset = ce->Length >> 2;
1529 ce->Length += sizeof (SGE_CHAIN64);
1530 } else {
1531 this_seg_lim = nseg;
1532 ce->Length = (this_seg_lim - seg) *
1533 sizeof (SGE_SIMPLE64);
1534 }
1535 ce->Length = htole16(ce->Length);
1536
1537 /*
1538 * Fill in the chain list SGE elements with our segment data.
1539 *
1540 * If we're the last element in this chain list, set the last
1541 * element flag. If we're the completely last element period,
1542 * set the end of list and end of buffer flags.
1543 */
1544 while (seg < this_seg_lim) {
1545 memset(se, 0, sizeof (*se));
1546 se->Address.Low = htole32(dm_segs->ds_addr &
1547 0xffffffff);
1548 if (sizeof (bus_addr_t) > 4) {
1549 se->Address.High =
1550 htole32(((uint64_t)dm_segs->ds_addr) >> 32);
1551 }
1552 MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1553 tf = flags;
1554 if (seg == this_seg_lim - 1) {
1555 tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
1556 }
1557 if (seg == nseg - 1) {
1558 tf |= MPI_SGE_FLAGS_END_OF_LIST |
1559 MPI_SGE_FLAGS_END_OF_BUFFER;
1560 }
1561 MPI_pSGE_SET_FLAGS(se, tf);
1562 se->FlagsLength = htole32(se->FlagsLength);
1563 se++;
1564 seg++;
1565 dm_segs++;
1566 }
1567
1568 next_chain:
1569 /*
1570 * If we have more segments to do and we've used up all of
1571 * the space in a request area, go allocate another one
1572 * and chain to that.
1573 */
1574 if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
1575 request_t *nrq;
1576
1577 CAMLOCK_2_MPTLOCK(mpt);
1578 nrq = mpt_get_request(mpt, FALSE);
1579 MPTLOCK_2_CAMLOCK(mpt);
1580
1581 if (nrq == NULL) {
1582 error = ENOMEM;
1583 goto bad;
1584 }
1585
1586 /*
1587 * Append the new request area on the tail of our list.
1588 */
1589 if ((trq = req->chain) == NULL) {
1590 req->chain = nrq;
1591 } else {
1592 while (trq->chain != NULL) {
1593 trq = trq->chain;
1594 }
1595 trq->chain = nrq;
1596 }
1597 trq = nrq;
1598 mpt_off = trq->req_vbuf;
1599 if (mpt->verbose >= MPT_PRT_DEBUG) {
1600 memset(mpt_off, 0xff, MPT_REQUEST_AREA);
1601 }
1602 nxt_off = 0;
1603 }
1604 }
1605out:
1606
1607 /*
1608 * Last time we need to check if this CCB needs to be aborted.
1609 */
1610 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
1611 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1612 request_t *cmd_req =
1613 MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1614 MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1615 MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1616 MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1617 }
1618 mpt_prt(mpt,
1619 "mpt_execute_req_a64: I/O cancelled (status 0x%x)\n",
1620 ccb->ccb_h.status & CAM_STATUS_MASK);
1621 if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
1622 bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
1623 }
1624 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1625 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
1626 xpt_done(ccb);
1627 CAMLOCK_2_MPTLOCK(mpt);
1628 mpt_free_request(mpt, req);
1629 MPTLOCK_2_CAMLOCK(mpt);
1630 return;
1631 }
1632
1633 ccb->ccb_h.status |= CAM_SIM_QUEUED;
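	/* CAM timeouts are in milliseconds; convert to ticks for the callout. */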
1634 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
1635 mpt_req_timeout(req, (ccb->ccb_h.timeout * hz) / 1000,
1636 mpt_timeout, ccb);
1637 }
1638 if (mpt->verbose > MPT_PRT_DEBUG) {
1639 int nc = 0;
1640 mpt_print_request(req->req_vbuf);
1641 for (trq = req->chain; trq; trq = trq->chain) {
1642 kprintf(" Additional Chain Area %d\n", nc++);
1643 mpt_dump_sgl(trq->req_vbuf, 0);
1644 }
1645 }
1646
1647 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1648 request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1649 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
1650#ifdef WE_TRUST_AUTO_GOOD_STATUS
1651 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
1652 csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
1653 tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
1654 } else {
1655 tgt->state = TGT_STATE_MOVING_DATA;
1656 }
1657#else
1658 tgt->state = TGT_STATE_MOVING_DATA;
1659#endif
1660 }
1661 CAMLOCK_2_MPTLOCK(mpt);
1662 mpt_send_cmd(mpt, req);
1663 MPTLOCK_2_CAMLOCK(mpt);
1664}
1665
1666static void
1667mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1668{
1669 request_t *req, *trq;
1670 char *mpt_off;
1671 union ccb *ccb;
1672 struct mpt_softc *mpt;
1673 int seg, first_lim;
1674 uint32_t flags, nxt_off;
1675 void *sglp = NULL;
1676 MSG_REQUEST_HEADER *hdrp;
1677 SGE_SIMPLE32 *se;
1678 SGE_CHAIN32 *ce;
1679 int istgt = 0;
1680
1681 req = (request_t *)arg;
1682 ccb = req->ccb;
1683
1684 mpt = ccb->ccb_h.ccb_mpt_ptr;
1685 req = ccb->ccb_h.ccb_req_ptr;
1686
1687 hdrp = req->req_vbuf;
1688 mpt_off = req->req_vbuf;
1689
1690
1691 if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
1692 error = EFBIG;
1693 }
1694
1695 if (error == 0) {
1696 switch (hdrp->Function) {
1697 case MPI_FUNCTION_SCSI_IO_REQUEST:
1698 case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
1699 sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
1700 break;
1701 case MPI_FUNCTION_TARGET_ASSIST:
1702 istgt = 1;
1703 sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
1704 break;
1705 default:
1706 mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req\n",
1707 hdrp->Function);
1708 error = EINVAL;
1709 break;
1710 }
1711 }
1712
1713 if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
1714 error = EFBIG;
1715 mpt_prt(mpt, "segment count %d too large (max %u)\n",
1716 nseg, mpt->max_seg_cnt);
1717 }
1718
1719bad:
1720 if (error != 0) {
1721 if (error != EFBIG && error != ENOMEM) {
1722 mpt_prt(mpt, "mpt_execute_req: err %d\n", error);
1723 }
1724 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1725 cam_status status;
1726 mpt_freeze_ccb(ccb);
1727 if (error == EFBIG) {
1728 status = CAM_REQ_TOO_BIG;
1729 } else if (error == ENOMEM) {
1730 if (mpt->outofbeer == 0) {
1731 mpt->outofbeer = 1;
1732 xpt_freeze_simq(mpt->sim, 1);
1733 mpt_lprt(mpt, MPT_PRT_DEBUG,
1734 "FREEZEQ\n");
1735 }
1736 status = CAM_REQUEUE_REQ;
1737 } else {
1738 status = CAM_REQ_CMP_ERR;
1739 }
1740 mpt_set_ccb_status(ccb, status);
1741 }
1742 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1743 request_t *cmd_req =
1744 MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1745 MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1746 MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1747 MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1748 }
1749 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1750 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
1751 xpt_done(ccb);
1752 CAMLOCK_2_MPTLOCK(mpt);
1753 mpt_free_request(mpt, req);
1754 MPTLOCK_2_CAMLOCK(mpt);
1755 return;
1756 }
1757
1758 /*
1759 * No data to transfer?
1760 * Just make a single simple SGL with zero length.
1761 */
1762
1763 if (mpt->verbose >= MPT_PRT_DEBUG) {
1764 int tidx = ((char *)sglp) - mpt_off;
1765 memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
1766 }
1767
1768 if (nseg == 0) {
1769 SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
1770 MPI_pSGE_SET_FLAGS(se1,
1771 (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
1772 MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
1773 se1->FlagsLength = htole32(se1->FlagsLength);
1774 goto out;
1775 }
1776
1777
1778 flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
1779 if (istgt == 0) {
1780 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1781 flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1782 }
1783 } else {
1784 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1785 flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1786 }
1787 }
1788
1789 if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
1790 bus_dmasync_op_t op;
1791 if (istgt) {
1792 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1793 op = BUS_DMASYNC_PREREAD;
1794 } else {
1795 op = BUS_DMASYNC_PREWRITE;
1796 }
1797 } else {
1798 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1799 op = BUS_DMASYNC_PREWRITE;
1800 } else {
1801 op = BUS_DMASYNC_PREREAD;
1802 }
1803 }
1804 bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
1805 }
1806
1807 /*
1808 * Okay, fill in what we can at the end of the command frame.
1809 * If we have up to MPT_NSGL_FIRST, we can fit them all into
1810 * the command frame.
1811 *
1812 * Otherwise, we fill up through MPT_NSGL_FIRST less one
1813 * SIMPLE32 pointers and start doing CHAIN32 entries after
1814 * that.
1815 */
1816
1817 if (nseg < MPT_NSGL_FIRST(mpt)) {
1818 first_lim = nseg;
1819 } else {
1820 /*
1821 * Leave room for CHAIN element
1822 */
1823 first_lim = MPT_NSGL_FIRST(mpt) - 1;
1824 }
1825
1826 se = (SGE_SIMPLE32 *) sglp;
1827 for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
1828 uint32_t tf;
1829
1830 memset(se, 0,sizeof (*se));
1831 se->Address = htole32(dm_segs->ds_addr);
1832
1833
1834
1835 MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1836 tf = flags;
1837 if (seg == first_lim - 1) {
1838 tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
1839 }
1840 if (seg == nseg - 1) {
1841 tf |= MPI_SGE_FLAGS_END_OF_LIST |
1842 MPI_SGE_FLAGS_END_OF_BUFFER;
1843 }
1844 MPI_pSGE_SET_FLAGS(se, tf);
1845 se->FlagsLength = htole32(se->FlagsLength);
1846 }
1847
1848 if (seg == nseg) {
1849 goto out;
1850 }
1851
1852 /*
1853 * Tell the IOC where to find the first chain element.
1854 */
1855 hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
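 /* (The ChainOffset is counted in 32-bit words, hence the >> 2.) */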
1856 nxt_off = MPT_RQSL(mpt);
1857 trq = req;
1858
1859 /*
1860 * Make up the rest of the data segments out of a chain element
 1861 * (contained in the current request frame) which points to
1862 * SIMPLE32 elements in the next request frame, possibly ending
1863 * with *another* chain element (if there's more).
1864 */
1865 while (seg < nseg) {
1866 int this_seg_lim;
1867 uint32_t tf, cur_off;
1868 bus_addr_t chain_list_addr;
1869
1870 /*
1871 * Point to the chain descriptor. Note that the chain
1872 * descriptor is at the end of the *previous* list (whether
1873 * chain or simple).
1874 */
1875 ce = (SGE_CHAIN32 *) se;
1876
1877 /*
1878 * Before we change our current pointer, make sure we won't
1879 * overflow the request area with this frame. Note that we
1880 * test against 'greater than' here as it's okay in this case
1881 * to have next offset be just outside the request area.
1882 */
1883 if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
1884 nxt_off = MPT_REQUEST_AREA;
1885 goto next_chain;
1886 }
1887
1888 /*
1889 * Set our SGE element pointer to the beginning of the chain
1890 * list and update our next chain list offset.
1891 */
1892 se = (SGE_SIMPLE32 *) &mpt_off[nxt_off];
1893 cur_off = nxt_off;
1894 nxt_off += MPT_RQSL(mpt);
1895
1896 /*
 1897 * Now initialize the chain descriptor.
1898 */
1899 memset(ce, 0, sizeof (*ce));
1900
1901 /*
1902 * Get the physical address of the chain list.
1903 */
1904 chain_list_addr = trq->req_pbuf;
1905 chain_list_addr += cur_off;
1906
1907
1908
1909 ce->Address = htole32(chain_list_addr);
1910 ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
1911
1912
1913 /*
1914 * If we have more than a frame's worth of segments left,
1915 * set up the chain list to have the last element be another
1916 * chain descriptor.
1917 */
1918 if ((nseg - seg) > MPT_NSGL(mpt)) {
1919 this_seg_lim = seg + MPT_NSGL(mpt) - 1;
1920 /*
 1921 * The length of the chain is the size, in bytes, of the
 1922 * segments it maps plus the trailing chain element.
 1923 *
 1924 * The next chain descriptor offset is that segment size
 1925 * expressed in 32-bit words.
1926 */
1927 ce->Length = (this_seg_lim - seg) *
1928 sizeof (SGE_SIMPLE32);
1929 ce->NextChainOffset = ce->Length >> 2;
1930 ce->Length += sizeof (SGE_CHAIN32);
1931 } else {
1932 this_seg_lim = nseg;
1933 ce->Length = (this_seg_lim - seg) *
1934 sizeof (SGE_SIMPLE32);
1935 }
1936 ce->Length = htole16(ce->Length);
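 /*
 * Worked example for the branch above (a sketch, assuming the 8-byte
 * SGE_SIMPLE32 layout): chaining ten segments with another chain
 * element to follow gives Length = 10 * 8 + sizeof (SGE_CHAIN32)
 * bytes and NextChainOffset = (10 * 8) >> 2 = 20 32-bit words.
 */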
1937
1938 /*
1939 * Fill in the chain list SGE elements with our segment data.
1940 *
1941 * If we're the last element in this chain list, set the last
1942 * element flag. If we're the completely last element period,
1943 * set the end of list and end of buffer flags.
1944 */
1945 while (seg < this_seg_lim) {
1946 memset(se, 0, sizeof (*se));
1947 se->Address = htole32(dm_segs->ds_addr);
1948
1949
1950
1951
1952 MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1953 tf = flags;
1954 if (seg == this_seg_lim - 1) {
1955 tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
1956 }
1957 if (seg == nseg - 1) {
1958 tf |= MPI_SGE_FLAGS_END_OF_LIST |
1959 MPI_SGE_FLAGS_END_OF_BUFFER;
1960 }
1961 MPI_pSGE_SET_FLAGS(se, tf);
1962 se->FlagsLength = htole32(se->FlagsLength);
1963 se++;
1964 seg++;
1965 dm_segs++;
1966 }
1967
1968 next_chain:
1969 /*
1970 * If we have more segments to do and we've used up all of
1971 * the space in a request area, go allocate another one
1972 * and chain to that.
1973 */
1974 if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
1975 request_t *nrq;
1976
1977 CAMLOCK_2_MPTLOCK(mpt);
1978 nrq = mpt_get_request(mpt, FALSE);
1979 MPTLOCK_2_CAMLOCK(mpt);
1980
1981 if (nrq == NULL) {
1982 error = ENOMEM;
1983 goto bad;
1984 }
1985
1986 /*
1987 * Append the new request area on the tail of our list.
1988 */
1989 if ((trq = req->chain) == NULL) {
1990 req->chain = nrq;
1991 } else {
1992 while (trq->chain != NULL) {
1993 trq = trq->chain;
1994 }
1995 trq->chain = nrq;
1996 }
1997 trq = nrq;
1998 mpt_off = trq->req_vbuf;
1999 if (mpt->verbose >= MPT_PRT_DEBUG) {
2000 memset(mpt_off, 0xff, MPT_REQUEST_AREA);
2001 }
2002 nxt_off = 0;
2003 }
2004 }
 2005 out:
2006
2007 /*
2008 * Last time we need to check if this CCB needs to be aborted.
2009 */
2010 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
2011 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
2012 request_t *cmd_req =
2013 MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
2014 MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
2015 MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
2016 MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
2017 }
2018 mpt_prt(mpt,
2019 "mpt_execute_req: I/O cancelled (status 0x%x)\n",
2020 ccb->ccb_h.status & CAM_STATUS_MASK);
2021 if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
2022 bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
2023 }
2024 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2025 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
2026 xpt_done(ccb);
2027 CAMLOCK_2_MPTLOCK(mpt);
2028 mpt_free_request(mpt, req);
2029 MPTLOCK_2_CAMLOCK(mpt);
2030 return;
2031 }
2032
2033 ccb->ccb_h.status |= CAM_SIM_QUEUED;
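 /* CCB timeouts are given in milliseconds; convert to system ticks. */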
2034 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
2035 mpt_req_timeout(req, (ccb->ccb_h.timeout * hz) / 1000,
2036 mpt_timeout, ccb);
2037 }
2038 if (mpt->verbose > MPT_PRT_DEBUG) {
2039 int nc = 0;
2040 mpt_print_request(req->req_vbuf);
2041 for (trq = req->chain; trq; trq = trq->chain) {
2042 kprintf(" Additional Chain Area %d\n", nc++);
2043 mpt_dump_sgl(trq->req_vbuf, 0);
2044 }
2045 }
2046
2047 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
2048 request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
2049 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
2050#ifdef WE_TRUST_AUTO_GOOD_STATUS
2051 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
2052 csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
2053 tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
2054 } else {
2055 tgt->state = TGT_STATE_MOVING_DATA;
2056 }
2057#else
2058 tgt->state = TGT_STATE_MOVING_DATA;
2059#endif
2060 }
2061 CAMLOCK_2_MPTLOCK(mpt);
2062 mpt_send_cmd(mpt, req);
2063 MPTLOCK_2_CAMLOCK(mpt);
2064}
2065
2066static void
2067mpt_start(struct cam_sim *sim, union ccb *ccb)
2068{
2069 request_t *req;
2070 struct mpt_softc *mpt;
2071 MSG_SCSI_IO_REQUEST *mpt_req;
2072 struct ccb_scsiio *csio = &ccb->csio;
2073 struct ccb_hdr *ccbh = &ccb->ccb_h;
2074 bus_dmamap_callback_t *cb;
2075 target_id_t tgt;
2076 int raid_passthru;
2077
 2078 /* Get the pointer for the physical adapter */
2079 mpt = ccb->ccb_h.ccb_mpt_ptr;
2080 raid_passthru = (sim == mpt->phydisk_sim);
2081
2082 CAMLOCK_2_MPTLOCK(mpt);
2083 if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
2084 if (mpt->outofbeer == 0) {
2085 mpt->outofbeer = 1;
2086 xpt_freeze_simq(mpt->sim, 1);
2087 mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
2088 }
2089 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2090 mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
2091 MPTLOCK_2_CAMLOCK(mpt);
2092 xpt_done(ccb);
2093 return;
2094 }
2095#ifdef INVARIANTS
2096 mpt_req_not_spcl(mpt, req, "mpt_start", __LINE__);
2097#endif
2098 MPTLOCK_2_CAMLOCK(mpt);
2099
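 /* Pick the SGL construction callback that matches the bus address width. */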
2100 if (sizeof (bus_addr_t) > 4) {
2101 cb = mpt_execute_req_a64;
2102 } else {
2103 cb = mpt_execute_req;
2104 }
2105
2106 /*
2107 * Link the ccb and the request structure so we can find
 2108 * the other knowing either the request or the ccb.
2109 */
2110 req->ccb = ccb;
2111 ccb->ccb_h.ccb_req_ptr = req;
2112
2113 /* Now we build the command for the IOC */
2114 mpt_req = req->req_vbuf;
2115 memset(mpt_req, 0, sizeof (MSG_SCSI_IO_REQUEST));
2116
2117 mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
2118 if (raid_passthru) {
2119 mpt_req->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
2120 CAMLOCK_2_MPTLOCK(mpt);
2121 if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
2122 MPTLOCK_2_CAMLOCK(mpt);
2123 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2124 mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
2125 xpt_done(ccb);
2126 return;
2127 }
2128 MPTLOCK_2_CAMLOCK(mpt);
2129 mpt_req->Bus = 0; /* we never set bus here */
2130 } else {
2131 tgt = ccb->ccb_h.target_id;
2132 mpt_req->Bus = 0; /* XXX */
2133
2134 }
2135 mpt_req->SenseBufferLength =
2136 (csio->sense_len < MPT_SENSE_SIZE) ?
2137 csio->sense_len : MPT_SENSE_SIZE;
2138
2139 /*
2140 * We use the message context to find the request structure when we
 2141 * get the command completion interrupt from the IOC.
2142 */
2143 mpt_req->MsgContext = htole32(req->index | scsi_io_handler_id);
2144
2145 /* Which physical device to do the I/O on */
2146 mpt_req->TargetID = tgt;
2147
2148 /* We assume a single level LUN type */
2149 if (ccb->ccb_h.target_lun >= MPT_MAX_LUNS) {
2150 mpt_req->LUN[0] = 0x40 | ((ccb->ccb_h.target_lun >> 8) & 0x3f);
2151 mpt_req->LUN[1] = ccb->ccb_h.target_lun & 0xff;
2152 } else {
2153 mpt_req->LUN[1] = ccb->ccb_h.target_lun;
2154 }
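 /*
 * Example of the flat-space encoding above (hypothetical LUN 0x123):
 * LUN[0] = 0x40 | 0x01 = 0x41 and LUN[1] = 0x23.
 */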
2155
2156 /* Set the direction of the transfer */
2157 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2158 mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
2159 } else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
2160 mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
2161 } else {
2162 mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;
2163 }
2164
2165 if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
2166 switch(ccb->csio.tag_action) {
2167 case MSG_HEAD_OF_Q_TAG:
2168 mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
2169 break;
2170 case MSG_ACA_TASK:
2171 mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
2172 break;
2173 case MSG_ORDERED_Q_TAG:
2174 mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
2175 break;
2176 case MSG_SIMPLE_Q_TAG:
2177 default:
2178 mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
2179 break;
2180 }
2181 } else {
2182 if (mpt->is_fc || mpt->is_sas) {
2183 mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
2184 } else {
2185 /* XXX No such thing for a target doing packetized. */
2186 mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
2187 }
2188 }
2189
2190 if (mpt->is_spi) {
2191 if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
2192 mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;
2193 }
2194 }
2195 mpt_req->Control = htole32(mpt_req->Control);
2196
2197 /* Copy the scsi command block into place */
2198 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
2199 bcopy(csio->cdb_io.cdb_ptr, mpt_req->CDB, csio->cdb_len);
2200 } else {
2201 bcopy(csio->cdb_io.cdb_bytes, mpt_req->CDB, csio->cdb_len);
2202 }
2203
2204 mpt_req->CDBLength = csio->cdb_len;
2205 mpt_req->DataLength = htole32(csio->dxfer_len);
2206 mpt_req->SenseBufferLowAddr = htole32(req->sense_pbuf);
2207
2208 /*
2209 * Do a *short* print here if we're set to MPT_PRT_DEBUG
2210 */
2211 if (mpt->verbose == MPT_PRT_DEBUG) {
2212 U32 df;
2213 mpt_prt(mpt, "mpt_start: %s op 0x%x ",
2214 (mpt_req->Function == MPI_FUNCTION_SCSI_IO_REQUEST)?
2215 "SCSI_IO_REQUEST" : "SCSI_IO_PASSTHRU", mpt_req->CDB[0]);
2216 df = mpt_req->Control & MPI_SCSIIO_CONTROL_DATADIRECTION_MASK;
2217 if (df != MPI_SCSIIO_CONTROL_NODATATRANSFER) {
2218 mpt_prtc(mpt, "(%s %u byte%s ",
2219 (df == MPI_SCSIIO_CONTROL_READ)?
2220 "read" : "write", csio->dxfer_len,
2221 (csio->dxfer_len == 1)? ")" : "s)");
2222 }
2223 mpt_prtc(mpt, "tgt %u lun %u req %p:%u\n", tgt,
2224 ccb->ccb_h.target_lun, req, req->serno);
2225 }
2226
2227 /*
 2228 * If we have any data to send with this command, map it into bus space.
2229 */
2230 if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
2231 if ((ccbh->flags & CAM_SCATTER_VALID) == 0) {
2232 /*
2233 * We've been given a pointer to a single buffer.
2234 */
2235 if ((ccbh->flags & CAM_DATA_PHYS) == 0) {
2236 /*
 2237 * Virtual address that needs to be translated into
2238 * one or more physical address ranges.
2239 */
2240 int error;
 2241 crit_enter();
 2242 error = bus_dmamap_load(mpt->buffer_dmat,
 2243 req->dmap, csio->data_ptr, csio->dxfer_len,
 2244 cb, req, 0);
 2245 crit_exit();
2246 if (error == EINPROGRESS) {
2247 /*
2248 * So as to maintain ordering,
2249 * freeze the controller queue
2250 * until our mapping is
2251 * returned.
2252 */
2253 xpt_freeze_simq(mpt->sim, 1);
2254 ccbh->status |= CAM_RELEASE_SIMQ;
2255 }
2256 } else {
2257 /*
 2258 * We have been given a pointer to a single
2259 * physical buffer.
2260 */
2261 struct bus_dma_segment seg;
2262 seg.ds_addr =
2263 (bus_addr_t)(vm_offset_t)csio->data_ptr;
2264 seg.ds_len = csio->dxfer_len;
2265 (*cb)(req, &seg, 1, 0);
2266 }
2267 } else {
2268 /*
2269 * We have been given a list of addresses.
 2270 * This case could easily be supported, but such lists are
 2271 * not currently generated by the CAM subsystem, so there
 2272 * is no point in spending the time on it right now.
2273 */
2274 struct bus_dma_segment *segs;
2275 if ((ccbh->flags & CAM_SG_LIST_PHYS) == 0) {
2276 (*cb)(req, NULL, 0, EFAULT);
2277 } else {
2278 /* Just use the segments provided */
2279 segs = (struct bus_dma_segment *)csio->data_ptr;
2280 (*cb)(req, segs, csio->sglist_cnt, 0);
2281 }
2282 }
2283 } else {
2284 (*cb)(req, NULL, 0, 0);
2285 }
2286}
2287
2288static int
2289mpt_bus_reset(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun,
2290 int sleep_ok)
2291{
2292 int error;
2293 uint16_t status;
2294 uint8_t response;
2295
2296 error = mpt_scsi_send_tmf(mpt,
2297 (tgt != CAM_TARGET_WILDCARD || lun != CAM_LUN_WILDCARD) ?
2298 MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET :
2299 MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
2300 mpt->is_fc ? MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION : 0,
2301 0, /* XXX How do I get the channel ID? */
2302 tgt != CAM_TARGET_WILDCARD ? tgt : 0,
2303 lun != CAM_LUN_WILDCARD ? lun : 0,
2304 0, sleep_ok);
2305
2306 if (error != 0) {
2307 /*
2308 * mpt_scsi_send_tmf hard resets on failure, so no
2309 * need to do so here.
2310 */
2311 mpt_prt(mpt,
2312 "mpt_bus_reset: mpt_scsi_send_tmf returned %d\n", error);
2313 return (EIO);
2314 }
2315
2316 /* Wait for bus reset to be processed by the IOC. */
2317 error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
2318 REQ_STATE_DONE, sleep_ok, 5000);
2319
2320 status = le16toh(mpt->tmf_req->IOCStatus);
2321 response = mpt->tmf_req->ResponseCode;
2322 mpt->tmf_req->state = REQ_STATE_FREE;
2323
2324 if (error) {
2325 mpt_prt(mpt, "mpt_bus_reset: Reset timed-out. "
2326 "Resetting controller.\n");
2327 mpt_reset(mpt, TRUE);
2328 return (ETIMEDOUT);
2329 }
2330
2331 if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
2332 mpt_prt(mpt, "mpt_bus_reset: TMF IOC Status 0x%x. "
2333 "Resetting controller.\n", status);
2334 mpt_reset(mpt, TRUE);
2335 return (EIO);
2336 }
2337
2338 if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
2339 response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
2340 mpt_prt(mpt, "mpt_bus_reset: TMF Response 0x%x. "
2341 "Resetting controller.\n", response);
2342 mpt_reset(mpt, TRUE);
2343 return (EIO);
2344 }
2345 return (0);
2346}
2347
2348static int
2349mpt_fc_reset_link(struct mpt_softc *mpt, int dowait)
2350{
2351 int r = 0;
2352 request_t *req;
2353 PTR_MSG_FC_PRIMITIVE_SEND_REQUEST fc;
2354
2355 req = mpt_get_request(mpt, FALSE);
2356 if (req == NULL) {
2357 return (ENOMEM);
2358 }
2359 fc = req->req_vbuf;
2360 memset(fc, 0, sizeof(*fc));
2361 fc->SendFlags = MPI_FC_PRIM_SEND_FLAGS_RESET_LINK;
2362 fc->Function = MPI_FUNCTION_FC_PRIMITIVE_SEND;
2363 fc->MsgContext = htole32(req->index | fc_els_handler_id);
2364 mpt_send_cmd(mpt, req);
2365 if (dowait) {
2366 r = mpt_wait_req(mpt, req, REQ_STATE_DONE,
2367 REQ_STATE_DONE, FALSE, 60 * 1000);
2368 if (r == 0) {
2369 mpt_free_request(mpt, req);
2370 }
2371 }
2372 return (r);
2373}
2374
2375static void
2376mpt_cam_rescan_callback(struct cam_periph *periph, union ccb *ccb)
2377{
2378 xpt_free_path(ccb->ccb_h.path);
2379 kfree(ccb, M_TEMP);
2380}
2381
2382static int
2383mpt_cam_event(struct mpt_softc *mpt, request_t *req,
2384 MSG_EVENT_NOTIFY_REPLY *msg)
2385{
2386 uint32_t data0, data1;
2387
2388 data0 = le32toh(msg->Data[0]);
2389 data1 = le32toh(msg->Data[1]);
2390 switch(msg->Event & 0xFF) {
2391 case MPI_EVENT_UNIT_ATTENTION:
2392 mpt_prt(mpt, "UNIT ATTENTION: Bus: 0x%02x TargetID: 0x%02x\n",
2393 (data0 >> 8) & 0xff, data0 & 0xff);
2394 break;
2395
2396 case MPI_EVENT_IOC_BUS_RESET:
2397 /* We generated a bus reset */
2398 mpt_prt(mpt, "IOC Generated Bus Reset Port: %d\n",
2399 (data0 >> 8) & 0xff);
2400 xpt_async(AC_BUS_RESET, mpt->path, NULL);
2401 break;
2402
2403 case MPI_EVENT_EXT_BUS_RESET:
2404 /* Someone else generated a bus reset */
2405 mpt_prt(mpt, "External Bus Reset Detected\n");
2406 /*
2407 * These replies don't return EventData like the MPI
 2408 * spec says they do.
2409 */
2410 xpt_async(AC_BUS_RESET, mpt->path, NULL);
2411 break;
2412
2413 case MPI_EVENT_RESCAN:
2414 {
2415 union ccb *ccb;
2416 uint32_t pathid;
2417 /*
2418 * In general this means a device has been added to the loop.
2419 */
2420 mpt_prt(mpt, "Rescan Port: %d\n", (data0 >> 8) & 0xff);
2421 if (mpt->ready == 0) {
2422 break;
2423 }
2424 if (mpt->phydisk_sim) {
2425 pathid = cam_sim_path(mpt->phydisk_sim);
2426 } else {
2427 pathid = cam_sim_path(mpt->sim);
2428 }
2429 MPTLOCK_2_CAMLOCK(mpt);
2430 /*
2431 * Allocate a CCB, create a wildcard path for this bus,
2432 * and schedule a rescan.
2433 */
 2434 ccb = kmalloc(sizeof(union ccb), M_TEMP, M_WAITOK | M_ZERO);
2435
2436 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, pathid,
2437 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2438 CAMLOCK_2_MPTLOCK(mpt);
2439 mpt_prt(mpt, "unable to create path for rescan\n");
 2440 kfree(ccb, M_TEMP);
2441 break;
2442 }
2443
2444 xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, 5/*priority (low)*/);
2445 ccb->ccb_h.func_code = XPT_SCAN_BUS;
2446 ccb->ccb_h.cbfcnp = mpt_cam_rescan_callback;
2447 ccb->crcn.flags = CAM_FLAG_NONE;
2448 xpt_action(ccb);
2449
2450 /* scan is now in progress */
2451
2452 CAMLOCK_2_MPTLOCK(mpt);
2453 break;
2454 }
2455 case MPI_EVENT_LINK_STATUS_CHANGE:
2456 mpt_prt(mpt, "Port %d: LinkState: %s\n",
2457 (data1 >> 8) & 0xff,
2458 ((data0 & 0xff) == 0)? "Failed" : "Active");
2459 break;
2460
2461 case MPI_EVENT_LOOP_STATE_CHANGE:
2462 switch ((data0 >> 16) & 0xff) {
2463 case 0x01:
2464 mpt_prt(mpt,
2465 "Port 0x%x: FC LinkEvent: LIP(%02x,%02x) "
2466 "(Loop Initialization)\n",
2467 (data1 >> 8) & 0xff,
2468 (data0 >> 8) & 0xff,
2469 (data0 ) & 0xff);
2470 switch ((data0 >> 8) & 0xff) {
2471 case 0xF7:
2472 if ((data0 & 0xff) == 0xF7) {
2473 mpt_prt(mpt, "Device needs AL_PA\n");
2474 } else {
2475 mpt_prt(mpt, "Device %02x doesn't like "
2476 "FC performance\n",
2477 data0 & 0xFF);
2478 }
2479 break;
2480 case 0xF8:
2481 if ((data0 & 0xff) == 0xF7) {
2482 mpt_prt(mpt, "Device had loop failure "
2483 "at its receiver prior to acquiring"
2484 " AL_PA\n");
2485 } else {
2486 mpt_prt(mpt, "Device %02x detected loop"
2487 " failure at its receiver\n",
2488 data0 & 0xFF);
2489 }
2490 break;
2491 default:
2492 mpt_prt(mpt, "Device %02x requests that device "
2493 "%02x reset itself\n",
2494 data0 & 0xFF,
2495 (data0 >> 8) & 0xFF);
2496 break;
2497 }
2498 break;
2499 case 0x02:
2500 mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
2501 "LPE(%02x,%02x) (Loop Port Enable)\n",
2502 (data1 >> 8) & 0xff, /* Port */
2503 (data0 >> 8) & 0xff, /* Character 3 */
2504 (data0 ) & 0xff /* Character 4 */);
2505 break;
2506 case 0x03:
2507 mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
2508 "LPB(%02x,%02x) (Loop Port Bypass)\n",
2509 (data1 >> 8) & 0xff, /* Port */
2510 (data0 >> 8) & 0xff, /* Character 3 */
2511 (data0 ) & 0xff /* Character 4 */);
2512 break;
2513 default:
2514 mpt_prt(mpt, "Port 0x%x: FC LinkEvent: Unknown "
2515 "FC event (%02x %02x %02x)\n",
2516 (data1 >> 8) & 0xff, /* Port */
2517 (data0 >> 16) & 0xff, /* Event */
2518 (data0 >> 8) & 0xff, /* Character 3 */
2519 (data0 ) & 0xff /* Character 4 */);
2520 }
2521 break;
2522
2523 case MPI_EVENT_LOGOUT:
2524 mpt_prt(mpt, "FC Logout Port: %d N_PortID: %02x\n",
2525 (data1 >> 8) & 0xff, data0);
2526 break;
2527 case MPI_EVENT_QUEUE_FULL:
2528 {
2529 struct cam_sim *sim;
2530 struct cam_path *tmppath;
2531 struct ccb_relsim crs;
2532 PTR_EVENT_DATA_QUEUE_FULL pqf;
2533 lun_id_t lun_id;
2534
2535 pqf = (PTR_EVENT_DATA_QUEUE_FULL)msg->Data;
2536 pqf->CurrentDepth = le16toh(pqf->CurrentDepth);
2537 mpt_prt(mpt, "QUEUE FULL EVENT: Bus 0x%02x Target 0x%02x Depth "
2538 "%d\n", pqf->Bus, pqf->TargetID, pqf->CurrentDepth);
2539 if (mpt->phydisk_sim) {
2540 sim = mpt->phydisk_sim;
2541 } else {
2542 sim = mpt->sim;
2543 }
2544 MPTLOCK_2_CAMLOCK(mpt);
2545 for (lun_id = 0; lun_id < MPT_MAX_LUNS; lun_id++) {
2546 if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim),
2547 pqf->TargetID, lun_id) != CAM_REQ_CMP) {
2548 mpt_prt(mpt, "unable to create a path to send "
2549 "XPT_REL_SIMQ");
2550 CAMLOCK_2_MPTLOCK(mpt);
2551 break;
2552 }
2553 xpt_setup_ccb(&crs.ccb_h, tmppath, 5);
2554 crs.ccb_h.func_code = XPT_REL_SIMQ;
 2555 crs.ccb_h.flags = CAM_DEV_QFREEZE;
2556 crs.release_flags = RELSIM_ADJUST_OPENINGS;
2557 crs.openings = pqf->CurrentDepth - 1;
2558 xpt_action((union ccb *)&crs);
2559 if (crs.ccb_h.status != CAM_REQ_CMP) {
2560 mpt_prt(mpt, "XPT_REL_SIMQ failed\n");
2561 }
2562 xpt_free_path(tmppath);
2563 }
2564 CAMLOCK_2_MPTLOCK(mpt);
2565 break;
2566 }
2567 case MPI_EVENT_IR_RESYNC_UPDATE:
2568 mpt_prt(mpt, "IR resync update %d completed\n",
2569 (data0 >> 16) & 0xff);
2570 break;
2571 case MPI_EVENT_EVENT_CHANGE:
2572 case MPI_EVENT_INTEGRATED_RAID:
2573 case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
2574 case MPI_EVENT_SAS_SES:
2575 break;
2576 default:
2577 mpt_lprt(mpt, MPT_PRT_WARN, "mpt_cam_event: 0x%x\n",
 2578 msg->Event & 0xFF);
2579 return (0);
2580 }
2581 return (1);
2582}
2583
2584/*
2585 * Reply path for all SCSI I/O requests, called from our
2586 * interrupt handler by extracting our handler index from
2587 * the MsgContext field of the reply from the IOC.
2588 *
2589 * This routine is optimized for the common case of a
2590 * completion without error. All exception handling is
2591 * offloaded to non-inlined helper routines to minimize
2592 * cache footprint.
2593 */
2594static int
2595mpt_scsi_reply_handler(struct mpt_softc *mpt, request_t *req,
2596 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2597{
2598 MSG_SCSI_IO_REQUEST *scsi_req;
2599 union ccb *ccb;
2600
2601 if (req->state == REQ_STATE_FREE) {
2602 mpt_prt(mpt, "mpt_scsi_reply_handler: req already free\n");
2603 return (TRUE);
2604 }
2605
2606 scsi_req = (MSG_SCSI_IO_REQUEST *)req->req_vbuf;
2607 ccb = req->ccb;
2608 if (ccb == NULL) {
2609 mpt_prt(mpt, "mpt_scsi_reply_handler: req %p:%u with no ccb\n",
2610 req, req->serno);
2611 return (TRUE);
2612 }
2613
2614 mpt_req_untimeout(req, mpt_timeout, ccb);
2615 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2616
2617 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
2618 bus_dmasync_op_t op;
2619
2620 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
2621 op = BUS_DMASYNC_POSTREAD;
2622 else
2623 op = BUS_DMASYNC_POSTWRITE;
2624 bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
2625 bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
2626 }
2627
2628 if (reply_frame == NULL) {
2629 /*
2630 * Context only reply, completion without error status.
2631 */
2632 ccb->csio.resid = 0;
2633 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2634 ccb->csio.scsi_status = SCSI_STATUS_OK;
2635 } else {
2636 mpt_scsi_reply_frame_handler(mpt, req, reply_frame);
2637 }
2638
2639 if (mpt->outofbeer) {
2640 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2641 mpt->outofbeer = 0;
2642 mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
2643 }
2644 if (scsi_req->CDB[0] == INQUIRY && (scsi_req->CDB[1] & SI_EVPD) == 0) {
2645 struct scsi_inquiry_data *iq =
2646 (struct scsi_inquiry_data *)ccb->csio.data_ptr;
2647 if (scsi_req->Function ==
2648 MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
2649 /*
2650 * Fake out the device type so that only the
2651 * pass-thru device will attach.
2652 */
2653 iq->device &= ~0x1F;
2654 iq->device |= T_NODEVICE;
2655 }
2656 }
2657 if (mpt->verbose == MPT_PRT_DEBUG) {
2658 mpt_prt(mpt, "mpt_scsi_reply_handler: %p:%u complete\n",
2659 req, req->serno);
2660 }
2661 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
2662 MPTLOCK_2_CAMLOCK(mpt);
2663 xpt_done(ccb);
2664 CAMLOCK_2_MPTLOCK(mpt);
2665 if ((req->state & REQ_STATE_TIMEDOUT) == 0) {
2666 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2667 } else {
2668 mpt_prt(mpt, "completing timedout/aborted req %p:%u\n",
2669 req, req->serno);
2670 TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
2671 }
2672 KASSERT((req->state & REQ_STATE_NEED_WAKEUP) == 0,
2673 ("CCB req needed wakeup"));
2674#ifdef INVARIANTS
2675 mpt_req_not_spcl(mpt, req, "mpt_scsi_reply_handler", __LINE__);
2676#endif
2677 mpt_free_request(mpt, req);
2678 return (TRUE);
2679}
2680
2681static int
2682mpt_scsi_tmf_reply_handler(struct mpt_softc *mpt, request_t *req,
2683 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2684{
2685 MSG_SCSI_TASK_MGMT_REPLY *tmf_reply;
2686
2687 KASSERT(req == mpt->tmf_req, ("TMF Reply not using mpt->tmf_req"));
2688#ifdef INVARIANTS
2689 mpt_req_not_spcl(mpt, req, "mpt_scsi_tmf_reply_handler", __LINE__);
2690#endif
2691 tmf_reply = (MSG_SCSI_TASK_MGMT_REPLY *)reply_frame;
2692 /* Record IOC Status and Response Code of TMF for any waiters. */
2693 req->IOCStatus = le16toh(tmf_reply->IOCStatus);
2694 req->ResponseCode = tmf_reply->ResponseCode;
2695
2696 mpt_lprt(mpt, MPT_PRT_DEBUG, "TMF complete: req %p:%u status 0x%x\n",
2697 req, req->serno, le16toh(tmf_reply->IOCStatus));
2698 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2699 if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
2700 req->state |= REQ_STATE_DONE;
2701 wakeup(req);
2702 } else {
2703 mpt->tmf_req->state = REQ_STATE_FREE;
2704 }
2705 return (TRUE);
2706}
2707
2708/*
2709 * XXX: Move to definitions file
2710 */
2711#define ELS 0x22
2712#define FC4LS 0x32
2713#define ABTS 0x81
2714#define BA_ACC 0x84
2715
2716#define LS_RJT 0x01
2717#define LS_ACC 0x02
2718#define PLOGI 0x03
2719#define LOGO 0x05
2720#define SRR 0x14
2721#define PRLI 0x20
2722#define PRLO 0x21
2723#define ADISC 0x52
2724#define RSCN 0x61
2725
2726static void
2727mpt_fc_els_send_response(struct mpt_softc *mpt, request_t *req,
2728 PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp, U8 length)
2729{
2730 uint32_t fl;
2731 MSG_LINK_SERVICE_RSP_REQUEST tmp;
2732 PTR_MSG_LINK_SERVICE_RSP_REQUEST rsp;
2733
2734 /*
2735 * We are going to reuse the ELS request to send this response back.
2736 */
2737 rsp = &tmp;
2738 memset(rsp, 0, sizeof(*rsp));
2739
2740#ifdef USE_IMMEDIATE_LINK_DATA
2741 /*
 2742 * The IMMEDIATE stuff doesn't seem to work.
2743 */
2744 rsp->RspFlags = LINK_SERVICE_RSP_FLAGS_IMMEDIATE;
2745#endif
2746 rsp->RspLength = length;
2747 rsp->Function = MPI_FUNCTION_FC_LINK_SRVC_RSP;
2748 rsp->MsgContext = htole32(req->index | fc_els_handler_id);
2749
2750 /*
2751 * Copy over information from the original reply frame to
 2752 * its correct place in the response.
2753 */
2754 memcpy((U8 *)rsp + 0x0c, (U8 *)rp + 0x1c, 24);
2755
2756 /*
2757 * And now copy back the temporary area to the original frame.
2758 */
2759 memcpy(req->req_vbuf, rsp, sizeof (MSG_LINK_SERVICE_RSP_REQUEST));
2760 rsp = req->req_vbuf;
2761
2762#ifdef USE_IMMEDIATE_LINK_DATA
2763 memcpy((U8 *)&rsp->SGL, &((U8 *)req->req_vbuf)[MPT_RQSL(mpt)], length);
2764#else
2765{
2766 PTR_SGE_SIMPLE32 se = (PTR_SGE_SIMPLE32) &rsp->SGL;
2767 bus_addr_t paddr = req->req_pbuf;
2768 paddr += MPT_RQSL(mpt);
2769
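 /*
 * Pack the SGE flags into the high byte of the FlagsLength word
 * (that is what MPI_SGE_FLAGS_SHIFT accomplishes) and the response
 * length into the low bits.
 */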
2770 fl =
2771 MPI_SGE_FLAGS_HOST_TO_IOC |
2772 MPI_SGE_FLAGS_SIMPLE_ELEMENT |
2773 MPI_SGE_FLAGS_LAST_ELEMENT |
2774 MPI_SGE_FLAGS_END_OF_LIST |
2775 MPI_SGE_FLAGS_END_OF_BUFFER;
2776 fl <<= MPI_SGE_FLAGS_SHIFT;
2777 fl |= (length);
2778 se->FlagsLength = htole32(fl);
2779 se->Address = htole32((uint32_t) paddr);
2780}
2781#endif
2782
2783 /*
2784 * Send it on...
2785 */
2786 mpt_send_cmd(mpt, req);
2787}
2788
2789static int
2790mpt_fc_els_reply_handler(struct mpt_softc *mpt, request_t *req,
2791 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2792{
2793 PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp =
2794 (PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY) reply_frame;
2795 U8 rctl;
2796 U8 type;
2797 U8 cmd;
2798 U16 status = le16toh(reply_frame->IOCStatus);
2799 U32 *elsbuf;
2800 int ioindex;
2801 int do_refresh = TRUE;
2802
2803#ifdef INVARIANTS
2804 KASSERT(mpt_req_on_free_list(mpt, req) == 0,
2805 ("fc_els_reply_handler: req %p:%u for function %x on freelist!",
2806 req, req->serno, rp->Function));
2807 if (rp->Function != MPI_FUNCTION_FC_PRIMITIVE_SEND) {
2808 mpt_req_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
2809 } else {
2810 mpt_req_not_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
2811 }
2812#endif
2813 mpt_lprt(mpt, MPT_PRT_DEBUG,
2814 "FC_ELS Complete: req %p:%u, reply %p function %x\n",
2815 req, req->serno, reply_frame, reply_frame->Function);
2816
2817 if (status != MPI_IOCSTATUS_SUCCESS) {
2818 mpt_prt(mpt, "ELS REPLY STATUS 0x%x for Function %x\n",
2819 status, reply_frame->Function);
2820 if (status == MPI_IOCSTATUS_INVALID_STATE) {
2821 /*
2822 * XXX: to get around shutdown issue
2823 */
2824 mpt->disabled = 1;
2825 return (TRUE);
2826 }
2827 return (TRUE);
2828 }
2829
2830 /*
 2831 * If the function is a link service response, we recycle the
 2832 * request to post a fresh link service buffer.
2833 *
2834 * The request pointer is bogus in this case and we have to fetch
2835 * it based upon the TransactionContext.
2836 */
2837 if (rp->Function == MPI_FUNCTION_FC_LINK_SRVC_RSP) {
2838 /* Freddie Uncle Charlie Katie */
2839 /* We don't get the IOINDEX as part of the Link Svc Rsp */
2840 for (ioindex = 0; ioindex < mpt->els_cmds_allocated; ioindex++)
2841 if (mpt->els_cmd_ptrs[ioindex] == req) {
2842 break;
2843 }
2844
2845 KASSERT(ioindex < mpt->els_cmds_allocated,
2846 ("can't find my mommie!"));
2847
2848 /* remove from active list as we're going to re-post it */
2849 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2850 req->state &= ~REQ_STATE_QUEUED;
2851 req->state |= REQ_STATE_DONE;
2852 mpt_fc_post_els(mpt, req, ioindex);
2853 return (TRUE);
2854 }
2855
2856 if (rp->Function == MPI_FUNCTION_FC_PRIMITIVE_SEND) {
2857 /* remove from active list as we're done */
2858 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2859 req->state &= ~REQ_STATE_QUEUED;
2860 req->state |= REQ_STATE_DONE;
2861 if (req->state & REQ_STATE_TIMEDOUT) {
2862 mpt_lprt(mpt, MPT_PRT_DEBUG,
2863 "Sync Primitive Send Completed After Timeout\n");
2864 mpt_free_request(mpt, req);
2865 } else if ((req->state & REQ_STATE_NEED_WAKEUP) == 0) {
2866 mpt_lprt(mpt, MPT_PRT_DEBUG,
2867 "Async Primitive Send Complete\n");
2868 mpt_free_request(mpt, req);
2869 } else {
2870 mpt_lprt(mpt, MPT_PRT_DEBUG,
2871 "Sync Primitive Send Complete- Waking Waiter\n");
2872 wakeup(req);
2873 }
2874 return (TRUE);
2875 }
2876
2877 if (rp->Function != MPI_FUNCTION_FC_LINK_SRVC_BUF_POST) {
2878 mpt_prt(mpt, "unexpected ELS_REPLY: Function 0x%x Flags %x "
2879 "Length %d Message Flags %x\n", rp->Function, rp->Flags,
2880 rp->MsgLength, rp->MsgFlags);
2881 return (TRUE);
2882 }
2883
2884 if (rp->MsgLength <= 5) {
2885 /*
 2886 * This is just an ack of an original ELS buffer post.
2887 */
2888 mpt_lprt(mpt, MPT_PRT_DEBUG,
2889 "RECV'd ACK of FC_ELS buf post %p:%u\n", req, req->serno);
2890 return (TRUE);
2891 }
2892
2893
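 /*
 * Unpack the routing control and type fields, then peek at the ELS
 * command code: the payload sits one request frame's length into the
 * request buffer and arrives big-endian off the wire.
 */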
2894 rctl = (le32toh(rp->Rctl_Did) & MPI_FC_RCTL_MASK) >> MPI_FC_RCTL_SHIFT;
2895 type = (le32toh(rp->Type_Fctl) & MPI_FC_TYPE_MASK) >> MPI_FC_TYPE_SHIFT;
2896
2897 elsbuf = &((U32 *)req->req_vbuf)[MPT_RQSL(mpt)/sizeof (U32)];
2898 cmd = be32toh(elsbuf[0]) >> 24;
2899
2900 if (rp->Flags & MPI_LS_BUF_POST_REPLY_FLAG_NO_RSP_NEEDED) {
2901 mpt_lprt(mpt, MPT_PRT_ALWAYS, "ELS_REPLY: response unneeded\n");
2902 return (TRUE);
2903 }
2904
2905 ioindex = le32toh(rp->TransactionContext);
2906 req = mpt->els_cmd_ptrs[ioindex];
2907
2908 if (rctl == ELS && type == 1) {
2909 switch (cmd) {
2910 case PRLI:
2911 /*
2912 * Send back a PRLI ACC
2913 */
2914 mpt_prt(mpt, "PRLI from 0x%08x%08x\n",
2915 le32toh(rp->Wwn.PortNameHigh),
2916 le32toh(rp->Wwn.PortNameLow));
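 /*
 * Build the accept payload by hand: the 0x02 in the top byte of
 * word 0 is LS_ACC (defined above), and the bits OR'd into word 4
 * advertise target and/or initiator function support.
 */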
2917 elsbuf[0] = htobe32(0x02100014);
2918 elsbuf[1] |= htobe32(0x00000100);
2919 elsbuf[4] = htobe32(0x00000002);
2920 if (mpt->role & MPT_ROLE_TARGET)
2921 elsbuf[4] |= htobe32(0x00000010);
2922 if (mpt->role & MPT_ROLE_INITIATOR)
2923 elsbuf[4] |= htobe32(0x00000020);
2924 /* remove from active list as we're done */
2925 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2926 req->state &= ~REQ_STATE_QUEUED;
2927 req->state |= REQ_STATE_DONE;
2928 mpt_fc_els_send_response(mpt, req, rp, 20);
2929 do_refresh = FALSE;
2930 break;
2931 case PRLO:
2932 memset(elsbuf, 0, 5 * (sizeof (U32)));
2933 elsbuf[0] = htobe32(0x02100014);
2934 elsbuf[1] = htobe32(0x08000100);
2935 mpt_prt(mpt, "PRLO from 0x%08x%08x\n",
2936 le32toh(rp->Wwn.PortNameHigh),
2937 le32toh(rp->Wwn.PortNameLow));
2938 /* remove from active list as we're done */
2939 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2940 req->state &= ~REQ_STATE_QUEUED;
2941 req->state |= REQ_STATE_DONE;
2942 mpt_fc_els_send_response(mpt, req, rp, 20);
2943 do_refresh = FALSE;
2944 break;
2945 default:
2946 mpt_prt(mpt, "ELS TYPE 1 COMMAND: %x\n", cmd);
2947 break;
2948 }
2949 } else if (rctl == ABTS && type == 0) {
2950 uint16_t rx_id = le16toh(rp->Rxid);
2951 uint16_t ox_id = le16toh(rp->Oxid);
2952 request_t *tgt_req = NULL;
2953
2954 mpt_prt(mpt,
2955 "ELS: ABTS OX_ID 0x%x RX_ID 0x%x from 0x%08x%08x\n",
2956 ox_id, rx_id, le32toh(rp->Wwn.PortNameHigh),
2957 le32toh(rp->Wwn.PortNameLow));
2958 if (rx_id >= mpt->mpt_max_tgtcmds) {
2959 mpt_prt(mpt, "Bad RX_ID 0x%x\n", rx_id);
2960 } else if (mpt->tgt_cmd_ptrs == NULL) {
2961 mpt_prt(mpt, "No TGT CMD PTRS\n");
2962 } else {
2963 tgt_req = mpt->tgt_cmd_ptrs[rx_id];
2964 }
2965 if (tgt_req) {
2966 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, tgt_req);
 2967 union ccb *ccb;
2968 uint32_t ct_id;
2969
2970 /*
 2971 * Check to make sure we have the correct command.
 2972 * The reply descriptor in the target state should
 2973 * contain an IoIndex that matches the RX_ID.
2975 *
2976 * It'd be nice to have OX_ID to crosscheck with
2977 * as well.
2978 */
2979 ct_id = GET_IO_INDEX(tgt->reply_desc);
2980
2981 if (ct_id != rx_id) {
2982 mpt_lprt(mpt, MPT_PRT_ERROR, "ABORT Mismatch: "
2983 "RX_ID received=0x%x; RX_ID in cmd=0x%x\n",
2984 rx_id, ct_id);
2985 goto skip;
2986 }
2987
2988 ccb = tgt->ccb;
2989 if (ccb) {
2990 mpt_prt(mpt,
2991 "CCB (%p): lun %u flags %x status %x\n",
2992 ccb, ccb->ccb_h.target_lun,
2993 ccb->ccb_h.flags, ccb->ccb_h.status);
2994 }
2995 mpt_prt(mpt, "target state 0x%x resid %u xfrd %u rpwrd "
2996 "%x nxfers %x\n", tgt->state,
2997 tgt->resid, tgt->bytes_xfered, tgt->reply_desc,
2998 tgt->nxfers);
2999 skip:
3000 if (mpt_abort_target_cmd(mpt, tgt_req)) {
3001 mpt_prt(mpt, "unable to start TargetAbort\n");
3002 }
3003 } else {
3004 mpt_prt(mpt, "no back pointer for RX_ID 0x%x\n", rx_id);
3005 }
3006 memset(elsbuf, 0, 5 * (sizeof (U32)));
3007 elsbuf[0] = htobe32(0);
3008 elsbuf[1] = htobe32((ox_id << 16) | rx_id);
3009 elsbuf[2] = htobe32(0x000ffff);
3010 /*
 3011 * Dork with the reply frame so that the response to it
3012 * will be correct.
3013 */
3014 rp->Rctl_Did += ((BA_ACC - ABTS) << MPI_FC_RCTL_SHIFT);
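 /*
 * That is, bump the R_CTL field from ABTS (0x81) to BA_ACC (0x84) so
 * that mpt_fc_els_send_response() sends the right frame type back.
 */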
3015 /* remove from active list as we're done */
3016 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
3017 req->state &= ~REQ_STATE_QUEUED;
3018 req->state |= REQ_STATE_DONE;
3019 mpt_fc_els_send_response(mpt, req, rp, 12);
3020 do_refresh = FALSE;
3021 } else {
3022 mpt_prt(mpt, "ELS: RCTL %x TYPE %x CMD %x\n", rctl, type, cmd);
3023 }
3024 if (do_refresh == TRUE) {
3025 /* remove from active list as we're done */
3026 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
3027 req->state &= ~REQ_STATE_QUEUED;
3028 req->state |= REQ_STATE_DONE;
3029 mpt_fc_post_els(mpt, req, ioindex);
3030 }
3031 return (TRUE);
3032}
3033
3034/*
3035 * Clean up all SCSI Initiator personality state in response
3036 * to a controller reset.
3037 */
3038static void
3039mpt_cam_ioc_reset(struct mpt_softc *mpt, int type)
3040{
3041 /*
3042 * The pending list is already run down by
3043 * the generic handler. Perform the same
3044 * operation on the timed out request list.
3045 */
3046 mpt_complete_request_chain(mpt, &mpt->request_timeout_list,
3047 MPI_IOCSTATUS_INVALID_STATE);
3048
3049 /*
 3050 * XXX: Do we need to repost ELS and Target Command Buffers?
3051 */
3052
3053 /*
3054 * Inform the XPT that a bus reset has occurred.
3055 */
3056 xpt_async(AC_BUS_RESET, mpt->path, NULL);
3057}
3058
3059/*
3060 * Parse additional completion information in the reply
3061 * frame for SCSI I/O requests.
3062 */
3063static int
3064mpt_scsi_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
3065 MSG_DEFAULT_REPLY *reply_frame)
3066{
3067 union ccb *ccb;
3068 MSG_SCSI_IO_REPLY *scsi_io_reply;
3069 u_int ioc_status;
3070 u_int sstate;
3071
3072 MPT_DUMP_REPLY_FRAME(mpt, reply_frame);
3073 KASSERT(reply_frame->Function == MPI_FUNCTION_SCSI_IO_REQUEST
3074 || reply_frame->Function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH,
3075 ("MPT SCSI I/O Handler called with incorrect reply type"));
3076 KASSERT((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0,
3077 ("MPT SCSI I/O Handler called with continuation reply"));
3078
3079 scsi_io_reply = (MSG_SCSI_IO_REPLY *)reply_frame;
3080 ioc_status = le16toh(scsi_io_reply->IOCStatus);
3081 ioc_status &= MPI_IOCSTATUS_MASK;
3082 sstate = scsi_io_reply->SCSIState;
3083
3084 ccb = req->ccb;
3085 ccb->csio.resid =
3086 ccb->csio.dxfer_len - le32toh(scsi_io_reply->TransferCount);
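 /* The residual is what the IOC did not move: requested minus transferred. */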
3087
3088 if ((sstate & MPI_SCSI_STATE_AUTOSENSE_VALID) != 0
3089 && (ccb->ccb_h.flags & (CAM_SENSE_PHYS | CAM_SENSE_PTR)) == 0) {
3090 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
3091 ccb->csio.sense_resid =
3092 ccb->csio.sense_len - le32toh(scsi_io_reply->SenseCount);
3093 bcopy(req->sense_vbuf, &ccb->csio.sense_data,
3094 min(ccb->csio.sense_len,
3095 le32toh(scsi_io_reply->SenseCount)));
3096 }
3097
3098 if ((sstate & MPI_SCSI_STATE_QUEUE_TAG_REJECTED) != 0) {
3099 /*
3100 * Tag messages rejected, but non-tagged retry
3101 * was successful.
3102XXXX
3103 mpt_set_tags(mpt, devinfo, MPT_QUEUE_NONE);
3104 */
3105 }
3106
3107 switch(ioc_status) {
3108 case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
3109 /*
3110 * XXX
 3111 * The Linux driver indicates that a zero
 3112 * transfer length with this error code
 3113 * means a CRC error.
3114 *
3115 * No need to swap the bytes for checking
3116 * against zero.
3117 */
3118 if (scsi_io_reply->TransferCount == 0) {
3119 mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
3120 break;
3121 }
3122 /* FALLTHROUGH */
3123 case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
3124 case MPI_IOCSTATUS_SUCCESS:
3125 case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
3126 if ((sstate & MPI_SCSI_STATE_NO_SCSI_STATUS) != 0) {
3127 /*
3128 * Status was never returned for this transaction.
3129 */
3130 mpt_set_ccb_status(ccb, CAM_UNEXP_BUSFREE);
3131 } else if (scsi_io_reply->SCSIStatus != SCSI_STATUS_OK) {
3132 ccb->csio.scsi_status = scsi_io_reply->SCSIStatus;
3133 mpt_set_ccb_status(ccb, CAM_SCSI_STATUS_ERROR);
3134 if ((sstate & MPI_SCSI_STATE_AUTOSENSE_FAILED) != 0)
3135 mpt_set_ccb_status(ccb, CAM_AUTOSENSE_FAIL);
3136 } else if ((sstate & MPI_SCSI_STATE_RESPONSE_INFO_VALID) != 0) {
3137
 3138 /* XXX Handle SPI-Packet and FCP-2 response info. */
3139 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3140 } else
3141 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3142 break;
3143 case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
3144 mpt_set_ccb_status(ccb, CAM_DATA_RUN_ERR);
3145 break;
3146 case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR:
3147 mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
3148 break;
3149 case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
3150 /*
3151 * Since selection timeouts and "device really not
3152 * there" are grouped into this error code, report
3153 * selection timeout. Selection timeouts are
3154 * typically retried before giving up on the device
3155 * whereas "device not there" errors are considered
3156 * unretryable.
3157 */
3158 mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
3159 break;
3160 case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:
3161 mpt_set_ccb_status(ccb, CAM_SEQUENCE_FAIL);
3162 break;
3163 case MPI_IOCSTATUS_SCSI_INVALID_BUS:
3164 mpt_set_ccb_status(ccb, CAM_PATH_INVALID);
3165 break;
3166 case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
3167 mpt_set_ccb_status(ccb, CAM_TID_INVALID);
3168 break;
3169 case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
3170 ccb->ccb_h.status = CAM_UA_TERMIO;
3171 break;
3172 case MPI_IOCSTATUS_INVALID_STATE:
3173 /*
3174 * The IOC has been reset. Emulate a bus reset.
3175 */
3176 /* FALLTHROUGH */
3177 case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
3178 ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
3179 break;
3180 case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
3181 case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
3182 /*
3183 * Don't clobber any timeout status that has
3184 * already been set for this transaction. We
3185 * want the SCSI layer to be able to differentiate
3186 * between the command we aborted due to timeout
3187 * and any innocent bystanders.
3188 */
3189 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG)
3190 break;
3191 mpt_set_ccb_status(ccb, CAM_REQ_TERMIO);
3192 break;
3193
3194 case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
3195 mpt_set_ccb_status(ccb, CAM_RESRC_UNAVAIL);
3196 break;
3197 case MPI_IOCSTATUS_BUSY:
3198 mpt_set_ccb_status(ccb, CAM_BUSY);
3199 break;
3200 case MPI_IOCSTATUS_INVALID_FUNCTION:
3201 case MPI_IOCSTATUS_INVALID_SGL:
3202 case MPI_IOCSTATUS_INTERNAL_ERROR:
3203 case MPI_IOCSTATUS_INVALID_FIELD:
3204 default:
3205 /* XXX
 3206 * Some of the above may need to kick
 3207 * off a recovery action!
3208 */
3209 ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
3210 break;
3211 }
3212
3213 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
3214 mpt_freeze_ccb(ccb);
3215 }
3216
3217 return (TRUE);
3218}
3219
3220static void
3221mpt_action(struct cam_sim *sim, union ccb *ccb)
3222{
3223 struct mpt_softc *mpt;
3224 struct ccb_trans_settings *cts;
3225 target_id_t tgt;
3226 lun_id_t lun;
3227 int raid_passthru;
3228
3229 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mpt_action\n"));
3230
3231 mpt = (struct mpt_softc *)cam_sim_softc(sim);
3232 raid_passthru = (sim == mpt->phydisk_sim);
3233 MPT_LOCK_ASSERT(mpt);
3234
3235 tgt = ccb->ccb_h.target_id;
3236 lun = ccb->ccb_h.target_lun;
3237 if (raid_passthru &&
3238 ccb->ccb_h.func_code != XPT_PATH_INQ &&
3239 ccb->ccb_h.func_code != XPT_RESET_BUS &&
3240 ccb->ccb_h.func_code != XPT_RESET_DEV) {
3241 CAMLOCK_2_MPTLOCK(mpt);
3242 if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
3243 MPTLOCK_2_CAMLOCK(mpt);
3244 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3245 mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
3246 xpt_done(ccb);
3247 return;
3248 }
3249 MPTLOCK_2_CAMLOCK(mpt);
3250 }
3251 ccb->ccb_h.ccb_mpt_ptr = mpt;
3252
3253 switch (ccb->ccb_h.func_code) {
3254 case XPT_SCSI_IO: /* Execute the requested I/O operation */
3255 /*
3256 * Do a couple of preliminary checks...
3257 */
3258 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
3259 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
3260 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3261 mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3262 break;
3263 }
3264 }
3265 /* Max supported CDB length is 16 bytes */
3266 /* XXX Unless we implement the new 32byte message type */
3267 if (ccb->csio.cdb_len >
3268 sizeof (((PTR_MSG_SCSI_IO_REQUEST)0)->CDB)) {
3269 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3270 mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3271 break;
3272 }
3273#ifdef MPT_TEST_MULTIPATH
3274 if (mpt->failure_id == ccb->ccb_h.target_id) {
3275 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3276 mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
3277 break;
3278 }
3279#endif
3280 ccb->csio.scsi_status = SCSI_STATUS_OK;
3281 mpt_start(sim, ccb);
3282 return;
3283
3284 case XPT_RESET_BUS:
3285 if (raid_passthru) {
3286 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3287 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3288 break;
3289 }
3290 case XPT_RESET_DEV:
3291 if (ccb->ccb_h.func_code == XPT_RESET_BUS) {
3292 if (bootverbose) {
3293 xpt_print(ccb->ccb_h.path, "reset bus\n");
3294 }
3295 } else {
3296 xpt_print(ccb->ccb_h.path, "reset device\n");
3297 }
3298 CAMLOCK_2_MPTLOCK(mpt);
3299 (void) mpt_bus_reset(mpt, tgt, lun, FALSE);
3300 MPTLOCK_2_CAMLOCK(mpt);
3301
3302 /*
3303 * mpt_bus_reset is always successful in that it
3304 * will fall back to a hard reset should a bus
3305 * reset attempt fail.
3306 */
3307 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3308 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3309 break;
3310
3311 case XPT_ABORT:
3312 {
3313 union ccb *accb = ccb->cab.abort_ccb;
3314 CAMLOCK_2_MPTLOCK(mpt);
3315 switch (accb->ccb_h.func_code) {
3316 case XPT_ACCEPT_TARGET_IO:
3317 case XPT_IMMED_NOTIFY:
3318 ccb->ccb_h.status = mpt_abort_target_ccb(mpt, ccb);
3319 break;
3320 case XPT_CONT_TARGET_IO:
3321 mpt_prt(mpt, "cannot abort active CTIOs yet\n");
3322 ccb->ccb_h.status = CAM_UA_ABORT;
3323 break;
3324 case XPT_SCSI_IO:
3325 ccb->ccb_h.status = CAM_UA_ABORT;
3326 break;
3327 default:
3328 ccb->ccb_h.status = CAM_REQ_INVALID;
3329 break;
3330 }
3331 MPTLOCK_2_CAMLOCK(mpt);
3332 break;
3333 }
3334
3335 #ifdef CAM_NEW_TRAN_CODE
3336 #define IS_CURRENT_SETTINGS(c) ((c)->type == CTS_TYPE_CURRENT_SETTINGS)
3337 #else
3338 #define IS_CURRENT_SETTINGS(c) ((c)->flags & CCB_TRANS_CURRENT_SETTINGS)
3339 #endif
3340 #define DP_DISC_ENABLE 0x1
3341 #define DP_DISC_DISABL 0x2
3342 #define DP_DISC (DP_DISC_ENABLE|DP_DISC_DISABL)
3343
3344 #define DP_TQING_ENABLE 0x4
3345 #define DP_TQING_DISABL 0x8
3346 #define DP_TQING (DP_TQING_ENABLE|DP_TQING_DISABL)
3347
3348 #define DP_WIDE 0x10
3349 #define DP_NARROW 0x20
3350 #define DP_WIDTH (DP_WIDE|DP_NARROW)
3351
3352 #define DP_SYNC 0x40
3353
3354 case XPT_SET_TRAN_SETTINGS: /* Nexus Settings */
3355 {
3356#ifdef CAM_NEW_TRAN_CODE
3357 struct ccb_trans_settings_scsi *scsi;
3358 struct ccb_trans_settings_spi *spi;
3359#endif
3360 uint8_t dval;
3361 u_int period;
3362 u_int offset;
3363 int i, j;
3364
3365 cts = &ccb->cts;
3366
3367 if (mpt->is_fc || mpt->is_sas) {
3368 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3369 break;
3370 }
3371
3372#ifdef CAM_NEW_TRAN_CODE
3373 scsi = &cts->proto_specific.scsi;
3374 spi = &cts->xport_specific.spi;
3375
3376 /*
 3377 * We can be called just to validate transport and proto versions.
3378 */
3379 if (scsi->valid == 0 && spi->valid == 0) {
3380 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3381 break;
3382 }
3383#endif
3384
3385 /*
3386 * Skip attempting settings on RAID volume disks.
3387 * Other devices on the bus get the normal treatment.
3388 */
3389 if (mpt->phydisk_sim && raid_passthru == 0 &&
3390 mpt_is_raid_volume(mpt, tgt) != 0) {
3391 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3392 "no transfer settings for RAID vols\n");
3393 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3394 break;
3395 }
3396
3397 i = mpt->mpt_port_page2.PortSettings &
3398 MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
3399 j = mpt->mpt_port_page2.PortFlags &
3400 MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK;
3401 if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS &&
3402 j == MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV) {
3403 mpt_lprt(mpt, MPT_PRT_ALWAYS,
3404 "honoring BIOS transfer negotiations\n");
3405 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3406 break;
3407 }
3408
3409 dval = 0;
3410 period = 0;
3411 offset = 0;
3412
3413#ifndef CAM_NEW_TRAN_CODE
3414 if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
3415 dval |= (cts->flags & CCB_TRANS_DISC_ENB) ?
3416 DP_DISC_ENABLE : DP_DISC_DISABL;
3417 }
3418
3419 if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
3420 dval |= (cts->flags & CCB_TRANS_TAG_ENB) ?
3421 DP_TQING_ENABLE : DP_TQING_DISABL;
3422 }
3423
3424 if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) {
3425 dval |= cts->bus_width ? DP_WIDE : DP_NARROW;
3426 }
3427
3428 if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) &&
3429 (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID)) {
3430 dval |= DP_SYNC;
3431 period = cts->sync_period;
3432 offset = cts->sync_offset;
3433 }
3434#else
3435 if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
3436 dval |= ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) ?
3437 DP_DISC_ENABLE : DP_DISC_DISABL;
3438 }
3439
3440 if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
3441 dval |= ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) ?
3442 DP_TQING_ENABLE : DP_TQING_DISABL;
3443 }
3444
3445 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
3446 dval |= (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) ?
3447 DP_WIDE : DP_NARROW;
3448 }
3449
3450 if (spi->valid & CTS_SPI_VALID_SYNC_OFFSET) {
3451 dval |= DP_SYNC;
3452 offset = spi->sync_offset;
3453 } else {
3454 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr =
3455 &mpt->mpt_dev_page1[tgt];
3456 offset = ptr->RequestedParameters;
3457 offset &= MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
3458 offset >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET;
3459 }
3460 if (spi->valid & CTS_SPI_VALID_SYNC_RATE) {
3461 dval |= DP_SYNC;
3462 period = spi->sync_period;
3463 } else {
3464 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr =
3465 &mpt->mpt_dev_page1[tgt];
3466 period = ptr->RequestedParameters;
3467 period &= MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
3468 period >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD;
3469 }
3470#endif
3471 CAMLOCK_2_MPTLOCK(mpt);
3472 if (dval & DP_DISC_ENABLE) {
3473 mpt->mpt_disc_enable |= (1 << tgt);
3474 } else if (dval & DP_DISC_DISABL) {
3475 mpt->mpt_disc_enable &= ~(1 << tgt);
3476 }
3477 if (dval & DP_TQING_ENABLE) {
3478 mpt->mpt_tag_enable |= (1 << tgt);
3479 } else if (dval & DP_TQING_DISABL) {
3480 mpt->mpt_tag_enable &= ~(1 << tgt);
3481 }
3482 if (dval & DP_WIDTH) {
3483 mpt_setwidth(mpt, tgt, 1);
3484 }
3485 if (dval & DP_SYNC) {
3486 mpt_setsync(mpt, tgt, period, offset);
3487 }
3488 if (dval == 0) {
3489 MPTLOCK_2_CAMLOCK(mpt);
3490 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3491 break;
3492 }
3493 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3494 "set [%d]: 0x%x period 0x%x offset %d\n",
3495 tgt, dval, period, offset);
3496 if (mpt_update_spi_config(mpt, tgt)) {
3497 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3498 } else {
3499 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3500 }
3501 MPTLOCK_2_CAMLOCK(mpt);
3502 break;
3503 }
3504 case XPT_GET_TRAN_SETTINGS:
3505 {
3506#ifdef CAM_NEW_TRAN_CODE
3507 struct ccb_trans_settings_scsi *scsi;
3508 cts = &ccb->cts;
3509 cts->protocol = PROTO_SCSI;
3510 if (mpt->is_fc) {
3511 struct ccb_trans_settings_fc *fc =
3512 &cts->xport_specific.fc;
3513 cts->protocol_version = SCSI_REV_SPC;
3514 cts->transport = XPORT_FC;
3515 cts->transport_version = 0;
3516 fc->valid = CTS_FC_VALID_SPEED;
3517 fc->bitrate = 100000;
3518 } else if (mpt->is_sas) {
3519 struct ccb_trans_settings_sas *sas =
3520 &cts->xport_specific.sas;
3521 cts->protocol_version = SCSI_REV_SPC2;
3522 cts->transport = XPORT_SAS;
3523 cts->transport_version = 0;
3524 sas->valid = CTS_SAS_VALID_SPEED;
3525 sas->bitrate = 300000;
3526 } else {
3527 cts->protocol_version = SCSI_REV_2;
3528 cts->transport = XPORT_SPI;
3529 cts->transport_version = 2;
3530 if (mpt_get_spi_settings(mpt, cts) != 0) {
3531 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3532 break;
3533 }
3534 }
3535 scsi = &cts->proto_specific.scsi;
3536 scsi->valid = CTS_SCSI_VALID_TQ;
3537 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
3538#else
3539 cts = &ccb->cts;
3540 if (mpt->is_fc) {
3541 cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
3542 cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
3543 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3544 } else if (mpt->is_sas) {
3545 cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
3546 cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
3547 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3548 } else if (mpt_get_spi_settings(mpt, cts) != 0) {
3549 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3550 break;
3551 }
3552#endif
3553 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3554 break;
3555 }
3556 case XPT_CALC_GEOMETRY:
3557 {
3558 struct ccb_calc_geometry *ccg;
3559
3560 ccg = &ccb->ccg;
3561 if (ccg->block_size == 0) {
3562 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3563 mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3564 break;
3565 }
3566 mpt_calc_geometry(ccg, /*extended*/1);
3567 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
3568 break;
3569 }
3570 case XPT_PATH_INQ: /* Path routing inquiry */
3571 {
3572 struct ccb_pathinq *cpi = &ccb->cpi;
3573
3574 cpi->version_num = 1;
3575 cpi->target_sprt = 0;
3576 cpi->hba_eng_cnt = 0;
3577 cpi->max_target = mpt->port_facts[0].MaxDevices - 1;
3578 #if 0 /* XXX swildner */
3579 cpi->maxio = (mpt->max_cam_seg_cnt - 1) * PAGE_SIZE;
3580 #endif
3581 /*
3582 * FC cards report MAX_DEVICES of 512, but
3583 * the MSG_SCSI_IO_REQUEST target id field
3584 * is only 8 bits. Until we fix the driver
3585 * to support 'channels' for bus overflow,
3586 * just limit it.
3587 */
3588 if (cpi->max_target > 255) {
3589 cpi->max_target = 255;
3590 }
3591
3592 /*
3593 * VMware ESX reports > 16 devices and then dies when we probe.
3594 */
3595 if (mpt->is_spi && cpi->max_target > 15) {
3596 cpi->max_target = 15;
3597 }
3598 if (mpt->is_spi)
3599 cpi->max_lun = 7;
3600 else
3601 cpi->max_lun = MPT_MAX_LUNS;
3602 cpi->initiator_id = mpt->mpt_ini_id;
3603 cpi->bus_id = cam_sim_bus(sim);
3604
3605 /*
3606 * The base speed is the speed of the underlying connection.
3607 */
3608#ifdef CAM_NEW_TRAN_CODE
3609 cpi->protocol = PROTO_SCSI;
3610 if (mpt->is_fc) {
3611 cpi->hba_misc = PIM_NOBUSRESET;
3612 cpi->base_transfer_speed = 100000;
3613 cpi->hba_inquiry = PI_TAG_ABLE;
3614 cpi->transport = XPORT_FC;
3615 cpi->transport_version = 0;
3616 cpi->protocol_version = SCSI_REV_SPC;
3617 } else if (mpt->is_sas) {
3618 cpi->hba_misc = PIM_NOBUSRESET;
3619 cpi->base_transfer_speed = 300000;
3620 cpi->hba_inquiry = PI_TAG_ABLE;
3621 cpi->transport = XPORT_SAS;
3622 cpi->transport_version = 0;
3623 cpi->protocol_version = SCSI_REV_SPC2;
3624 } else {
3625 cpi->hba_misc = PIM_SEQSCAN;
3626 cpi->base_transfer_speed = 3300;
3627 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
3628 cpi->transport = XPORT_SPI;
3629 cpi->transport_version = 2;
3630 cpi->protocol_version = SCSI_REV_2;
3631 }
3632#else
3633 if (mpt->is_fc) {
3634 cpi->hba_misc = PIM_NOBUSRESET;
3635 cpi->base_transfer_speed = 100000;
3636 cpi->hba_inquiry = PI_TAG_ABLE;
3637 } else if (mpt->is_sas) {
3638 cpi->hba_misc = PIM_NOBUSRESET;
3639 cpi->base_transfer_speed = 300000;
3640 cpi->hba_inquiry = PI_TAG_ABLE;
3641 } else {
3642 cpi->hba_misc = PIM_SEQSCAN;
3643 cpi->base_transfer_speed = 3300;
3644 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
3645 }
3646#endif
3647
3648 /*
3649 * We give our fake RAID passhtru bus a width that is MaxVolumes
3650 * wide and restrict it to one lun.
3651 */
3652 if (raid_passthru) {
3653 cpi->max_target = mpt->ioc_page2->MaxPhysDisks - 1;
3654 cpi->initiator_id = cpi->max_target + 1;
3655 cpi->max_lun = 0;
3656 }
3657
3658 if ((mpt->role & MPT_ROLE_INITIATOR) == 0) {
3659 cpi->hba_misc |= PIM_NOINITIATOR;
3660 }
3661 if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
3662 cpi->target_sprt =
3663 PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
3664 } else {
3665 cpi->target_sprt = 0;
3666 }
3667 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
3668 strncpy(cpi->hba_vid, "LSI", HBA_IDLEN);
3669 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
3670 cpi->unit_number = cam_sim_unit(sim);
3671 cpi->ccb_h.status = CAM_REQ_CMP;
3672 break;
3673 }
3674 case XPT_EN_LUN: /* Enable LUN as a target */
3675 {
3676 int result;
3677
3678 CAMLOCK_2_MPTLOCK(mpt);
3679 if (ccb->cel.enable)
3680 result = mpt_enable_lun(mpt,
3681 ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
3682 else
3683 result = mpt_disable_lun(mpt,
3684 ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
3685 MPTLOCK_2_CAMLOCK(mpt);
3686 if (result == 0) {
3687 mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3688 } else {
3689 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3690 }
3691 break;
3692 }
3693 case XPT_NOTIFY_ACK: /* recycle notify ack */
3694 case XPT_IMMED_NOTIFY: /* Add Immediate Notify Resource */
3695 case XPT_ACCEPT_TARGET_IO: /* Add Accept Target IO Resource */
3696 {
3697 tgt_resource_t *trtp;
3698 lun_id_t lun = ccb->ccb_h.target_lun;
3699 ccb->ccb_h.sim_priv.entries[0].field = 0;
3700 ccb->ccb_h.sim_priv.entries[1].ptr = mpt;
3701 ccb->ccb_h.flags = 0;
3702
3703 if (lun == CAM_LUN_WILDCARD) {
3704 if (ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
3705 mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3706 break;
3707 }
3708 trtp = &mpt->trt_wildcard;
3709 } else if (lun >= MPT_MAX_LUNS) {
3710 mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3711 break;
3712 } else {
3713 trtp = &mpt->trt[lun];
3714 }
3715 CAMLOCK_2_MPTLOCK(mpt);
3716 if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
3717 mpt_lprt(mpt, MPT_PRT_DEBUG1,
3718 "Put FREE ATIO %p lun %d\n", ccb, lun);
3719 STAILQ_INSERT_TAIL(&trtp->atios, &ccb->ccb_h,
3720 sim_links.stqe);
3721 } else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
3722 mpt_lprt(mpt, MPT_PRT_DEBUG1,
3723 "Put FREE INOT lun %d\n", lun);
3724 STAILQ_INSERT_TAIL(&trtp->inots, &ccb->ccb_h,
3725 sim_links.stqe);
3726 } else {
3727 mpt_lprt(mpt, MPT_PRT_ALWAYS, "Got Notify ACK\n");
3728 }
3729 mpt_set_ccb_status(ccb, CAM_REQ_INPROG);
3730 MPTLOCK_2_CAMLOCK(mpt);
3731 return;
3732 }
3733 case XPT_CONT_TARGET_IO:
3734 CAMLOCK_2_MPTLOCK(mpt);
3735 mpt_target_start_io(mpt, ccb);
3736 MPTLOCK_2_CAMLOCK(mpt);
3737 return;
3738
3739 default:
3740 ccb->ccb_h.status = CAM_REQ_INVALID;
3741 break;
3742 }
3743 xpt_done(ccb);
3744}
3745
3746static int
3747mpt_get_spi_settings(struct mpt_softc *mpt, struct ccb_trans_settings *cts)
3748{
3749#ifdef CAM_NEW_TRAN_CODE
3750 struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi;
3751 struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi;
3752#endif
3753 target_id_t tgt;
3754 uint32_t dval, pval, oval;
3755 int rv;
3756
3757 if (IS_CURRENT_SETTINGS(cts) == 0) {
3758 tgt = cts->ccb_h.target_id;
3759 } else if (xpt_path_sim(cts->ccb_h.path) == mpt->phydisk_sim) {
3760 if (mpt_map_physdisk(mpt, (union ccb *)cts, &tgt)) {
3761 return (-1);
3762 }
3763 } else {
3764 tgt = cts->ccb_h.target_id;
3765 }
3766
3767 /*
3768	 * We aren't looking at Port Page 2 BIOS settings here;
3769	 * they have sometimes been known to be bogus. XXX
3770	 *
3771	 * For user settings, we pick the maximums from port page 0.
3772	 *
3773	 * For current settings, we read the current settings out of
3774	 * device page 0 for that target.
3775 */
3776 if (IS_CURRENT_SETTINGS(cts)) {
3777 CONFIG_PAGE_SCSI_DEVICE_0 tmp;
3778 dval = 0;
3779
3780 CAMLOCK_2_MPTLOCK(mpt);
3781 tmp = mpt->mpt_dev_page0[tgt];
3782 rv = mpt_read_cur_cfg_page(mpt, tgt, &tmp.Header,
3783 sizeof(tmp), FALSE, 5000);
3784 if (rv) {
3785 MPTLOCK_2_CAMLOCK(mpt);
3786 mpt_prt(mpt, "can't get tgt %d config page 0\n", tgt);
3787 return (rv);
3788 }
3789 mpt2host_config_page_scsi_device_0(&tmp);
3790
3791 MPTLOCK_2_CAMLOCK(mpt);
3792 mpt_lprt(mpt, MPT_PRT_DEBUG,
3793 "mpt_get_spi_settings[%d]: current NP %x Info %x\n", tgt,
3794 tmp.NegotiatedParameters, tmp.Information);
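		/*
		 * Distill the negotiated parameters into the driver's
		 * DP_* flag word (dval), the sync offset (oval) and the
		 * sync period factor (pval).
		 */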
3795 dval |= (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE) ?
3796 DP_WIDE : DP_NARROW;
3797 dval |= (mpt->mpt_disc_enable & (1 << tgt)) ?
3798 DP_DISC_ENABLE : DP_DISC_DISABL;
3799 dval |= (mpt->mpt_tag_enable & (1 << tgt)) ?
3800 DP_TQING_ENABLE : DP_TQING_DISABL;
3801 oval = tmp.NegotiatedParameters;
3802 oval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK;
3803 oval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_OFFSET;
3804 pval = tmp.NegotiatedParameters;
3805 pval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK;
3806 pval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_PERIOD;
3807 mpt->mpt_dev_page0[tgt] = tmp;
3808 } else {
3809 dval = DP_WIDE|DP_DISC_ENABLE|DP_TQING_ENABLE|DP_SYNC;
3810 oval = mpt->mpt_port_page0.Capabilities;
3811 oval = MPI_SCSIPORTPAGE0_CAP_GET_MAX_SYNC_OFFSET(oval);
3812 pval = mpt->mpt_port_page0.Capabilities;
3813 pval = MPI_SCSIPORTPAGE0_CAP_GET_MIN_SYNC_PERIOD(pval);
3814 }
3815
3816#ifndef CAM_NEW_TRAN_CODE
3817 cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
3818 cts->valid = 0;
3819 cts->sync_period = pval;
3820 cts->sync_offset = oval;
3821 cts->valid |= CCB_TRANS_SYNC_RATE_VALID;
3822 cts->valid |= CCB_TRANS_SYNC_OFFSET_VALID;
3823 cts->valid |= CCB_TRANS_BUS_WIDTH_VALID;
3824 if (dval & DP_WIDE) {
3825 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
3826 } else {
3827 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3828 }
3829 if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
3830 cts->valid |= CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
3831 if (dval & DP_DISC_ENABLE) {
3832 cts->flags |= CCB_TRANS_DISC_ENB;
3833 }
3834 if (dval & DP_TQING_ENABLE) {
3835 cts->flags |= CCB_TRANS_TAG_ENB;
3836 }
3837 }
3838#else
3839 spi->valid = 0;
3840 scsi->valid = 0;
3841 spi->flags = 0;
3842 scsi->flags = 0;
3843 spi->sync_offset = oval;
3844 spi->sync_period = pval;
3845 spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
3846 spi->valid |= CTS_SPI_VALID_SYNC_RATE;
3847 spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
3848 if (dval & DP_WIDE) {
3849 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
3850 } else {
3851 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3852 }
3853 if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
3854 scsi->valid = CTS_SCSI_VALID_TQ;
3855 if (dval & DP_TQING_ENABLE) {
3856 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
3857 }
3858 spi->valid |= CTS_SPI_VALID_DISC;
3859 if (dval & DP_DISC_ENABLE) {
3860 spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
3861 }
3862 }
3863#endif
3864 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3865 "mpt_get_spi_settings[%d]: %s flags 0x%x per 0x%x off=%d\n", tgt,
3866 IS_CURRENT_SETTINGS(cts)? "ACTIVE" : "NVRAM ", dval, pval, oval);
3867 return (0);
3868}
3869
3870static void
3871mpt_setwidth(struct mpt_softc *mpt, int tgt, int onoff)
3872{
3873 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;
3874
3875 ptr = &mpt->mpt_dev_page1[tgt];
3876 if (onoff) {
3877 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
3878 } else {
3879 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;
3880 }
3881}
3882
3883static void
3884mpt_setsync(struct mpt_softc *mpt, int tgt, int period, int offset)
3885{
3886 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;
3887
3888 ptr = &mpt->mpt_dev_page1[tgt];
3889 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
3890 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
3891 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_DT;
3892 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_QAS;
3893 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_IU;
3894 if (period == 0) {
3895 return;
3896 }
3897 ptr->RequestedParameters |=
3898 period << MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD;
3899 ptr->RequestedParameters |=
3900 offset << MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET;
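	/*
	 * Note that "period" is a SPI period factor, not nanoseconds:
	 * 0x0a is 25ns (Ultra160) and 0x09 is 12.5ns (Ultra320).
	 * Anything faster than factor 0x0a needs dual-transition (DT)
	 * clocking, and anything faster than 0x09 has QAS and
	 * information units (IU) requested as well.
	 */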
3901 if (period < 0xa) {
3902 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_DT;
3903 }
3904 if (period < 0x9) {
3905 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_QAS;
3906 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_IU;
3907 }
3908}
3909
3910static int
3911mpt_update_spi_config(struct mpt_softc *mpt, int tgt)
3912{
3913 CONFIG_PAGE_SCSI_DEVICE_1 tmp;
3914 int rv;
3915
3916 mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3917 "mpt_update_spi_config[%d].page1: Requested Params 0x%08x\n",
3918	    tgt, mpt->mpt_dev_page1[tgt].RequestedParameters);
3919 tmp = mpt->mpt_dev_page1[tgt];
3920 host2mpt_config_page_scsi_device_1(&tmp);
3921 rv = mpt_write_cur_cfg_page(mpt, tgt,
3922 &tmp.Header, sizeof(tmp), FALSE, 5000);
3923 if (rv) {
3924 mpt_prt(mpt, "mpt_update_spi_config: write cur page failed\n");
3925 return (-1);
3926 }
3927 return (0);
3928}
3929
3930static void
3931mpt_calc_geometry(struct ccb_calc_geometry *ccg, int extended)
3932{
3933	cam_calc_geometry(ccg, extended);
3934}
3935
3936/****************************** Timeout Recovery ******************************/
3937static int
3938mpt_spawn_recovery_thread(struct mpt_softc *mpt)
3939{
3940 int error;
3941
3942 error = mpt_kthread_create(mpt_recovery_thread, mpt,
3943 &mpt->recovery_thread, /*flags*/0,
3944 /*altstack*/0, "mpt_recovery%d", mpt->unit);
3945 return (error);
3946}
3947
3948static void
3949mpt_terminate_recovery_thread(struct mpt_softc *mpt)
3950{
3951 if (mpt->recovery_thread == NULL) {
3952 return;
3953 }
3954 mpt->shutdwn_recovery = 1;
3955 wakeup(mpt);
3956 /*
3957 * Sleep on a slightly different location
3958 * for this interlock just for added safety.
3959 */
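	/*
	 * (mpt_recovery_thread() does wakeup(&mpt->recovery_thread) after
	 * clearing the pointer, which is what ends this sleep.)
	 */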
3960	mpt_sleep(mpt, &mpt->recovery_thread, 0, "thtrm", 0);
3961}
3962
3963static void
3964mpt_recovery_thread(void *arg)
3965{
3966 struct mpt_softc *mpt;
3967
3968 mpt = (struct mpt_softc *)arg;
3969 MPT_LOCK(mpt);
3970 for (;;) {
3971 if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
3972 if (mpt->shutdwn_recovery == 0) {
3973				mpt_sleep(mpt, mpt, 0, "idle", 0);
3974 }
3975 }
3976 if (mpt->shutdwn_recovery != 0) {
3977 break;
3978 }
3979 mpt_recover_commands(mpt);
3980 }
3981 mpt->recovery_thread = NULL;
3982 wakeup(&mpt->recovery_thread);
3983 MPT_UNLOCK(mpt);
3984	mpt_kthread_exit(0);
3985}
3986
3987static int
3988mpt_scsi_send_tmf(struct mpt_softc *mpt, u_int type, u_int flags,
3989 u_int channel, u_int target, u_int lun, u_int abort_ctx, int sleep_ok)
3990{
3991 MSG_SCSI_TASK_MGMT *tmf_req;
3992 int error;
3993
3994 /*
3995 * Wait for any current TMF request to complete.
3996 * We're only allowed to issue one TMF at a time.
3997 */
3998 error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_FREE, REQ_STATE_FREE,
3999 sleep_ok, MPT_TMF_MAX_TIMEOUT);
4000 if (error != 0) {
4001 mpt_reset(mpt, TRUE);
4002 return (ETIMEDOUT);
4003 }
4004
4005 mpt_assign_serno(mpt, mpt->tmf_req);
4006 mpt->tmf_req->state = REQ_STATE_ALLOCATED|REQ_STATE_QUEUED;
4007
4008 tmf_req = (MSG_SCSI_TASK_MGMT *)mpt->tmf_req->req_vbuf;
4009 memset(tmf_req, 0, sizeof(*tmf_req));
4010 tmf_req->TargetID = target;
4011 tmf_req->Bus = channel;
4012 tmf_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
4013 tmf_req->TaskType = type;
4014 tmf_req->MsgFlags = flags;
4015 tmf_req->MsgContext =
4016 htole32(mpt->tmf_req->index | scsi_tmf_handler_id);
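	/*
	 * Encode the LUN: values too big for a single byte use SAM-2
	 * flat addressing (0x40 in the upper bits of byte 0), while
	 * smaller ones go in byte 1 as plain peripheral addressing.
	 */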
4017 if (lun > MPT_MAX_LUNS) {
4018 tmf_req->LUN[0] = 0x40 | ((lun >> 8) & 0x3f);
4019 tmf_req->LUN[1] = lun & 0xff;
4020 } else {
4021 tmf_req->LUN[1] = lun;
4022 }
4023 tmf_req->TaskMsgContext = abort_ctx;
4024
4025 mpt_lprt(mpt, MPT_PRT_DEBUG,
4026 "Issuing TMF %p:%u with MsgContext of 0x%x\n", mpt->tmf_req,
4027 mpt->tmf_req->serno, tmf_req->MsgContext);
4028 if (mpt->verbose > MPT_PRT_DEBUG) {
4029 mpt_print_request(tmf_req);
4030 }
4031
4032 KASSERT(mpt_req_on_pending_list(mpt, mpt->tmf_req) == 0,
4033 ("mpt_scsi_send_tmf: tmf_req already on pending list"));
4034 TAILQ_INSERT_HEAD(&mpt->request_pending_list, mpt->tmf_req, links);
4035 error = mpt_send_handshake_cmd(mpt, sizeof(*tmf_req), tmf_req);
4036 if (error != MPT_OK) {
4037 TAILQ_REMOVE(&mpt->request_pending_list, mpt->tmf_req, links);
4038 mpt->tmf_req->state = REQ_STATE_FREE;
4039 mpt_reset(mpt, TRUE);
4040 }
4041 return (error);
4042}
4043
4044/*
4045 * When a command times out, it is placed on the request_timeout_list
4046 * and we wake our recovery thread.  The MPT-Fusion architecture supports
4047 * only a single TMF operation at a time, so we serially abort/bdr, etc.,
4048 * the timed-out transactions.  The next TMF is issued either by the
4049 * completion handler of the current TMF waking our recovery thread,
4050 * or the TMF timeout handler causing a hard reset sequence.
4051 */
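/*
 * As a rough sketch (not verbatim driver code), aborting one timed-out
 * initiator request with the machinery above looks like:
 *
 *	error = mpt_scsi_send_tmf(mpt, MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
 *	    0, 0, targ, lun, htole32(req->index | scsi_io_handler_id), TRUE);
 *
 * where targ/lun are taken from the timed-out CCB, the abort context is
 * the MsgContext of the request being aborted, and the final argument
 * says it is safe to sleep while waiting for the TMF request slot.
 */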
4052static void
4053mpt_recover_commands(struct mpt_softc *mpt)
4054{
4055 request_t *req;
4056 union ccb *ccb;
4057 int error;
4058
4059 if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
4060 /*
4061		 * No work to do; leave.
4062 */
4063 mpt_prt(mpt, "mpt_recover_commands: no requests.\n");
4064 return;
4065 }
4066
4067 /*
4068 * Flush any commands whose completion coincides with their timeout.
4069 */
4070 mpt_intr(mpt);
4071
4072 if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
4073 /*
4074		 * The timed-out commands have already
4075		 * completed.  This typically means
4076		 * that either the timeout value was on
4077		 * the hairy edge of what the device
4078		 * requires or, more likely, interrupts
4079 * are not happening.
4080 */
4081 mpt_prt(mpt, "Timedout requests already complete. "
4082 "Interrupts may not be functioning.\n");
4083 mpt_enable_ints(mpt);
4084 return;
4085 }
4086
4087 /*
4088 * We have no visibility into the current state of the
4089 * controller, so attempt to abort the commands in the
4090	 * order they timed out.  For initiator commands, we
4091 * depend on the reply handler pulling requests off
4092 * the timeout list.
4093 */
4094 while ((req = TAILQ_FIRST(&mpt->request_timeout_list)) != NULL) {
4095 uint16_t status;
4096 uint8_t response;
4097 MSG_REQUEST_HEADER *hdrp = req->req_vbuf;
4098
4099 mpt_prt(mpt, "attempting to abort req %p:%u function %x\n",
4100 req, req->serno, hdrp->Function);
4101 ccb = req->ccb;
4102 if (ccb == NULL) {
4103 mpt_prt(mpt, "null ccb in timed out request. "
4104 "Resetting Controller.\n");
4105 mpt_reset(mpt, TRUE);
4106 continue;
4107 }
4108 mpt_set_ccb_status(ccb, CAM_CMD_TIMEOUT);
4109