CAM - add xpt_action_async()
[dragonfly.git] / sys / bus / cam / cam_xpt.c
1/*
2 * Implementation of the Common Access Method Transport (XPT) layer.
3 *
4 * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
5 * Copyright (c) 1997, 1998, 1999 Kenneth D. Merry.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions, and the following disclaimer,
13 * without modification, immediately at the beginning of the file.
14 * 2. The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
29 * $FreeBSD: src/sys/cam/cam_xpt.c,v 1.80.2.18 2002/12/09 17:31:55 gibbs Exp $
30 * $DragonFly: src/sys/bus/cam/cam_xpt.c,v 1.68 2008/08/23 17:13:31 pavalos Exp $
31 */
32#include <sys/param.h>
33#include <sys/systm.h>
34#include <sys/types.h>
35#include <sys/malloc.h>
36#include <sys/kernel.h>
37#include <sys/time.h>
38#include <sys/conf.h>
39#include <sys/device.h>
40#include <sys/fcntl.h>
41#include <sys/md5.h>
42#include <sys/devicestat.h>
43#include <sys/interrupt.h>
44#include <sys/sbuf.h>
45#include <sys/taskqueue.h>
46#include <sys/bus.h>
47#include <sys/thread.h>
48#include <sys/lock.h>
49#include <sys/spinlock.h>
50#include <sys/thread2.h>
51#include <sys/spinlock2.h>
52
53#include <machine/clock.h>
54#include <machine/stdarg.h>
55
56#include "cam.h"
57#include "cam_ccb.h"
58#include "cam_periph.h"
59#include "cam_sim.h"
60#include "cam_xpt.h"
61#include "cam_xpt_sim.h"
62#include "cam_xpt_periph.h"
63#include "cam_debug.h"
64
65#include "scsi/scsi_all.h"
66#include "scsi/scsi_message.h"
67#include "scsi/scsi_pass.h"
68#include <sys/kthread.h>
69#include "opt_cam.h"
70
71/* Datastructures internal to the xpt layer */
72MALLOC_DEFINE(M_CAMXPT, "CAM XPT", "CAM XPT buffers");
73
74/* Object for deferring XPT actions to a taskqueue */
75struct xpt_task {
76 struct task task;
77 void *data1;
78 uintptr_t data2;
79};
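/*
 * Illustrative sketch only (not compiled): how an xpt_task might be used to
 * defer work to a taskqueue.  The handler, the payload and the taskqueue
 * pointer "tq" are hypothetical; TASK_INIT()/taskqueue_enqueue() are the
 * standard <sys/taskqueue.h> interfaces assumed here.
 */
#if 0
static void
xpt_example_taskfunc(void *context, int pending)
{
	struct xpt_task *task = context;

	/* act on task->data1 / task->data2, then release the carrier */
	kfree(task, M_CAMXPT);
}

static void
xpt_example_defer(void *payload, uintptr_t count, struct taskqueue *tq)
{
	struct xpt_task *task;

	task = kmalloc(sizeof(*task), M_CAMXPT, M_INTWAIT | M_ZERO);
	task->data1 = payload;
	task->data2 = count;
	TASK_INIT(&task->task, /*priority*/0, xpt_example_taskfunc, task);
	taskqueue_enqueue(tq, &task->task);
}
#endif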
80
81/*
82 * Definition of an async handler callback block. These are used to add
83 * SIMs and peripherals to the async callback lists.
84 */
85struct async_node {
86 SLIST_ENTRY(async_node) links;
87 u_int32_t event_enable; /* Async Event enables */
88 void (*callback)(void *arg, u_int32_t code,
89 struct cam_path *path, void *args);
90 void *callback_arg;
91};
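/*
 * Illustrative sketch only (not compiled): peripheral drivers do not build
 * async_node entries by hand; they register a callback with an XPT_SASYNC_CB
 * CCB and the XPT links a node onto the appropriate async list.  The
 * callback, softc and the peripheral's path below are hypothetical stand-ins.
 */
#if 0
	struct ccb_setasync csa;

	xpt_setup_ccb(&csa.ccb_h, periph->path, /*priority*/5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_FOUND_DEVICE | AC_LOST_DEVICE;
	csa.callback = example_async_cb;	/* same shape as async_node's callback */
	csa.callback_arg = example_softc;
	xpt_action((union ccb *)&csa);
#endif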
92
93SLIST_HEAD(async_list, async_node);
94SLIST_HEAD(periph_list, cam_periph);
95
96/*
97 * This is the maximum number of high powered commands (e.g. start unit)
98 * that can be outstanding at a particular time.
99 */
100#ifndef CAM_MAX_HIGHPOWER
101#define CAM_MAX_HIGHPOWER 4
102#endif
103
104/*
105 * Structure for queueing a device in a run queue.
106 * There is one run queue for allocating new ccbs,
107 * and another for sending ccbs to the controller.
108 */
109struct cam_ed_qinfo {
110 cam_pinfo pinfo;
111 struct cam_ed *device;
112};
113
114/*
115 * The CAM EDT (Existing Device Table) contains the device information for
116 * all devices for all busses in the system. The table contains a
117 * cam_ed structure for each device on the bus.
118 */
119struct cam_ed {
120 TAILQ_ENTRY(cam_ed) links;
121 struct cam_ed_qinfo alloc_ccb_entry;
122 struct cam_ed_qinfo send_ccb_entry;
123 struct cam_et *target;
124 struct cam_sim *sim;
125 lun_id_t lun_id;
126 struct camq drvq; /*
127 * Queue of type drivers wanting to do
128 * work on this device.
129 */
130 struct cam_ccbq ccbq; /* Queue of pending ccbs */
131 struct async_list asyncs; /* Async callback info for this B/T/L */
132 struct periph_list periphs; /* All attached devices */
133 u_int generation; /* Generation number */
134 struct cam_periph *owner; /* Peripheral driver's ownership tag */
135 struct xpt_quirk_entry *quirk; /* Oddities about this device */
136 /* Storage for the inquiry data */
137 cam_proto protocol;
138 u_int protocol_version;
139 cam_xport transport;
140 u_int transport_version;
141 struct scsi_inquiry_data inq_data;
142 u_int8_t inq_flags; /*
143 * Current settings for inquiry flags.
144 * This allows us to override settings
145 * like disconnection and tagged
146 * queuing for a device.
147 */
148 u_int8_t queue_flags; /* Queue flags from the control page */
149 u_int8_t serial_num_len;
150 u_int8_t *serial_num;
151 u_int32_t qfrozen_cnt;
152 u_int32_t flags;
153#define CAM_DEV_UNCONFIGURED 0x01
154#define CAM_DEV_REL_TIMEOUT_PENDING 0x02
155#define CAM_DEV_REL_ON_COMPLETE 0x04
156#define CAM_DEV_REL_ON_QUEUE_EMPTY 0x08
157#define CAM_DEV_RESIZE_QUEUE_NEEDED 0x10
158#define CAM_DEV_TAG_AFTER_COUNT 0x20
159#define CAM_DEV_INQUIRY_DATA_VALID 0x40
160#define CAM_DEV_IN_DV 0x80
161#define CAM_DEV_DV_HIT_BOTTOM 0x100
162 u_int32_t tag_delay_count;
163#define CAM_TAG_DELAY_COUNT 5
164 u_int32_t tag_saved_openings;
165 u_int32_t refcount;
166 struct callout callout;
167};
168
169/*
170 * Each target is represented by an ET (Existing Target). These
171 * entries are created when a target is successfully probed with an
172 * identify, and removed when a device fails to respond after a number
173 * of retries, or a bus rescan finds the device missing.
174 */
175struct cam_et {
176 TAILQ_HEAD(, cam_ed) ed_entries;
177 TAILQ_ENTRY(cam_et) links;
178 struct cam_eb *bus;
179 target_id_t target_id;
180 u_int32_t refcount;
181 u_int generation;
182 struct timeval last_reset; /* uptime of last reset */
183};
184
185/*
186 * Each bus is represented by an EB (Existing Bus). These entries
187 * are created by calls to xpt_bus_register and deleted by calls to
188 * xpt_bus_deregister.
189 */
190struct cam_eb {
191 TAILQ_HEAD(, cam_et) et_entries;
192 TAILQ_ENTRY(cam_eb) links;
193 path_id_t path_id;
194 struct cam_sim *sim;
195 struct timeval last_reset; /* uptime of last reset */
196 u_int32_t flags;
197#define CAM_EB_RUNQ_SCHEDULED 0x01
198 u_int32_t refcount;
199 u_int generation;
200};
201
202struct cam_path {
203 struct cam_periph *periph;
204 struct cam_eb *bus;
205 struct cam_et *target;
206 struct cam_ed *device;
207};
208
209struct xpt_quirk_entry {
210 struct scsi_inquiry_pattern inq_pat;
211 u_int8_t quirks;
212#define CAM_QUIRK_NOLUNS 0x01
213#define CAM_QUIRK_NOSERIAL 0x02
214#define CAM_QUIRK_HILUNS 0x04
215#define CAM_QUIRK_NOHILUNS 0x08
216 u_int mintags;
217 u_int maxtags;
218};
219
220static int cam_srch_hi = 0;
221TUNABLE_INT("kern.cam.cam_srch_hi", &cam_srch_hi);
222static int sysctl_cam_search_luns(SYSCTL_HANDLER_ARGS);
223SYSCTL_PROC(_kern_cam, OID_AUTO, cam_srch_hi, CTLTYPE_INT|CTLFLAG_RW, 0, 0,
224 sysctl_cam_search_luns, "I",
225 "allow search above LUN 7 for SCSI3 and greater devices");
226
227#define CAM_SCSI2_MAXLUN 8
228/*
229 * If we're not quirked to search <= the first 8 luns
230 * and we are either quirked to search above lun 8,
231 * or we're > SCSI-2 and we've enabled hilun searching,
232 * or we're > SCSI-2 and the last lun was a success,
233 * we can look for luns above lun 8.
234 */
235#define CAN_SRCH_HI_SPARSE(dv) \
236 (((dv->quirk->quirks & CAM_QUIRK_NOHILUNS) == 0) \
237 && ((dv->quirk->quirks & CAM_QUIRK_HILUNS) \
238 || (SID_ANSI_REV(&dv->inq_data) > SCSI_REV_2 && cam_srch_hi)))
239
240#define CAN_SRCH_HI_DENSE(dv) \
241 (((dv->quirk->quirks & CAM_QUIRK_NOHILUNS) == 0) \
242 && ((dv->quirk->quirks & CAM_QUIRK_HILUNS) \
243 || (SID_ANSI_REV(&dv->inq_data) > SCSI_REV_2)))
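/*
 * Worked example (illustrative): an unquirked SCSI-3 array with
 * kern.cam.cam_srch_hi=1 satisfies CAN_SRCH_HI_SPARSE(), so LUN probing may
 * continue past LUN 7; a plain SCSI-2 disk without CAM_QUIRK_HILUNS does not
 * satisfy either macro.
 */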
244
245typedef enum {
246 XPT_FLAG_OPEN = 0x01
247} xpt_flags;
248
249struct xpt_softc {
250 xpt_flags flags;
251 u_int32_t xpt_generation;
252
253 /* number of high powered commands that can go through right now */
254 STAILQ_HEAD(highpowerlist, ccb_hdr) highpowerq;
255 int num_highpower;
256
257 /* queue for handling async rescan requests. */
258 TAILQ_HEAD(, ccb_hdr) ccb_scanq;
259 int ccb_scanq_running;
260
261 /* Registered busses */
262 TAILQ_HEAD(,cam_eb) xpt_busses;
263 u_int bus_generation;
264
265 struct intr_config_hook *xpt_config_hook;
266
267 struct lock xpt_topo_lock;
268 struct lock xpt_lock;
269};
270
271static const char quantum[] = "QUANTUM";
272static const char sony[] = "SONY";
273static const char west_digital[] = "WDIGTL";
274static const char samsung[] = "SAMSUNG";
275static const char seagate[] = "SEAGATE";
276static const char microp[] = "MICROP";
277
278static struct xpt_quirk_entry xpt_quirk_table[] =
279{
280 {
281 /* Reports QUEUE FULL for temporary resource shortages */
282 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP39100*", "*" },
283 /*quirks*/0, /*mintags*/24, /*maxtags*/32
284 },
285 {
286 /* Reports QUEUE FULL for temporary resource shortages */
287 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP34550*", "*" },
288 /*quirks*/0, /*mintags*/24, /*maxtags*/32
289 },
290 {
291 /* Reports QUEUE FULL for temporary resource shortages */
292 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "XP32275*", "*" },
293 /*quirks*/0, /*mintags*/24, /*maxtags*/32
294 },
295 {
296 /* Broken tagged queuing drive */
297 { T_DIRECT, SIP_MEDIA_FIXED, microp, "4421-07*", "*" },
298 /*quirks*/0, /*mintags*/0, /*maxtags*/0
299 },
300 {
301 /* Broken tagged queuing drive */
302 { T_DIRECT, SIP_MEDIA_FIXED, "HP", "C372*", "*" },
303 /*quirks*/0, /*mintags*/0, /*maxtags*/0
304 },
305 {
306 /* Broken tagged queuing drive */
307 { T_DIRECT, SIP_MEDIA_FIXED, microp, "3391*", "x43h" },
308 /*quirks*/0, /*mintags*/0, /*maxtags*/0
309 },
310 {
311 /*
312 * Unfortunately, the Quantum Atlas III has the same
313 * problem as the Atlas II drives above.
314 * Reported by: "Johan Granlund" <johan@granlund.nu>
315 *
316 * For future reference, the drive with the problem was:
317 * QUANTUM QM39100TD-SW N1B0
318 *
319 * It's possible that Quantum will fix the problem in later
320 * firmware revisions. If that happens, the quirk entry
321 * will need to be made specific to the firmware revisions
322 * with the problem.
323 *
324 */
325 /* Reports QUEUE FULL for temporary resource shortages */
326 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM39100*", "*" },
327 /*quirks*/0, /*mintags*/24, /*maxtags*/32
328 },
329 {
330 /*
331 * 18 Gig Atlas III, same problem as the 9G version.
332 * Reported by: Andre Albsmeier
333 * <andre.albsmeier@mchp.siemens.de>
334 *
335 * For future reference, the drive with the problem was:
336 * QUANTUM QM318000TD-S N491
337 */
338 /* Reports QUEUE FULL for temporary resource shortages */
339 { T_DIRECT, SIP_MEDIA_FIXED, quantum, "QM318000*", "*" },
340 /*quirks*/0, /*mintags*/24, /*maxtags*/32
341 },
342 {
343 /*
344 * Broken tagged queuing drive
345 * Reported by: Bret Ford <bford@uop.cs.uop.edu>
346 * and: Martin Renters <martin@tdc.on.ca>
347 */
348 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST410800*", "71*" },
349 /*quirks*/0, /*mintags*/0, /*maxtags*/0
350 },
351 /*
352 * The Seagate Medalist Pro drives have very poor write
353 * performance with anything more than 2 tags.
354 *
355 * Reported by: Paul van der Zwan <paulz@trantor.xs4all.nl>
356 * Drive: <SEAGATE ST36530N 1444>
357 *
358 * Reported by: Jeremy Lea <reg@shale.csir.co.za>
359 * Drive: <SEAGATE ST34520W 1281>
360 *
361 * No one has actually reported that the 9G version
362 * (ST39140*) of the Medalist Pro has the same problem, but
363 * we're assuming that it does because the 4G and 6.5G
364 * versions of the drive are broken.
365 */
366 {
367 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST34520*", "*"},
368 /*quirks*/0, /*mintags*/2, /*maxtags*/2
369 },
370 {
371 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST36530*", "*"},
372 /*quirks*/0, /*mintags*/2, /*maxtags*/2
373 },
374 {
375 { T_DIRECT, SIP_MEDIA_FIXED, seagate, "ST39140*", "*"},
376 /*quirks*/0, /*mintags*/2, /*maxtags*/2
377 },
378 {
379 /*
380 * Slow when tagged queueing is enabled. Write performance
381 * steadily drops off with more and more concurrent
382 * transactions. Best sequential write performance with
383 * tagged queueing turned off and write caching turned on.
384 *
385 * PR: kern/10398
386 * Submitted by: Hideaki Okada <hokada@isl.melco.co.jp>
387 * Drive: DCAS-34330 w/ "S65A" firmware.
388 *
389 * The drive with the problem had the "S65A" firmware
390 * revision, and has also been reported (by Stephen J.
391 * Roznowski <sjr@home.net>) for a drive with the "S61A"
392 * firmware revision.
393 *
394 * Although no one has reported problems with the 2 gig
395 * version of the DCAS drive, the assumption is that it
396 * has the same problems as the 4 gig version. Therefore
397		 * this quirk entry disables tagged queueing for all
398 * DCAS drives.
399 */
400 { T_DIRECT, SIP_MEDIA_FIXED, "IBM", "DCAS*", "*" },
401 /*quirks*/0, /*mintags*/0, /*maxtags*/0
402 },
403 {
404 /* Broken tagged queuing drive */
405 { T_DIRECT, SIP_MEDIA_REMOVABLE, "iomega", "jaz*", "*" },
406 /*quirks*/0, /*mintags*/0, /*maxtags*/0
407 },
408 {
409 /* Broken tagged queuing drive */
410 { T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CFP2107*", "*" },
411 /*quirks*/0, /*mintags*/0, /*maxtags*/0
412 },
413 {
414 /* This does not support other than LUN 0 */
415 { T_DIRECT, SIP_MEDIA_FIXED, "VMware*", "*", "*" },
416 CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255
417 },
418 {
419 /*
420 * Broken tagged queuing drive.
421 * Submitted by:
422 * NAKAJI Hiroyuki <nakaji@zeisei.dpri.kyoto-u.ac.jp>
423 * in PR kern/9535
424 */
425 { T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN34324U*", "*" },
426 /*quirks*/0, /*mintags*/0, /*maxtags*/0
427 },
428 {
429 /*
430 * Slow when tagged queueing is enabled. (1.5MB/sec versus
431 * 8MB/sec.)
432 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
433 * Best performance with these drives is achieved with
434 * tagged queueing turned off, and write caching turned on.
435 */
436 { T_DIRECT, SIP_MEDIA_FIXED, west_digital, "WDE*", "*" },
437 /*quirks*/0, /*mintags*/0, /*maxtags*/0
438 },
439 {
440 /*
441 * Slow when tagged queueing is enabled. (1.5MB/sec versus
442 * 8MB/sec.)
443 * Submitted by: Andrew Gallatin <gallatin@cs.duke.edu>
444 * Best performance with these drives is achieved with
445 * tagged queueing turned off, and write caching turned on.
446 */
447 { T_DIRECT, SIP_MEDIA_FIXED, west_digital, "ENTERPRISE", "*" },
448 /*quirks*/0, /*mintags*/0, /*maxtags*/0
449 },
450 {
451 /*
452 * Doesn't handle queue full condition correctly,
453 * so we need to limit maxtags to what the device
454 * can handle instead of determining this automatically.
455 */
456 { T_DIRECT, SIP_MEDIA_FIXED, samsung, "WN321010S*", "*" },
457 /*quirks*/0, /*mintags*/2, /*maxtags*/32
458 },
459 {
460 /* Really only one LUN */
461 { T_ENCLOSURE, SIP_MEDIA_FIXED, "SUN", "SENA", "*" },
462 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
463 },
464 {
465 /* I can't believe we need a quirk for DPT volumes. */
466 { T_ANY, SIP_MEDIA_FIXED|SIP_MEDIA_REMOVABLE, "DPT", "*", "*" },
467 CAM_QUIRK_NOLUNS,
468 /*mintags*/0, /*maxtags*/255
469 },
470 {
471 /*
472 * Many Sony CDROM drives don't like multi-LUN probing.
473 */
474 { T_CDROM, SIP_MEDIA_REMOVABLE, sony, "CD-ROM CDU*", "*" },
475 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
476 },
477 {
478 /*
479 * This drive doesn't like multiple LUN probing.
480 * Submitted by: Parag Patel <parag@cgt.com>
481 */
482 { T_WORM, SIP_MEDIA_REMOVABLE, sony, "CD-R CDU9*", "*" },
483 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
484 },
485 {
486 { T_WORM, SIP_MEDIA_REMOVABLE, "YAMAHA", "CDR100*", "*" },
487 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
488 },
489 {
490 /*
491 * The 8200 doesn't like multi-lun probing, and probably
492		 * doesn't like serial number requests either.
493 */
494 {
495 T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
496 "EXB-8200*", "*"
497 },
498 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
499 },
500 {
501 /*
502 * Let's try the same as above, but for a drive that says
503 * it's an IPL-6860 but is actually an EXB 8200.
504 */
505 {
506 T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "EXABYTE",
507 "IPL-6860*", "*"
508 },
509 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
510 },
511 {
512 /*
513 * These Hitachi drives don't like multi-lun probing.
514 * The PR submitter has a DK319H, but says that the Linux
515 * kernel has a similar work-around for the DK312 and DK314,
516 * so all DK31* drives are quirked here.
517 * PR: misc/18793
518 * Submitted by: Paul Haddad <paul@pth.com>
519 */
520 { T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK31*", "*" },
521 CAM_QUIRK_NOLUNS, /*mintags*/2, /*maxtags*/255
522 },
523 {
524 /*
525		 * The Hitachi CJ series with J8A8 firmware apparently has
526 * problems with tagged commands.
527 * PR: 23536
528 * Reported by: amagai@nue.org
529 */
530 { T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "DK32CJ*", "J8A8" },
531 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
532 },
533 {
534 /*
535 * These are the large storage arrays.
536 * Submitted by: William Carrel <william.carrel@infospace.com>
537 */
538 { T_DIRECT, SIP_MEDIA_FIXED, "HITACHI", "OPEN*", "*" },
539 CAM_QUIRK_HILUNS, 2, 1024
540 },
541 {
542 /*
543 * This old revision of the TDC3600 is also SCSI-1, and
544 * hangs upon serial number probing.
545 */
546 {
547 T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "TANDBERG",
548 " TDC 3600", "U07:"
549 },
550 CAM_QUIRK_NOSERIAL, /*mintags*/0, /*maxtags*/0
551 },
552 {
553 /*
554		 * Would respond to all LUNs if asked for.
555 */
556 {
557 T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "CALIPER",
558 "CP150", "*"
559 },
560 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
561 },
562 {
563 /*
564		 * Would respond to all LUNs if asked for.
565 */
566 {
567 T_SEQUENTIAL, SIP_MEDIA_REMOVABLE, "KENNEDY",
568 "96X2*", "*"
569 },
570 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
571 },
572 {
573 /* Submitted by: Matthew Dodd <winter@jurai.net> */
574 { T_PROCESSOR, SIP_MEDIA_FIXED, "Cabletrn", "EA41*", "*" },
575 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
576 },
577 {
578 /* Submitted by: Matthew Dodd <winter@jurai.net> */
579 { T_PROCESSOR, SIP_MEDIA_FIXED, "CABLETRN", "EA41*", "*" },
580 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
581 },
582 {
583 /* TeraSolutions special settings for TRC-22 RAID */
584 { T_DIRECT, SIP_MEDIA_FIXED, "TERASOLU", "TRC-22", "*" },
585 /*quirks*/0, /*mintags*/55, /*maxtags*/255
586 },
587 {
588 /* Veritas Storage Appliance */
589 { T_DIRECT, SIP_MEDIA_FIXED, "VERITAS", "*", "*" },
590 CAM_QUIRK_HILUNS, /*mintags*/2, /*maxtags*/1024
591 },
592 {
593 /*
594 * Would respond to all LUNs. Device type and removable
595 * flag are jumper-selectable.
596 */
597 { T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED, "MaxOptix",
598 "Tahiti 1", "*"
599 },
600 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
601 },
602 {
603 /* EasyRAID E5A aka. areca ARC-6010 */
604 { T_DIRECT, SIP_MEDIA_FIXED, "easyRAID", "*", "*" },
605 CAM_QUIRK_NOHILUNS, /*mintags*/2, /*maxtags*/255
606 },
607 {
608 { T_ENCLOSURE, SIP_MEDIA_FIXED, "DP", "BACKPLANE", "*" },
609 CAM_QUIRK_NOLUNS, /*mintags*/0, /*maxtags*/0
610 },
611 {
612 /* Default tagged queuing parameters for all devices */
613 {
614 T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
615 /*vendor*/"*", /*product*/"*", /*revision*/"*"
616 },
617 /*quirks*/0, /*mintags*/2, /*maxtags*/255
618 },
619};
620
621static const int xpt_quirk_table_size =
622 sizeof(xpt_quirk_table) / sizeof(*xpt_quirk_table);
623
624typedef enum {
625 DM_RET_COPY = 0x01,
626 DM_RET_FLAG_MASK = 0x0f,
627 DM_RET_NONE = 0x00,
628 DM_RET_STOP = 0x10,
629 DM_RET_DESCEND = 0x20,
630 DM_RET_ERROR = 0x30,
631 DM_RET_ACTION_MASK = 0xf0
632} dev_match_ret;
633
634typedef enum {
635 XPT_DEPTH_BUS,
636 XPT_DEPTH_TARGET,
637 XPT_DEPTH_DEVICE,
638 XPT_DEPTH_PERIPH
639} xpt_traverse_depth;
640
641struct xpt_traverse_config {
642 xpt_traverse_depth depth;
643 void *tr_func;
644 void *tr_arg;
645};
646
647typedef int xpt_busfunc_t (struct cam_eb *bus, void *arg);
648typedef int xpt_targetfunc_t (struct cam_et *target, void *arg);
649typedef int xpt_devicefunc_t (struct cam_ed *device, void *arg);
650typedef int xpt_periphfunc_t (struct cam_periph *periph, void *arg);
651typedef int xpt_pdrvfunc_t (struct periph_driver **pdrv, void *arg);
652
653/* Transport layer configuration information */
654static struct xpt_softc xsoftc;
655
656/* Queues for our software interrupt handler */
657typedef TAILQ_HEAD(cam_isrq, ccb_hdr) cam_isrq_t;
658typedef TAILQ_HEAD(cam_simq, cam_sim) cam_simq_t;
659static cam_simq_t cam_simq;
660static struct spinlock cam_simq_spin;
661
662struct cam_periph *xpt_periph;
663
664static periph_init_t xpt_periph_init;
665
666static periph_init_t probe_periph_init;
667
668static struct periph_driver xpt_driver =
669{
670 xpt_periph_init, "xpt",
671 TAILQ_HEAD_INITIALIZER(xpt_driver.units)
672};
673
674static struct periph_driver probe_driver =
675{
676 probe_periph_init, "probe",
677 TAILQ_HEAD_INITIALIZER(probe_driver.units)
678};
679
680PERIPHDRIVER_DECLARE(xpt, xpt_driver);
681PERIPHDRIVER_DECLARE(probe, probe_driver);
682
683#define XPT_CDEV_MAJOR 104
684
685static d_open_t xptopen;
686static d_close_t xptclose;
687static d_ioctl_t xptioctl;
688
689static struct dev_ops xpt_ops = {
690 { "xpt", XPT_CDEV_MAJOR, 0 },
691 .d_open = xptopen,
692 .d_close = xptclose,
693 .d_ioctl = xptioctl
694};
695
696static void dead_sim_action(struct cam_sim *sim, union ccb *ccb);
697static void dead_sim_poll(struct cam_sim *sim);
698
699/* Dummy SIM that is used when the real one has gone. */
700static struct cam_sim cam_dead_sim;
701static struct lock cam_dead_lock;
702
703/* Storage for debugging datastructures */
704#ifdef CAMDEBUG
705struct cam_path *cam_dpath;
706u_int32_t cam_dflags;
707u_int32_t cam_debug_delay;
708#endif
709
710#if defined(CAM_DEBUG_FLAGS) && !defined(CAMDEBUG)
711#error "You must have options CAMDEBUG to use options CAM_DEBUG_FLAGS"
712#endif
713
714/*
715 * In order to enable the CAM_DEBUG_* options, the user must have CAMDEBUG
716 * enabled. Also, the user must have either none, or all of CAM_DEBUG_BUS,
717 * CAM_DEBUG_TARGET, and CAM_DEBUG_LUN specified.
718 */
719#if defined(CAM_DEBUG_BUS) || defined(CAM_DEBUG_TARGET) \
720 || defined(CAM_DEBUG_LUN)
721#ifdef CAMDEBUG
722#if !defined(CAM_DEBUG_BUS) || !defined(CAM_DEBUG_TARGET) \
723 || !defined(CAM_DEBUG_LUN)
724#error "You must define all or none of CAM_DEBUG_BUS, CAM_DEBUG_TARGET \
725 and CAM_DEBUG_LUN"
726#endif /* !CAM_DEBUG_BUS || !CAM_DEBUG_TARGET || !CAM_DEBUG_LUN */
727#else /* !CAMDEBUG */
728#error "You must use options CAMDEBUG if you use the CAM_DEBUG_* options"
729#endif /* CAMDEBUG */
730#endif /* CAM_DEBUG_BUS || CAM_DEBUG_TARGET || CAM_DEBUG_LUN */
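/*
 * Illustrative kernel config fragment (assumed syntax) enabling these debug
 * options together, per the all-or-none constraint above; -1 is assumed to
 * act as a wildcard for bus/target/lun:
 *
 *	options	CAMDEBUG
 *	options	CAM_DEBUG_BUS=-1
 *	options	CAM_DEBUG_TARGET=-1
 *	options	CAM_DEBUG_LUN=-1
 *	options	CAM_DEBUG_FLAGS=CAM_DEBUG_INFO
 */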
731
732/* Our boot-time initialization hook */
733static int cam_module_event_handler(module_t, int /*modeventtype_t*/, void *);
734
735static moduledata_t cam_moduledata = {
736 "cam",
737 cam_module_event_handler,
738 NULL
739};
740
741static int xpt_init(void *);
742
743DECLARE_MODULE(cam, cam_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
744MODULE_VERSION(cam, 1);
745
746
747static cam_status xpt_compile_path(struct cam_path *new_path,
748 struct cam_periph *perph,
749 path_id_t path_id,
750 target_id_t target_id,
751 lun_id_t lun_id);
752
753static void xpt_release_path(struct cam_path *path);
754
755static void xpt_async_bcast(struct async_list *async_head,
756 u_int32_t async_code,
757 struct cam_path *path,
758 void *async_arg);
759static void xpt_dev_async(u_int32_t async_code,
760 struct cam_eb *bus,
761 struct cam_et *target,
762 struct cam_ed *device,
763 void *async_arg);
764static path_id_t xptnextfreepathid(void);
765static path_id_t xptpathid(const char *sim_name, int sim_unit, int sim_bus);
766static union ccb *xpt_get_ccb(struct cam_ed *device);
767static int xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
768 u_int32_t new_priority);
769static void xpt_run_dev_allocq(struct cam_eb *bus);
770static void xpt_run_dev_sendq(struct cam_eb *bus);
771static timeout_t xpt_release_devq_timeout;
772static void xpt_release_bus(struct cam_eb *bus);
773static void xpt_release_devq_device(struct cam_ed *dev, u_int count,
774 int run_queue);
775static struct cam_et*
776 xpt_alloc_target(struct cam_eb *bus, target_id_t target_id);
777static void xpt_release_target(struct cam_eb *bus, struct cam_et *target);
778static struct cam_ed*
779 xpt_alloc_device(struct cam_eb *bus, struct cam_et *target,
780 lun_id_t lun_id);
781static void xpt_release_device(struct cam_eb *bus, struct cam_et *target,
782 struct cam_ed *device);
783static u_int32_t xpt_dev_ccbq_resize(struct cam_path *path, int newopenings);
784static struct cam_eb*
785 xpt_find_bus(path_id_t path_id);
786static struct cam_et*
787 xpt_find_target(struct cam_eb *bus, target_id_t target_id);
788static struct cam_ed*
789 xpt_find_device(struct cam_et *target, lun_id_t lun_id);
790static void xpt_scan_bus(struct cam_periph *periph, union ccb *ccb);
791static void xpt_scan_lun(struct cam_periph *periph,
792 struct cam_path *path, cam_flags flags,
793 union ccb *ccb);
794static void xptscandone(struct cam_periph *periph, union ccb *done_ccb);
795static xpt_busfunc_t xptconfigbuscountfunc;
796static xpt_busfunc_t xptconfigfunc;
797static void xpt_config(void *arg);
798static xpt_devicefunc_t xptpassannouncefunc;
799static void xpt_finishconfig(struct cam_periph *periph, union ccb *ccb);
800static void xptaction(struct cam_sim *sim, union ccb *work_ccb);
801static void xptpoll(struct cam_sim *sim);
802static inthand2_t swi_cambio;
803static void camisr(void *);
804static void camisr_runqueue(struct cam_sim *);
805static dev_match_ret xptbusmatch(struct dev_match_pattern *patterns,
806 u_int num_patterns, struct cam_eb *bus);
807static dev_match_ret xptdevicematch(struct dev_match_pattern *patterns,
808 u_int num_patterns,
809 struct cam_ed *device);
810static dev_match_ret xptperiphmatch(struct dev_match_pattern *patterns,
811 u_int num_patterns,
812 struct cam_periph *periph);
813static xpt_busfunc_t xptedtbusfunc;
814static xpt_targetfunc_t xptedttargetfunc;
815static xpt_devicefunc_t xptedtdevicefunc;
816static xpt_periphfunc_t xptedtperiphfunc;
817static xpt_pdrvfunc_t xptplistpdrvfunc;
818static xpt_periphfunc_t xptplistperiphfunc;
819static int xptedtmatch(struct ccb_dev_match *cdm);
820static int xptperiphlistmatch(struct ccb_dev_match *cdm);
821static int xptbustraverse(struct cam_eb *start_bus,
822 xpt_busfunc_t *tr_func, void *arg);
823static int xpttargettraverse(struct cam_eb *bus,
824 struct cam_et *start_target,
825 xpt_targetfunc_t *tr_func, void *arg);
826static int xptdevicetraverse(struct cam_et *target,
827 struct cam_ed *start_device,
828 xpt_devicefunc_t *tr_func, void *arg);
829static int xptperiphtraverse(struct cam_ed *device,
830 struct cam_periph *start_periph,
831 xpt_periphfunc_t *tr_func, void *arg);
832static int xptpdrvtraverse(struct periph_driver **start_pdrv,
833 xpt_pdrvfunc_t *tr_func, void *arg);
834static int xptpdperiphtraverse(struct periph_driver **pdrv,
835 struct cam_periph *start_periph,
836 xpt_periphfunc_t *tr_func,
837 void *arg);
838static xpt_busfunc_t xptdefbusfunc;
839static xpt_targetfunc_t xptdeftargetfunc;
840static xpt_devicefunc_t xptdefdevicefunc;
841static xpt_periphfunc_t xptdefperiphfunc;
842static int xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg);
843static int xpt_for_all_devices(xpt_devicefunc_t *tr_func,
844 void *arg);
845static xpt_devicefunc_t xptsetasyncfunc;
846static xpt_busfunc_t xptsetasyncbusfunc;
847static cam_status xptregister(struct cam_periph *periph,
848 void *arg);
849static cam_status proberegister(struct cam_periph *periph,
850 void *arg);
851static void probeschedule(struct cam_periph *probe_periph);
852static void probestart(struct cam_periph *periph, union ccb *start_ccb);
853static void proberequestdefaultnegotiation(struct cam_periph *periph);
854static int proberequestbackoff(struct cam_periph *periph,
855 struct cam_ed *device);
856static void probedone(struct cam_periph *periph, union ccb *done_ccb);
857static void probecleanup(struct cam_periph *periph);
858static void xpt_find_quirk(struct cam_ed *device);
859static void xpt_devise_transport(struct cam_path *path);
860static void xpt_set_transfer_settings(struct ccb_trans_settings *cts,
861 struct cam_ed *device,
862 int async_update);
863static void xpt_toggle_tags(struct cam_path *path);
864static void xpt_start_tags(struct cam_path *path);
865static __inline int xpt_schedule_dev_allocq(struct cam_eb *bus,
866 struct cam_ed *dev);
867static __inline int xpt_schedule_dev_sendq(struct cam_eb *bus,
868 struct cam_ed *dev);
869static __inline int periph_is_queued(struct cam_periph *periph);
870static __inline int device_is_alloc_queued(struct cam_ed *device);
871static __inline int device_is_send_queued(struct cam_ed *device);
872static __inline int dev_allocq_is_runnable(struct cam_devq *devq);
873
874static __inline int
875xpt_schedule_dev_allocq(struct cam_eb *bus, struct cam_ed *dev)
876{
877 int retval;
878
879 if (bus->sim->devq && dev->ccbq.devq_openings > 0) {
880 if ((dev->flags & CAM_DEV_RESIZE_QUEUE_NEEDED) != 0) {
881 cam_ccbq_resize(&dev->ccbq,
882 dev->ccbq.dev_openings
883 + dev->ccbq.dev_active);
884 dev->flags &= ~CAM_DEV_RESIZE_QUEUE_NEEDED;
885 }
886 /*
887 * The priority of a device waiting for CCB resources
888		 * is that of the highest priority peripheral driver
889 * enqueued.
890 */
891 retval = xpt_schedule_dev(&bus->sim->devq->alloc_queue,
892 &dev->alloc_ccb_entry.pinfo,
893 CAMQ_GET_HEAD(&dev->drvq)->priority);
894 } else {
895 retval = 0;
896 }
897
898 return (retval);
899}
900
901static __inline int
902xpt_schedule_dev_sendq(struct cam_eb *bus, struct cam_ed *dev)
903{
904 int retval;
905
906 if (bus->sim->devq && dev->ccbq.dev_openings > 0) {
907 /*
908 * The priority of a device waiting for controller
909		 * resources is that of the highest priority CCB
910 * enqueued.
911 */
912 retval =
913 xpt_schedule_dev(&bus->sim->devq->send_queue,
914 &dev->send_ccb_entry.pinfo,
915 CAMQ_GET_HEAD(&dev->ccbq.queue)->priority);
916 } else {
917 retval = 0;
918 }
919 return (retval);
920}
921
922static __inline int
923periph_is_queued(struct cam_periph *periph)
924{
925 return (periph->pinfo.index != CAM_UNQUEUED_INDEX);
926}
927
928static __inline int
929device_is_alloc_queued(struct cam_ed *device)
930{
931 return (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
932}
933
934static __inline int
935device_is_send_queued(struct cam_ed *device)
936{
937 return (device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX);
938}
939
940static __inline int
941dev_allocq_is_runnable(struct cam_devq *devq)
942{
943 /*
944 * Have work to do.
945 * Have space to do more work.
946 * Allowed to do work.
947 */
948 return ((devq->alloc_queue.qfrozen_cnt == 0)
949 && (devq->alloc_queue.entries > 0)
950 && (devq->alloc_openings > 0));
951}
952
953static void
954xpt_periph_init(void)
955{
956 dev_ops_add(&xpt_ops, 0, 0);
957 make_dev(&xpt_ops, 0, UID_ROOT, GID_OPERATOR, 0600, "xpt0");
958}
959
960static void
961probe_periph_init(void)
962{
963}
964
965
966static void
967xptdone(struct cam_periph *periph, union ccb *done_ccb)
968{
969 /* Caller will release the CCB */
970 wakeup(&done_ccb->ccb_h.cbfcnp);
971}
972
973static int
974xptopen(struct dev_open_args *ap)
975{
976 cdev_t dev = ap->a_head.a_dev;
977
978 /*
979 * Only allow read-write access.
980 */
981 if (((ap->a_oflags & FWRITE) == 0) || ((ap->a_oflags & FREAD) == 0))
982 return(EPERM);
983
984 /*
985 * We don't allow nonblocking access.
986 */
987 if ((ap->a_oflags & O_NONBLOCK) != 0) {
988 kprintf("%s: can't do nonblocking access\n", devtoname(dev));
989 return(ENODEV);
990 }
991
992 /* Mark ourselves open */
993 lockmgr(&xsoftc.xpt_lock, LK_EXCLUSIVE);
994 xsoftc.flags |= XPT_FLAG_OPEN;
995 lockmgr(&xsoftc.xpt_lock, LK_RELEASE);
996
997 return(0);
998}
999
1000static int
1001xptclose(struct dev_close_args *ap)
1002{
1003
1004 /* Mark ourselves closed */
1005 lockmgr(&xsoftc.xpt_lock, LK_EXCLUSIVE);
1006 xsoftc.flags &= ~XPT_FLAG_OPEN;
1007 lockmgr(&xsoftc.xpt_lock, LK_RELEASE);
1008
1009 return(0);
1010}
1011
1012/*
1013 * Don't automatically grab the xpt softc lock here even though this is going
1014 * through the xpt device. The xpt device is really just a back door for
1015 * accessing other devices and SIMs, so the right thing to do is to grab
1016 * the appropriate SIM lock once the bus/SIM is located.
1017 */
1018static int
1019xptioctl(struct dev_ioctl_args *ap)
1020{
1021 int error;
1022
1023 error = 0;
1024
1025 switch(ap->a_cmd) {
1026 /*
1027 * For the transport layer CAMIOCOMMAND ioctl, we really only want
1028 * to accept CCB types that don't quite make sense to send through a
1029 * passthrough driver.
1030 */
1031 case CAMIOCOMMAND: {
1032 union ccb *ccb;
1033 union ccb *inccb;
1034 struct cam_eb *bus;
1035
1036 inccb = (union ccb *)ap->a_data;
1037
1038 bus = xpt_find_bus(inccb->ccb_h.path_id);
1039 if (bus == NULL) {
1040 error = EINVAL;
1041 break;
1042 }
1043
1044 switch(inccb->ccb_h.func_code) {
1045 case XPT_SCAN_BUS:
1046 case XPT_RESET_BUS:
1047 if ((inccb->ccb_h.target_id != CAM_TARGET_WILDCARD)
1048 || (inccb->ccb_h.target_lun != CAM_LUN_WILDCARD)) {
1049 error = EINVAL;
1050 break;
1051 }
1052 /* FALLTHROUGH */
1053 case XPT_PATH_INQ:
1054 case XPT_ENG_INQ:
1055 case XPT_SCAN_LUN:
1056
1057 ccb = xpt_alloc_ccb();
1058
1059 CAM_SIM_LOCK(bus->sim);
1060
1061 /*
1062 * Create a path using the bus, target, and lun the
1063 * user passed in.
1064 */
1065 if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
1066 inccb->ccb_h.path_id,
1067 inccb->ccb_h.target_id,
1068 inccb->ccb_h.target_lun) !=
1069 CAM_REQ_CMP){
1070 error = EINVAL;
1071 CAM_SIM_UNLOCK(bus->sim);
1072 xpt_free_ccb(ccb);
1073 break;
1074 }
1075 /* Ensure all of our fields are correct */
1076 xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path,
1077 inccb->ccb_h.pinfo.priority);
1078 xpt_merge_ccb(ccb, inccb);
1079 ccb->ccb_h.cbfcnp = xptdone;
1080 cam_periph_runccb(ccb, NULL, 0, 0, NULL);
1081 bcopy(ccb, inccb, sizeof(union ccb));
1082 xpt_free_path(ccb->ccb_h.path);
1083 xpt_free_ccb(ccb);
1084 CAM_SIM_UNLOCK(bus->sim);
1085 break;
1086
1087 case XPT_DEBUG: {
1088 union ccb ccb;
1089
1090 /*
1091 * This is an immediate CCB, so it's okay to
1092 * allocate it on the stack.
1093 */
1094
1095 CAM_SIM_LOCK(bus->sim);
1096
1097 /*
1098 * Create a path using the bus, target, and lun the
1099 * user passed in.
1100 */
1101 if (xpt_create_path(&ccb.ccb_h.path, xpt_periph,
1102 inccb->ccb_h.path_id,
1103 inccb->ccb_h.target_id,
1104 inccb->ccb_h.target_lun) !=
1105 CAM_REQ_CMP){
1106 error = EINVAL;
1107 CAM_SIM_UNLOCK(bus->sim);
1108 break;
1109 }
1110 /* Ensure all of our fields are correct */
1111 xpt_setup_ccb(&ccb.ccb_h, ccb.ccb_h.path,
1112 inccb->ccb_h.pinfo.priority);
1113 xpt_merge_ccb(&ccb, inccb);
1114 ccb.ccb_h.cbfcnp = xptdone;
1115 xpt_action(&ccb);
1116 CAM_SIM_UNLOCK(bus->sim);
1117 bcopy(&ccb, inccb, sizeof(union ccb));
1118 xpt_free_path(ccb.ccb_h.path);
1119 break;
1120
1121 }
1122 case XPT_DEV_MATCH: {
1123 struct cam_periph_map_info mapinfo;
1124 struct cam_path *old_path;
1125
1126 /*
1127 * We can't deal with physical addresses for this
1128 * type of transaction.
1129 */
1130 if (inccb->ccb_h.flags & CAM_DATA_PHYS) {
1131 error = EINVAL;
1132 break;
1133 }
1134
1135 /*
1136 * Save this in case the caller had it set to
1137 * something in particular.
1138 */
1139 old_path = inccb->ccb_h.path;
1140
1141 /*
1142 * We really don't need a path for the matching
1143 * code. The path is needed because of the
1144 * debugging statements in xpt_action(). They
1145 * assume that the CCB has a valid path.
1146 */
1147 inccb->ccb_h.path = xpt_periph->path;
1148
1149 bzero(&mapinfo, sizeof(mapinfo));
1150
1151 /*
1152 * Map the pattern and match buffers into kernel
1153 * virtual address space.
1154 */
1155 error = cam_periph_mapmem(inccb, &mapinfo);
1156
1157 if (error) {
1158 inccb->ccb_h.path = old_path;
1159 break;
1160 }
1161
1162 /*
1163 * This is an immediate CCB, we can send it on directly.
1164 */
1165 xpt_action(inccb);
1166
1167 /*
1168 * Map the buffers back into user space.
1169 */
1170 cam_periph_unmapmem(inccb, &mapinfo);
1171
1172 inccb->ccb_h.path = old_path;
1173
1174 error = 0;
1175 break;
1176 }
1177 default:
1178 error = ENOTSUP;
1179 break;
1180 }
1181 xpt_release_bus(bus);
1182 break;
1183 }
1184 /*
1185	 * This is the getpassthru ioctl. It takes an XPT_GDEVLIST ccb as input,
1186	 * with the peripheral driver name and unit name filled in. The other
1187 * fields don't really matter as input. The passthrough driver name
1188 * ("pass"), and unit number are passed back in the ccb. The current
1189 * device generation number, and the index into the device peripheral
1190 * driver list, and the status are also passed back. Note that
1191 * since we do everything in one pass, unlike the XPT_GDEVLIST ccb,
1192 * we never return a status of CAM_GDEVLIST_LIST_CHANGED. It is
1193 * (or rather should be) impossible for the device peripheral driver
1194 * list to change since we look at the whole thing in one pass, and
1195 * we do it with lock protection.
1196 *
1197 */
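	/*
	 * Illustrative userland usage (hypothetical, not part of this file):
	 * a caller such as libcam's device-open path fills in the peripheral
	 * name and unit number it knows about, then asks /dev/xpt0 (opened as
	 * "xpt_fd" below) for the matching passthrough instance, roughly:
	 *
	 *	union ccb ccb;
	 *
	 *	bzero(&ccb, sizeof(ccb));
	 *	ccb.ccb_h.func_code = XPT_GDEVLIST;
	 *	strncpy(ccb.cgdl.periph_name, "da", sizeof(ccb.cgdl.periph_name));
	 *	ccb.cgdl.unit_number = 1;
	 *	if (ioctl(xpt_fd, CAMGETPASSTHRU, &ccb) == 0 &&
	 *	    ccb.ccb_h.status == CAM_REQ_CMP)
	 *		printf("%s%d\n", ccb.cgdl.periph_name,
	 *		    ccb.cgdl.unit_number);
	 */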
1198 case CAMGETPASSTHRU: {
1199 union ccb *ccb;
1200 struct cam_periph *periph;
1201 struct periph_driver **p_drv;
1202 char *name;
1203 u_int unit;
1204 u_int cur_generation;
1205 int base_periph_found;
1206 int splbreaknum;
1207
1208 ccb = (union ccb *)ap->a_data;
1209 unit = ccb->cgdl.unit_number;
1210 name = ccb->cgdl.periph_name;
1211 /*
1212 * Every 100 devices, we want to drop our lock protection to
1213 * give the software interrupt handler a chance to run.
1214 * Most systems won't run into this check, but this should
1215 * avoid starvation in the software interrupt handler in
1216 * large systems.
1217 */
1218 splbreaknum = 100;
1219
1220 ccb = (union ccb *)ap->a_data;
1221
1222 base_periph_found = 0;
1223
1224 /*
1225 * Sanity check -- make sure we don't get a null peripheral
1226 * driver name.
1227 */
1228 if (*ccb->cgdl.periph_name == '\0') {
1229 error = EINVAL;
1230 break;
1231 }
1232
1233 /* Keep the list from changing while we traverse it */
1234 lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
1235ptstartover:
1236 cur_generation = xsoftc.xpt_generation;
1237
1238 /* first find our driver in the list of drivers */
1239 for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
1240 if (strcmp((*p_drv)->driver_name, name) == 0)
1241 break;
1242 }
1243
1244 if (*p_drv == NULL) {
1245 lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);
1246 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1247 ccb->cgdl.status = CAM_GDEVLIST_ERROR;
1248 *ccb->cgdl.periph_name = '\0';
1249 ccb->cgdl.unit_number = 0;
1250 error = ENOENT;
1251 break;
1252 }
1253
1254 /*
1255 * Run through every peripheral instance of this driver
1256 * and check to see whether it matches the unit passed
1257 * in by the user. If it does, get out of the loops and
1258 * find the passthrough driver associated with that
1259 * peripheral driver.
1260 */
1261 TAILQ_FOREACH(periph, &(*p_drv)->units, unit_links) {
1262
1263 if (periph->unit_number == unit) {
1264 break;
1265 } else if (--splbreaknum == 0) {
1266 lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);
1267 lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
1268 splbreaknum = 100;
1269 if (cur_generation != xsoftc.xpt_generation)
1270 goto ptstartover;
1271 }
1272 }
1273 /*
1274 * If we found the peripheral driver that the user passed
1275 * in, go through all of the peripheral drivers for that
1276 * particular device and look for a passthrough driver.
1277 */
1278 if (periph != NULL) {
1279 struct cam_ed *device;
1280 int i;
1281
1282 base_periph_found = 1;
1283 device = periph->path->device;
1284 for (i = 0, periph = SLIST_FIRST(&device->periphs);
1285 periph != NULL;
1286 periph = SLIST_NEXT(periph, periph_links), i++) {
1287 /*
1288 * Check to see whether we have a
1289 * passthrough device or not.
1290 */
1291 if (strcmp(periph->periph_name, "pass") == 0) {
1292 /*
1293 * Fill in the getdevlist fields.
1294 */
1295 strcpy(ccb->cgdl.periph_name,
1296 periph->periph_name);
1297 ccb->cgdl.unit_number =
1298 periph->unit_number;
1299 if (SLIST_NEXT(periph, periph_links))
1300 ccb->cgdl.status =
1301 CAM_GDEVLIST_MORE_DEVS;
1302 else
1303 ccb->cgdl.status =
1304 CAM_GDEVLIST_LAST_DEVICE;
1305 ccb->cgdl.generation =
1306 device->generation;
1307 ccb->cgdl.index = i;
1308 /*
1309 * Fill in some CCB header fields
1310 * that the user may want.
1311 */
1312 ccb->ccb_h.path_id =
1313 periph->path->bus->path_id;
1314 ccb->ccb_h.target_id =
1315 periph->path->target->target_id;
1316 ccb->ccb_h.target_lun =
1317 periph->path->device->lun_id;
1318 ccb->ccb_h.status = CAM_REQ_CMP;
1319 break;
1320 }
1321 }
1322 }
1323
1324 /*
1325 * If the periph is null here, one of two things has
1326 * happened. The first possibility is that we couldn't
1327 * find the unit number of the particular peripheral driver
1328 * that the user is asking about. e.g. the user asks for
1329 * the passthrough driver for "da11". We find the list of
1330 * "da" peripherals all right, but there is no unit 11.
1331 * The other possibility is that we went through the list
1332 * of peripheral drivers attached to the device structure,
1333 * but didn't find one with the name "pass". Either way,
1334 * we return ENOENT, since we couldn't find something.
1335 */
1336 if (periph == NULL) {
1337 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1338 ccb->cgdl.status = CAM_GDEVLIST_ERROR;
1339 *ccb->cgdl.periph_name = '\0';
1340 ccb->cgdl.unit_number = 0;
1341 error = ENOENT;
1342 /*
1343 * It is unfortunate that this is even necessary,
1344 * but there are many, many clueless users out there.
1345 * If this is true, the user is looking for the
1346 * passthrough driver, but doesn't have one in his
1347 * kernel.
1348 */
1349 if (base_periph_found == 1) {
1350 kprintf("xptioctl: pass driver is not in the "
1351 "kernel\n");
1352 kprintf("xptioctl: put \"device pass\" in "
1353 "your kernel config file\n");
1354 }
1355 }
1356 lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);
1357 break;
1358 }
1359 default:
1360 error = ENOTTY;
1361 break;
1362 }
1363
1364 return(error);
1365}
1366
1367static int
1368cam_module_event_handler(module_t mod, int what, void *arg)
1369{
1370 int error;
1371
1372 switch (what) {
1373 case MOD_LOAD:
1374 if ((error = xpt_init(NULL)) != 0)
1375 return (error);
1376 break;
1377 case MOD_UNLOAD:
1378 return EBUSY;
1379 default:
1380 return EOPNOTSUPP;
1381 }
1382
1383 return 0;
1384}
1385
1386/*
1387 * Thread to handle asynchronous main-context requests.
1388 *
1389 * This function is typically used by drivers to perform complex actions
1390 * such as bus scans and engineering requests in a main context instead
1391 * of an interrupt context.
1392 */
1393static void
1394xpt_scanner_thread(void *dummy)
1395{
1396 union ccb *ccb;
1397#if 0
1398 struct cam_sim *sim;
1399#endif
1400
1401 for (;;) {
1402 xpt_lock_buses();
1403 xsoftc.ccb_scanq_running = 1;
1404 while ((ccb = (void *)TAILQ_FIRST(&xsoftc.ccb_scanq)) != NULL) {
1405 TAILQ_REMOVE(&xsoftc.ccb_scanq, &ccb->ccb_h,
1406 sim_links.tqe);
1407 xpt_unlock_buses();
1408#if 0
1409 sim = ccb->ccb_h.path->bus->sim;
1410 CAM_SIM_LOCK(sim);
1411#endif
1412 xpt_action(ccb);
1413#if 0
1414 CAM_SIM_UNLOCK(sim);
1415 xpt_lock_buses();
1416#endif
1417 }
1418 xsoftc.ccb_scanq_running = 0;
1419 crit_enter();
1420 tsleep_interlock(&xsoftc.ccb_scanq);
1421 xpt_unlock_buses();
1422 tsleep(&xsoftc.ccb_scanq, 0, "ccb_scanq", 0);
1423 crit_exit();
1424 }
1425}
1426
1427/*
1428 * Issue an asynchronous action
1429 */
1430void
1431xpt_action_async(union ccb *ccb)
1432{
1433 xpt_lock_buses();
1434 TAILQ_INSERT_TAIL(&xsoftc.ccb_scanq, &ccb->ccb_h, sim_links.tqe);
1435 if (xsoftc.ccb_scanq_running == 0) {
1436 xsoftc.ccb_scanq_running = 1;
1437 wakeup(&xsoftc.ccb_scanq);
1438 }
1439 xpt_unlock_buses();
1440}
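/*
 * Illustrative sketch only (not compiled): a driver kicking off a bus scan
 * from a context that must not block on it.  The CCB/path setup mirrors the
 * CAMIOCOMMAND handling in xptioctl() above; "bus_path_id" and
 * "example_scan_done" are hypothetical, the latter being a completion
 * callback responsible for freeing the path and CCB.
 */
#if 0
	union ccb *ccb = xpt_alloc_ccb();

	if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, bus_path_id,
			    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_free_ccb(ccb);
		return;
	}
	xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, /*priority*/5);
	ccb->ccb_h.func_code = XPT_SCAN_BUS;
	ccb->ccb_h.cbfcnp = example_scan_done;
	ccb->crcn.flags = CAM_FLAG_NONE;
	xpt_action_async(ccb);		/* executed later by xpt_scanner_thread() */
#endif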
1441
1442
1443/* Functions accessed by the peripheral drivers */
1444static int
1445xpt_init(void *dummy)
1446{
1447 struct cam_sim *xpt_sim;
1448 struct cam_path *path;
1449 struct cam_devq *devq;
1450 cam_status status;
1451
1452 TAILQ_INIT(&xsoftc.xpt_busses);
1453 TAILQ_INIT(&cam_simq);
1454 TAILQ_INIT(&xsoftc.ccb_scanq);
1455 STAILQ_INIT(&xsoftc.highpowerq);
1456 xsoftc.num_highpower = CAM_MAX_HIGHPOWER;
1457
1458 spin_init(&cam_simq_spin);
1459 lockinit(&xsoftc.xpt_lock, "XPT lock", 0, LK_CANRECURSE);
1460 lockinit(&xsoftc.xpt_topo_lock, "XPT topology lock", 0, LK_CANRECURSE);
1461
1462 SLIST_INIT(&cam_dead_sim.ccb_freeq);
1463 TAILQ_INIT(&cam_dead_sim.sim_doneq);
1464 spin_init(&cam_dead_sim.sim_spin);
1465 cam_dead_sim.sim_action = dead_sim_action;
1466 cam_dead_sim.sim_poll = dead_sim_poll;
1467 cam_dead_sim.sim_name = "dead_sim";
1468 cam_dead_sim.lock = &cam_dead_lock;
1469 lockinit(&cam_dead_lock, "XPT dead_sim lock", 0, LK_CANRECURSE);
1470 cam_dead_sim.flags |= CAM_SIM_DEREGISTERED;
1471
1472 /*
1473	 * The xpt layer is, itself, the equivalent of a SIM.
1474 * Allow 16 ccbs in the ccb pool for it. This should
1475 * give decent parallelism when we probe busses and
1476 * perform other XPT functions.
1477 */
1478 devq = cam_simq_alloc(16);
1479 xpt_sim = cam_sim_alloc(xptaction,
1480 xptpoll,
1481 "xpt",
1482 /*softc*/NULL,
1483 /*unit*/0,
1484 /*lock*/&xsoftc.xpt_lock,
1485 /*max_dev_transactions*/0,
1486 /*max_tagged_dev_transactions*/0,
1487 devq);
1488 cam_simq_release(devq);
1489 if (xpt_sim == NULL)
1490 return (ENOMEM);
1491
1492 xpt_sim->max_ccbs = 16;
1493
1494 lockmgr(&xsoftc.xpt_lock, LK_EXCLUSIVE);
1495 if ((status = xpt_bus_register(xpt_sim, /*bus #*/0)) != CAM_SUCCESS) {
1496 kprintf("xpt_init: xpt_bus_register failed with status %#x,"
1497 " failing attach\n", status);
1498 return (EINVAL);
1499 }
1500
1501 /*
1502 * Looking at the XPT from the SIM layer, the XPT is
1503	 * the equivalent of a peripheral driver. Allocate
1504 * a peripheral driver entry for us.
1505 */
1506 if ((status = xpt_create_path(&path, NULL, CAM_XPT_PATH_ID,
1507 CAM_TARGET_WILDCARD,
1508 CAM_LUN_WILDCARD)) != CAM_REQ_CMP) {
1509 kprintf("xpt_init: xpt_create_path failed with status %#x,"
1510 " failing attach\n", status);
1511 return (EINVAL);
1512 }
1513
1514 cam_periph_alloc(xptregister, NULL, NULL, NULL, "xpt", CAM_PERIPH_BIO,
1515 path, NULL, 0, xpt_sim);
1516 xpt_free_path(path);
1517
1518 lockmgr(&xsoftc.xpt_lock, LK_RELEASE);
1519
1520 /*
1521 * Register a callback for when interrupts are enabled.
1522 */
1523 xsoftc.xpt_config_hook = kmalloc(sizeof(struct intr_config_hook),
1524 M_CAMXPT, M_INTWAIT | M_ZERO);
1525 xsoftc.xpt_config_hook->ich_func = xpt_config;
1526 xsoftc.xpt_config_hook->ich_desc = "xpt";
1527 xsoftc.xpt_config_hook->ich_order = 1000;
1528 if (config_intrhook_establish(xsoftc.xpt_config_hook) != 0) {
1529 kfree (xsoftc.xpt_config_hook, M_CAMXPT);
1530 kprintf("xpt_init: config_intrhook_establish failed "
1531 "- failing attach\n");
1532 }
1533
1534 /* fire up rescan thread */
1535 if (kthread_create(xpt_scanner_thread, NULL, NULL, "xpt_thrd")) {
1536 kprintf("xpt_init: failed to create rescan thread\n");
1537 }
1538 /* Install our software interrupt handlers */
1539 register_swi(SWI_CAMBIO, swi_cambio, NULL, "swi_cambio", NULL);
1540
1541 return (0);
1542}
1543
1544static cam_status
1545xptregister(struct cam_periph *periph, void *arg)
1546{
1547 struct cam_sim *xpt_sim;
1548
1549 if (periph == NULL) {
1550 kprintf("xptregister: periph was NULL!!\n");
1551 return(CAM_REQ_CMP_ERR);
1552 }
1553
1554 xpt_sim = (struct cam_sim *)arg;
1555 xpt_sim->softc = periph;
1556 xpt_periph = periph;
1557 periph->softc = NULL;
1558
1559 return(CAM_REQ_CMP);
1560}
1561
1562int32_t
1563xpt_add_periph(struct cam_periph *periph)
1564{
1565 struct cam_ed *device;
1566 int32_t status;
1567 struct periph_list *periph_head;
1568
1569 sim_lock_assert_owned(periph->sim->lock);
1570
1571 device = periph->path->device;
1572
1573 periph_head = &device->periphs;
1574
1575 status = CAM_REQ_CMP;
1576
1577 if (device != NULL) {
1578 /*
1579 * Make room for this peripheral
1580 * so it will fit in the queue
1581 * when it's scheduled to run
1582 */
1583 status = camq_resize(&device->drvq,
1584 device->drvq.array_size + 1);
1585
1586 device->generation++;
1587
1588 SLIST_INSERT_HEAD(periph_head, periph, periph_links);
1589 }
1590
1591 lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
1592 xsoftc.xpt_generation++;
1593 lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);
1594
1595 return (status);
1596}
1597
1598void
1599xpt_remove_periph(struct cam_periph *periph)
1600{
1601 struct cam_ed *device;
1602
1603 sim_lock_assert_owned(periph->sim->lock);
1604
1605 device = periph->path->device;
1606
1607 if (device != NULL) {
1608 struct periph_list *periph_head;
1609
1610 periph_head = &device->periphs;
1611
1612 /* Release the slot for this peripheral */
1613 camq_resize(&device->drvq, device->drvq.array_size - 1);
1614
1615 device->generation++;
1616
1617 SLIST_REMOVE(periph_head, periph, cam_periph, periph_links);
1618 }
1619
1620 lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
1621 xsoftc.xpt_generation++;
1622 lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);
1623}
1624
1625void
1626xpt_announce_periph(struct cam_periph *periph, char *announce_string)
1627{
1628 struct ccb_pathinq cpi;
1629 struct ccb_trans_settings cts;
1630 struct cam_path *path;
1631 u_int speed;
1632 u_int freq;
1633 u_int mb;
1634
1635 sim_lock_assert_owned(periph->sim->lock);
1636
1637 path = periph->path;
1638 /*
1639 * To ensure that this is printed in one piece,
1640 * mask out CAM interrupts.
1641 */
1642 kprintf("%s%d at %s%d bus %d target %d lun %d\n",
1643 periph->periph_name, periph->unit_number,
1644 path->bus->sim->sim_name,
1645 path->bus->sim->unit_number,
1646 path->bus->sim->bus_id,
1647 path->target->target_id,
1648 path->device->lun_id);
1649 kprintf("%s%d: ", periph->periph_name, periph->unit_number);
1650 scsi_print_inquiry(&path->device->inq_data);
1651 if (bootverbose && path->device->serial_num_len > 0) {
1652 /* Don't wrap the screen - print only the first 60 chars */
1653 kprintf("%s%d: Serial Number %.60s\n", periph->periph_name,
1654 periph->unit_number, path->device->serial_num);
1655 }
1656 xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
1657 cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
1658 cts.type = CTS_TYPE_CURRENT_SETTINGS;
1659 xpt_action((union ccb*)&cts);
1660 if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1661 return;
1662 }
1663
1664 /* Ask the SIM for its base transfer speed */
1665 xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
1666 cpi.ccb_h.func_code = XPT_PATH_INQ;
1667 xpt_action((union ccb *)&cpi);
1668
1669 speed = cpi.base_transfer_speed;
1670 freq = 0;
1671 if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) {
1672 struct ccb_trans_settings_spi *spi;
1673
1674 spi = &cts.xport_specific.spi;
1675 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0
1676 && spi->sync_offset != 0) {
1677 freq = scsi_calc_syncsrate(spi->sync_period);
1678 speed = freq;
1679 }
1680
1681 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0)
1682 speed *= (0x01 << spi->bus_width);
1683 }
1684 if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) {
1685 struct ccb_trans_settings_fc *fc = &cts.xport_specific.fc;
1686 if (fc->valid & CTS_FC_VALID_SPEED) {
1687 speed = fc->bitrate;
1688 }
1689 }
1690
1691 if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SAS) {
1692 struct ccb_trans_settings_sas *sas = &cts.xport_specific.sas;
1693 if (sas->valid & CTS_SAS_VALID_SPEED) {
1694 speed = sas->bitrate;
1695 }
1696 }
1697
1698 mb = speed / 1000;
1699 if (mb > 0)
1700 kprintf("%s%d: %d.%03dMB/s transfers",
1701 periph->periph_name, periph->unit_number,
1702 mb, speed % 1000);
1703 else
1704 kprintf("%s%d: %dKB/s transfers", periph->periph_name,
1705 periph->unit_number, speed);
1706 /* Report additional information about SPI connections */
1707 if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_SPI) {
1708 struct ccb_trans_settings_spi *spi;
1709
1710 spi = &cts.xport_specific.spi;
1711 if (freq != 0) {
1712 kprintf(" (%d.%03dMHz%s, offset %d", freq / 1000,
1713 freq % 1000,
1714 (spi->ppr_options & MSG_EXT_PPR_DT_REQ) != 0
1715 ? " DT" : "",
1716 spi->sync_offset);
1717 }
1718 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0
1719 && spi->bus_width > 0) {
1720 if (freq != 0) {
1721 kprintf(", ");
1722 } else {
1723 kprintf(" (");
1724 }
1725 kprintf("%dbit)", 8 * (0x01 << spi->bus_width));
1726 } else if (freq != 0) {
1727 kprintf(")");
1728 }
1729 }
1730 if (cts.ccb_h.status == CAM_REQ_CMP && cts.transport == XPORT_FC) {
1731 struct ccb_trans_settings_fc *fc;
1732
1733 fc = &cts.xport_specific.fc;
1734 if (fc->valid & CTS_FC_VALID_WWNN)
1735 kprintf(" WWNN 0x%llx", (long long) fc->wwnn);
1736 if (fc->valid & CTS_FC_VALID_WWPN)
1737 kprintf(" WWPN 0x%llx", (long long) fc->wwpn);
1738 if (fc->valid & CTS_FC_VALID_PORT)
1739 kprintf(" PortID 0x%x", fc->port);
1740 }
1741
1742 if (path->device->inq_flags & SID_CmdQue
1743 || path->device->flags & CAM_DEV_TAG_AFTER_COUNT) {
1744 kprintf("\n%s%d: Command Queueing Enabled",
1745 periph->periph_name, periph->unit_number);
1746 }
1747 kprintf("\n");
1748
1749 /*
1750 * We only want to print the caller's announce string if they've
1751 * passed one in..
1752 */
1753 if (announce_string != NULL)
1754 kprintf("%s%d: %s\n", periph->periph_name,
1755 periph->unit_number, announce_string);
1756}
1757
1758static dev_match_ret
1759xptbusmatch(struct dev_match_pattern *patterns, u_int num_patterns,
1760 struct cam_eb *bus)
1761{
1762 dev_match_ret retval;
1763 int i;
1764
1765 retval = DM_RET_NONE;
1766
1767 /*
1768 * If we aren't given something to match against, that's an error.
1769 */
1770 if (bus == NULL)
1771 return(DM_RET_ERROR);
1772
1773 /*
1774 * If there are no match entries, then this bus matches no
1775 * matter what.
1776 */
1777 if ((patterns == NULL) || (num_patterns == 0))
1778 return(DM_RET_DESCEND | DM_RET_COPY);
1779
1780 for (i = 0; i < num_patterns; i++) {
1781 struct bus_match_pattern *cur_pattern;
1782
1783 /*
1784 * If the pattern in question isn't for a bus node, we
1785 * aren't interested. However, we do indicate to the
1786 * calling routine that we should continue descending the
1787 * tree, since the user wants to match against lower-level
1788 * EDT elements.
1789 */
1790 if (patterns[i].type != DEV_MATCH_BUS) {
1791 if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1792 retval |= DM_RET_DESCEND;
1793 continue;
1794 }
1795
1796 cur_pattern = &patterns[i].pattern.bus_pattern;
1797
1798 /*
1799		 * If they want to match any bus node, we give them any
1800		 * bus node.
1801 */
1802 if (cur_pattern->flags == BUS_MATCH_ANY) {
1803 /* set the copy flag */
1804 retval |= DM_RET_COPY;
1805
1806 /*
1807 * If we've already decided on an action, go ahead
1808 * and return.
1809 */
1810 if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
1811 return(retval);
1812 }
1813
1814 /*
1815 * Not sure why someone would do this...
1816 */
1817 if (cur_pattern->flags == BUS_MATCH_NONE)
1818 continue;
1819
1820 if (((cur_pattern->flags & BUS_MATCH_PATH) != 0)
1821 && (cur_pattern->path_id != bus->path_id))
1822 continue;
1823
1824 if (((cur_pattern->flags & BUS_MATCH_BUS_ID) != 0)
1825 && (cur_pattern->bus_id != bus->sim->bus_id))
1826 continue;
1827
1828 if (((cur_pattern->flags & BUS_MATCH_UNIT) != 0)
1829 && (cur_pattern->unit_number != bus->sim->unit_number))
1830 continue;
1831
1832 if (((cur_pattern->flags & BUS_MATCH_NAME) != 0)
1833 && (strncmp(cur_pattern->dev_name, bus->sim->sim_name,
1834 DEV_IDLEN) != 0))
1835 continue;
1836
1837 /*
1838 * If we get to this point, the user definitely wants
1839 * information on this bus. So tell the caller to copy the
1840 * data out.
1841 */
1842 retval |= DM_RET_COPY;
1843
1844 /*
1845 * If the return action has been set to descend, then we
1846 * know that we've already seen a non-bus matching
1847 * expression, therefore we need to further descend the tree.
1848 * This won't change by continuing around the loop, so we
1849 * go ahead and return. If we haven't seen a non-bus
1850 * matching expression, we keep going around the loop until
1851 * we exhaust the matching expressions. We'll set the stop
1852 * flag once we fall out of the loop.
1853 */
1854 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1855 return(retval);
1856 }
1857
1858 /*
1859 * If the return action hasn't been set to descend yet, that means
1860 * we haven't seen anything other than bus matching patterns. So
1861 * tell the caller to stop descending the tree -- the user doesn't
1862 * want to match against lower level tree elements.
1863 */
1864 if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1865 retval |= DM_RET_STOP;
1866
1867 return(retval);
1868}
1869
1870static dev_match_ret
1871xptdevicematch(struct dev_match_pattern *patterns, u_int num_patterns,
1872 struct cam_ed *device)
1873{
1874 dev_match_ret retval;
1875 int i;
1876
1877 retval = DM_RET_NONE;
1878
1879 /*
1880 * If we aren't given something to match against, that's an error.
1881 */
1882 if (device == NULL)
1883 return(DM_RET_ERROR);
1884
1885 /*
1886 * If there are no match entries, then this device matches no
1887 * matter what.
1888 */
1889 if ((patterns == NULL) || (num_patterns == 0))
1890 return(DM_RET_DESCEND | DM_RET_COPY);
1891
1892 for (i = 0; i < num_patterns; i++) {
1893 struct device_match_pattern *cur_pattern;
1894
1895 /*
1896 * If the pattern in question isn't for a device node, we
1897 * aren't interested.
1898 */
1899 if (patterns[i].type != DEV_MATCH_DEVICE) {
1900 if ((patterns[i].type == DEV_MATCH_PERIPH)
1901 && ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE))
1902 retval |= DM_RET_DESCEND;
1903 continue;
1904 }
1905
1906 cur_pattern = &patterns[i].pattern.device_pattern;
1907
1908 /*
1909 * If they want to match any device node, we give them any
1910 * device node.
1911 */
1912 if (cur_pattern->flags == DEV_MATCH_ANY) {
1913 /* set the copy flag */
1914 retval |= DM_RET_COPY;
1915
1916
1917 /*
1918 * If we've already decided on an action, go ahead
1919 * and return.
1920 */
1921 if ((retval & DM_RET_ACTION_MASK) != DM_RET_NONE)
1922 return(retval);
1923 }
1924
1925 /*
1926 * Not sure why someone would do this...
1927 */
1928 if (cur_pattern->flags == DEV_MATCH_NONE)
1929 continue;
1930
1931 if (((cur_pattern->flags & DEV_MATCH_PATH) != 0)
1932 && (cur_pattern->path_id != device->target->bus->path_id))
1933 continue;
1934
1935 if (((cur_pattern->flags & DEV_MATCH_TARGET) != 0)
1936 && (cur_pattern->target_id != device->target->target_id))
1937 continue;
1938
1939 if (((cur_pattern->flags & DEV_MATCH_LUN) != 0)
1940 && (cur_pattern->target_lun != device->lun_id))
1941 continue;
1942
1943 if (((cur_pattern->flags & DEV_MATCH_INQUIRY) != 0)
1944 && (cam_quirkmatch((caddr_t)&device->inq_data,
1945 (caddr_t)&cur_pattern->inq_pat,
1946 1, sizeof(cur_pattern->inq_pat),
1947 scsi_static_inquiry_match) == NULL))
1948 continue;
1949
1950 /*
1951 * If we get to this point, the user definitely wants
1952 * information on this device. So tell the caller to copy
1953 * the data out.
1954 */
1955 retval |= DM_RET_COPY;
1956
1957 /*
1958 * If the return action has been set to descend, then we
1959 * know that we've already seen a peripheral matching
1960 * expression, therefore we need to further descend the tree.
1961 * This won't change by continuing around the loop, so we
1962 * go ahead and return. If we haven't seen a peripheral
1963 * matching expression, we keep going around the loop until
1964 * we exhaust the matching expressions. We'll set the stop
1965 * flag once we fall out of the loop.
1966 */
1967 if ((retval & DM_RET_ACTION_MASK) == DM_RET_DESCEND)
1968 return(retval);
1969 }
1970
1971 /*
1972 * If the return action hasn't been set to descend yet, that means
1973 * we haven't seen any peripheral matching patterns. So tell the
1974 * caller to stop descending the tree -- the user doesn't want to
1975 * match against lower level tree elements.
1976 */
1977 if ((retval & DM_RET_ACTION_MASK) == DM_RET_NONE)
1978 retval |= DM_RET_STOP;
1979
1980 return(retval);
1981}
1982
1983/*
1984 * Match a single peripheral against any number of match patterns.
1985 */
1986static dev_match_ret
1987xptperiphmatch(struct dev_match_pattern *patterns, u_int num_patterns,
1988 struct cam_periph *periph)
1989{
1990 dev_match_ret retval;
1991 int i;
1992
1993 /*
1994 * If we aren't given something to match against, that's an error.
1995 */
1996 if (periph == NULL)
1997 return(DM_RET_ERROR);
1998
1999 /*
2000 * If there are no match entries, then this peripheral matches no
2001 * matter what.
2002 */
2003 if ((patterns == NULL) || (num_patterns == 0))
2004 return(DM_RET_STOP | DM_RET_COPY);
2005
2006 /*
2007 * There aren't any nodes below a peripheral node, so there's no
2008 * reason to descend the tree any further.
2009 */
2010 retval = DM_RET_STOP;
2011
2012 for (i = 0; i < num_patterns; i++) {
2013 struct periph_match_pattern *cur_pattern;
2014
2015 /*
2016 * If the pattern in question isn't for a peripheral, we
2017 * aren't interested.
2018 */
2019 if (patterns[i].type != DEV_MATCH_PERIPH)
2020 continue;
2021
2022 cur_pattern = &patterns[i].pattern.periph_pattern;
2023
2024 /*
2025 * If they want to match on anything, then we will do so.
2026 */
2027 if (cur_pattern->flags == PERIPH_MATCH_ANY) {
2028 /* set the copy flag */
2029 retval |= DM_RET_COPY;
2030
2031 /*
2032 * We've already set the return action to stop,
2033 * since there are no nodes below peripherals in
2034 * the tree.
2035 */
2036 return(retval);
2037 }
2038
2039 /*
2040 * Not sure why someone would do this...
2041 */
2042 if (cur_pattern->flags == PERIPH_MATCH_NONE)
2043 continue;
2044
2045 if (((cur_pattern->flags & PERIPH_MATCH_PATH) != 0)
2046 && (cur_pattern->path_id != periph->path->bus->path_id))
2047 continue;
2048
2049 /*
2050		 * For the target and lun IDs, we have to make sure the
2051 * target and lun pointers aren't NULL. The xpt peripheral
2052 * has a wildcard target and device.
2053 */
2054 if (((cur_pattern->flags & PERIPH_MATCH_TARGET) != 0)
2055 && ((periph->path->target == NULL)
2056 ||(cur_pattern->target_id != periph->path->target->target_id)))
2057 continue;
2058
2059 if (((cur_pattern->flags & PERIPH_MATCH_LUN) != 0)
2060 && ((periph->path->device == NULL)
2061 || (cur_pattern->target_lun != periph->path->device->lun_id)))
2062 continue;
2063
2064 if (((cur_pattern->flags & PERIPH_MATCH_UNIT) != 0)
2065 && (cur_pattern->unit_number != periph->unit_number))
2066 continue;
2067
2068 if (((cur_pattern->flags & PERIPH_MATCH_NAME) != 0)
2069 && (strncmp(cur_pattern->periph_name, periph->periph_name,
2070 DEV_IDLEN) != 0))
2071 continue;
2072
2073 /*
2074 * If we get to this point, the user definitely wants
2075 * information on this peripheral. So tell the caller to
2076 * copy the data out.
2077 */
2078 retval |= DM_RET_COPY;
2079
2080 /*
2081 * The return action has already been set to stop, since
2082 * peripherals don't have any nodes below them in the EDT.
2083 */
2084 return(retval);
2085 }
2086
2087 /*
2088 * If we get to this point, the peripheral that was passed in
2089 * doesn't match any of the patterns.
2090 */
2091 return(retval);
2092}
2093
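/*
 * The xptedt*func() callbacks below are driven by the traversal routines
 * later in this file.  Each returns 1 to continue the traversal and 0 to
 * stop it; before returning 0 a callback records why it stopped by setting
 * cdm->status (match buffer full, list generation changed, or an error) and,
 * where appropriate, saves its position in cdm->pos so that a subsequent
 * XPT_DEV_MATCH call can resume where this one left off.
 */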
2094static int
2095xptedtbusfunc(struct cam_eb *bus, void *arg)
2096{
2097 struct ccb_dev_match *cdm;
2098 dev_match_ret retval;
2099
2100 cdm = (struct ccb_dev_match *)arg;
2101
2102 /*
2103 * If our position is for something deeper in the tree, that means
2104 * that we've already seen this node. So, we keep going down.
2105 */
2106 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2107 && (cdm->pos.cookie.bus == bus)
2108 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2109 && (cdm->pos.cookie.target != NULL))
2110 retval = DM_RET_DESCEND;
2111 else
2112 retval = xptbusmatch(cdm->patterns, cdm->num_patterns, bus);
2113
2114 /*
2115 * If we got an error, bail out of the search.
2116 */
2117 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2118 cdm->status = CAM_DEV_MATCH_ERROR;
2119 return(0);
2120 }
2121
2122 /*
2123 * If the copy flag is set, copy this bus out.
2124 */
2125 if (retval & DM_RET_COPY) {
2126 int spaceleft, j;
2127
2128 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2129 sizeof(struct dev_match_result));
2130
2131 /*
2132 * If we don't have enough space to put in another
2133 * match result, save our position and tell the
2134 * user there are more devices to check.
2135 */
2136 if (spaceleft < sizeof(struct dev_match_result)) {
2137 bzero(&cdm->pos, sizeof(cdm->pos));
2138 cdm->pos.position_type =
2139 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS;
2140
2141 cdm->pos.cookie.bus = bus;
2142 cdm->pos.generations[CAM_BUS_GENERATION]=
2143 xsoftc.bus_generation;
2144 cdm->status = CAM_DEV_MATCH_MORE;
2145 return(0);
2146 }
2147 j = cdm->num_matches;
2148 cdm->num_matches++;
2149 cdm->matches[j].type = DEV_MATCH_BUS;
2150 cdm->matches[j].result.bus_result.path_id = bus->path_id;
2151 cdm->matches[j].result.bus_result.bus_id = bus->sim->bus_id;
2152 cdm->matches[j].result.bus_result.unit_number =
2153 bus->sim->unit_number;
2154 strncpy(cdm->matches[j].result.bus_result.dev_name,
2155 bus->sim->sim_name, DEV_IDLEN);
2156 }
2157
2158 /*
2159 * If the user is only interested in busses, there's no
2160 * reason to descend to the next level in the tree.
2161 */
2162 if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
2163 return(1);
2164
2165 /*
2166 * If there is a target generation recorded, check it to
2167 * make sure the target list hasn't changed.
2168 */
2169 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2170 && (bus == cdm->pos.cookie.bus)
2171 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2172 && (cdm->pos.generations[CAM_TARGET_GENERATION] != 0)
2173 && (cdm->pos.generations[CAM_TARGET_GENERATION] !=
2174 bus->generation)) {
2175 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2176 return(0);
2177 }
2178
2179 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2180 && (cdm->pos.cookie.bus == bus)
2181 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2182 && (cdm->pos.cookie.target != NULL))
2183 return(xpttargettraverse(bus,
2184 (struct cam_et *)cdm->pos.cookie.target,
2185 xptedttargetfunc, arg));
2186 else
2187 return(xpttargettraverse(bus, NULL, xptedttargetfunc, arg));
2188}
2189
2190static int
2191xptedttargetfunc(struct cam_et *target, void *arg)
2192{
2193 struct ccb_dev_match *cdm;
2194
2195 cdm = (struct ccb_dev_match *)arg;
2196
2197 /*
2198 * If there is a device list generation recorded, check it to
2199 * make sure the device list hasn't changed.
2200 */
2201 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2202 && (cdm->pos.cookie.bus == target->bus)
2203 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2204 && (cdm->pos.cookie.target == target)
2205 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2206 && (cdm->pos.generations[CAM_DEV_GENERATION] != 0)
2207 && (cdm->pos.generations[CAM_DEV_GENERATION] !=
2208 target->generation)) {
2209 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2210 return(0);
2211 }
2212
2213 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2214 && (cdm->pos.cookie.bus == target->bus)
2215 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2216 && (cdm->pos.cookie.target == target)
2217 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2218 && (cdm->pos.cookie.device != NULL))
2219 return(xptdevicetraverse(target,
2220 (struct cam_ed *)cdm->pos.cookie.device,
2221 xptedtdevicefunc, arg));
2222 else
2223 return(xptdevicetraverse(target, NULL, xptedtdevicefunc, arg));
2224}
2225
2226static int
2227xptedtdevicefunc(struct cam_ed *device, void *arg)
2228{
2229
2230 struct ccb_dev_match *cdm;
2231 dev_match_ret retval;
2232
2233 cdm = (struct ccb_dev_match *)arg;
2234
2235 /*
2236 * If our position is for something deeper in the tree, that means
2237 * that we've already seen this node. So, we keep going down.
2238 */
2239 if ((cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2240 && (cdm->pos.cookie.device == device)
2241 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2242 && (cdm->pos.cookie.periph != NULL))
2243 retval = DM_RET_DESCEND;
2244 else
2245 retval = xptdevicematch(cdm->patterns, cdm->num_patterns,
2246 device);
2247
2248 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2249 cdm->status = CAM_DEV_MATCH_ERROR;
2250 return(0);
2251 }
2252
2253 /*
2254 * If the copy flag is set, copy this device out.
2255 */
2256 if (retval & DM_RET_COPY) {
2257 int spaceleft, j;
2258
2259 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2260 sizeof(struct dev_match_result));
2261
2262 /*
2263 * If we don't have enough space to put in another
2264 * match result, save our position and tell the
2265 * user there are more devices to check.
2266 */
2267 if (spaceleft < sizeof(struct dev_match_result)) {
2268 bzero(&cdm->pos, sizeof(cdm->pos));
2269 cdm->pos.position_type =
2270 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
2271 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE;
2272
2273 cdm->pos.cookie.bus = device->target->bus;
2274 cdm->pos.generations[CAM_BUS_GENERATION]=
2275 xsoftc.bus_generation;
2276 cdm->pos.cookie.target = device->target;
2277 cdm->pos.generations[CAM_TARGET_GENERATION] =
2278 device->target->bus->generation;
2279 cdm->pos.cookie.device = device;
2280 cdm->pos.generations[CAM_DEV_GENERATION] =
2281 device->target->generation;
2282 cdm->status = CAM_DEV_MATCH_MORE;
2283 return(0);
2284 }
2285 j = cdm->num_matches;
2286 cdm->num_matches++;
2287 cdm->matches[j].type = DEV_MATCH_DEVICE;
2288 cdm->matches[j].result.device_result.path_id =
2289 device->target->bus->path_id;
2290 cdm->matches[j].result.device_result.target_id =
2291 device->target->target_id;
2292 cdm->matches[j].result.device_result.target_lun =
2293 device->lun_id;
2294 bcopy(&device->inq_data,
2295 &cdm->matches[j].result.device_result.inq_data,
2296 sizeof(struct scsi_inquiry_data));
2297
2298 /* Let the user know whether this device is unconfigured */
2299 if (device->flags & CAM_DEV_UNCONFIGURED)
2300 cdm->matches[j].result.device_result.flags =
2301 DEV_RESULT_UNCONFIGURED;
2302 else
2303 cdm->matches[j].result.device_result.flags =
2304 DEV_RESULT_NOFLAG;
2305 }
2306
2307 /*
2308 * If the user isn't interested in peripherals, don't descend
2309 * the tree any further.
2310 */
2311 if ((retval & DM_RET_ACTION_MASK) == DM_RET_STOP)
2312 return(1);
2313
2314 /*
2315 * If there is a peripheral list generation recorded, make sure
2316 * it hasn't changed.
2317 */
2318 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2319 && (device->target->bus == cdm->pos.cookie.bus)
2320 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2321 && (device->target == cdm->pos.cookie.target)
2322 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2323 && (device == cdm->pos.cookie.device)
2324 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2325 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
2326 && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
2327 device->generation)){
2328 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2329 return(0);
2330 }
2331
2332 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2333 && (cdm->pos.cookie.bus == device->target->bus)
2334 && (cdm->pos.position_type & CAM_DEV_POS_TARGET)
2335 && (cdm->pos.cookie.target == device->target)
2336 && (cdm->pos.position_type & CAM_DEV_POS_DEVICE)
2337 && (cdm->pos.cookie.device == device)
2338 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2339 && (cdm->pos.cookie.periph != NULL))
2340 return(xptperiphtraverse(device,
2341 (struct cam_periph *)cdm->pos.cookie.periph,
2342 xptedtperiphfunc, arg));
2343 else
2344 return(xptperiphtraverse(device, NULL, xptedtperiphfunc, arg));
2345}
2346
2347static int
2348xptedtperiphfunc(struct cam_periph *periph, void *arg)
2349{
2350 struct ccb_dev_match *cdm;
2351 dev_match_ret retval;
2352
2353 cdm = (struct ccb_dev_match *)arg;
2354
2355 retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
2356
2357 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2358 cdm->status = CAM_DEV_MATCH_ERROR;
2359 return(0);
2360 }
2361
2362 /*
2363 * If the copy flag is set, copy this peripheral out.
2364 */
2365 if (retval & DM_RET_COPY) {
2366 int spaceleft, j;
2367
2368 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2369 sizeof(struct dev_match_result));
2370
2371 /*
2372 * If we don't have enough space to put in another
2373 * match result, save our position and tell the
2374 * user there are more devices to check.
2375 */
2376 if (spaceleft < sizeof(struct dev_match_result)) {
2377 bzero(&cdm->pos, sizeof(cdm->pos));
2378 cdm->pos.position_type =
2379 CAM_DEV_POS_EDT | CAM_DEV_POS_BUS |
2380 CAM_DEV_POS_TARGET | CAM_DEV_POS_DEVICE |
2381 CAM_DEV_POS_PERIPH;
2382
2383 cdm->pos.cookie.bus = periph->path->bus;
2384 cdm->pos.generations[CAM_BUS_GENERATION]=
2385 xsoftc.bus_generation;
2386 cdm->pos.cookie.target = periph->path->target;
2387 cdm->pos.generations[CAM_TARGET_GENERATION] =
2388 periph->path->bus->generation;
2389 cdm->pos.cookie.device = periph->path->device;
2390 cdm->pos.generations[CAM_DEV_GENERATION] =
2391 periph->path->target->generation;
2392 cdm->pos.cookie.periph = periph;
2393 cdm->pos.generations[CAM_PERIPH_GENERATION] =
2394 periph->path->device->generation;
2395 cdm->status = CAM_DEV_MATCH_MORE;
2396 return(0);
2397 }
2398
2399 j = cdm->num_matches;
2400 cdm->num_matches++;
2401 cdm->matches[j].type = DEV_MATCH_PERIPH;
2402 cdm->matches[j].result.periph_result.path_id =
2403 periph->path->bus->path_id;
2404 cdm->matches[j].result.periph_result.target_id =
2405 periph->path->target->target_id;
2406 cdm->matches[j].result.periph_result.target_lun =
2407 periph->path->device->lun_id;
2408 cdm->matches[j].result.periph_result.unit_number =
2409 periph->unit_number;
2410 strncpy(cdm->matches[j].result.periph_result.periph_name,
2411 periph->periph_name, DEV_IDLEN);
2412 }
2413
2414 return(1);
2415}
2416
2417static int
2418xptedtmatch(struct ccb_dev_match *cdm)
2419{
2420 int ret;
2421
2422 cdm->num_matches = 0;
2423
2424 /*
2425 * Check the bus list generation. If it has changed, the user
2426 * needs to reset everything and start over.
2427 */
2428 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2429 && (cdm->pos.generations[CAM_BUS_GENERATION] != 0)
2430 && (cdm->pos.generations[CAM_BUS_GENERATION] != xsoftc.bus_generation)) {
2431 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2432 return(0);
2433 }
2434
2435 if ((cdm->pos.position_type & CAM_DEV_POS_BUS)
2436 && (cdm->pos.cookie.bus != NULL))
2437 ret = xptbustraverse((struct cam_eb *)cdm->pos.cookie.bus,
2438 xptedtbusfunc, cdm);
2439 else
2440 ret = xptbustraverse(NULL, xptedtbusfunc, cdm);
2441
2442 /*
2443 * If we get back 0, that means that we had to stop before fully
2444 * traversing the EDT. It also means that one of the subroutines
2445 * has set the status field to the proper value. If we get back 1,
2446 * we've fully traversed the EDT and copied out any matching entries.
2447 */
2448 if (ret == 1)
2449 cdm->status = CAM_DEV_MATCH_LAST;
2450
2451 return(ret);
2452}
2453
2454static int
2455xptplistpdrvfunc(struct periph_driver **pdrv, void *arg)
2456{
2457 struct ccb_dev_match *cdm;
2458
2459 cdm = (struct ccb_dev_match *)arg;
2460
2461 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2462 && (cdm->pos.cookie.pdrv == pdrv)
2463 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2464 && (cdm->pos.generations[CAM_PERIPH_GENERATION] != 0)
2465 && (cdm->pos.generations[CAM_PERIPH_GENERATION] !=
2466 (*pdrv)->generation)) {
2467 cdm->status = CAM_DEV_MATCH_LIST_CHANGED;
2468 return(0);
2469 }
2470
2471 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2472 && (cdm->pos.cookie.pdrv == pdrv)
2473 && (cdm->pos.position_type & CAM_DEV_POS_PERIPH)
2474 && (cdm->pos.cookie.periph != NULL))
2475 return(xptpdperiphtraverse(pdrv,
2476 (struct cam_periph *)cdm->pos.cookie.periph,
2477 xptplistperiphfunc, arg));
2478 else
2479 return(xptpdperiphtraverse(pdrv, NULL,xptplistperiphfunc, arg));
2480}
2481
2482static int
2483xptplistperiphfunc(struct cam_periph *periph, void *arg)
2484{
2485 struct ccb_dev_match *cdm;
2486 dev_match_ret retval;
2487
2488 cdm = (struct ccb_dev_match *)arg;
2489
2490 retval = xptperiphmatch(cdm->patterns, cdm->num_patterns, periph);
2491
2492 if ((retval & DM_RET_ACTION_MASK) == DM_RET_ERROR) {
2493 cdm->status = CAM_DEV_MATCH_ERROR;
2494 return(0);
2495 }
2496
2497 /*
2498 * If the copy flag is set, copy this peripheral out.
2499 */
2500 if (retval & DM_RET_COPY) {
2501 int spaceleft, j;
2502
2503 spaceleft = cdm->match_buf_len - (cdm->num_matches *
2504 sizeof(struct dev_match_result));
2505
2506 /*
2507 * If we don't have enough space to put in another
2508 * match result, save our position and tell the
2509 * user there are more devices to check.
2510 */
2511 if (spaceleft < sizeof(struct dev_match_result)) {
2512 struct periph_driver **pdrv;
2513
2514 pdrv = NULL;
2515 bzero(&cdm->pos, sizeof(cdm->pos));
2516 cdm->pos.position_type =
2517 CAM_DEV_POS_PDRV | CAM_DEV_POS_PDPTR |
2518 CAM_DEV_POS_PERIPH;
2519
2520 /*
2521			 * This may look a bit nonsensical, but it is
2522 * actually quite logical. There are very few
2523 * peripheral drivers, and bloating every peripheral
2524 * structure with a pointer back to its parent
2525 * peripheral driver linker set entry would cost
2526 * more in the long run than doing this quick lookup.
2527 */
2528 for (pdrv = periph_drivers; *pdrv != NULL; pdrv++) {
2529 if (strcmp((*pdrv)->driver_name,
2530 periph->periph_name) == 0)
2531 break;
2532 }
2533
2534 if (*pdrv == NULL) {
2535 cdm->status = CAM_DEV_MATCH_ERROR;
2536 return(0);
2537 }
2538
2539 cdm->pos.cookie.pdrv = pdrv;
2540 /*
2541 * The periph generation slot does double duty, as
2542 * does the periph pointer slot. They are used for
2543 * both edt and pdrv lookups and positioning.
2544 */
2545 cdm->pos.cookie.periph = periph;
2546 cdm->pos.generations[CAM_PERIPH_GENERATION] =
2547 (*pdrv)->generation;
2548 cdm->status = CAM_DEV_MATCH_MORE;
2549 return(0);
2550 }
2551
2552 j = cdm->num_matches;
2553 cdm->num_matches++;
2554 cdm->matches[j].type = DEV_MATCH_PERIPH;
2555 cdm->matches[j].result.periph_result.path_id =
2556 periph->path->bus->path_id;
2557
2558 /*
2559 * The transport layer peripheral doesn't have a target or
2560 * lun.
2561 */
2562 if (periph->path->target)
2563 cdm->matches[j].result.periph_result.target_id =
2564 periph->path->target->target_id;
2565 else
2566 cdm->matches[j].result.periph_result.target_id = -1;
2567
2568 if (periph->path->device)
2569 cdm->matches[j].result.periph_result.target_lun =
2570 periph->path->device->lun_id;
2571 else
2572 cdm->matches[j].result.periph_result.target_lun = -1;
2573
2574 cdm->matches[j].result.periph_result.unit_number =
2575 periph->unit_number;
2576 strncpy(cdm->matches[j].result.periph_result.periph_name,
2577 periph->periph_name, DEV_IDLEN);
2578 }
2579
2580 return(1);
2581}
2582
2583static int
2584xptperiphlistmatch(struct ccb_dev_match *cdm)
2585{
2586 int ret;
2587
2588 cdm->num_matches = 0;
2589
2590 /*
2591 * At this point in the edt traversal function, we check the bus
2592 * list generation to make sure that no busses have been added or
2593	 * removed since the user last sent an XPT_DEV_MATCH ccb through.
2594 * For the peripheral driver list traversal function, however, we
2595 * don't have to worry about new peripheral driver types coming or
2596 * going; they're in a linker set, and therefore can't change
2597 * without a recompile.
2598 */
2599
2600 if ((cdm->pos.position_type & CAM_DEV_POS_PDPTR)
2601 && (cdm->pos.cookie.pdrv != NULL))
2602 ret = xptpdrvtraverse(
2603 (struct periph_driver **)cdm->pos.cookie.pdrv,
2604 xptplistpdrvfunc, cdm);
2605 else
2606 ret = xptpdrvtraverse(NULL, xptplistpdrvfunc, cdm);
2607
2608 /*
2609 * If we get back 0, that means that we had to stop before fully
2610 * traversing the peripheral driver tree. It also means that one of
2611 * the subroutines has set the status field to the proper value. If
2612 * we get back 1, we've fully traversed the EDT and copied out any
2613 * matching entries.
2614 */
2615 if (ret == 1)
2616 cdm->status = CAM_DEV_MATCH_LAST;
2617
2618 return(ret);
2619}
2620
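/*
 * Generic EDT traversal helpers.  Each routine walks one level of the tree
 * (busses, targets, devices, peripherals, or peripheral drivers), optionally
 * starting at a caller-supplied node, and invokes tr_func on every entry
 * until either the list is exhausted or tr_func returns 0.  Note that
 * xptbustraverse() drops the topology lock and acquires the bus's SIM lock
 * around each callback, so tr_func runs with the SIM lock held.
 */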
2621static int
2622xptbustraverse(struct cam_eb *start_bus, xpt_busfunc_t *tr_func, void *arg)
2623{
2624 struct cam_eb *bus, *next_bus;
2625 int retval;
2626
2627 retval = 1;
2628
2629 lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
2630 for (bus = (start_bus ? start_bus : TAILQ_FIRST(&xsoftc.xpt_busses));
2631 bus != NULL;
2632 bus = next_bus) {
2633 next_bus = TAILQ_NEXT(bus, links);
2634
2635 lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);
2636 CAM_SIM_LOCK(bus->sim);
2637 retval = tr_func(bus, arg);
2638 CAM_SIM_UNLOCK(bus->sim);
2639 if (retval == 0)
2640 return(retval);
2641 lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
2642 }
2643 lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);
2644
2645 return(retval);
2646}
2647
2648static int
2649xpttargettraverse(struct cam_eb *bus, struct cam_et *start_target,
2650 xpt_targetfunc_t *tr_func, void *arg)
2651{
2652 struct cam_et *target, *next_target;
2653 int retval;
2654
2655 retval = 1;
2656 for (target = (start_target ? start_target :
2657 TAILQ_FIRST(&bus->et_entries));
2658 target != NULL; target = next_target) {
2659
2660 next_target = TAILQ_NEXT(target, links);
2661
2662 retval = tr_func(target, arg);
2663
2664 if (retval == 0)
2665 return(retval);
2666 }
2667
2668 return(retval);
2669}
2670
2671static int
2672xptdevicetraverse(struct cam_et *target, struct cam_ed *start_device,
2673 xpt_devicefunc_t *tr_func, void *arg)
2674{
2675 struct cam_ed *device, *next_device;
2676 int retval;
2677
2678 retval = 1;
2679 for (device = (start_device ? start_device :
2680 TAILQ_FIRST(&target->ed_entries));
2681 device != NULL;
2682 device = next_device) {
2683
2684 next_device = TAILQ_NEXT(device, links);
2685
2686 retval = tr_func(device, arg);
2687
2688 if (retval == 0)
2689 return(retval);
2690 }
2691
2692 return(retval);
2693}
2694
2695static int
2696xptperiphtraverse(struct cam_ed *device, struct cam_periph *start_periph,
2697 xpt_periphfunc_t *tr_func, void *arg)
2698{
2699 struct cam_periph *periph, *next_periph;
2700 int retval;
2701
2702 retval = 1;
2703
2704 for (periph = (start_periph ? start_periph :
2705 SLIST_FIRST(&device->periphs));
2706 periph != NULL;
2707 periph = next_periph) {
2708
2709 next_periph = SLIST_NEXT(periph, periph_links);
2710
2711 retval = tr_func(periph, arg);
2712 if (retval == 0)
2713 return(retval);
2714 }
2715
2716 return(retval);
2717}
2718
2719static int
2720xptpdrvtraverse(struct periph_driver **start_pdrv,
2721 xpt_pdrvfunc_t *tr_func, void *arg)
2722{
2723 struct periph_driver **pdrv;
2724 int retval;
2725
2726 retval = 1;
2727
2728 /*
2729 * We don't traverse the peripheral driver list like we do the
2730 * other lists, because it is a linker set, and therefore cannot be
2731 * changed during runtime. If the peripheral driver list is ever
2732 * re-done to be something other than a linker set (i.e. it can
2733 * change while the system is running), the list traversal should
2734 * be modified to work like the other traversal functions.
2735 */
2736 for (pdrv = (start_pdrv ? start_pdrv : periph_drivers);
2737 *pdrv != NULL; pdrv++) {
2738 retval = tr_func(pdrv, arg);
2739
2740 if (retval == 0)
2741 return(retval);
2742 }
2743
2744 return(retval);
2745}
2746
2747static int
2748xptpdperiphtraverse(struct periph_driver **pdrv,
2749 struct cam_periph *start_periph,
2750 xpt_periphfunc_t *tr_func, void *arg)
2751{
2752 struct cam_periph *periph, *next_periph;
2753 int retval;
2754
2755 retval = 1;
2756
2757 for (periph = (start_periph ? start_periph :
2758 TAILQ_FIRST(&(*pdrv)->units)); periph != NULL;
2759 periph = next_periph) {
2760
2761 next_periph = TAILQ_NEXT(periph, unit_links);
2762
2763 retval = tr_func(periph, arg);
2764 if (retval == 0)
2765 return(retval);
2766 }
2767 return(retval);
2768}
2769
2770static int
2771xptdefbusfunc(struct cam_eb *bus, void *arg)
2772{
2773 struct xpt_traverse_config *tr_config;
2774
2775 tr_config = (struct xpt_traverse_config *)arg;
2776
2777 if (tr_config->depth == XPT_DEPTH_BUS) {
2778 xpt_busfunc_t *tr_func;
2779
2780 tr_func = (xpt_busfunc_t *)tr_config->tr_func;
2781
2782 return(tr_func(bus, tr_config->tr_arg));
2783 } else
2784 return(xpttargettraverse(bus, NULL, xptdeftargetfunc, arg));
2785}
2786
2787static int
2788xptdeftargetfunc(struct cam_et *target, void *arg)
2789{
2790 struct xpt_traverse_config *tr_config;
2791
2792 tr_config = (struct xpt_traverse_config *)arg;
2793
2794 if (tr_config->depth == XPT_DEPTH_TARGET) {
2795 xpt_targetfunc_t *tr_func;
2796
2797 tr_func = (xpt_targetfunc_t *)tr_config->tr_func;
2798
2799 return(tr_func(target, tr_config->tr_arg));
2800 } else
2801 return(xptdevicetraverse(target, NULL, xptdefdevicefunc, arg));
2802}
2803
2804static int
2805xptdefdevicefunc(struct cam_ed *device, void *arg)
2806{
2807 struct xpt_traverse_config *tr_config;
2808
2809 tr_config = (struct xpt_traverse_config *)arg;
2810
2811 if (tr_config->depth == XPT_DEPTH_DEVICE) {
2812 xpt_devicefunc_t *tr_func;
2813
2814 tr_func = (xpt_devicefunc_t *)tr_config->tr_func;
2815
2816 return(tr_func(device, tr_config->tr_arg));
2817 } else
2818 return(xptperiphtraverse(device, NULL, xptdefperiphfunc, arg));
2819}
2820
2821static int
2822xptdefperiphfunc(struct cam_periph *periph, void *arg)
2823{
2824 struct xpt_traverse_config *tr_config;
2825 xpt_periphfunc_t *tr_func;
2826
2827 tr_config = (struct xpt_traverse_config *)arg;
2828
2829 tr_func = (xpt_periphfunc_t *)tr_config->tr_func;
2830
2831 /*
2832 * Unlike the other default functions, we don't check for depth
2833 * here. The peripheral driver level is the last level in the EDT,
2834 * so if we're here, we should execute the function in question.
2835 */
2836 return(tr_func(periph, tr_config->tr_arg));
2837}
2838
2839/*
2840 * Execute the given function for every bus in the EDT.
2841 */
2842static int
2843xpt_for_all_busses(xpt_busfunc_t *tr_func, void *arg)
2844{
2845 struct xpt_traverse_config tr_config;
2846
2847 tr_config.depth = XPT_DEPTH_BUS;
2848 tr_config.tr_func = tr_func;
2849 tr_config.tr_arg = arg;
2850
2851 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2852}
2853
2854/*
2855 * Execute the given function for every device in the EDT.
2856 */
2857static int
2858xpt_for_all_devices(xpt_devicefunc_t *tr_func, void *arg)
2859{
2860 struct xpt_traverse_config tr_config;
2861
2862 tr_config.depth = XPT_DEPTH_DEVICE;
2863 tr_config.tr_func = tr_func;
2864 tr_config.tr_arg = arg;
2865
2866 return(xptbustraverse(NULL, xptdefbusfunc, &tr_config));
2867}
2868
2869static int
2870xptsetasyncfunc(struct cam_ed *device, void *arg)
2871{
2872 struct cam_path path;
2873 struct ccb_getdev cgd;
2874 struct async_node *cur_entry;
2875
2876 cur_entry = (struct async_node *)arg;
2877
2878 /*
2879 * Don't report unconfigured devices (Wildcard devs,
2880 * devices only for target mode, device instances
2881 * that have been invalidated but are waiting for
2882	 * their last reference to be released).
2883 */
2884 if ((device->flags & CAM_DEV_UNCONFIGURED) != 0)
2885 return (1);
2886
2887 xpt_compile_path(&path,
2888 NULL,
2889 device->target->bus->path_id,
2890 device->target->target_id,
2891 device->lun_id);
2892 xpt_setup_ccb(&cgd.ccb_h, &path, /*priority*/1);
2893 cgd.ccb_h.func_code = XPT_GDEV_TYPE;
2894 xpt_action((union ccb *)&cgd);
2895 cur_entry->callback(cur_entry->callback_arg,
2896 AC_FOUND_DEVICE,
2897 &path, &cgd);
2898 xpt_release_path(&path);
2899
2900 return(1);
2901}
2902
2903static int
2904xptsetasyncbusfunc(struct cam_eb *bus, void *arg)
2905{
2906 struct cam_path path;
2907 struct ccb_pathinq cpi;
2908 struct async_node *cur_entry;
2909
2910 cur_entry = (struct async_node *)arg;
2911
2912 xpt_compile_path(&path, /*periph*/NULL,
2913 bus->sim->path_id,
2914 CAM_TARGET_WILDCARD,
2915 CAM_LUN_WILDCARD);
2916 xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
2917 cpi.ccb_h.func_code = XPT_PATH_INQ;
2918 xpt_action((union ccb *)&cpi);
2919 cur_entry->callback(cur_entry->callback_arg,
2920 AC_PATH_REGISTERED,
2921 &path, &cpi);
2922 xpt_release_path(&path);
2923
2924 return(1);
2925}
2926
2927static void
2928xpt_action_sasync_cb(void *context, int pending)
2929{
2930 struct async_node *cur_entry;
2931 struct xpt_task *task;
2932 uint32_t added;
2933
2934 task = (struct xpt_task *)context;
2935 cur_entry = (struct async_node *)task->data1;
2936 added = task->data2;
2937
2938 if ((added & AC_FOUND_DEVICE) != 0) {
2939 /*
2940 * Get this peripheral up to date with all
2941 * the currently existing devices.
2942 */
2943 xpt_for_all_devices(xptsetasyncfunc, cur_entry);
2944 }
2945 if ((added & AC_PATH_REGISTERED) != 0) {
2946 /*
2947 * Get this peripheral up to date with all
2948 * the currently existing busses.
2949 */
2950 xpt_for_all_busses(xptsetasyncbusfunc, cur_entry);
2951 }
2952
2953 kfree(task, M_CAMXPT);
2954}
2955
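/*
 * Illustrative sketch only (not used elsewhere in this file): the typical
 * calling sequence for a simple, immediately-completed CCB such as
 * XPT_PATH_INQ, mirroring xptsetasyncbusfunc() above.  The function name is
 * hypothetical.
 */
static __inline cam_status
xpt_example_path_inq(struct cam_path *path, struct ccb_pathinq *cpi)
{
	/* Initialize the CCB header for this path at normal priority. */
	xpt_setup_ccb(&cpi->ccb_h, path, /*priority*/1);
	cpi->ccb_h.func_code = XPT_PATH_INQ;

	/* XPT_PATH_INQ is dispatched synchronously by xpt_action() below. */
	xpt_action((union ccb *)cpi);

	return (cpi->ccb_h.status);
}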
2956void
2957xpt_action(union ccb *start_ccb)
2958{
2959 CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_action\n"));
2960
2961 start_ccb->ccb_h.status = CAM_REQ_INPROG;
2962
2963 switch (start_ccb->ccb_h.func_code) {
2964 case XPT_SCSI_IO:
2965 {
2966 struct cam_ed *device;
2967#ifdef CAMDEBUG
2968 char cdb_str[(SCSI_MAX_CDBLEN * 3) + 1];
2969 struct cam_path *path;
2970
2971 path = start_ccb->ccb_h.path;
2972#endif
2973
2974 /*
2975 * For the sake of compatibility with SCSI-1
2976 * devices that may not understand the identify
2977 * message, we include lun information in the
2978 * second byte of all commands. SCSI-1 specifies
2979		 * that luns are a 3-bit value and reserves only 3
2980 * bits for lun information in the CDB. Later
2981 * revisions of the SCSI spec allow for more than 8
2982 * luns, but have deprecated lun information in the
2983		 * CDB. So, if the lun won't fit, we must omit it.
2984 *
2985 * Also be aware that during initial probing for devices,
2986 * the inquiry information is unknown but initialized to 0.
2987 * This means that this code will be exercised while probing
2988 * devices with an ANSI revision greater than 2.
2989 */
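		/*
		 * For example: with protocol_version <= SCSI_REV_2 and
		 * target_lun 3, the code below ORs (3 << 5) == 0x60 into
		 * byte 1 of the CDB.
		 */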
2990 device = start_ccb->ccb_h.path->device;
2991 if (device->protocol_version <= SCSI_REV_2
2992 && start_ccb->ccb_h.target_lun < 8
2993 && (start_ccb->ccb_h.flags & CAM_CDB_POINTER) == 0) {
2994
2995 start_ccb->csio.cdb_io.cdb_bytes[1] |=
2996 start_ccb->ccb_h.target_lun << 5;
2997 }
2998 start_ccb->csio.scsi_status = SCSI_STATUS_OK;
2999 CAM_DEBUG(path, CAM_DEBUG_CDB,("%s. CDB: %s\n",
3000 scsi_op_desc(start_ccb->csio.cdb_io.cdb_bytes[0],
3001 &path->device->inq_data),
3002 scsi_cdb_string(start_ccb->csio.cdb_io.cdb_bytes,
3003 cdb_str, sizeof(cdb_str))));
3004 /* FALLTHROUGH */
3005 }
3006 case XPT_TARGET_IO:
3007 case XPT_CONT_TARGET_IO:
3008 start_ccb->csio.sense_resid = 0;
3009 start_ccb->csio.resid = 0;
3010 /* FALLTHROUGH */
3011 case XPT_RESET_DEV:
3012 case XPT_ENG_EXEC:
3013 {
3014 struct cam_path *path;
3015 struct cam_sim *sim;
3016 int runq;
3017
3018 path = start_ccb->ccb_h.path;
3019
3020 sim = path->bus->sim;
3021 if (sim == &cam_dead_sim) {
3022 /* The SIM has gone; just execute the CCB directly. */
3023 cam_ccbq_send_ccb(&path->device->ccbq, start_ccb);
3024 (*(sim->sim_action))(sim, start_ccb);
3025 break;
3026 }
3027
3028 cam_ccbq_insert_ccb(&path->device->ccbq, start_ccb);
3029 if (path->device->qfrozen_cnt == 0)
3030 runq = xpt_schedule_dev_sendq(path->bus, path->device);
3031 else
3032 runq = 0;
3033 if (runq != 0)
3034 xpt_run_dev_sendq(path->bus);
3035 break;
3036 }
3037 case XPT_SET_TRAN_SETTINGS:
3038 {
3039 xpt_set_transfer_settings(&start_ccb->cts,
3040 start_ccb->ccb_h.path->device,
3041 /*async_update*/FALSE);
3042 break;
3043 }
3044 case XPT_CALC_GEOMETRY:
3045 {
3046 struct cam_sim *sim;
3047
3048 /* Filter out garbage */
3049 if (start_ccb->ccg.block_size == 0
3050 || start_ccb->ccg.volume_size == 0) {
3051 start_ccb->ccg.cylinders = 0;
3052 start_ccb->ccg.heads = 0;
3053 start_ccb->ccg.secs_per_track = 0;
3054 start_ccb->ccb_h.status = CAM_REQ_CMP;
3055 break;
3056 }
3057 sim = start_ccb->ccb_h.path->bus->sim;
3058 (*(sim->sim_action))(sim, start_ccb);
3059 break;
3060 }
3061 case XPT_ABORT:
3062 {
3063 union ccb* abort_ccb;
3064
3065 abort_ccb = start_ccb->cab.abort_ccb;
3066 if (XPT_FC_IS_DEV_QUEUED(abort_ccb)) {
3067
3068 if (abort_ccb->ccb_h.pinfo.index >= 0) {
3069 struct cam_ccbq *ccbq;
3070
3071 ccbq = &abort_ccb->ccb_h.path->device->ccbq;
3072 cam_ccbq_remove_ccb(ccbq, abort_ccb);
3073 abort_ccb->ccb_h.status =
3074 CAM_REQ_ABORTED|CAM_DEV_QFRZN;
3075 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
3076 xpt_done(abort_ccb);
3077 start_ccb->ccb_h.status = CAM_REQ_CMP;
3078 break;
3079 }
3080 if (abort_ccb->ccb_h.pinfo.index == CAM_UNQUEUED_INDEX
3081 && (abort_ccb->ccb_h.status & CAM_SIM_QUEUED) == 0) {
3082 /*
3083 * We've caught this ccb en route to
3084 * the SIM. Flag it for abort and the
3085 * SIM will do so just before starting
3086 * real work on the CCB.
3087 */
3088 abort_ccb->ccb_h.status =
3089 CAM_REQ_ABORTED|CAM_DEV_QFRZN;
3090 xpt_freeze_devq(abort_ccb->ccb_h.path, 1);
3091 start_ccb->ccb_h.status = CAM_REQ_CMP;
3092 break;
3093 }
3094 }
3095 if (XPT_FC_IS_QUEUED(abort_ccb)
3096 && (abort_ccb->ccb_h.pinfo.index == CAM_DONEQ_INDEX)) {
3097 /*
3098 * It's already completed but waiting
3099 * for our SWI to get to it.
3100 */
3101 start_ccb->ccb_h.status = CAM_UA_ABORT;
3102 break;
3103 }
3104 /*
3105 * If we weren't able to take care of the abort request
3106 * in the XPT, pass the request down to the SIM for processing.
3107 */
3108 /* FALLTHROUGH */
3109 }
3110 case XPT_ACCEPT_TARGET_IO:
3111 case XPT_EN_LUN:
3112 case XPT_IMMED_NOTIFY:
3113 case XPT_NOTIFY_ACK:
3114 case XPT_GET_TRAN_SETTINGS:
3115 case XPT_RESET_BUS:
3116 {
3117 struct cam_sim *sim;
3118
3119 sim = start_ccb->ccb_h.path->bus->sim;
3120 (*(sim->sim_action))(sim, start_ccb);
3121 break;
3122 }
3123 case XPT_PATH_INQ:
3124 {
3125 struct cam_sim *sim;
3126
3127 sim = start_ccb->ccb_h.path->bus->sim;
3128 (*(sim->sim_action))(sim, start_ccb);
3129 break;
3130 }
3131 case XPT_PATH_STATS:
3132 start_ccb->cpis.last_reset =
3133 start_ccb->ccb_h.path->bus->last_reset;
3134 start_ccb->ccb_h.status = CAM_REQ_CMP;
3135 break;
3136 case XPT_GDEV_TYPE:
3137 {
3138 struct cam_ed *dev;
3139
3140 dev = start_ccb->ccb_h.path->device;
3141 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
3142 start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
3143 } else {
3144 struct ccb_getdev *cgd;
3145 struct cam_eb *bus;
3146 struct cam_et *tar;
3147
3148 cgd = &start_ccb->cgd;
3149 bus = cgd->ccb_h.path->bus;
3150 tar = cgd->ccb_h.path->target;
3151 cgd->inq_data = dev->inq_data;
3152 cgd->ccb_h.status = CAM_REQ_CMP;
3153 cgd->serial_num_len = dev->serial_num_len;
3154 if ((dev->serial_num_len > 0)
3155 && (dev->serial_num != NULL))
3156 bcopy(dev->serial_num, cgd->serial_num,
3157 dev->serial_num_len);
3158 }
3159 break;
3160 }
3161 case XPT_GDEV_STATS:
3162 {
3163 struct cam_ed *dev;
3164
3165 dev = start_ccb->ccb_h.path->device;
3166 if ((dev->flags & CAM_DEV_UNCONFIGURED) != 0) {
3167 start_ccb->ccb_h.status = CAM_DEV_NOT_THERE;
3168 } else {
3169 struct ccb_getdevstats *cgds;
3170 struct cam_eb *bus;
3171 struct cam_et *tar;
3172
3173 cgds = &start_ccb->cgds;
3174 bus = cgds->ccb_h.path->bus;
3175 tar = cgds->ccb_h.path->target;
3176 cgds->dev_openings = dev->ccbq.dev_openings;
3177 cgds->dev_active = dev->ccbq.dev_active;
3178 cgds->devq_openings = dev->ccbq.devq_openings;
3179 cgds->devq_queued = dev->ccbq.queue.entries;
3180 cgds->held = dev->ccbq.held;
3181 cgds->last_reset = tar->last_reset;
3182 cgds->maxtags = dev->quirk->maxtags;
3183 cgds->mintags = dev->quirk->mintags;
3184 if (timevalcmp(&tar->last_reset, &bus->last_reset, <))
3185 cgds->last_reset = bus->last_reset;
3186 cgds->ccb_h.status = CAM_REQ_CMP;
3187 }
3188 break;
3189 }
3190 case XPT_GDEVLIST:
3191 {
3192 struct cam_periph *nperiph;
3193 struct periph_list *periph_head;
3194 struct ccb_getdevlist *cgdl;
3195 u_int i;
3196 struct cam_ed *device;
3197 int found;
3198
3199
3200 found = 0;
3201
3202 /*
3203 * Don't want anyone mucking with our data.
3204 */
3205 device = start_ccb->ccb_h.path->device;
3206 periph_head = &device->periphs;
3207 cgdl = &start_ccb->cgdl;
3208
3209 /*
3210 * Check and see if the list has changed since the user
3211 * last requested a list member. If so, tell them that the
3212 * list has changed, and therefore they need to start over
3213 * from the beginning.
3214 */
3215 if ((cgdl->index != 0) &&
3216 (cgdl->generation != device->generation)) {
3217 cgdl->status = CAM_GDEVLIST_LIST_CHANGED;
3218 break;
3219 }
3220
3221 /*
3222 * Traverse the list of peripherals and attempt to find
3223 * the requested peripheral.
3224 */
3225 for (nperiph = SLIST_FIRST(periph_head), i = 0;
3226 (nperiph != NULL) && (i <= cgdl->index);
3227 nperiph = SLIST_NEXT(nperiph, periph_links), i++) {
3228 if (i == cgdl->index) {
3229 strncpy(cgdl->periph_name,
3230 nperiph->periph_name,
3231 DEV_IDLEN);
3232 cgdl->unit_number = nperiph->unit_number;
3233 found = 1;
3234 }
3235 }
3236 if (found == 0) {
3237 cgdl->status = CAM_GDEVLIST_ERROR;
3238 break;
3239 }
3240
3241 if (nperiph == NULL)
3242 cgdl->status = CAM_GDEVLIST_LAST_DEVICE;
3243 else
3244 cgdl->status = CAM_GDEVLIST_MORE_DEVS;
3245
3246 cgdl->index++;
3247 cgdl->generation = device->generation;
3248
3249 cgdl->ccb_h.status = CAM_REQ_CMP;
3250 break;
3251 }
3252 case XPT_DEV_MATCH:
3253 {
3254 dev_pos_type position_type;
3255 struct ccb_dev_match *cdm;
3256 int ret;
3257
3258 cdm = &start_ccb->cdm;
3259
3260 /*
3261 * There are two ways of getting at information in the EDT.
3262 * The first way is via the primary EDT tree. It starts
3263 * with a list of busses, then a list of targets on a bus,
3264 * then devices/luns on a target, and then peripherals on a
3265 * device/lun. The "other" way is by the peripheral driver
3266 * lists. The peripheral driver lists are organized by
3267		 * peripheral driver (obviously), so it makes sense to
3268 * use the peripheral driver list if the user is looking
3269 * for something like "da1", or all "da" devices. If the
3270 * user is looking for something on a particular bus/target
3271 * or lun, it's generally better to go through the EDT tree.
3272 */
3273
3274 if (cdm->pos.position_type != CAM_DEV_POS_NONE)
3275 position_type = cdm->pos.position_type;
3276 else {
3277 u_int i;
3278
3279 position_type = CAM_DEV_POS_NONE;
3280
3281 for (i = 0; i < cdm->num_patterns; i++) {
3282 if ((cdm->patterns[i].type == DEV_MATCH_BUS)
3283 ||(cdm->patterns[i].type == DEV_MATCH_DEVICE)){
3284 position_type = CAM_DEV_POS_EDT;
3285 break;
3286 }
3287 }
3288
3289 if (cdm->num_patterns == 0)
3290 position_type = CAM_DEV_POS_EDT;
3291 else if (position_type == CAM_DEV_POS_NONE)
3292 position_type = CAM_DEV_POS_PDRV;
3293 }
3294
3295 switch(position_type & CAM_DEV_POS_TYPEMASK) {
3296 case CAM_DEV_POS_EDT:
3297 ret = xptedtmatch(cdm);
3298 break;
3299 case CAM_DEV_POS_PDRV:
3300 ret = xptperiphlistmatch(cdm);
3301 break;
3302 default:
3303 cdm->status = CAM_DEV_MATCH_ERROR;
3304 break;
3305 }
3306
3307 if (cdm->status == CAM_DEV_MATCH_ERROR)
3308 start_ccb->ccb_h.status = CAM_REQ_CMP_ERR;
3309 else
3310 start_ccb->ccb_h.status = CAM_REQ_CMP;
3311
3312 break;
3313 }
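	/*
	 * Typical (illustrative) use of XPT_SASYNC_CB by a peripheral driver,
	 * with hypothetical names:
	 *
	 *	struct ccb_setasync csa;
	 *
	 *	xpt_setup_ccb(&csa.ccb_h, periph->path, 5);
	 *	csa.ccb_h.func_code = XPT_SASYNC_CB;
	 *	csa.event_enable = AC_FOUND_DEVICE | AC_LOST_DEVICE;
	 *	csa.callback = mydriver_async;
	 *	csa.callback_arg = periph;
	 *	xpt_action((union ccb *)&csa);
	 *
	 * Passing event_enable == 0 for an existing callback/callback_arg pair
	 * removes the registration, as handled below.
	 */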
3314 case XPT_SASYNC_CB:
3315 {
3316 struct ccb_setasync *csa;
3317 struct async_node *cur_entry;
3318 struct async_list *async_head;
3319 u_int32_t added;
3320
3321 csa = &start_ccb->csa;
3322 added = csa->event_enable;
3323 async_head = &csa->ccb_h.path->device->asyncs;
3324
3325 /*
3326 * If there is already an entry for us, simply
3327 * update it.
3328 */
3329 cur_entry = SLIST_FIRST(async_head);
3330 while (cur_entry != NULL) {
3331 if ((cur_entry->callback_arg == csa->callback_arg)
3332 && (cur_entry->callback == csa->callback))
3333 break;
3334 cur_entry = SLIST_NEXT(cur_entry, links);
3335 }
3336
3337 if (cur_entry != NULL) {
3338 /*
3339 * If the request has no flags set,
3340 * remove the entry.
3341 */
3342 added &= ~cur_entry->event_enable;
3343 if (csa->event_enable == 0) {
3344 SLIST_REMOVE(async_head, cur_entry,
3345 async_node, links);
3346 csa->ccb_h.path->device->refcount--;
3347 kfree(cur_entry, M_CAMXPT);
3348 } else {
3349 cur_entry->event_enable = csa->event_enable;
3350 }
3351 } else {
3352 cur_entry = kmalloc(sizeof(*cur_entry), M_CAMXPT,
3353 M_INTWAIT);
3354 cur_entry->event_enable = csa->event_enable;
3355 cur_entry->callback_arg = csa->callback_arg;
3356 cur_entry->callback = csa->callback;
3357 SLIST_INSERT_HEAD(async_head, cur_entry, links);
3358 csa->ccb_h.path->device->refcount++;
3359 }
3360
3361 /*
3362 * Need to decouple this operation via a taskqueue so that
3363 * the locking doesn't become a mess.
3364 */
3365 if ((added & (AC_FOUND_DEVICE | AC_PATH_REGISTERED)) != 0) {
3366 struct xpt_task *task;
3367
3368 task = kmalloc(sizeof(struct xpt_task), M_CAMXPT,
3369 M_INTWAIT);
3370
3371 TASK_INIT(&task->task, 0, xpt_action_sasync_cb, task);
3372 task->data1 = cur_entry;
3373 task->data2 = added;
3374 taskqueue_enqueue(taskqueue_thread[mycpuid],
3375 &task->task);
3376 }
3377
3378 start_ccb->ccb_h.status = CAM_REQ_CMP;
3379 break;
3380 }
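	/*
	 * XPT_REL_SIMQ adjusts device openings and/or schedules release of a
	 * frozen device queue.  For each requested RELSIM_* condition the code
	 * below either defers the release until that condition occurs (leaving
	 * CAM_DEV_QFREEZE set in the CCB) or, when an equivalent release is
	 * already pending, clears CAM_DEV_QFREEZE so that a single event
	 * remains sufficient.  If CAM_DEV_QFREEZE ends up clear once all
	 * conditions have been processed, the queue is released immediately.
	 */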
3381 case XPT_REL_SIMQ:
3382 {
3383 struct ccb_relsim *crs;
3384 struct cam_ed *dev;
3385
3386 crs = &start_ccb->crs;
3387 dev = crs->ccb_h.path->device;
3388 if (dev == NULL) {
3389
3390 crs->ccb_h.status = CAM_DEV_NOT_THERE;
3391 break;
3392 }
3393
3394 if ((crs->release_flags & RELSIM_ADJUST_OPENINGS) != 0) {
3395
3396 if (INQ_DATA_TQ_ENABLED(&dev->inq_data)) {
3397 /* Don't ever go below one opening */
3398 if (crs->openings > 0) {
3399 xpt_dev_ccbq_resize(crs->ccb_h.path,
3400 crs->openings);
3401
3402 if (bootverbose) {
3403 xpt_print(crs->ccb_h.path,
3404 "tagged openings now %d\n",
3405 crs->openings);
3406 }
3407 }
3408 }
3409 }
3410
3411 if ((crs->release_flags & RELSIM_RELEASE_AFTER_TIMEOUT) != 0) {
3412
3413 if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
3414
3415 /*
3416 * Just extend the old timeout and decrement
3417 * the freeze count so that a single timeout
3418 * is sufficient for releasing the queue.
3419 */
3420 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3421 callout_stop(&dev->callout);
3422 } else {
3423
3424 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3425 }
3426
3427 callout_reset(&dev->callout,
3428 (crs->release_timeout * hz) / 1000,
3429 xpt_release_devq_timeout, dev);
3430
3431 dev->flags |= CAM_DEV_REL_TIMEOUT_PENDING;
3432
3433 }
3434
3435 if ((crs->release_flags & RELSIM_RELEASE_AFTER_CMDCMPLT) != 0) {
3436
3437 if ((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0) {
3438 /*
3439 * Decrement the freeze count so that a single
3440 * completion is still sufficient to unfreeze
3441 * the queue.
3442 */
3443 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3444 } else {
3445
3446 dev->flags |= CAM_DEV_REL_ON_COMPLETE;
3447 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3448 }
3449 }
3450
3451 if ((crs->release_flags & RELSIM_RELEASE_AFTER_QEMPTY) != 0) {
3452
3453 if ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
3454 || (dev->ccbq.dev_active == 0)) {
3455
3456 start_ccb->ccb_h.flags &= ~CAM_DEV_QFREEZE;
3457 } else {
3458
3459 dev->flags |= CAM_DEV_REL_ON_QUEUE_EMPTY;
3460 start_ccb->ccb_h.flags |= CAM_DEV_QFREEZE;
3461 }
3462 }
3463
3464 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) == 0) {
3465
3466 xpt_release_devq(crs->ccb_h.path, /*count*/1,
3467 /*run_queue*/TRUE);
3468 }
3469 start_ccb->crs.qfrozen_cnt = dev->qfrozen_cnt;
3470 start_ccb->ccb_h.status = CAM_REQ_CMP;
3471 break;
3472 }
3473 case XPT_SCAN_BUS:
3474 xpt_scan_bus(start_ccb->ccb_h.path->periph, start_ccb);
3475 break;
3476 case XPT_SCAN_LUN:
3477 xpt_scan_lun(start_ccb->ccb_h.path->periph,
3478 start_ccb->ccb_h.path, start_ccb->crcn.flags,
3479 start_ccb);
3480 break;
3481 case XPT_DEBUG: {
3482#ifdef CAMDEBUG
3483#ifdef CAM_DEBUG_DELAY
3484 cam_debug_delay = CAM_DEBUG_DELAY;
3485#endif
3486 cam_dflags = start_ccb->cdbg.flags;
3487 if (cam_dpath != NULL) {
3488 xpt_free_path(cam_dpath);
3489 cam_dpath = NULL;
3490 }
3491
3492 if (cam_dflags != CAM_DEBUG_NONE) {
3493 if (xpt_create_path(&cam_dpath, xpt_periph,
3494 start_ccb->ccb_h.path_id,
3495 start_ccb->ccb_h.target_id,
3496 start_ccb->ccb_h.target_lun) !=
3497 CAM_REQ_CMP) {
3498 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3499 cam_dflags = CAM_DEBUG_NONE;
3500 } else {
3501 start_ccb->ccb_h.status = CAM_REQ_CMP;
3502 xpt_print(cam_dpath, "debugging flags now %x\n",
3503 cam_dflags);
3504 }
3505 } else {
3506 cam_dpath = NULL;
3507 start_ccb->ccb_h.status = CAM_REQ_CMP;
3508 }
3509#else /* !CAMDEBUG */
3510 start_ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
3511#endif /* CAMDEBUG */
3512 break;
3513 }
3514 case XPT_NOOP:
3515 if ((start_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0)
3516 xpt_freeze_devq(start_ccb->ccb_h.path, 1);
3517 start_ccb->ccb_h.status = CAM_REQ_CMP;
3518 break;
3519 default:
3520 case XPT_SDEV_TYPE:
3521 case XPT_TERM_IO:
3522 case XPT_ENG_INQ:
3523 /* XXX Implement */
3524 start_ccb->ccb_h.status = CAM_PROVIDE_FAIL;
3525 break;
3526 }
3527}
3528
3529void
3530xpt_polled_action(union ccb *start_ccb)
3531{
3532 u_int32_t timeout;
3533 struct cam_sim *sim;
3534 struct cam_devq *devq;
3535 struct cam_ed *dev;
3536
3537 timeout = start_ccb->ccb_h.timeout;
3538 sim = start_ccb->ccb_h.path->bus->sim;
3539 devq = sim->devq;
3540 dev = start_ccb->ccb_h.path->device;
3541
3542 sim_lock_assert_owned(sim->lock);
3543
3544 /*
3545 * Steal an opening so that no other queued requests
3546 * can get it before us while we simulate interrupts.
3547 */
3548 dev->ccbq.devq_openings--;
3549 dev->ccbq.dev_openings--;
3550
3551 while(((devq && devq->send_openings <= 0) || dev->ccbq.dev_openings < 0)
3552 && (--timeout > 0)) {
3553 DELAY(1000);
3554 (*(sim->sim_poll))(sim);
3555 camisr_runqueue(sim);
3556 }
3557
3558 dev->ccbq.devq_openings++;
3559 dev->ccbq.dev_openings++;
3560
3561 if (timeout != 0) {
3562 xpt_action(start_ccb);
3563 while(--timeout > 0) {
3564 (*(sim->sim_poll))(sim);
3565 camisr_runqueue(sim);
3566 if ((start_ccb->ccb_h.status & CAM_STATUS_MASK)
3567 != CAM_REQ_INPROG)
3568 break;
3569 DELAY(1000);
3570 }
3571 if (timeout == 0) {
3572 /*
3573 * XXX Is it worth adding a sim_timeout entry
3574 * point so we can attempt recovery? If
3575 * this is only used for dumps, I don't think
3576 * it is.
3577 */
3578 start_ccb->ccb_h.status = CAM_CMD_TIMEOUT;
3579 }
3580 } else {
3581 start_ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
3582 }
3583}
3584
3585/*
3586 * Schedule a peripheral driver to receive a ccb when its
3587 * target device has space for more transactions.
3588 */
3589void
3590xpt_schedule(struct cam_periph *perph, u_int32_t new_priority)
3591{
3592 struct cam_ed *device;
3593 union ccb *work_ccb;
3594 int runq;
3595
3596 sim_lock_assert_owned(perph->sim->lock);
3597
3598 CAM_DEBUG(perph->path, CAM_DEBUG_TRACE, ("xpt_schedule\n"));
3599 device = perph->path->device;
3600 if (periph_is_queued(perph)) {
3601 /* Simply reorder based on new priority */
3602 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3603 (" change priority to %d\n", new_priority));
3604 if (new_priority < perph->pinfo.priority) {
3605 camq_change_priority(&device->drvq,
3606 perph->pinfo.index,
3607 new_priority);
3608 }
3609 runq = 0;
3610 } else if (perph->path->bus->sim == &cam_dead_sim) {
3611 /* The SIM is gone so just call periph_start directly. */
3612 work_ccb = xpt_get_ccb(perph->path->device);
3613 if (work_ccb == NULL)
3614 return; /* XXX */
3615 xpt_setup_ccb(&work_ccb->ccb_h, perph->path, new_priority);
3616 perph->pinfo.priority = new_priority;
3617 perph->periph_start(perph, work_ccb);
3618 return;
3619 } else {
3620 /* New entry on the queue */
3621 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3622 (" added periph to queue\n"));
3623 perph->pinfo.priority = new_priority;
3624 perph->pinfo.generation = ++device->drvq.generation;
3625 camq_insert(&device->drvq, &perph->pinfo);
3626 runq = xpt_schedule_dev_allocq(perph->path->bus, device);
3627 }
3628 if (runq != 0) {
3629 CAM_DEBUG(perph->path, CAM_DEBUG_SUBTRACE,
3630 (" calling xpt_run_devq\n"));
3631 xpt_run_dev_allocq(perph->path->bus);
3632 }
3633}
3634
3635
3636/*
3637 * Schedule a device to run on a given queue.
3638 * If the device was inserted as a new entry on the queue,
3639 * return 1 meaning the device queue should be run. If we
3640 * were already queued, implying someone else has already
3641 * started the queue, return 0 so the caller doesn't attempt
3642 * to run the queue.
3643 */
3644static int
3645xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
3646 u_int32_t new_priority)
3647{
3648 int retval;
3649 u_int32_t old_priority;
3650
3651 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_schedule_dev\n"));
3652
3653 old_priority = pinfo->priority;
3654
3655 /*
3656 * Are we already queued?
3657 */
3658 if (pinfo->index != CAM_UNQUEUED_INDEX) {
3659 /* Simply reorder based on new priority */
3660 if (new_priority < old_priority) {
3661 camq_change_priority(queue, pinfo->index,
3662 new_priority);
3663 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3664 ("changed priority to %d\n",
3665 new_priority));
3666 }
3667 retval = 0;
3668 } else {
3669 /* New entry on the queue */
3670 if (new_priority < old_priority)
3671 pinfo->priority = new_priority;
3672
3673 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3674 ("Inserting onto queue\n"));
3675 pinfo->generation = ++queue->generation;
3676 camq_insert(queue, pinfo);
3677 retval = 1;
3678 }
3679 return (retval);
3680}
3681
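/*
 * Run the CCB allocation queue for a bus: while allocation openings are
 * available and the queue isn't frozen, pull the highest-priority device off
 * the devq's alloc_queue, allocate a CCB for the highest-priority peripheral
 * waiting on that device's drvq, and hand the CCB to that peripheral's
 * periph_start routine.  xpt_run_dev_sendq() below does the analogous work
 * for CCBs that are ready to be dispatched to the SIM.
 */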
3682static void
3683xpt_run_dev_allocq(struct cam_eb *bus)
3684{
3685 struct cam_devq *devq;
3686
3687 if ((devq = bus->sim->devq) == NULL) {
3688 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_allocq: NULL devq\n"));
3689 return;
3690 }
3691 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_allocq\n"));
3692
3693 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3694 (" qfrozen_cnt == 0x%x, entries == %d, "
3695 "openings == %d, active == %d\n",
3696 devq->alloc_queue.qfrozen_cnt,
3697 devq->alloc_queue.entries,
3698 devq->alloc_openings,
3699 devq->alloc_active));
3700
3701 devq->alloc_queue.qfrozen_cnt++;
3702 while ((devq->alloc_queue.entries > 0)
3703 && (devq->alloc_openings > 0)
3704 && (devq->alloc_queue.qfrozen_cnt <= 1)) {
3705 struct cam_ed_qinfo *qinfo;
3706 struct cam_ed *device;
3707 union ccb *work_ccb;
3708 struct cam_periph *drv;
3709 struct camq *drvq;
3710
3711 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue,
3712 CAMQ_HEAD);
3713 device = qinfo->device;
3714
3715 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3716 ("running device %p\n", device));
3717
3718 drvq = &device->drvq;
3719
3720#ifdef CAMDEBUG
3721 if (drvq->entries <= 0) {
3722 panic("xpt_run_dev_allocq: "
3723 "Device on queue without any work to do");
3724 }
3725#endif
3726 if ((work_ccb = xpt_get_ccb(device)) != NULL) {
3727 devq->alloc_openings--;
3728 devq->alloc_active++;
3729 drv = (struct cam_periph*)camq_remove(drvq, CAMQ_HEAD);
3730 xpt_setup_ccb(&work_ccb->ccb_h, drv->path,
3731 drv->pinfo.priority);
3732 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3733 ("calling periph start\n"));
3734 drv->periph_start(drv, work_ccb);
3735 } else {
3736 /*
3737 * Malloc failure in alloc_ccb
3738 */
3739 /*
3740 * XXX add us to a list to be run from free_ccb
3741 * if we don't have any ccbs active on this
3742 * device queue otherwise we may never get run
3743 * again.
3744 */
3745 break;
3746 }
3747
3748 if (drvq->entries > 0) {
3749 /* We have more work. Attempt to reschedule */
3750 xpt_schedule_dev_allocq(bus, device);
3751 }
3752 }
3753 devq->alloc_queue.qfrozen_cnt--;
3754}
3755
3756static void
3757xpt_run_dev_sendq(struct cam_eb *bus)
3758{
3759 struct cam_devq *devq;
3760
3761 if ((devq = bus->sim->devq) == NULL) {
3762 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_sendq: NULL devq\n"));
3763 return;
3764 }
3765 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_run_dev_sendq\n"));
3766
3767 devq->send_queue.qfrozen_cnt++;
3768 while ((devq->send_queue.entries > 0)
3769 && (devq->send_openings > 0)) {
3770 struct cam_ed_qinfo *qinfo;
3771 struct cam_ed *device;
3772 union ccb *work_ccb;
3773 struct cam_sim *sim;
3774
3775 if (devq->send_queue.qfrozen_cnt > 1) {
3776 break;
3777 }
3778
3779 qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue,
3780 CAMQ_HEAD);
3781 device = qinfo->device;
3782
3783 /*
3784 * If the device has been "frozen", don't attempt
3785 * to run it.
3786 */
3787 if (device->qfrozen_cnt > 0) {
3788 continue;
3789 }
3790
3791 CAM_DEBUG_PRINT(CAM_DEBUG_XPT,
3792 ("running device %p\n", device));
3793
3794 work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
3795 if (work_ccb == NULL) {
3796 kprintf("device on run queue with no ccbs???\n");
3797 continue;
3798 }
3799
3800 if ((work_ccb->ccb_h.flags & CAM_HIGH_POWER) != 0) {
3801
3802 lockmgr(&xsoftc.xpt_lock, LK_EXCLUSIVE);
3803 if (xsoftc.num_highpower <= 0) {
3804 /*
3805 * We got a high power command, but we
3806 * don't have any available slots. Freeze
3807 * the device queue until we have a slot
3808 * available.
3809 */
3810 device->qfrozen_cnt++;
3811 STAILQ_INSERT_TAIL(&xsoftc.highpowerq,
3812 &work_ccb->ccb_h,
3813 xpt_links.stqe);
3814
3815 lockmgr(&xsoftc.xpt_lock, LK_RELEASE);
3816 continue;
3817 } else {
3818 /*
3819 * Consume a high power slot while
3820 * this ccb runs.
3821 */
3822 xsoftc.num_highpower--;
3823 }
3824 lockmgr(&xsoftc.xpt_lock, LK_RELEASE);
3825 }
3826 devq->active_dev = device;
3827 cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
3828
3829 cam_ccbq_send_ccb(&device->ccbq, work_ccb);
3830
3831 devq->send_openings--;
3832 devq->send_active++;
3833
3834 if (device->ccbq.queue.entries > 0)
3835 xpt_schedule_dev_sendq(bus, device);
3836
3837 if (work_ccb && (work_ccb->ccb_h.flags & CAM_DEV_QFREEZE) != 0){
3838 /*
3839 * The client wants to freeze the queue
3840 * after this CCB is sent.
3841 */
3842 device->qfrozen_cnt++;
3843 }
3844
3845 /* In Target mode, the peripheral driver knows best... */
3846 if (work_ccb->ccb_h.func_code == XPT_SCSI_IO) {
3847 if ((device->inq_flags & SID_CmdQue) != 0
3848 && work_ccb->csio.tag_action != CAM_TAG_ACTION_NONE)
3849 work_ccb->ccb_h.flags |= CAM_TAG_ACTION_VALID;
3850 else
3851 /*
3852 * Clear this in case of a retried CCB that
3853 * failed due to a rejected tag.
3854 */
3855 work_ccb->ccb_h.flags &= ~CAM_TAG_ACTION_VALID;
3856 }
3857
3858 /*
3859 * Device queues can be shared among multiple sim instances
3860 * that reside on different busses. Use the SIM in the queue
3861 * CCB's path, rather than the one in the bus that was passed
3862 * into this function.
3863 */
3864 sim = work_ccb->ccb_h.path->bus->sim;
3865 (*(sim->sim_action))(sim, work_ccb);
3866
3867 devq->active_dev = NULL;
3868 }
3869 devq->send_queue.qfrozen_cnt--;
3870}
3871
3872/*
3873 * This function merges fields from the slave ccb into the master ccb,
3874 * while keeping important fields in the master ccb constant.
3875 */
3876void
3877xpt_merge_ccb(union ccb *master_ccb, union ccb *slave_ccb)
3878{
3879 /*
3880 * Pull fields that are valid for peripheral drivers to set
3881 * into the master CCB along with the CCB "payload".
3882 */
3883 master_ccb->ccb_h.retry_count = slave_ccb->ccb_h.retry_count;
3884 master_ccb->ccb_h.func_code = slave_ccb->ccb_h.func_code;
3885 master_ccb->ccb_h.timeout = slave_ccb->ccb_h.timeout;
3886 master_ccb->ccb_h.flags = slave_ccb->ccb_h.flags;
3887 bcopy(&(&slave_ccb->ccb_h)[1], &(&master_ccb->ccb_h)[1],
3888 sizeof(union ccb) - sizeof(struct ccb_hdr));
3889}
3890
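/*
 * Initialize a CCB header for use on the given path at the given priority.
 * The timeout callout is initialized, the path/target/lun ids are copied
 * from the path (entries missing from the path are marked as wildcards),
 * and the header flags are cleared.
 */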
3891void
3892xpt_setup_ccb(struct ccb_hdr *ccb_h, struct cam_path *path, u_int32_t priority)
3893{
3894 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_setup_ccb\n"));
3895 callout_init(&ccb_h->timeout_ch);
3896 ccb_h->pinfo.priority = priority;
3897 ccb_h->path = path;
3898 ccb_h->path_id = path->bus->path_id;
3899 if (path->target)
3900 ccb_h->target_id = path->target->target_id;
3901 else
3902 ccb_h->target_id = CAM_TARGET_WILDCARD;
3903 if (path->device) {
3904 ccb_h->target_lun = path->device->lun_id;
3905 ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation;
3906 } else {
3907 ccb_h->target_lun = CAM_TARGET_WILDCARD;
3908 }
3909 ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
3910 ccb_h->flags = 0;
3911}
3912
3913/* Path manipulation functions */
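/*
 * Allocate and compile a new path for the given nexus.  On success the
 * caller owns the returned path and must eventually release it with
 * xpt_free_path(); on failure *new_path_ptr is set to NULL and the
 * compile status is returned.
 */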
3914cam_status
3915xpt_create_path(struct cam_path **new_path_ptr, struct cam_periph *perph,
3916 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3917{
3918 struct cam_path *path;
3919 cam_status status;
3920
3921 path = kmalloc(sizeof(*path), M_CAMXPT, M_INTWAIT);
3922 status = xpt_compile_path(path, perph, path_id, target_id, lun_id);
3923 if (status != CAM_REQ_CMP) {
3924 kfree(path, M_CAMXPT);
3925 path = NULL;
3926 }
3927 *new_path_ptr = path;
3928 return (status);
3929}
3930
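/*
 * As above, but for callers that do not already hold the SIM lock.  When
 * the path id names a specific bus, the bus's SIM lock is taken around
 * the path compilation.
 */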
3931cam_status
3932xpt_create_path_unlocked(struct cam_path **new_path_ptr,
3933 struct cam_periph *periph, path_id_t path_id,
3934 target_id_t target_id, lun_id_t lun_id)
3935{
3936 struct cam_path *path;
3937 struct cam_eb *bus = NULL;
3938 cam_status status;
3939 int need_unlock = 0;
3940
3941 path = (struct cam_path *)kmalloc(sizeof(*path), M_CAMXPT, M_WAITOK);
3942
3943 if (path_id != CAM_BUS_WILDCARD) {
3944 bus = xpt_find_bus(path_id);
3945 if (bus != NULL) {
3946 need_unlock = 1;
3947 CAM_SIM_LOCK(bus->sim);
3948 }
3949 }
3950 status = xpt_compile_path(path, periph, path_id, target_id, lun_id);
3951 if (need_unlock)
3952 CAM_SIM_UNLOCK(bus->sim);
3953 if (status != CAM_REQ_CMP) {
3954 kfree(path, M_CAMXPT);
3955 path = NULL;
3956 }
3957 *new_path_ptr = path;
3958 return (status);
3959}
3960
3961static cam_status
3962xpt_compile_path(struct cam_path *new_path, struct cam_periph *perph,
3963 path_id_t path_id, target_id_t target_id, lun_id_t lun_id)
3964{
3965 struct cam_eb *bus;
3966 struct cam_et *target;
3967 struct cam_ed *device;
3968 cam_status status;
3969
3970 status = CAM_REQ_CMP; /* Completed without error */
3971 target = NULL; /* Wildcarded */
3972 device = NULL; /* Wildcarded */
3973
3974 /*
3975 * We will potentially modify the EDT, so block interrupts
3976 * that may attempt to create cam paths.
3977 */
3978 bus = xpt_find_bus(path_id);
3979 if (bus == NULL) {
3980 status = CAM_PATH_INVALID;
3981 } else {
3982 target = xpt_find_target(bus, target_id);
3983 if (target == NULL) {
3984 /* Create one */
3985 struct cam_et *new_target;
3986
3987 new_target = xpt_alloc_target(bus, target_id);
3988 if (new_target == NULL) {
3989 status = CAM_RESRC_UNAVAIL;
3990 } else {
3991 target = new_target;
3992 }
3993 }
3994 if (target != NULL) {
3995 device = xpt_find_device(target, lun_id);
3996 if (device == NULL) {
3997 /* Create one */
3998 struct cam_ed *new_device;
3999
4000 new_device = xpt_alloc_device(bus,
4001 target,
4002 lun_id);
4003 if (new_device == NULL) {
4004 status = CAM_RESRC_UNAVAIL;
4005 } else {
4006 device = new_device;
4007 }
4008 }
4009 }
4010 }
4011
4012 /*
4013 * Only touch the user's data if we are successful.
4014 */
4015 if (status == CAM_REQ_CMP) {
4016 new_path->periph = perph;
4017 new_path->bus = bus;
4018 new_path->target = target;
4019 new_path->device = device;
4020 CAM_DEBUG(new_path, CAM_DEBUG_TRACE, ("xpt_compile_path\n"));
4021 } else {
4022 if (device != NULL)
4023 xpt_release_device(bus, target, device);
4024 if (target != NULL)
4025 xpt_release_target(bus, target);
4026 if (bus != NULL)
4027 xpt_release_bus(bus);
4028 }
4029 return (status);
4030}
4031
4032static void
4033xpt_release_path(struct cam_path *path)
4034{
4035 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_release_path\n"));
4036 if (path->device != NULL) {
4037 xpt_release_device(path->bus, path->target, path->device);
4038 path->device = NULL;
4039 }
4040 if (path->target != NULL) {
4041 xpt_release_target(path->bus, path->target);
4042 path->target = NULL;
4043 }
4044 if (path->bus != NULL) {
4045 xpt_release_bus(path->bus);
4046 path->bus = NULL;
4047 }
4048}
4049
4050void
4051xpt_free_path(struct cam_path *path)
4052{
4053 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_free_path\n"));
4054 xpt_release_path(path);
4055 kfree(path, M_CAMXPT);
4056}
4057
4058
4059/*
4060 * Return -1 for failure, 0 for exact match, 1 for match with wildcards
4061 * in path1, 2 for match with wildcards in path2.
4062 */
4063int
4064xpt_path_comp(struct cam_path *path1, struct cam_path *path2)
4065{
4066 int retval = 0;
4067
4068 if (path1->bus != path2->bus) {
4069 if (path1->bus->path_id == CAM_BUS_WILDCARD)
4070 retval = 1;
4071 else if (path2->bus->path_id == CAM_BUS_WILDCARD)
4072 retval = 2;
4073 else
4074 return (-1);
4075 }
4076 if (path1->target != path2->target) {
4077 if (path1->target->target_id == CAM_TARGET_WILDCARD) {
4078 if (retval == 0)
4079 retval = 1;
4080 } else if (path2->target->target_id == CAM_TARGET_WILDCARD)
4081 retval = 2;
4082 else
4083 return (-1);
4084 }
4085 if (path1->device != path2->device) {
4086 if (path1->device->lun_id == CAM_LUN_WILDCARD) {
4087 if (retval == 0)
4088 retval = 1;
4089 } else if (path2->device->lun_id == CAM_LUN_WILDCARD)
4090 retval = 2;
4091 else
4092 return (-1);
4093 }
4094 return (retval);
4095}
4096
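/*
 * Print a human readable description of the path (periph, bus, target and
 * lun) as a prefix for a subsequent message.
 */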
4097void
4098xpt_print_path(struct cam_path *path)
4099{
4100
4101 if (path == NULL)
4102 kprintf("(nopath): ");
4103 else {
4104 if (path->periph != NULL)
4105 kprintf("(%s%d:", path->periph->periph_name,
4106 path->periph->unit_number);
4107 else
4108 kprintf("(noperiph:");
4109
4110 if (path->bus != NULL)
4111 kprintf("%s%d:%d:", path->bus->sim->sim_name,
4112 path->bus->sim->unit_number,
4113 path->bus->sim->bus_id);
4114 else
4115 kprintf("nobus:");
4116
4117 if (path->target != NULL)
4118 kprintf("%d:", path->target->target_id);
4119 else
4120 kprintf("X:");
4121
4122 if (path->device != NULL)
4123 kprintf("%d): ", path->device->lun_id);
4124 else
4125 kprintf("X): ");
4126 }
4127}
4128
4129void
4130xpt_print(struct cam_path *path, const char *fmt, ...)
4131{
4132 __va_list ap;
4133 xpt_print_path(path);
4134 __va_start(ap, fmt);
4135 kvprintf(fmt, ap);
4136 __va_end(ap);
4137}
4138
4139int
4140xpt_path_string(struct cam_path *path, char *str, size_t str_len)
4141{
4142 struct sbuf sb;
4143
4144	if (path != NULL)
		sim_lock_assert_owned(path->bus->sim->lock);
4145
4146 sbuf_new(&sb, str, str_len, 0);
4147
4148 if (path == NULL)
4149 sbuf_printf(&sb, "(nopath): ");
4150 else {
4151 if (path->periph != NULL)
4152 sbuf_printf(&sb, "(%s%d:", path->periph->periph_name,
4153 path->periph->unit_number);
4154 else
4155 sbuf_printf(&sb, "(noperiph:");
4156
4157 if (path->bus != NULL)
4158 sbuf_printf(&sb, "%s%d:%d:", path->bus->sim->sim_name,
4159 path->bus->sim->unit_number,
4160 path->bus->sim->bus_id);
4161 else
4162 sbuf_printf(&sb, "nobus:");
4163
4164 if (path->target != NULL)
4165 sbuf_printf(&sb, "%d:", path->target->target_id);
4166 else
4167 sbuf_printf(&sb, "X:");
4168
4169 if (path->device != NULL)
4170 sbuf_printf(&sb, "%d): ", path->device->lun_id);
4171 else
4172 sbuf_printf(&sb, "X): ");
4173 }
4174 sbuf_finish(&sb);
4175
4176 return(sbuf_len(&sb));
4177}
4178
4179path_id_t
4180xpt_path_path_id(struct cam_path *path)
4181{
4182 sim_lock_assert_owned(path->bus->sim->lock);
4183
4184 return(path->bus->path_id);
4185}
4186
4187target_id_t
4188xpt_path_target_id(struct cam_path *path)
4189{
4190 sim_lock_assert_owned(path->bus->sim->lock);
4191
4192 if (path->target != NULL)
4193 return (path->target->target_id);
4194 else
4195 return (CAM_TARGET_WILDCARD);
4196}
4197
4198lun_id_t
4199xpt_path_lun_id(struct cam_path *path)
4200{
4201 sim_lock_assert_owned(path->bus->sim->lock);
4202
4203 if (path->device != NULL)
4204 return (path->device->lun_id);
4205 else
4206 return (CAM_LUN_WILDCARD);
4207}
4208
4209struct cam_sim *
4210xpt_path_sim(struct cam_path *path)
4211{
4212 return (path->bus->sim);
4213}
4214
4215struct cam_periph*
4216xpt_path_periph(struct cam_path *path)
4217{
4218 sim_lock_assert_owned(path->bus->sim->lock);
4219
4220 return (path->periph);
4221}
4222
4223/*
4224 * Release a CAM control block for the caller. Remit the cost of the structure
4225 * to the device referenced by the path.  If this device had no 'credits'
4226 * and peripheral drivers have registered async callbacks for this
4227 * notification, call them now.
4228 */
4229void
4230xpt_release_ccb(union ccb *free_ccb)
4231{
4232 struct cam_path *path;
4233 struct cam_ed *device;
4234 struct cam_eb *bus;
4235 struct cam_sim *sim;
4236
4237 CAM_DEBUG_PRINT(CAM_DEBUG_XPT, ("xpt_release_ccb\n"));
4238 path = free_ccb->ccb_h.path;
4239 device = path->device;
4240 bus = path->bus;
4241 sim = bus->sim;
4242
4243 sim_lock_assert_owned(sim->lock);
4244
4245 cam_ccbq_release_opening(&device->ccbq);
4246 if (sim->ccb_count > sim->max_ccbs) {
4247 xpt_free_ccb(free_ccb);
4248 sim->ccb_count--;
4249 } else if (sim == &cam_dead_sim) {
4250 xpt_free_ccb(free_ccb);
4251 } else {
4252 SLIST_INSERT_HEAD(&sim->ccb_freeq, &free_ccb->ccb_h,
4253 xpt_links.sle);
4254 }
4255 if (sim->devq == NULL) {
4256 return;
4257 }
4258 sim->devq->alloc_openings++;
4259 sim->devq->alloc_active--;
4260 /* XXX Turn this into an inline function - xpt_run_device?? */
4261 if ((device_is_alloc_queued(device) == 0)
4262 && (device->drvq.entries > 0)) {
4263 xpt_schedule_dev_allocq(bus, device);
4264 }
4265 if (dev_allocq_is_runnable(sim->devq))
4266 xpt_run_dev_allocq(bus);
4267}
4268
4269/* Functions accessed by SIM drivers */
4270
4271/*
4272 * A sim structure, listing the SIM entry points and instance
4273 * identification info, is passed to xpt_bus_register to hook the SIM
4274 * into the CAM framework.  xpt_bus_register creates a cam_eb entry
4275 * for this new bus, places it in the list of busses, and assigns
4276 * it a path_id.  The path_id may be influenced by "hard wiring"
4277 * information specified by the user.  Once interrupt services are
4278 * available, the bus will be probed.
4279 */
4280int32_t
4281xpt_bus_register(struct cam_sim *sim, u_int32_t bus)
4282{
4283 struct cam_eb *new_bus;
4284 struct cam_eb *old_bus;
4285 struct ccb_pathinq cpi;
4286
4287 sim_lock_assert_owned(sim->lock);
4288
4289 sim->bus_id = bus;
4290 new_bus = kmalloc(sizeof(*new_bus), M_CAMXPT, M_INTWAIT);
4291
4292 if (strcmp(sim->sim_name, "xpt") != 0) {
4293 sim->path_id =
4294 xptpathid(sim->sim_name, sim->unit_number, sim->bus_id);
4295 }
4296
4297 TAILQ_INIT(&new_bus->et_entries);
4298 new_bus->path_id = sim->path_id;
4299 new_bus->sim = sim;
4300 ++sim->refcount;
4301 timevalclear(&new_bus->last_reset);
4302 new_bus->flags = 0;
4303 new_bus->refcount = 1; /* Held until a bus_deregister event */
4304 new_bus->generation = 0;
4305 lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
4306 old_bus = TAILQ_FIRST(&xsoftc.xpt_busses);
4307 while (old_bus != NULL
4308 && old_bus->path_id < new_bus->path_id)
4309 old_bus = TAILQ_NEXT(old_bus, links);
4310 if (old_bus != NULL)
4311 TAILQ_INSERT_BEFORE(old_bus, new_bus, links);
4312 else
4313 TAILQ_INSERT_TAIL(&xsoftc.xpt_busses, new_bus, links);
4314 xsoftc.bus_generation++;
4315 lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);
4316
4317 /* Notify interested parties */
4318 if (sim->path_id != CAM_XPT_PATH_ID) {
4319 struct cam_path path;
4320
4321 xpt_compile_path(&path, /*periph*/NULL, sim->path_id,
4322 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
4323 xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
4324 cpi.ccb_h.func_code = XPT_PATH_INQ;
4325 xpt_action((union ccb *)&cpi);
4326 xpt_async(AC_PATH_REGISTERED, &path, &cpi);
4327 xpt_release_path(&path);
4328 }
4329 return (CAM_SUCCESS);
4330}
4331
4332/*
4333 * Deregister a bus. We must clean out all transactions pending on the bus.
4334 * This routine is typically called prior to cam_sim_free() (e.g. see
4335 * dev/usbmisc/umass/umass.c).
4336 */
4337int32_t
4338xpt_bus_deregister(path_id_t pathid)
4339{
4340 struct cam_path bus_path;
4341 struct cam_et *target;
4342 struct cam_ed *device;
4343 struct cam_ed_qinfo *qinfo;
4344 struct cam_devq *devq;
4345 struct cam_periph *periph;
4346 struct cam_sim *ccbsim;
4347 union ccb *work_ccb;
4348 cam_status status;
4349 int retries = 0;
4350
4351 status = xpt_compile_path(&bus_path, NULL, pathid,
4352 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
4353 if (status != CAM_REQ_CMP)
4354 return (status);
4355
4356 /*
4357 * This should clear out all pending requests and timeouts, but
4358 * the ccbs may be queued to a software interrupt.
4359 *
4360 * XXX AC_LOST_DEVICE does not precisely abort the pending requests,
4361 * and it really ought to.
4362 */
4363 xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
4364 xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);
4365
4366 /*
4367 * Mark the SIM as having been deregistered. This prevents
4368 * certain operations from re-queueing to it, stops new devices
4369 * from being added, etc.
4370 */
4371 devq = bus_path.bus->sim->devq;
4372 ccbsim = bus_path.bus->sim;
4373 ccbsim->flags |= CAM_SIM_DEREGISTERED;
4374
4375again:
4376 /*
4377 * Execute any pending operations now.
4378 */
4379 while ((qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->send_queue,
4380 CAMQ_HEAD)) != NULL ||
4381 (qinfo = (struct cam_ed_qinfo *)camq_remove(&devq->alloc_queue,
4382 CAMQ_HEAD)) != NULL) {
4383 do {
4384 device = qinfo->device;
4385 work_ccb = cam_ccbq_peek_ccb(&device->ccbq, CAMQ_HEAD);
4386 if (work_ccb != NULL) {
4387 devq->active_dev = device;
4388 cam_ccbq_remove_ccb(&device->ccbq, work_ccb);
4389 cam_ccbq_send_ccb(&device->ccbq, work_ccb);
4390 (*(ccbsim->sim_action))(ccbsim, work_ccb);
4391 }
4392
4393 periph = (struct cam_periph *)camq_remove(&device->drvq,
4394 CAMQ_HEAD);
4395 if (periph != NULL)
4396 xpt_schedule(periph, periph->pinfo.priority);
4397 } while (work_ccb != NULL || periph != NULL);
4398 }
4399
4400 /*
4401 * Make sure all completed CCBs are processed.
4402 */
4403 while (!TAILQ_EMPTY(&ccbsim->sim_doneq)) {
4404 camisr_runqueue(ccbsim);
4405 }
4406
4407 /*
4408	 * Check for requeues; reissue asyncs if necessary.
4409 */
4410 if (CAMQ_GET_HEAD(&devq->send_queue))
4411 kprintf("camq: devq send_queue still in use (%d entries)\n",
4412 devq->send_queue.entries);
4413 if (CAMQ_GET_HEAD(&devq->alloc_queue))
4414 kprintf("camq: devq alloc_queue still in use (%d entries)\n",
4415 devq->alloc_queue.entries);
4416 if (CAMQ_GET_HEAD(&devq->send_queue) ||
4417 CAMQ_GET_HEAD(&devq->alloc_queue)) {
4418 if (++retries < 5) {
4419 xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
4420 xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);
4421 goto again;
4422 }
4423 }
4424
4425 /*
4426 * Retarget the bus and all cached sim pointers to dead_sim.
4427 *
4428 * Various CAM subsystems may be holding on to targets, devices,
4429 * and/or peripherals and may attempt to use the sim pointer cached
4430 * in some of these structures during close.
4431 */
4432 bus_path.bus->sim = &cam_dead_sim;
4433 TAILQ_FOREACH(target, &bus_path.bus->et_entries, links) {
4434 TAILQ_FOREACH(device, &target->ed_entries, links) {
4435 device->sim = &cam_dead_sim;
4436 SLIST_FOREACH(periph, &device->periphs, periph_links) {
4437 periph->sim = &cam_dead_sim;
4438 }
4439 }
4440 }
4441
4442 /*
4443	 * Repeat the asyncs for the benefit of any new devices, such as
4444 * might be created from completed probes. Any new device
4445 * ops will run on dead_sim.
4446 *
4447 * XXX There are probably races :-(
4448 */
4449 CAM_SIM_LOCK(&cam_dead_sim);
4450 xpt_async(AC_LOST_DEVICE, &bus_path, NULL);
4451 xpt_async(AC_PATH_DEREGISTERED, &bus_path, NULL);
4452 CAM_SIM_UNLOCK(&cam_dead_sim);
4453
4454 /* Release the reference count held while registered. */
4455 xpt_release_bus(bus_path.bus);
4456 xpt_release_path(&bus_path);
4457
4458 /* Release the ref we got when the bus was registered */
4459 cam_sim_release(ccbsim, 0);
4460
4461 return (CAM_REQ_CMP);
4462}
4463
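/*
 * Return the lowest path id that is neither in use by a registered bus
 * nor wired down for a future bus via a "scbus" resource hint.
 */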
4464static path_id_t
4465xptnextfreepathid(void)
4466{
4467 struct cam_eb *bus;
4468 path_id_t pathid;
4469 char *strval;
4470
4471 pathid = 0;
4472 lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
4473 bus = TAILQ_FIRST(&xsoftc.xpt_busses);
4474retry:
4475 /* Find an unoccupied pathid */
4476 while (bus != NULL && bus->path_id <= pathid) {
4477 if (bus->path_id == pathid)
4478 pathid++;
4479 bus = TAILQ_NEXT(bus, links);
4480 }
4481 lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);
4482
4483 /*
4484 * Ensure that this pathid is not reserved for
4485 * a bus that may be registered in the future.
4486 */
4487 if (resource_string_value("scbus", pathid, "at", &strval) == 0) {
4488 ++pathid;
4489 /* Start the search over */
4490 lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
4491 goto retry;
4492 }
4493 return (pathid);
4494}
4495
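/*
 * Determine the path id to assign to a newly registered SIM, honoring any
 * "scbus" wiring hints in the kernel configuration.  If no hint matches,
 * fall back to the next free path id.
 */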
4496static path_id_t
4497xptpathid(const char *sim_name, int sim_unit, int sim_bus)
4498{
4499 path_id_t pathid;
4500 int i, dunit, val;
4501 char buf[32];
4502
4503 pathid = CAM_XPT_PATH_ID;
4504 ksnprintf(buf, sizeof(buf), "%s%d", sim_name, sim_unit);
4505 i = -1;
4506 while ((i = resource_query_string(i, "at", buf)) != -1) {
4507 if (strcmp(resource_query_name(i), "scbus")) {
4508 /* Avoid a bit of foot shooting. */
4509 continue;
4510 }
4511 dunit = resource_query_unit(i);
4512 if (dunit < 0) /* unwired?! */
4513 continue;
4514 if (resource_int_value("scbus", dunit, "bus", &val) == 0) {
4515 if (sim_bus == val) {
4516 pathid = dunit;
4517 break;
4518 }
4519 } else if (sim_bus == 0) {
4520 /* Unspecified matches bus 0 */
4521 pathid = dunit;
4522 break;
4523 } else {
4524 kprintf("Ambiguous scbus configuration for %s%d "
4525 "bus %d, cannot wire down. The kernel "
4526 "config entry for scbus%d should "
4527 "specify a controller bus.\n"
4528 "Scbus will be assigned dynamically.\n",
4529 sim_name, sim_unit, sim_bus, dunit);
4530 break;
4531 }
4532 }
4533
4534 if (pathid == CAM_XPT_PATH_ID)
4535 pathid = xptnextfreepathid();
4536 return (pathid);
4537}
4538
4539void
4540xpt_async(u_int32_t async_code, struct cam_path *path, void *async_arg)
4541{
4542 struct cam_eb *bus;
4543 struct cam_et *target, *next_target;
4544 struct cam_ed *device, *next_device;
4545
4546 sim_lock_assert_owned(path->bus->sim->lock);
4547
4548 CAM_DEBUG(path, CAM_DEBUG_TRACE, ("xpt_async\n"));
4549
4550 /*
4551 * Most async events come from a CAM interrupt context. In
4552 * a few cases, the error recovery code at the peripheral layer,
4553 * which may run from our SWI or a process context, may signal
4554 * deferred events with a call to xpt_async.
4555 */
4556
4557 bus = path->bus;
4558
4559 if (async_code == AC_BUS_RESET) {
4560 /* Update our notion of when the last reset occurred */
4561 microuptime(&bus->last_reset);
4562 }
4563
4564 for (target = TAILQ_FIRST(&bus->et_entries);
4565 target != NULL;
4566 target = next_target) {
4567
4568 next_target = TAILQ_NEXT(target, links);
4569
4570 if (path->target != target
4571 && path->target->target_id != CAM_TARGET_WILDCARD
4572 && target->target_id != CAM_TARGET_WILDCARD)
4573 continue;
4574
4575 if (async_code == AC_SENT_BDR) {
4576 /* Update our notion of when the last reset occurred */
4577 microuptime(&path->target->last_reset);
4578 }
4579
4580 for (device = TAILQ_FIRST(&target->ed_entries);
4581 device != NULL;
4582 device = next_device) {
4583
4584 next_device = TAILQ_NEXT(device, links);
4585
4586 if (path->device != device
4587 && path->device->lun_id != CAM_LUN_WILDCARD
4588 && device->lun_id != CAM_LUN_WILDCARD)
4589 continue;
4590
4591 xpt_dev_async(async_code, bus, target,
4592 device, async_arg);
4593
4594 xpt_async_bcast(&device->asyncs, async_code,
4595 path, async_arg);
4596 }
4597 }
4598
4599 /*
4600 * If this wasn't a fully wildcarded async, tell all
4601 * clients that want all async events.
4602 */
4603 if (bus != xpt_periph->path->bus)
4604 xpt_async_bcast(&xpt_periph->path->device->asyncs, async_code,
4605 path, async_arg);
4606}
4607
4608static void
4609xpt_async_bcast(struct async_list *async_head,
4610 u_int32_t async_code,
4611 struct cam_path *path, void *async_arg)
4612{
4613 struct async_node *cur_entry;
4614
4615 cur_entry = SLIST_FIRST(async_head);
4616 while (cur_entry != NULL) {
4617 struct async_node *next_entry;
4618 /*
4619 * Grab the next list entry before we call the current
4620 * entry's callback. This is because the callback function
4621 * can delete its async callback entry.
4622 */
4623 next_entry = SLIST_NEXT(cur_entry, links);
4624 if ((cur_entry->event_enable & async_code) != 0)
4625 cur_entry->callback(cur_entry->callback_arg,
4626 async_code, path,
4627 async_arg);
4628 cur_entry = next_entry;
4629 }
4630}
4631
4632/*
4633 * Handle any per-device event notifications that require action by the XPT.
4634 */
4635static void
4636xpt_dev_async(u_int32_t async_code, struct cam_eb *bus, struct cam_et *target,
4637 struct cam_ed *device, void *async_arg)
4638{
4639 cam_status status;
4640 struct cam_path newpath;
4641
4642 /*
4643 * We only need to handle events for real devices.
4644 */
4645 if (target->target_id == CAM_TARGET_WILDCARD
4646 || device->lun_id == CAM_LUN_WILDCARD)
4647 return;
4648
4649 /*
4650 * We need our own path with wildcards expanded to
4651 * handle certain types of events.
4652 */
4653 if ((async_code == AC_SENT_BDR)
4654 || (async_code == AC_BUS_RESET)
4655 || (async_code == AC_INQ_CHANGED))
4656 status = xpt_compile_path(&newpath, NULL,
4657 bus->path_id,
4658 target->target_id,
4659 device->lun_id);
4660 else
4661 status = CAM_REQ_CMP_ERR;
4662
4663 if (status == CAM_REQ_CMP) {
4664
4665 /*
4666 * Allow transfer negotiation to occur in a
4667 * tag free environment.
4668 */
4669 if (async_code == AC_SENT_BDR
4670 || async_code == AC_BUS_RESET)
4671 xpt_toggle_tags(&newpath);
4672
4673 if (async_code == AC_INQ_CHANGED) {
4674 /*
4675 * We've sent a start unit command, or
4676 * something similar to a device that
4677 * may have caused its inquiry data to
4678 * change. So we re-scan the device to
4679 * refresh the inquiry data for it.
4680 */
4681 xpt_scan_lun(newpath.periph, &newpath,
4682 CAM_EXPECT_INQ_CHANGE, NULL);
4683 }
4684 xpt_release_path(&newpath);
4685 } else if (async_code == AC_LOST_DEVICE) {
4686 /*
4687		 * When we lose a device, the device may be about to detach
4688		 * the sim; we have to clear out all pending timeouts and
4689		 * requests before that happens.  XXX it would be nice if
4690 * we could abort the requests pertaining to the device.
4691 */
4692 xpt_release_devq_timeout(device);
4693 if ((device->flags & CAM_DEV_UNCONFIGURED) == 0) {
4694 device->flags |= CAM_DEV_UNCONFIGURED;
4695 xpt_release_device(bus, target, device);
4696 }
4697 } else if (async_code == AC_TRANSFER_NEG) {
4698 struct ccb_trans_settings *settings;
4699
4700 settings = (struct ccb_trans_settings *)async_arg;
4701 xpt_set_transfer_settings(settings, device,
4702 /*async_update*/TRUE);
4703 }
4704}
4705
4706u_int32_t
4707xpt_freeze_devq(struct cam_path *path, u_int count)
4708{
4709 struct ccb_hdr *ccbh;
4710
4711 sim_lock_assert_owned(path->bus->sim->lock);
4712
4713 path->device->qfrozen_cnt += count;
4714
4715 /*
4716	 * Mark the last CCB in the queue as needing
4717	 * to be requeued if the driver hasn't
4718	 * changed its state yet.  This fixes a race
4719	 * where a ccb is just about to be queued to
4720	 * a controller driver when its interrupt routine
4721	 * freezes the queue.  To completely close the
4722	 * hole, controller drivers must check to see
4723	 * if a ccb's status is still CAM_REQ_INPROG
4724	 * just before they queue
4725	 * the CCB.  See ahc_action/ahc_freeze_devq for
4726	 * an example.
4727 */
4728 ccbh = TAILQ_LAST(&path->device->ccbq.active_ccbs, ccb_hdr_tailq);
4729 if (ccbh && ccbh->status == CAM_REQ_INPROG)
4730 ccbh->status = CAM_REQUEUE_REQ;
4731 return (path->device->qfrozen_cnt);
4732}
4733
4734u_int32_t
4735xpt_freeze_simq(struct cam_sim *sim, u_int count)
4736{
4737 sim_lock_assert_owned(sim->lock);
4738
4739 if (sim->devq == NULL)
4740 return(count);
4741 sim->devq->send_queue.qfrozen_cnt += count;
4742 if (sim->devq->active_dev != NULL) {
4743 struct ccb_hdr *ccbh;
4744
4745 ccbh = TAILQ_LAST(&sim->devq->active_dev->ccbq.active_ccbs,
4746 ccb_hdr_tailq);
4747 if (ccbh && ccbh->status == CAM_REQ_INPROG)
4748 ccbh->status = CAM_REQUEUE_REQ;
4749 }
4750 return (sim->devq->send_queue.qfrozen_cnt);
4751}
4752
4753/*
4754 * WARNING: most devices, especially USB/UMASS, may detach their sim early.
4755 * We ref-count the sim (and the bus only NULLs it out when the bus has been
4756 * freed, which is not the case here), but the device queue is also freed
4757 * (XXX), and we have to check for that here.
4758 *
4759 * XXX fixme: could we simply not null-out the device queue via
4760 * cam_sim_free()?
4761 */
4762static void
4763xpt_release_devq_timeout(void *arg)
4764{
4765 struct cam_ed *device;
4766
4767 device = (struct cam_ed *)arg;
4768
4769 xpt_release_devq_device(device, /*count*/1, /*run_queue*/TRUE);
4770}
4771
4772void
4773xpt_release_devq(struct cam_path *path, u_int count, int run_queue)
4774{
4775 sim_lock_assert_owned(path->bus->sim->lock);
4776
4777 xpt_release_devq_device(path->device, count, run_queue);
4778}
4779
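/*
 * Drop "count" freeze references on a device queue.  When the count reaches
 * zero, cancel any pending release timeout and, if requested, reschedule and
 * run the device so that any queued transactions are started.
 */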
4780static void
4781xpt_release_devq_device(struct cam_ed *dev, u_int count, int run_queue)
4782{
4783 int rundevq;
4784
4785 rundevq = 0;
4786
4787 if (dev->qfrozen_cnt > 0) {
4788
4789 count = (count > dev->qfrozen_cnt) ? dev->qfrozen_cnt : count;
4790 dev->qfrozen_cnt -= count;
4791 if (dev->qfrozen_cnt == 0) {
4792
4793 /*
4794 * No longer need to wait for a successful
4795 * command completion.
4796 */
4797 dev->flags &= ~CAM_DEV_REL_ON_COMPLETE;
4798
4799 /*
4800 * Remove any timeouts that might be scheduled
4801 * to release this queue.
4802 */
4803 if ((dev->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
4804 callout_stop(&dev->callout);
4805 dev->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
4806 }
4807
4808 /*
4809 * Now that we are unfrozen schedule the
4810 * device so any pending transactions are
4811 * run.
4812 */
4813 if ((dev->ccbq.queue.entries > 0)
4814 && (xpt_schedule_dev_sendq(dev->target->bus, dev))
4815 && (run_queue != 0)) {
4816 rundevq = 1;
4817 }
4818 }
4819 }
4820 if (rundevq != 0)
4821 xpt_run_dev_sendq(dev->target->bus);
4822}
4823
4824void
4825xpt_release_simq(struct cam_sim *sim, int run_queue)
4826{
4827 struct camq *sendq;
4828
4829 sim_lock_assert_owned(sim->lock);
4830
4831 if (sim->devq == NULL)
4832 return;
4833
4834 sendq = &(sim->devq->send_queue);
4835 if (sendq->qfrozen_cnt > 0) {
4836 sendq->qfrozen_cnt--;
4837 if (sendq->qfrozen_cnt == 0) {
4838 struct cam_eb *bus;
4839
4840 /*
4841 * If there is a timeout scheduled to release this
4842 * sim queue, remove it. The queue frozen count is
4843 * already at 0.
4844 */
4845 if ((sim->flags & CAM_SIM_REL_TIMEOUT_PENDING) != 0){
4846 callout_stop(&sim->callout);
4847 sim->flags &= ~CAM_SIM_REL_TIMEOUT_PENDING;
4848 }
4849 bus = xpt_find_bus(sim->path_id);
4850
4851 if (run_queue) {
4852 /*
4853 * Now that we are unfrozen run the send queue.
4854 */
4855 xpt_run_dev_sendq(bus);
4856 }
4857 xpt_release_bus(bus);
4858 }
4859 }
4860}
4861
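/*
 * Completion entry point called when a CCB finishes.  Queued-type CCBs are
 * placed on the owning SIM's done queue and the CAM software interrupt is
 * requested (unless the CCB was polled); immediate CCBs need no further
 * processing here.
 */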
4862void
4863xpt_done(union ccb *done_ccb)
4864{
4865 struct cam_sim *sim;
4866
4867 CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xpt_done\n"));
4868 if ((done_ccb->ccb_h.func_code & XPT_FC_QUEUED) != 0) {
4869 /*
4870		 * Queue up any of the "non-immediate" types of ccbs
4871		 * for handling by our SWI handler.
4872 */
4873 sim = done_ccb->ccb_h.path->bus->sim;
4874 switch (done_ccb->ccb_h.path->periph->type) {
4875 case CAM_PERIPH_BIO:
4876 spin_lock_wr(&sim->sim_spin);
4877 TAILQ_INSERT_TAIL(&sim->sim_doneq, &done_ccb->ccb_h,
4878 sim_links.tqe);
4879 done_ccb->ccb_h.pinfo.index = CAM_DONEQ_INDEX;
4880 spin_unlock_wr(&sim->sim_spin);
4881 if ((sim->flags & CAM_SIM_ON_DONEQ) == 0) {
4882 spin_lock_wr(&cam_simq_spin);
4883 if ((sim->flags & CAM_SIM_ON_DONEQ) == 0) {
4884 TAILQ_INSERT_TAIL(&cam_simq, sim,
4885 links);
4886 sim->flags |= CAM_SIM_ON_DONEQ;
4887 }
4888 spin_unlock_wr(&cam_simq_spin);
4889 }
4890 if ((done_ccb->ccb_h.flags & CAM_POLLED) == 0)
4891 setsoftcambio();
4892 break;
4893 default:
4894 panic("unknown periph type %d",
4895 done_ccb->ccb_h.path->periph->type);
4896 }
4897 }
4898}
4899
4900union ccb *
4901xpt_alloc_ccb(void)
4902{
4903 union ccb *new_ccb;
4904
4905 new_ccb = kmalloc(sizeof(*new_ccb), M_CAMXPT, M_INTWAIT | M_ZERO);
4906 return (new_ccb);
4907}
4908
4909void
4910xpt_free_ccb(union ccb *free_ccb)
4911{
4912 kfree(free_ccb, M_CAMXPT);
4913}
4914
4915
4916
4917/* Private XPT functions */
4918
4919/*
4920 * Get a CAM control block for the caller. Charge the structure to the device
4921 * referenced by the path.  If this device has no 'credits' then the
4922 * device already has the maximum number of outstanding operations under way
4923 * and we return NULL. If we don't have sufficient resources to allocate more
4924 * ccbs, we also return NULL.
4925 */
4926static union ccb *
4927xpt_get_ccb(struct cam_ed *device)
4928{
4929 union ccb *new_ccb;
4930 struct cam_sim *sim;
4931
4932 sim = device->sim;
4933 if ((new_ccb = (union ccb *)SLIST_FIRST(&sim->ccb_freeq)) == NULL) {
4934 new_ccb = xpt_alloc_ccb();
4935 if ((sim->flags & CAM_SIM_MPSAFE) == 0)
4936 callout_init(&new_ccb->ccb_h.timeout_ch);
4937 SLIST_INSERT_HEAD(&sim->ccb_freeq, &new_ccb->ccb_h,
4938 xpt_links.sle);
4939 sim->ccb_count++;
4940 }
4941 cam_ccbq_take_opening(&device->ccbq);
4942 SLIST_REMOVE_HEAD(&sim->ccb_freeq, xpt_links.sle);
4943 return (new_ccb);
4944}
4945
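/*
 * Drop a reference on a bus.  When the last reference is released and no
 * targets remain, the bus is removed from the topology list and freed.
 */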
4946static void
4947xpt_release_bus(struct cam_eb *bus)
4948{
4949
4950 if ((--bus->refcount == 0)
4951 && (TAILQ_FIRST(&bus->et_entries) == NULL)) {
4952 lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
4953 TAILQ_REMOVE(&xsoftc.xpt_busses, bus, links);
4954 xsoftc.bus_generation++;
4955 lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);
4956 kfree(bus, M_CAMXPT);
4957 }
4958}
4959
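/*
 * Allocate a new target on the given bus.  A reference is held on the
 * parent bus and the target is insertion-sorted into the bus's target
 * list by target id.
 */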
4960static struct cam_et *
4961xpt_alloc_target(struct cam_eb *bus, target_id_t target_id)
4962{
4963 struct cam_et *target;
4964 struct cam_et *cur_target;
4965
4966 target = kmalloc(sizeof(*target), M_CAMXPT, M_INTWAIT);
4967
4968 TAILQ_INIT(&target->ed_entries);
4969 target->bus = bus;
4970 target->target_id = target_id;
4971 target->refcount = 1;
4972 target->generation = 0;
4973 timevalclear(&target->last_reset);
4974 /*
4975 * Hold a reference to our parent bus so it
4976 * will not go away before we do.
4977 */
4978 bus->refcount++;
4979
4980 /* Insertion sort into our bus's target list */
4981 cur_target = TAILQ_FIRST(&bus->et_entries);
4982 while (cur_target != NULL && cur_target->target_id < target_id)
4983 cur_target = TAILQ_NEXT(cur_target, links);
4984
4985 if (cur_target != NULL) {
4986 TAILQ_INSERT_BEFORE(cur_target, target, links);
4987 } else {
4988 TAILQ_INSERT_TAIL(&bus->et_entries, target, links);
4989 }
4990 bus->generation++;
4991 return (target);
4992}
4993
4994static void
4995xpt_release_target(struct cam_eb *bus, struct cam_et *target)
4996{
4997 if (target->refcount == 1) {
4998 KKASSERT(TAILQ_FIRST(&target->ed_entries) == NULL);
4999 TAILQ_REMOVE(&bus->et_entries, target, links);
5000 bus->generation++;
5001 xpt_release_bus(bus);
5002 KKASSERT(target->refcount == 1);
5003 kfree(target, M_CAMXPT);
5004 } else {
5005 --target->refcount;
5006 }
5007}
5008
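/*
 * Allocate a new device (lun) under the given target.  The bus's device
 * queue is grown to make room, the per-device queues are initialized, a
 * reference is held on the parent target and the device is insertion-sorted
 * into the target's device list by lun id.  Returns NULL if the SIM is
 * being deregistered or resources are unavailable.
 */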
5009static struct cam_ed *
5010xpt_alloc_device(struct cam_eb *bus, struct cam_et *target, lun_id_t lun_id)
5011{
5012 struct cam_path path;
5013 struct cam_ed *device;
5014 struct cam_devq *devq;
5015 cam_status status;
5016
5017 /*
5018 * Disallow new devices while trying to deregister a sim
5019 */
5020 if (bus->sim->flags & CAM_SIM_DEREGISTERED)
5021 return (NULL);
5022
5023 /*
5024 * Make space for us in the device queue on our bus
5025 */
5026 devq = bus->sim->devq;
5027 if (devq == NULL)
5028 return(NULL);
5029 status = cam_devq_resize(devq, devq->alloc_queue.array_size + 1);
5030
5031 if (status != CAM_REQ_CMP) {
5032 device = NULL;
5033 } else {
5034 device = kmalloc(sizeof(*device), M_CAMXPT, M_INTWAIT);
5035 }
5036
5037 if (device != NULL) {
5038 struct cam_ed *cur_device;
5039
5040 cam_init_pinfo(&device->alloc_ccb_entry.pinfo);
5041 device->alloc_ccb_entry.device = device;
5042 cam_init_pinfo(&device->send_ccb_entry.pinfo);
5043 device->send_ccb_entry.device = device;
5044 device->target = target;
5045 device->lun_id = lun_id;
5046 device->sim = bus->sim;
5047 /* Initialize our queues */
5048 if (camq_init(&device->drvq, 0) != 0) {
5049 kfree(device, M_CAMXPT);
5050 return (NULL);
5051 }
5052 if (cam_ccbq_init(&device->ccbq,
5053 bus->sim->max_dev_openings) != 0) {
5054 camq_fini(&device->drvq);
5055 kfree(device, M_CAMXPT);
5056 return (NULL);
5057 }
5058 SLIST_INIT(&device->asyncs);
5059 SLIST_INIT(&device->periphs);
5060 device->generation = 0;
5061 device->owner = NULL;
5062 /*
5063 * Take the default quirk entry until we have inquiry
5064 * data and can determine a better quirk to use.
5065 */
5066 device->quirk = &xpt_quirk_table[xpt_quirk_table_size - 1];
5067 bzero(&device->inq_data, sizeof(device->inq_data));
5068 device->inq_flags = 0;
5069 device->queue_flags = 0;
5070 device->serial_num = NULL;
5071 device->serial_num_len = 0;
5072 device->qfrozen_cnt = 0;
5073 device->flags = CAM_DEV_UNCONFIGURED;
5074 device->tag_delay_count = 0;
5075 device->tag_saved_openings = 0;
5076 device->refcount = 1;
5077 callout_init(&device->callout);
5078
5079 /*
5080 * Hold a reference to our parent target so it
5081 * will not go away before we do.
5082 */
5083 target->refcount++;
5084
5085 /*
5086 * XXX should be limited by number of CCBs this bus can
5087 * do.
5088 */
5089 bus->sim->max_ccbs += device->ccbq.devq_openings;
5090 /* Insertion sort into our target's device list */
5091 cur_device = TAILQ_FIRST(&target->ed_entries);
5092 while (cur_device != NULL && cur_device->lun_id < lun_id)
5093 cur_device = TAILQ_NEXT(cur_device, links);
5094 if (cur_device != NULL) {
5095 TAILQ_INSERT_BEFORE(cur_device, device, links);
5096 } else {
5097 TAILQ_INSERT_TAIL(&target->ed_entries, device, links);
5098 }
5099 target->generation++;
5100 if (lun_id != CAM_LUN_WILDCARD) {
5101 xpt_compile_path(&path,
5102 NULL,
5103 bus->path_id,
5104 target->target_id,
5105 lun_id);
5106 xpt_devise_transport(&path);
5107 xpt_release_path(&path);
5108 }
5109 }
5110 return (device);
5111}
5112
5113static void
5114xpt_reference_device(struct cam_ed *device)
5115{
5116 ++device->refcount;
5117}
5118
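/*
 * Drop a reference on a device.  The last reference removes the device from
 * its target, shrinks the bus's device queue, tears down the per-device
 * queues and releases the reference held on the parent target.
 */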
5119static void
5120xpt_release_device(struct cam_eb *bus, struct cam_et *target,
5121 struct cam_ed *device)
5122{
5123 struct cam_devq *devq;
5124
5125 if (device->refcount == 1) {
5126 KKASSERT(device->flags & CAM_DEV_UNCONFIGURED);
5127
5128 if (device->alloc_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX
5129 || device->send_ccb_entry.pinfo.index != CAM_UNQUEUED_INDEX)
5130 panic("Removing device while still queued for ccbs");
5131
5132 if ((device->flags & CAM_DEV_REL_TIMEOUT_PENDING) != 0) {
5133 device->flags &= ~CAM_DEV_REL_TIMEOUT_PENDING;
5134 callout_stop(&device->callout);
5135 }
5136
5137 TAILQ_REMOVE(&target->ed_entries, device,links);
5138 target->generation++;
5139 bus->sim->max_ccbs -= device->ccbq.devq_openings;
5140 if ((devq = bus->sim->devq) != NULL) {
5141 /* Release our slot in the devq */
5142 cam_devq_resize(devq, devq->alloc_queue.array_size - 1);
5143 }
5144 camq_fini(&device->drvq);
5145 camq_fini(&device->ccbq.queue);
5146 xpt_release_target(bus, target);
5147 KKASSERT(device->refcount == 1);
5148 kfree(device, M_CAMXPT);
5149 } else {
5150 --device->refcount;
5151 }
5152}
5153
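/*
 * Resize the per-device ccb queue to "newopenings", updating the saved tag
 * openings for tagged devices and adjusting the SIM-wide ccb limit by the
 * difference.
 */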
5154static u_int32_t
5155xpt_dev_ccbq_resize(struct cam_path *path, int newopenings)
5156{
5157 int diff;
5158 int result;
5159 struct cam_ed *dev;
5160
5161 dev = path->device;
5162
5163 diff = newopenings - (dev->ccbq.dev_active + dev->ccbq.dev_openings);
5164 result = cam_ccbq_resize(&dev->ccbq, newopenings);
5165 if (result == CAM_REQ_CMP && (diff < 0)) {
5166 dev->flags |= CAM_DEV_RESIZE_QUEUE_NEEDED;
5167 }
5168 if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
5169 || (dev->inq_flags & SID_CmdQue) != 0)
5170 dev->tag_saved_openings = newopenings;
5171 /* Adjust the global limit */
5172 dev->sim->max_ccbs += diff;
5173 return (result);
5174}
5175
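/*
 * The xpt_find_* lookup routines below return the matching bus, target or
 * device with an additional reference held, or NULL if no match is found.
 */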
5176static struct cam_eb *
5177xpt_find_bus(path_id_t path_id)
5178{
5179 struct cam_eb *bus;
5180
5181 lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
5182 TAILQ_FOREACH(bus, &xsoftc.xpt_busses, links) {
5183 if (bus->path_id == path_id) {
5184 bus->refcount++;
5185 break;
5186 }
5187 }
5188 lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);
5189 return (bus);
5190}
5191
5192static struct cam_et *
5193xpt_find_target(struct cam_eb *bus, target_id_t target_id)
5194{
5195 struct cam_et *target;
5196
5197 TAILQ_FOREACH(target, &bus->et_entries, links) {
5198 if (target->target_id == target_id) {
5199 target->refcount++;
5200 break;
5201 }
5202 }
5203 return (target);
5204}
5205
5206static struct cam_ed *
5207xpt_find_device(struct cam_et *target, lun_id_t lun_id)
5208{
5209 struct cam_ed *device;
5210
5211 TAILQ_FOREACH(device, &target->ed_entries, links) {
5212 if (device->lun_id == lun_id) {
5213 device->refcount++;
5214 break;
5215 }
5216 }
5217 return (device);
5218}
5219
5220typedef struct {
5221 union ccb *request_ccb;
5222 struct ccb_pathinq *cpi;
5223 int counter;
5224} xpt_scan_bus_info;
5225
5226/*
5227 * To start a scan, request_ccb is an XPT_SCAN_BUS ccb.
5228 * As the scan progresses, xpt_scan_bus is used as the
5229 * completion callback function.
5230 */
5231static void
5232xpt_scan_bus(struct cam_periph *periph, union ccb *request_ccb)
5233{
5234 CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
5235 ("xpt_scan_bus\n"));
5236 switch (request_ccb->ccb_h.func_code) {
5237 case XPT_SCAN_BUS:
5238 {
5239 xpt_scan_bus_info *scan_info;
5240 union ccb *work_ccb;
5241 struct cam_path *path;
5242 u_int i;
5243 u_int max_target;
5244 u_int initiator_id;
5245
5246 /* Find out the characteristics of the bus */
5247 work_ccb = xpt_alloc_ccb();
5248 xpt_setup_ccb(&work_ccb->ccb_h, request_ccb->ccb_h.path,
5249 request_ccb->ccb_h.pinfo.priority);
5250 work_ccb->ccb_h.func_code = XPT_PATH_INQ;
5251 xpt_action(work_ccb);
5252 if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
5253 request_ccb->ccb_h.status = work_ccb->ccb_h.status;
5254 xpt_free_ccb(work_ccb);
5255 xpt_done(request_ccb);
5256 return;
5257 }
5258
5259 if ((work_ccb->cpi.hba_misc & PIM_NOINITIATOR) != 0) {
5260 /*
5261 * Can't scan the bus on an adapter that
5262 * cannot perform the initiator role.
5263 */
5264 request_ccb->ccb_h.status = CAM_REQ_CMP;
5265 xpt_free_ccb(work_ccb);
5266 xpt_done(request_ccb);
5267 return;
5268 }
5269
5270 /* Save some state for use while we probe for devices */
5271 scan_info = (xpt_scan_bus_info *)
5272 kmalloc(sizeof(xpt_scan_bus_info), M_CAMXPT, M_INTWAIT);
5273 scan_info->request_ccb = request_ccb;
5274 scan_info->cpi = &work_ccb->cpi;
5275
5276 /* Cache on our stack so we can work asynchronously */
5277 max_target = scan_info->cpi->max_target;
5278 initiator_id = scan_info->cpi->initiator_id;
5279
5280
5281 /*
5282 * We can scan all targets in parallel, or do it sequentially.
5283 */
5284 if (scan_info->cpi->hba_misc & PIM_SEQSCAN) {
5285 max_target = 0;
5286 scan_info->counter = 0;
5287 } else {
5288 scan_info->counter = scan_info->cpi->max_target + 1;
5289 if (scan_info->cpi->initiator_id < scan_info->counter) {
5290 scan_info->counter--;
5291 }
5292 }
5293
5294 for (i = 0; i <= max_target; i++) {
5295 cam_status status;
5296 if (i == initiator_id)
5297 continue;
5298
5299 status = xpt_create_path(&path, xpt_periph,
5300 request_ccb->ccb_h.path_id,
5301 i, 0);
5302 if (status != CAM_REQ_CMP) {
5303 kprintf("xpt_scan_bus: xpt_create_path failed"
5304 " with status %#x, bus scan halted\n",
5305 status);
5306 kfree(scan_info, M_CAMXPT);
5307 request_ccb->ccb_h.status = status;
5308 xpt_free_ccb(work_ccb);
5309 xpt_done(request_ccb);
5310 break;
5311 }
5312 work_ccb = xpt_alloc_ccb();
5313 xpt_setup_ccb(&work_ccb->ccb_h, path,
5314 request_ccb->ccb_h.pinfo.priority);
5315 work_ccb->ccb_h.func_code = XPT_SCAN_LUN;
5316 work_ccb->ccb_h.cbfcnp = xpt_scan_bus;
5317 work_ccb->ccb_h.ppriv_ptr0 = scan_info;
5318 work_ccb->crcn.flags = request_ccb->crcn.flags;
5319 xpt_action(work_ccb);
5320 }
5321 break;
5322 }
5323 case XPT_SCAN_LUN:
5324 {
5325 cam_status status;
5326 struct cam_path *path;
5327 xpt_scan_bus_info *scan_info;
5328 path_id_t path_id;
5329 target_id_t target_id;
5330 lun_id_t lun_id;
5331
5332 /* Reuse the same CCB to query if a device was really found */
5333 scan_info = (xpt_scan_bus_info *)request_ccb->ccb_h.ppriv_ptr0;
5334 xpt_setup_ccb(&request_ccb->ccb_h, request_ccb->ccb_h.path,
5335 request_ccb->ccb_h.pinfo.priority);
5336 request_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
5337
5338 path_id = request_ccb->ccb_h.path_id;
5339 target_id = request_ccb->ccb_h.target_id;
5340 lun_id = request_ccb->ccb_h.target_lun;
5341 xpt_action(request_ccb);
5342
5343 if (request_ccb->ccb_h.status != CAM_REQ_CMP) {
5344 struct cam_ed *device;
5345 struct cam_et *target;
5346 int phl;
5347
5348 /*
5349 * If we already probed lun 0 successfully, or
5350 * we have additional configured luns on this
5351			 * target that might have "gone away", go on to
5352 * the next lun.
5353 */
5354 target = request_ccb->ccb_h.path->target;
5355 /*
5356			 * We may touch devices that we don't
5357			 * hold references to, so ensure they
5358 * don't disappear out from under us.
5359 * The target above is referenced by the
5360 * path in the request ccb.
5361 */
5362 phl = 0;
5363 device = TAILQ_FIRST(&target->ed_entries);
5364 if (device != NULL) {
5365 phl = CAN_SRCH_HI_SPARSE(device);
5366 if (device->lun_id == 0)
5367 device = TAILQ_NEXT(device, links);
5368 }
5369 if ((lun_id != 0) || (device != NULL)) {
5370 if (lun_id < (CAM_SCSI2_MAXLUN-1) || phl)
5371 lun_id++;
5372 }
5373 } else {
5374 struct cam_ed *device;
5375
5376 device = request_ccb->ccb_h.path->device;
5377
5378 if ((device->quirk->quirks & CAM_QUIRK_NOLUNS) == 0) {
5379 /* Try the next lun */
5380 if (lun_id < (CAM_SCSI2_MAXLUN-1)
5381 || CAN_SRCH_HI_DENSE(device))
5382 lun_id++;
5383 }
5384 }
5385
5386 /*
5387		 * Free the current request path; we're done with it.
5388 */
5389 xpt_free_path(request_ccb->ccb_h.path);
5390
5391 /*
5392		 * Check to see if we should scan any further luns.
5393 */
5394 if (lun_id == request_ccb->ccb_h.target_lun
5395 || lun_id > scan_info->cpi->max_lun) {
5396 int done;
5397
5398 hop_again:
5399 done = 0;
5400 if (scan_info->cpi->hba_misc & PIM_SEQSCAN) {
5401 scan_info->counter++;
5402 if (scan_info->counter ==
5403 scan_info->cpi->initiator_id) {
5404 scan_info->counter++;
5405 }
5406 if (scan_info->counter >=
5407 scan_info->cpi->max_target+1) {
5408 done = 1;
5409 }
5410 } else {
5411 scan_info->counter--;
5412 if (scan_info->counter == 0) {
5413 done = 1;
5414 }
5415 }
5416 if (done) {
5417 xpt_free_ccb(request_ccb);
5418 xpt_free_ccb((union ccb *)scan_info->cpi);
5419 request_ccb = scan_info->request_ccb;
5420 kfree(scan_info, M_CAMXPT);
5421 request_ccb->ccb_h.status = CAM_REQ_CMP;
5422 xpt_done(request_ccb);
5423 break;
5424 }
5425
5426 if ((scan_info->cpi->hba_misc & PIM_SEQSCAN) == 0) {
5427 break;
5428 }
5429 status = xpt_create_path(&path, xpt_periph,
5430 scan_info->request_ccb->ccb_h.path_id,
5431 scan_info->counter, 0);
5432 if (status != CAM_REQ_CMP) {
5433 kprintf("xpt_scan_bus: xpt_create_path failed"
5434 " with status %#x, bus scan halted\n",
5435 status);
5436 xpt_free_ccb(request_ccb);
5437 xpt_free_ccb((union ccb *)scan_info->cpi);
5438 request_ccb = scan_info->request_ccb;
5439 kfree(scan_info, M_CAMXPT);
5440 request_ccb->ccb_h.status = status;
5441 xpt_done(request_ccb);
5442 break;
5443 }
5444 xpt_setup_ccb(&request_ccb->ccb_h, path,
5445 request_ccb->ccb_h.pinfo.priority);
5446 request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
5447 request_ccb->ccb_h.cbfcnp = xpt_scan_bus;
5448 request_ccb->ccb_h.ppriv_ptr0 = scan_info;
5449 request_ccb->crcn.flags =
5450 scan_info->request_ccb->crcn.flags;
5451 } else {
5452 status = xpt_create_path(&path, xpt_periph,
5453 path_id, target_id, lun_id);
5454 if (status != CAM_REQ_CMP) {
5455 kprintf("xpt_scan_bus: xpt_create_path failed "
5456 "with status %#x, halting LUN scan\n",
5457 status);
5458 goto hop_again;
5459 }
5460 xpt_setup_ccb(&request_ccb->ccb_h, path,
5461 request_ccb->ccb_h.pinfo.priority);
5462 request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
5463 request_ccb->ccb_h.cbfcnp = xpt_scan_bus;
5464 request_ccb->ccb_h.ppriv_ptr0 = scan_info;
5465 request_ccb->crcn.flags =
5466 scan_info->request_ccb->crcn.flags;
5467 }
5468 xpt_action(request_ccb);
5469 break;
5470 }
5471 default:
5472 break;
5473 }
5474}
5475
5476typedef enum {
5477 PROBE_TUR,
5478 PROBE_INQUIRY, /* this counts as DV0 for Basic Domain Validation */
5479 PROBE_FULL_INQUIRY,
5480 PROBE_MODE_SENSE,
5481 PROBE_SERIAL_NUM_0,
5482 PROBE_SERIAL_NUM_1,
5483 PROBE_TUR_FOR_NEGOTIATION,
5484 PROBE_INQUIRY_BASIC_DV1,
5485 PROBE_INQUIRY_BASIC_DV2,
5486 PROBE_DV_EXIT
5487} probe_action;
5488
5489typedef enum {
5490 PROBE_INQUIRY_CKSUM = 0x01,
5491 PROBE_SERIAL_CKSUM = 0x02,
5492 PROBE_NO_ANNOUNCE = 0x04
5493} probe_flags;
5494
5495typedef struct {
5496 TAILQ_HEAD(, ccb_hdr) request_ccbs;
5497 probe_action action;
5498 union ccb saved_ccb;
5499 probe_flags flags;
5500 MD5_CTX context;
5501 u_int8_t digest[16];
5502} probe_softc;
5503
5504static void
5505xpt_scan_lun(struct cam_periph *periph, struct cam_path *path,
5506 cam_flags flags, union ccb *request_ccb)
5507{
5508 struct ccb_pathinq cpi;
5509 cam_status status;
5510 struct cam_path *new_path;
5511 struct cam_periph *old_periph;
5512
5513 CAM_DEBUG(request_ccb->ccb_h.path, CAM_DEBUG_TRACE,
5514 ("xpt_scan_lun\n"));
5515
5516 xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
5517 cpi.ccb_h.func_code = XPT_PATH_INQ;
5518 xpt_action((union ccb *)&cpi);
5519
5520 if (cpi.ccb_h.status != CAM_REQ_CMP) {
5521 if (request_ccb != NULL) {
5522 request_ccb->ccb_h.status = cpi.ccb_h.status;
5523 xpt_done(request_ccb);
5524 }
5525 return;
5526 }
5527
5528 if ((cpi.hba_misc & PIM_NOINITIATOR) != 0) {
5529 /*
5530 * Can't scan the bus on an adapter that
5531 * cannot perform the initiator role.
5532 */
5533 if (request_ccb != NULL) {
5534 request_ccb->ccb_h.status = CAM_REQ_CMP;
5535 xpt_done(request_ccb);
5536 }
5537 return;
5538 }
5539
5540 if (request_ccb == NULL) {
5541 request_ccb = kmalloc(sizeof(union ccb), M_CAMXPT, M_INTWAIT);
5542 new_path = kmalloc(sizeof(*new_path), M_CAMXPT, M_INTWAIT);
5543 status = xpt_compile_path(new_path, xpt_periph,
5544 path->bus->path_id,
5545 path->target->target_id,
5546 path->device->lun_id);
5547
5548 if (status != CAM_REQ_CMP) {
5549 xpt_print(path, "xpt_scan_lun: can't compile path, "
5550 "can't continue\n");
5551 kfree(request_ccb, M_CAMXPT);
5552 kfree(new_path, M_CAMXPT);
5553 return;
5554 }
5555 xpt_setup_ccb(&request_ccb->ccb_h, new_path, /*priority*/ 1);
5556 request_ccb->ccb_h.cbfcnp = xptscandone;
5557 request_ccb->ccb_h.func_code = XPT_SCAN_LUN;
5558 request_ccb->crcn.flags = flags;
5559 }
5560
5561 if ((old_periph = cam_periph_find(path, "probe")) != NULL) {
5562 probe_softc *softc;
5563
5564 softc = (probe_softc *)old_periph->softc;
5565 TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
5566 periph_links.tqe);
5567 } else {
5568 status = cam_periph_alloc(proberegister, NULL, probecleanup,
5569 probestart, "probe",
5570 CAM_PERIPH_BIO,
5571 request_ccb->ccb_h.path, NULL, 0,
5572 request_ccb);
5573
5574 if (status != CAM_REQ_CMP) {
5575 xpt_print(path, "xpt_scan_lun: cam_alloc_periph "
5576 "returned an error, can't continue probe\n");
5577 request_ccb->ccb_h.status = status;
5578 xpt_done(request_ccb);
5579 }
5580 }
5581}
5582
5583static void
5584xptscandone(struct cam_periph *periph, union ccb *done_ccb)
5585{
5586 xpt_release_path(done_ccb->ccb_h.path);
5587 kfree(done_ccb->ccb_h.path, M_CAMXPT);
5588 kfree(done_ccb, M_CAMXPT);
5589}
5590
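/*
 * Register the probe peripheral for a device.  Allocates the probe softc,
 * queues the requesting CCB, waits out the bus settle delay after any bus
 * reset and schedules the first probe step.
 */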
5591static cam_status
5592proberegister(struct cam_periph *periph, void *arg)
5593{
5594 union ccb *request_ccb; /* CCB representing the probe request */
5595 cam_status status;
5596 probe_softc *softc;
5597
5598 request_ccb = (union ccb *)arg;
5599 if (periph == NULL) {
5600 kprintf("proberegister: periph was NULL!!\n");
5601 return(CAM_REQ_CMP_ERR);
5602 }
5603
5604 if (request_ccb == NULL) {
5605 kprintf("proberegister: no probe CCB, "
5606 "can't register device\n");
5607 return(CAM_REQ_CMP_ERR);
5608 }
5609
5610 softc = kmalloc(sizeof(*softc), M_CAMXPT, M_INTWAIT | M_ZERO);
5611 TAILQ_INIT(&softc->request_ccbs);
5612 TAILQ_INSERT_TAIL(&softc->request_ccbs, &request_ccb->ccb_h,
5613 periph_links.tqe);
5614 softc->flags = 0;
5615 periph->softc = softc;
5616 status = cam_periph_acquire(periph);
5617 if (status != CAM_REQ_CMP) {
5618 return (status);
5619 }
5620
5621
5622 /*
5623 * Ensure we've waited at least a bus settle
5624 * delay before attempting to probe the device.
5625 * For HBAs that don't do bus resets, this won't make a difference.
5626 */
5627 cam_periph_freeze_after_event(periph, &periph->path->bus->last_reset,
5628 scsi_delay);
5629 probeschedule(periph);
5630 return(CAM_REQ_CMP);
5631}
5632
5633static void
5634probeschedule(struct cam_periph *periph)
5635{
5636 struct ccb_pathinq cpi;
5637 union ccb *ccb;
5638 probe_softc *softc;
5639
5640 softc = (probe_softc *)periph->softc;
5641 ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
5642
5643 xpt_setup_ccb(&cpi.ccb_h, periph->path, /*priority*/1);
5644 cpi.ccb_h.func_code = XPT_PATH_INQ;
5645 xpt_action((union ccb *)&cpi);
5646
5647 /*
5648 * If a device has gone away and another device, or the same one,
5649 * is back in the same place, it should have a unit attention
5650 * condition pending. It will not report the unit attention in
5651 * response to an inquiry, which may leave invalid transfer
5652 * negotiations in effect. The TUR will reveal the unit attention
5653 * condition. Only send the TUR for lun 0, since some devices
5654 * will get confused by commands other than inquiry to non-existent
5655	 * luns.  If you think a device has gone away, start your scan from
5656	 * lun 0.  This will ensure that any bogus transfer settings are
5657 * invalidated.
5658 *
5659 * If we haven't seen the device before and the controller supports
5660 * some kind of transfer negotiation, negotiate with the first
5661 * sent command if no bus reset was performed at startup. This
5662 * ensures that the device is not confused by transfer negotiation
5663 * settings left over by loader or BIOS action.
5664 */
5665 if (((ccb->ccb_h.path->device->flags & CAM_DEV_UNCONFIGURED) == 0)
5666 && (ccb->ccb_h.target_lun == 0)) {
5667 softc->action = PROBE_TUR;
5668 } else if ((cpi.hba_inquiry & (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE)) != 0
5669 && (cpi.hba_misc & PIM_NOBUSRESET) != 0) {
5670 proberequestdefaultnegotiation(periph);
5671 softc->action = PROBE_INQUIRY;
5672 } else {
5673 softc->action = PROBE_INQUIRY;
5674 }
5675
5676 if (ccb->crcn.flags & CAM_EXPECT_INQ_CHANGE)
5677 softc->flags |= PROBE_NO_ANNOUNCE;
5678 else
5679 softc->flags &= ~PROBE_NO_ANNOUNCE;
5680
5681 xpt_schedule(periph, ccb->ccb_h.pinfo.priority);
5682}
5683
5684static void
5685probestart(struct cam_periph *periph, union ccb *start_ccb)
5686{
5687 /* Probe the device that our peripheral driver points to */
5688 struct ccb_scsiio *csio;
5689 probe_softc *softc;
5690
5691 CAM_DEBUG(start_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probestart\n"));
5692
5693 softc = (probe_softc *)periph->softc;
5694 csio = &start_ccb->csio;
5695
5696 switch (softc->action) {
5697 case PROBE_TUR:
5698 case PROBE_TUR_FOR_NEGOTIATION:
5699 case PROBE_DV_EXIT:
5700 {
5701 scsi_test_unit_ready(csio,
5702 /*retries*/4,
5703 probedone,
5704 MSG_SIMPLE_Q_TAG,
5705 SSD_FULL_SIZE,
5706 /*timeout*/60000);
5707 break;
5708 }
5709 case PROBE_INQUIRY:
5710 case PROBE_FULL_INQUIRY:
5711 case PROBE_INQUIRY_BASIC_DV1:
5712 case PROBE_INQUIRY_BASIC_DV2:
5713 {
5714 u_int inquiry_len;
5715 struct scsi_inquiry_data *inq_buf;
5716
5717 inq_buf = &periph->path->device->inq_data;
5718
5719 /*
5720 * If the device is currently configured, we calculate an
5721 * MD5 checksum of the inquiry data, and if the serial number
5722 * length is greater than 0, add the serial number data
5723 * into the checksum as well. Once the inquiry and the
5724 * serial number check finish, we attempt to figure out
5725 * whether we still have the same device.
5726 */
5727 if ((periph->path->device->flags & CAM_DEV_UNCONFIGURED) == 0) {
5728
5729 MD5Init(&softc->context);
5730 MD5Update(&softc->context, (unsigned char *)inq_buf,
5731 sizeof(struct scsi_inquiry_data));
5732 softc->flags |= PROBE_INQUIRY_CKSUM;
5733 if (periph->path->device->serial_num_len > 0) {
5734 MD5Update(&softc->context,
5735 periph->path->device->serial_num,
5736 periph->path->device->serial_num_len);
5737 softc->flags |= PROBE_SERIAL_CKSUM;
5738 }
5739 MD5Final(softc->digest, &softc->context);
5740 }
5741
5742 if (softc->action == PROBE_INQUIRY)
5743 inquiry_len = SHORT_INQUIRY_LENGTH;
5744 else
5745 inquiry_len = SID_ADDITIONAL_LENGTH(inq_buf);
5746
5747 /*
5748 * Some parallel SCSI devices fail to send an
5749 * ignore wide residue message when dealing with
5750 * odd length inquiry requests. Round up to be
5751 * safe.
5752 */
5753 inquiry_len = roundup2(inquiry_len, 2);
5754
5755 if (softc->action == PROBE_INQUIRY_BASIC_DV1
5756 || softc->action == PROBE_INQUIRY_BASIC_DV2) {
5757 inq_buf = kmalloc(inquiry_len, M_CAMXPT, M_INTWAIT);
5758 }
5759 scsi_inquiry(csio,
5760 /*retries*/4,
5761 probedone,
5762 MSG_SIMPLE_Q_TAG,
5763 (u_int8_t *)inq_buf,
5764 inquiry_len,
5765 /*evpd*/FALSE,
5766 /*page_code*/0,
5767 SSD_MIN_SIZE,
5768 /*timeout*/60 * 1000);
5769 break;
5770 }
5771 case PROBE_MODE_SENSE:
5772 {
5773 void *mode_buf;
5774 int mode_buf_len;
5775
5776 mode_buf_len = sizeof(struct scsi_mode_header_6)
5777 + sizeof(struct scsi_mode_blk_desc)
5778 + sizeof(struct scsi_control_page);
5779 mode_buf = kmalloc(mode_buf_len, M_CAMXPT, M_INTWAIT);
5780 scsi_mode_sense(csio,
5781 /*retries*/4,
5782 probedone,
5783 MSG_SIMPLE_Q_TAG,
5784 /*dbd*/FALSE,
5785 SMS_PAGE_CTRL_CURRENT,
5786 SMS_CONTROL_MODE_PAGE,
5787 mode_buf,
5788 mode_buf_len,
5789 SSD_FULL_SIZE,
5790 /*timeout*/60000);
5791 break;
5792 }
5793 case PROBE_SERIAL_NUM_0:
5794 {
5795 struct scsi_vpd_supported_page_list *vpd_list = NULL;
5796 struct cam_ed *device;
5797
5798 device = periph->path->device;
5799 if ((device->quirk->quirks & CAM_QUIRK_NOSERIAL) == 0) {
5800 vpd_list = kmalloc(sizeof(*vpd_list), M_CAMXPT,
5801 M_INTWAIT | M_ZERO);
5802 }
5803
5804 if (vpd_list != NULL) {
5805 scsi_inquiry(csio,
5806 /*retries*/4,
5807 probedone,
5808 MSG_SIMPLE_Q_TAG,
5809 (u_int8_t *)vpd_list,
5810 sizeof(*vpd_list),
5811 /*evpd*/TRUE,
5812 SVPD_SUPPORTED_PAGE_LIST,
5813 SSD_MIN_SIZE,
5814 /*timeout*/60 * 1000);
5815 break;
5816 }
5817 /*
5818 * We'll have to do without, let our probedone
5819 * routine finish up for us.
5820 */
5821 start_ccb->csio.data_ptr = NULL;
5822 probedone(periph, start_ccb);
5823 return;
5824 }
5825 case PROBE_SERIAL_NUM_1:
5826 {
5827 struct scsi_vpd_unit_serial_number *serial_buf;
5828 struct cam_ed* device;
5829
5830 serial_buf = NULL;
5831 device = periph->path->device;
5832 device->serial_num = NULL;
5833 device->serial_num_len = 0;
5834
5835 serial_buf = (struct scsi_vpd_unit_serial_number *)
5836 kmalloc(sizeof(*serial_buf), M_CAMXPT,
5837 M_INTWAIT | M_ZERO);
5838 scsi_inquiry(csio,
5839 /*retries*/4,
5840 probedone,
5841 MSG_SIMPLE_Q_TAG,
5842 (u_int8_t *)serial_buf,
5843 sizeof(*serial_buf),
5844 /*evpd*/TRUE,
5845 SVPD_UNIT_SERIAL_NUMBER,
5846 SSD_MIN_SIZE,
5847 /*timeout*/60 * 1000);
5848 break;
5849 }
5850 }
5851 xpt_action(start_ccb);
5852}
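/*
 * For reference, the probe state machine driven by probestart() and
 * probedone() normally advances through its actions in roughly this order
 * (steps are skipped depending on what the device reports):
 *
 *	PROBE_TUR -> PROBE_INQUIRY [-> PROBE_FULL_INQUIRY]
 *	    -> PROBE_MODE_SENSE (only if the inquiry data shows tagged
 *	       queueing support)
 *	    -> PROBE_SERIAL_NUM_0 -> PROBE_SERIAL_NUM_1 (the latter only if
 *	       the unit serial number VPD page is listed as supported)
 *	    -> PROBE_TUR_FOR_NEGOTIATION
 *	    [-> PROBE_INQUIRY_BASIC_DV1 -> PROBE_INQUIRY_BASIC_DV2
 *	        -> PROBE_DV_EXIT, for SPI devices undergoing domain
 *	        validation on lun 0]
 */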
5853
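/*
 * Fetch the user (default) transfer settings for this path and immediately
 * re-apply them as the current settings, which is how a newly discovered
 * device is pushed toward its initial transfer negotiation.
 */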
5854static void
5855proberequestdefaultnegotiation(struct cam_periph *periph)
5856{
5857 struct ccb_trans_settings cts;
5858
5859 xpt_setup_ccb(&cts.ccb_h, periph->path, /*priority*/1);
5860 cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
5861 cts.type = CTS_TYPE_USER_SETTINGS;
5862 xpt_action((union ccb *)&cts);
5863 if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
5864 return;
5865 }
5866 cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
5867 cts.type = CTS_TYPE_CURRENT_SETTINGS;
5868 xpt_action((union ccb *)&cts);
5869}
5870
5871/*
5872 * Backoff Negotiation Code: only pertinent for SPI devices.
5873 */
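/*
 * Step the synchronous transfer rate one notch slower (a larger period
 * factor) and apply it; once the period factor reaches its limit we fall
 * all the way back to async and set CAM_DEV_DV_HIT_BOTTOM so no further
 * attempts are made.  Returns 1 when a slower setting was successfully
 * applied (the caller should retry domain validation at that rate) and 0
 * when it cannot, or should not, back off any further.
 */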
5874static int
5875proberequestbackoff(struct cam_periph *periph, struct cam_ed *device)
5876{
5877 struct ccb_trans_settings cts;
5878 struct ccb_trans_settings_spi *spi;
5879
5880 memset(&cts, 0, sizeof (cts));
5881 xpt_setup_ccb(&cts.ccb_h, periph->path, /*priority*/1);
5882 cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
5883 cts.type = CTS_TYPE_CURRENT_SETTINGS;
5884 xpt_action((union ccb *)&cts);
5885 if ((cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
5886 if (bootverbose) {
5887 xpt_print(periph->path,
5888 "failed to get current device settings\n");
5889 }
5890 return (0);
5891 }
5892 if (cts.transport != XPORT_SPI) {
5893 if (bootverbose) {
5894 xpt_print(periph->path, "not SPI transport\n");
5895 }
5896 return (0);
5897 }
5898 spi = &cts.xport_specific.spi;
5899
5900 /*
5901 * We cannot renegotiate sync rate if we don't have one.
5902 */
5903 if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) {
5904 if (bootverbose) {
5905 xpt_print(periph->path, "no sync rate known\n");
5906 }
5907 return (0);
5908 }
5909
5910 /*
5911 * We'll assert that we don't have to touch PPR options- the
5912 * SIM will see what we do with period and offset and adjust
5913 * the PPR options as appropriate.
5914 */
5915
5916 /*
5917 * A sync rate with unknown or zero offset is nonsensical.
5918 * A sync period of zero means Async.
5919 */
5920 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0
5921 || spi->sync_offset == 0 || spi->sync_period == 0) {
5922 if (bootverbose) {
5923 xpt_print(periph->path, "no sync rate available\n");
5924 }
5925 return (0);
5926 }
5927
5928 if (device->flags & CAM_DEV_DV_HIT_BOTTOM) {
5929 CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
5930 ("hit async: giving up on DV\n"));
5931 return (0);
5932 }
5933
5934
5935 /*
5936 * Jump sync_period up by one, but stop at 5MHz and fall back to Async.
5937 * We don't try to remember 'last' settings to see if the SIM actually
5938 * gets into the speed we want to set. We check on the SIM telling
5939 * us that a requested speed is bad, but otherwise don't try and
5940 * check the speed due to the asynchronous and handshake nature
5941 * of speed setting.
5942 */
5943 spi->valid = CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET;
5944 for (;;) {
5945 spi->sync_period++;
5946 if (spi->sync_period >= 0xf) {
5947 spi->sync_period = 0;
5948 spi->sync_offset = 0;
5949 CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
5950 ("setting to async for DV\n"));
5951 /*
5952 * Once we hit async, we don't want to try
5953 * any more settings.
5954 */
5955 device->flags |= CAM_DEV_DV_HIT_BOTTOM;
5956 } else if (bootverbose) {
5957 CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
5958 ("DV: period 0x%x\n", spi->sync_period));
5959 kprintf("setting period to 0x%x\n", spi->sync_period);
5960 }
5961 cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
5962 cts.type = CTS_TYPE_CURRENT_SETTINGS;
5963 xpt_action((union ccb *)&cts);
5964 if ((cts.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
5965 break;
5966 }
5967 CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
5968 ("DV: failed to set period 0x%x\n", spi->sync_period));
5969 if (spi->sync_period == 0) {
5970 return (0);
5971 }
5972 }
5973 return (1);
5974}
5975
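/*
 * Completion handler for each step of the probe state machine.  On success
 * it records the results of the step (inquiry data, queueing flags, serial
 * number, checksums), advances softc->action and reschedules the probe
 * periph; on failure it lets cam_periph_error() decide whether to retry
 * (ERESTART) and releases any frozen device queue so the bus does not
 * stall.  When the final step completes, the original request CCB is
 * completed and, if no further probe requests are queued, the probe periph
 * is invalidated.
 */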
5976static void
5977probedone(struct cam_periph *periph, union ccb *done_ccb)
5978{
5979 probe_softc *softc;
5980 struct cam_path *path;
5981 u_int32_t priority;
5982
5983 CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("probedone\n"));
5984
5985 softc = (probe_softc *)periph->softc;
5986 path = done_ccb->ccb_h.path;
5987 priority = done_ccb->ccb_h.pinfo.priority;
5988
5989 switch (softc->action) {
5990 case PROBE_TUR:
5991 {
5992 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
5993
5994 if (cam_periph_error(done_ccb, 0,
5995 SF_NO_PRINT, NULL) == ERESTART)
5996 return;
5997 else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
5998 /* Don't wedge the queue */
5999 xpt_release_devq(done_ccb->ccb_h.path,
6000 /*count*/1,
6001 /*run_queue*/TRUE);
6002 }
6003 softc->action = PROBE_INQUIRY;
6004 xpt_release_ccb(done_ccb);
6005 xpt_schedule(periph, priority);
6006 return;
6007 }
6008 case PROBE_INQUIRY:
6009 case PROBE_FULL_INQUIRY:
6010 {
6011 if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
6012 struct scsi_inquiry_data *inq_buf;
6013 u_int8_t periph_qual;
6014
6015 path->device->flags |= CAM_DEV_INQUIRY_DATA_VALID;
6016 inq_buf = &path->device->inq_data;
6017
6018 periph_qual = SID_QUAL(inq_buf);
6019
6020 switch(periph_qual) {
6021 case SID_QUAL_LU_CONNECTED:
6022 {
6023 u_int8_t len;
6024
6025 /*
6026 * We conservatively request only
6027				 * SHORT_INQUIRY_LENGTH bytes of inquiry
6028 * information during our first try
6029 * at sending an INQUIRY. If the device
6030 * has more information to give,
6031 * perform a second request specifying
6032 * the amount of information the device
6033 * is willing to give.
6034 */
6035 len = inq_buf->additional_length
6036 + offsetof(struct scsi_inquiry_data,
6037 additional_length) + 1;
6038 if (softc->action == PROBE_INQUIRY
6039 && len > SHORT_INQUIRY_LENGTH) {
6040 softc->action = PROBE_FULL_INQUIRY;
6041 xpt_release_ccb(done_ccb);
6042 xpt_schedule(periph, priority);
6043 return;
6044 }
6045
6046 xpt_find_quirk(path->device);
6047
6048 xpt_devise_transport(path);
6049 if (INQ_DATA_TQ_ENABLED(inq_buf))
6050 softc->action = PROBE_MODE_SENSE;
6051 else
6052 softc->action = PROBE_SERIAL_NUM_0;
6053
6054 path->device->flags &= ~CAM_DEV_UNCONFIGURED;
6055 xpt_reference_device(path->device);
6056
6057 xpt_release_ccb(done_ccb);
6058 xpt_schedule(periph, priority);
6059 return;
6060 }
6061 default:
6062 break;
6063 }
6064 } else if (cam_periph_error(done_ccb, 0,
6065 done_ccb->ccb_h.target_lun > 0
6066 ? SF_RETRY_UA|SF_QUIET_IR
6067 : SF_RETRY_UA,
6068 &softc->saved_ccb) == ERESTART) {
6069 return;
6070 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
6071 /* Don't wedge the queue */
6072 xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
6073 /*run_queue*/TRUE);
6074 }
6075 /*
6076 * If we get to this point, we got an error status back
6077 * from the inquiry and the error status doesn't require
6078 * automatically retrying the command. Therefore, the
6079 * inquiry failed. If we had inquiry information before
6080 * for this device, but this latest inquiry command failed,
6081 * the device has probably gone away. If this device isn't
6082 * already marked unconfigured, notify the peripheral
6083 * drivers that this device is no more.
6084 */
6085 if ((path->device->flags & CAM_DEV_UNCONFIGURED) == 0) {
6086 /* Send the async notification. */
6087 xpt_async(AC_LOST_DEVICE, path, NULL);
6088 }
6089
6090 xpt_release_ccb(done_ccb);
6091 break;
6092 }
6093 case PROBE_MODE_SENSE:
6094 {
6095 struct ccb_scsiio *csio;
6096 struct scsi_mode_header_6 *mode_hdr;
6097
6098 csio = &done_ccb->csio;
6099 mode_hdr = (struct scsi_mode_header_6 *)csio->data_ptr;
6100 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
6101 struct scsi_control_page *page;
6102 u_int8_t *offset;
6103
6104 offset = ((u_int8_t *)&mode_hdr[1])
6105 + mode_hdr->blk_desc_len;
6106 page = (struct scsi_control_page *)offset;
6107 path->device->queue_flags = page->queue_flags;
6108 } else if (cam_periph_error(done_ccb, 0,
6109 SF_RETRY_UA|SF_NO_PRINT,
6110 &softc->saved_ccb) == ERESTART) {
6111 return;
6112 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
6113 /* Don't wedge the queue */
6114 xpt_release_devq(done_ccb->ccb_h.path,
6115 /*count*/1, /*run_queue*/TRUE);
6116 }
6117 xpt_release_ccb(done_ccb);
6118 kfree(mode_hdr, M_CAMXPT);
6119 softc->action = PROBE_SERIAL_NUM_0;
6120 xpt_schedule(periph, priority);
6121 return;
6122 }
6123 case PROBE_SERIAL_NUM_0:
6124 {
6125 struct ccb_scsiio *csio;
6126 struct scsi_vpd_supported_page_list *page_list;
6127 int length, serialnum_supported, i;
6128
6129 serialnum_supported = 0;
6130 csio = &done_ccb->csio;
6131 page_list =
6132 (struct scsi_vpd_supported_page_list *)csio->data_ptr;
6133
6134 if (page_list == NULL) {
6135 /*
6136 * Don't process the command as it was never sent
6137 */
6138 } else if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP
6139 && (page_list->length > 0)) {
6140 length = min(page_list->length,
6141 SVPD_SUPPORTED_PAGES_SIZE);
6142 for (i = 0; i < length; i++) {
6143 if (page_list->list[i] ==
6144 SVPD_UNIT_SERIAL_NUMBER) {
6145 serialnum_supported = 1;
6146 break;
6147 }
6148 }
6149 } else if (cam_periph_error(done_ccb, 0,
6150 SF_RETRY_UA|SF_NO_PRINT,
6151 &softc->saved_ccb) == ERESTART) {
6152 return;
6153 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
6154 /* Don't wedge the queue */
6155 xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
6156 /*run_queue*/TRUE);
6157 }
6158
6159 if (page_list != NULL)
6160			kfree(page_list, M_CAMXPT);
6161
6162 if (serialnum_supported) {
6163 xpt_release_ccb(done_ccb);
6164 softc->action = PROBE_SERIAL_NUM_1;
6165 xpt_schedule(periph, priority);
6166 return;
6167 }
6168 xpt_release_ccb(done_ccb);
6169 softc->action = PROBE_TUR_FOR_NEGOTIATION;
6170 xpt_schedule(periph, done_ccb->ccb_h.pinfo.priority);
6171 return;
6172 }
6173
6174 case PROBE_SERIAL_NUM_1:
6175 {
6176 struct ccb_scsiio *csio;
6177 struct scsi_vpd_unit_serial_number *serial_buf;
6178 u_int32_t priority;
6179 int changed;
6180 int have_serialnum;
6181
6182 changed = 1;
6183 have_serialnum = 0;
6184 csio = &done_ccb->csio;
6185 priority = done_ccb->ccb_h.pinfo.priority;
6186 serial_buf =
6187 (struct scsi_vpd_unit_serial_number *)csio->data_ptr;
6188
6189 /* Clean up from previous instance of this device */
6190 if (path->device->serial_num != NULL) {
6191 kfree(path->device->serial_num, M_CAMXPT);
6192 path->device->serial_num = NULL;
6193 path->device->serial_num_len = 0;
6194 }
6195
6196 if (serial_buf == NULL) {
6197 /*
6198 * Don't process the command as it was never sent
6199 */
6200 } else if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP
6201 && (serial_buf->length > 0)) {
6202
6203 have_serialnum = 1;
6204 path->device->serial_num =
6205 kmalloc((serial_buf->length + 1),
6206 M_CAMXPT, M_INTWAIT);
6207 bcopy(serial_buf->serial_num,
6208 path->device->serial_num,
6209 serial_buf->length);
6210 path->device->serial_num_len = serial_buf->length;
6211 path->device->serial_num[serial_buf->length] = '\0';
6212 } else if (cam_periph_error(done_ccb, 0,
6213 SF_RETRY_UA|SF_NO_PRINT,
6214 &softc->saved_ccb) == ERESTART) {
6215 return;
6216 } else if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
6217 /* Don't wedge the queue */
6218 xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
6219 /*run_queue*/TRUE);
6220 }
6221
6222 /*
6223 * Let's see if we have seen this device before.
6224 */
6225 if ((softc->flags & PROBE_INQUIRY_CKSUM) != 0) {
6226 MD5_CTX context;
6227 u_int8_t digest[16];
6228
6229 MD5Init(&context);
6230
6231 MD5Update(&context,
6232 (unsigned char *)&path->device->inq_data,
6233 sizeof(struct scsi_inquiry_data));
6234
6235 if (have_serialnum)
6236 MD5Update(&context, serial_buf->serial_num,
6237 serial_buf->length);
6238
6239 MD5Final(digest, &context);
6240 if (bcmp(softc->digest, digest, 16) == 0)
6241 changed = 0;
6242
6243 /*
6244 * XXX Do we need to do a TUR in order to ensure
6245 * that the device really hasn't changed???
6246 */
6247 if ((changed != 0)
6248 && ((softc->flags & PROBE_NO_ANNOUNCE) == 0))
6249 xpt_async(AC_LOST_DEVICE, path, NULL);
6250 }
6251 if (serial_buf != NULL)
6252 kfree(serial_buf, M_CAMXPT);
6253
6254 if (changed != 0) {
6255 /*
6256 * Now that we have all the necessary
6257 * information to safely perform transfer
6258 * negotiations... Controllers don't perform
6259 * any negotiation or tagged queuing until
6260 * after the first XPT_SET_TRAN_SETTINGS ccb is
6261 * received. So, on a new device, just retrieve
6262 * the user settings, and set them as the current
6263 * settings to set the device up.
6264 */
6265 proberequestdefaultnegotiation(periph);
6266 xpt_release_ccb(done_ccb);
6267
6268 /*
6269 * Perform a TUR to allow the controller to
6270 * perform any necessary transfer negotiation.
6271 */
6272 softc->action = PROBE_TUR_FOR_NEGOTIATION;
6273 xpt_schedule(periph, priority);
6274 return;
6275 }
6276 xpt_release_ccb(done_ccb);
6277 break;
6278 }
6279 case PROBE_TUR_FOR_NEGOTIATION:
6280 case PROBE_DV_EXIT:
6281 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
6282 /* Don't wedge the queue */
6283 xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
6284 /*run_queue*/TRUE);
6285 }
6286
6287 xpt_reference_device(path->device);
6288 /*
6289 * Do Domain Validation for lun 0 on devices that claim
6290 * to support Synchronous Transfer modes.
6291 */
6292 if (softc->action == PROBE_TUR_FOR_NEGOTIATION
6293 && done_ccb->ccb_h.target_lun == 0
6294 && (path->device->inq_data.flags & SID_Sync) != 0
6295 && (path->device->flags & CAM_DEV_IN_DV) == 0) {
6296 CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
6297 ("Begin Domain Validation\n"));
6298 path->device->flags |= CAM_DEV_IN_DV;
6299 xpt_release_ccb(done_ccb);
6300 softc->action = PROBE_INQUIRY_BASIC_DV1;
6301 xpt_schedule(periph, priority);
6302 return;
6303 }
6304 if (softc->action == PROBE_DV_EXIT) {
6305 CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
6306 ("Leave Domain Validation\n"));
6307 }
6308 path->device->flags &=
6309 ~(CAM_DEV_UNCONFIGURED|CAM_DEV_IN_DV|CAM_DEV_DV_HIT_BOTTOM);
6310 if ((softc->flags & PROBE_NO_ANNOUNCE) == 0) {
6311 /* Inform the XPT that a new device has been found */
6312 done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
6313 xpt_action(done_ccb);
6314 xpt_async(AC_FOUND_DEVICE, done_ccb->ccb_h.path,
6315 done_ccb);
6316 }
6317 xpt_release_ccb(done_ccb);
6318 break;
6319 case PROBE_INQUIRY_BASIC_DV1:
6320 case PROBE_INQUIRY_BASIC_DV2:
6321 {
6322 struct scsi_inquiry_data *nbuf;
6323 struct ccb_scsiio *csio;
6324
6325 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) {
6326 /* Don't wedge the queue */
6327 xpt_release_devq(done_ccb->ccb_h.path, /*count*/1,
6328 /*run_queue*/TRUE);
6329 }
6330 csio = &done_ccb->csio;
6331 nbuf = (struct scsi_inquiry_data *)csio->data_ptr;
6332 if (bcmp(nbuf, &path->device->inq_data, SHORT_INQUIRY_LENGTH)) {
6333 xpt_print(path,
6334 "inquiry data fails comparison at DV%d step\n",
6335 softc->action == PROBE_INQUIRY_BASIC_DV1 ? 1 : 2);
6336 if (proberequestbackoff(periph, path->device)) {
6337 path->device->flags &= ~CAM_DEV_IN_DV;
6338 softc->action = PROBE_TUR_FOR_NEGOTIATION;
6339 } else {
6340 /* give up */
6341 softc->action = PROBE_DV_EXIT;
6342 }
6343 kfree(nbuf, M_CAMXPT);
6344 xpt_release_ccb(done_ccb);
6345 xpt_schedule(periph, priority);
6346 return;
6347 }
6348 kfree(nbuf, M_CAMXPT);
6349 if (softc->action == PROBE_INQUIRY_BASIC_DV1) {
6350 softc->action = PROBE_INQUIRY_BASIC_DV2;
6351 xpt_release_ccb(done_ccb);
6352 xpt_schedule(periph, priority);
6353 return;
6354 }
6355 if (softc->action == PROBE_DV_EXIT) {
6356 CAM_DEBUG(periph->path, CAM_DEBUG_INFO,
6357 ("Leave Domain Validation Successfully\n"));
6358 }
6359 path->device->flags &=
6360 ~(CAM_DEV_UNCONFIGURED|CAM_DEV_IN_DV|CAM_DEV_DV_HIT_BOTTOM);
6361 if ((softc->flags & PROBE_NO_ANNOUNCE) == 0) {
6362 /* Inform the XPT that a new device has been found */
6363 done_ccb->ccb_h.func_code = XPT_GDEV_TYPE;
6364 xpt_action(done_ccb);
6365 xpt_async(AC_FOUND_DEVICE, done_ccb->ccb_h.path,
6366 done_ccb);
6367 }
6368 xpt_release_ccb(done_ccb);
6369 break;
6370 }
6371 }
6372 done_ccb = (union ccb *)TAILQ_FIRST(&softc->request_ccbs);
6373 TAILQ_REMOVE(&softc->request_ccbs, &done_ccb->ccb_h, periph_links.tqe);
6374 done_ccb->ccb_h.status = CAM_REQ_CMP;
6375 xpt_done(done_ccb);
6376 if (TAILQ_FIRST(&softc->request_ccbs) == NULL) {
6377 cam_periph_invalidate(periph);
6378 cam_periph_release(periph);
6379 } else {
6380 probeschedule(periph);
6381 }
6382}
6383
6384static void
6385probecleanup(struct cam_periph *periph)
6386{
6387 kfree(periph->softc, M_CAMXPT);
6388}
6389
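/*
 * Match this device's inquiry data against the static quirk table.  The
 * table is expected to terminate with a wildcard entry that matches any
 * device, so a NULL return from cam_quirkmatch() means the table is
 * malformed and is treated as a panic.
 */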
6390static void
6391xpt_find_quirk(struct cam_ed *device)
6392{
6393 caddr_t match;
6394
6395 match = cam_quirkmatch((caddr_t)&device->inq_data,
6396 (caddr_t)xpt_quirk_table,
6397 sizeof(xpt_quirk_table)/sizeof(*xpt_quirk_table),
6398 sizeof(*xpt_quirk_table), scsi_inquiry_match);
6399
6400 if (match == NULL)
6401 panic("xpt_find_quirk: device didn't match wildcard entry!!");
6402
6403 device->quirk = (struct xpt_quirk_entry *)match;
6404}
6405
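/*
 * Sysctl handler backing the cam_srch_hi tunable; anything other than 0 or
 * 1 is rejected with EINVAL.  (The sysctl node itself is presumably
 * declared elsewhere in this file.)
 */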
6406static int
6407sysctl_cam_search_luns(SYSCTL_HANDLER_ARGS)
6408{
6409 int error, bool;
6410
6411 bool = cam_srch_hi;
6412 error = sysctl_handle_int(oidp, &bool, 0, req);
6413 if (error != 0 || req->newptr == NULL)
6414 return (error);
6415 if (bool == 0 || bool == 1) {
6416 cam_srch_hi = bool;
6417 return (0);
6418 } else {
6419 return (EINVAL);
6420 }
6421}
6422
6423static void
6424xpt_devise_transport(struct cam_path *path)
6425{
6426 struct ccb_pathinq cpi;
6427 struct ccb_trans_settings cts;
6428 struct scsi_inquiry_data *inq_buf;
6429
6430 /* Get transport information from the SIM */
6431 xpt_setup_ccb(&cpi.ccb_h, path, /*priority*/1);
6432 cpi.ccb_h.func_code = XPT_PATH_INQ;
6433 xpt_action((union ccb *)&cpi);
6434
6435 inq_buf = NULL;
6436 if ((path->device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0)
6437 inq_buf = &path->device->inq_data;
6438 path->device->protocol = PROTO_SCSI;
6439 path->device->protocol_version =
6440 inq_buf != NULL ? SID_ANSI_REV(inq_buf) : cpi.protocol_version;
6441 path->device->transport = cpi.transport;
6442 path->device->transport_version = cpi.transport_version;
6443
6444 /*
6445 * Any device not using SPI3 features should
6446 * be considered SPI2 or lower.
6447 */
6448 if (inq_buf != NULL) {
6449 if (path->device->transport == XPORT_SPI
6450 && (inq_buf->spi3data & SID_SPI_MASK) == 0
6451 && path->device->transport_version > 2)
6452 path->device->transport_version = 2;
6453 } else {
6454 struct cam_ed* otherdev;
6455
6456 for (otherdev = TAILQ_FIRST(&path->target->ed_entries);
6457 otherdev != NULL;
6458 otherdev = TAILQ_NEXT(otherdev, links)) {
6459 if (otherdev != path->device)
6460 break;
6461 }
6462
6463 if (otherdev != NULL) {
6464 /*
6465 * Initially assume the same versioning as
6466 * prior luns for this target.
6467 */
6468 path->device->protocol_version =
6469 otherdev->protocol_version;
6470 path->device->transport_version =
6471 otherdev->transport_version;
6472 } else {
6473			/* Until we know better, opt for safety */
6474 path->device->protocol_version = 2;
6475 if (path->device->transport == XPORT_SPI)
6476 path->device->transport_version = 2;
6477 else
6478 path->device->transport_version = 0;
6479 }
6480 }
6481
6482 /*
6483 * XXX
6484 * For a device compliant with SPC-2 we should be able
6485 * to determine the transport version supported by
6486 * scrutinizing the version descriptors in the
6487 * inquiry buffer.
6488 */
6489
6490 /* Tell the controller what we think */
6491 xpt_setup_ccb(&cts.ccb_h, path, /*priority*/1);
6492 cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
6493 cts.type = CTS_TYPE_CURRENT_SETTINGS;
6494 cts.transport = path->device->transport;
6495 cts.transport_version = path->device->transport_version;
6496 cts.protocol = path->device->protocol;
6497 cts.protocol_version = path->device->protocol_version;
6498 cts.proto_specific.valid = 0;
6499 cts.xport_specific.valid = 0;
6500 xpt_action((union ccb *)&cts);
6501}
6502
6503static void
6504xpt_set_transfer_settings(struct ccb_trans_settings *cts, struct cam_ed *device,
6505 int async_update)
6506{
6507 struct ccb_pathinq cpi;
6508 struct ccb_trans_settings cur_cts;
6509 struct ccb_trans_settings_scsi *scsi;
6510 struct ccb_trans_settings_scsi *cur_scsi;
6511 struct cam_sim *sim;
6512 struct scsi_inquiry_data *inq_data;
6513
6514 if (device == NULL) {
6515 cts->ccb_h.status = CAM_PATH_INVALID;
6516 xpt_done((union ccb *)cts);
6517 return;
6518 }
6519
6520 if (cts->protocol == PROTO_UNKNOWN
6521 || cts->protocol == PROTO_UNSPECIFIED) {
6522 cts->protocol = device->protocol;
6523 cts->protocol_version = device->protocol_version;
6524 }
6525
6526 if (cts->protocol_version == PROTO_VERSION_UNKNOWN
6527 || cts->protocol_version == PROTO_VERSION_UNSPECIFIED)
6528 cts->protocol_version = device->protocol_version;
6529
6530 if (cts->protocol != device->protocol) {
6531 xpt_print(cts->ccb_h.path, "Uninitialized Protocol %x:%x?\n",
6532 cts->protocol, device->protocol);
6533 cts->protocol = device->protocol;
6534 }
6535
6536 if (cts->protocol_version > device->protocol_version) {
6537 if (bootverbose) {
6538			xpt_print(cts->ccb_h.path, "Down revving Protocol "
6539 "Version from %d to %d?\n", cts->protocol_version,
6540 device->protocol_version);
6541 }
6542 cts->protocol_version = device->protocol_version;
6543 }
6544
6545 if (cts->transport == XPORT_UNKNOWN
6546 || cts->transport == XPORT_UNSPECIFIED) {
6547 cts->transport = device->transport;
6548 cts->transport_version = device->transport_version;
6549 }
6550
6551 if (cts->transport_version == XPORT_VERSION_UNKNOWN
6552 || cts->transport_version == XPORT_VERSION_UNSPECIFIED)
6553 cts->transport_version = device->transport_version;
6554
6555 if (cts->transport != device->transport) {
6556 xpt_print(cts->ccb_h.path, "Uninitialized Transport %x:%x?\n",
6557 cts->transport, device->transport);
6558 cts->transport = device->transport;
6559 }
6560
6561 if (cts->transport_version > device->transport_version) {
6562 if (bootverbose) {
6563			xpt_print(cts->ccb_h.path, "Down revving Transport "
6564 "Version from %d to %d?\n", cts->transport_version,
6565 device->transport_version);
6566 }
6567 cts->transport_version = device->transport_version;
6568 }
6569
6570 sim = cts->ccb_h.path->bus->sim;
6571
6572 /*
6573 * Nothing more of interest to do unless
6574 * this is a device connected via the
6575 * SCSI protocol.
6576 */
6577 if (cts->protocol != PROTO_SCSI) {
6578 if (async_update == FALSE)
6579 (*(sim->sim_action))(sim, (union ccb *)cts);
6580 return;
6581 }
6582
6583 inq_data = &device->inq_data;
6584 scsi = &cts->proto_specific.scsi;
6585 xpt_setup_ccb(&cpi.ccb_h, cts->ccb_h.path, /*priority*/1);
6586 cpi.ccb_h.func_code = XPT_PATH_INQ;
6587 xpt_action((union ccb *)&cpi);
6588
6589 /* SCSI specific sanity checking */
6590 if ((cpi.hba_inquiry & PI_TAG_ABLE) == 0
6591 || (INQ_DATA_TQ_ENABLED(inq_data)) == 0
6592 || (device->queue_flags & SCP_QUEUE_DQUE) != 0
6593 || (device->quirk->mintags == 0)) {
6594 /*
6595 * Can't tag on hardware that doesn't support tags,
6596 * doesn't have it enabled, or has broken tag support.
6597 */
6598 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
6599 }
6600
6601 if (async_update == FALSE) {
6602 /*
6603 * Perform sanity checking against what the
6604 * controller and device can do.
6605 */
6606 xpt_setup_ccb(&cur_cts.ccb_h, cts->ccb_h.path, /*priority*/1);
6607 cur_cts.ccb_h.func_code = XPT_GET_TRAN_SETTINGS;
6608 cur_cts.type = cts->type;
6609 xpt_action((union ccb *)&cur_cts);
6610 if ((cur_cts.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
6611 return;
6612 }
6613 cur_scsi = &cur_cts.proto_specific.scsi;
6614 if ((scsi->valid & CTS_SCSI_VALID_TQ) == 0) {
6615 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
6616 scsi->flags |= cur_scsi->flags & CTS_SCSI_FLAGS_TAG_ENB;
6617 }
6618 if ((cur_scsi->valid & CTS_SCSI_VALID_TQ) == 0)
6619 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
6620 }
6621
6622 /* SPI specific sanity checking */
6623 if (cts->transport == XPORT_SPI && async_update == FALSE) {
6624 u_int spi3caps;
6625 struct ccb_trans_settings_spi *spi;
6626 struct ccb_trans_settings_spi *cur_spi;
6627
6628 spi = &cts->xport_specific.spi;
6629
6630 cur_spi = &cur_cts.xport_specific.spi;
6631
6632 /* Fill in any gaps in what the user gave us */
6633 if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
6634 spi->sync_period = cur_spi->sync_period;
6635 if ((cur_spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
6636 spi->sync_period = 0;
6637 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
6638 spi->sync_offset = cur_spi->sync_offset;
6639 if ((cur_spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
6640 spi->sync_offset = 0;
6641 if ((spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0)
6642 spi->ppr_options = cur_spi->ppr_options;
6643 if ((cur_spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0)
6644 spi->ppr_options = 0;
6645 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0)
6646 spi->bus_width = cur_spi->bus_width;
6647 if ((cur_spi->valid & CTS_SPI_VALID_BUS_WIDTH) == 0)
6648 spi->bus_width = 0;
6649 if ((spi->valid & CTS_SPI_VALID_DISC) == 0) {
6650 spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
6651 spi->flags |= cur_spi->flags & CTS_SPI_FLAGS_DISC_ENB;
6652 }
6653 if ((cur_spi->valid & CTS_SPI_VALID_DISC) == 0)
6654 spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
6655 if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
6656 && (inq_data->flags & SID_Sync) == 0
6657 && cts->type == CTS_TYPE_CURRENT_SETTINGS)
6658 || ((cpi.hba_inquiry & PI_SDTR_ABLE) == 0)
6659 || (spi->sync_offset == 0)
6660 || (spi->sync_period == 0)) {
6661 /* Force async */
6662 spi->sync_period = 0;
6663 spi->sync_offset = 0;
6664 }
6665
6666 switch (spi->bus_width) {
6667 case MSG_EXT_WDTR_BUS_32_BIT:
6668 if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
6669 || (inq_data->flags & SID_WBus32) != 0
6670 || cts->type == CTS_TYPE_USER_SETTINGS)
6671 && (cpi.hba_inquiry & PI_WIDE_32) != 0)
6672 break;
6673 /* Fall Through to 16-bit */
6674 case MSG_EXT_WDTR_BUS_16_BIT:
6675 if (((device->flags & CAM_DEV_INQUIRY_DATA_VALID) == 0
6676 || (inq_data->flags & SID_WBus16) != 0
6677 || cts->type == CTS_TYPE_USER_SETTINGS)
6678 && (cpi.hba_inquiry & PI_WIDE_16) != 0) {
6679 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
6680 break;
6681 }
6682 /* Fall Through to 8-bit */
6683 default: /* New bus width?? */
6684 case MSG_EXT_WDTR_BUS_8_BIT:
6685 /* All targets can do this */
6686 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
6687 break;
6688 }
6689
6690 spi3caps = cpi.xport_specific.spi.ppr_options;
6691 if ((device->flags & CAM_DEV_INQUIRY_DATA_VALID) != 0
6692 && cts->type == CTS_TYPE_CURRENT_SETTINGS)
6693 spi3caps &= inq_data->spi3data;
6694
6695 if ((spi3caps & SID_SPI_CLOCK_DT) == 0)
6696 spi->ppr_options &= ~MSG_EXT_PPR_DT_REQ;
6697
6698 if ((spi3caps & SID_SPI_IUS) == 0)
6699 spi->ppr_options &= ~MSG_EXT_PPR_IU_REQ;
6700
6701 if ((spi3caps & SID_SPI_QAS) == 0)
6702 spi->ppr_options &= ~MSG_EXT_PPR_QAS_REQ;
6703
6704		/* No PPR options are allowed unless we are wide */
6705 if (spi->bus_width == 0)
6706 spi->ppr_options = 0;
6707
6708 if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) == 0) {
6709 /*
6710 * Can't tag queue without disconnection.
6711 */
6712 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
6713 scsi->valid |= CTS_SCSI_VALID_TQ;
6714 }
6715
6716 /*
6717 * If we are currently performing tagged transactions to
6718 * this device and want to change its negotiation parameters,
6719 * go non-tagged for a bit to give the controller a chance to
6720 * negotiate unhampered by tag messages.
6721 */
6722 if (cts->type == CTS_TYPE_CURRENT_SETTINGS
6723 && (device->inq_flags & SID_CmdQue) != 0
6724 && (scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0
6725 && (spi->flags & (CTS_SPI_VALID_SYNC_RATE|
6726 CTS_SPI_VALID_SYNC_OFFSET|
6727 CTS_SPI_VALID_BUS_WIDTH)) != 0)
6728 xpt_toggle_tags(cts->ccb_h.path);
6729 }
6730
6731 if (cts->type == CTS_TYPE_CURRENT_SETTINGS
6732 && (scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
6733 int device_tagenb;
6734
6735 /*
6736 * If we are transitioning from tags to no-tags or
6737 * vice-versa, we need to carefully freeze and restart
6738 * the queue so that we don't overlap tagged and non-tagged
6739 * commands. We also temporarily stop tags if there is
6740 * a change in transfer negotiation settings to allow
6741 * "tag-less" negotiation.
6742 */
6743 if ((device->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
6744 || (device->inq_flags & SID_CmdQue) != 0)
6745 device_tagenb = TRUE;
6746 else
6747 device_tagenb = FALSE;
6748
6749 if (((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0
6750 && device_tagenb == FALSE)
6751 || ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) == 0
6752 && device_tagenb == TRUE)) {
6753
6754 if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) {
6755 /*
6756 * Delay change to use tags until after a
6757 * few commands have gone to this device so
6758 * the controller has time to perform transfer
6759 * negotiations without tagged messages getting
6760 * in the way.
6761 */
6762 device->tag_delay_count = CAM_TAG_DELAY_COUNT;
6763 device->flags |= CAM_DEV_TAG_AFTER_COUNT;
6764 } else {
6765 struct ccb_relsim crs;
6766
6767 xpt_freeze_devq(cts->ccb_h.path, /*count*/1);
6768 device->inq_flags &= ~SID_CmdQue;
6769 xpt_dev_ccbq_resize(cts->ccb_h.path,
6770 sim->max_dev_openings);
6771 device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
6772 device->tag_delay_count = 0;
6773
6774 xpt_setup_ccb(&crs.ccb_h, cts->ccb_h.path,
6775 /*priority*/1);
6776 crs.ccb_h.func_code = XPT_REL_SIMQ;
6777 crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
6778 crs.openings
6779 = crs.release_timeout
6780 = crs.qfrozen_cnt
6781 = 0;
6782 xpt_action((union ccb *)&crs);
6783 }
6784 }
6785 }
6786 if (async_update == FALSE)
6787 (*(sim->sim_action))(sim, (union ccb *)cts);
6788}
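/*
 * A minimal sketch, for illustration only, of how a caller typically asks
 * for a transfer-settings change through xpt_action() (this loosely
 * mirrors xpt_toggle_tags() below); fields left out of the valid masks are
 * back-filled above from the device's current state:
 *
 *	struct ccb_trans_settings cts;
 *
 *	memset(&cts, 0, sizeof(cts));
 *	xpt_setup_ccb(&cts.ccb_h, path, 1);
 *	cts.ccb_h.func_code = XPT_SET_TRAN_SETTINGS;
 *	cts.type = CTS_TYPE_CURRENT_SETTINGS;
 *	cts.protocol = PROTO_SCSI;
 *	cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
 *	cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
 *	xpt_action((union ccb *)&cts);
 */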
6789
6790static void
6791xpt_toggle_tags(struct cam_path *path)
6792{
6793 struct cam_ed *dev;
6794
6795 /*
6796 * Give controllers a chance to renegotiate
6797 * before starting tag operations. We
6798 * "toggle" tagged queuing off then on
6799 * which causes the tag enable command delay
6800 * counter to come into effect.
6801 */
6802 dev = path->device;
6803 if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
6804 || ((dev->inq_flags & SID_CmdQue) != 0
6805 && (dev->inq_flags & (SID_Sync|SID_WBus16|SID_WBus32)) != 0)) {
6806 struct ccb_trans_settings cts;
6807
6808 xpt_setup_ccb(&cts.ccb_h, path, 1);
6809 cts.protocol = PROTO_SCSI;
6810 cts.protocol_version = PROTO_VERSION_UNSPECIFIED;
6811 cts.transport = XPORT_UNSPECIFIED;
6812 cts.transport_version = XPORT_VERSION_UNSPECIFIED;
6813 cts.proto_specific.scsi.flags = 0;
6814 cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
6815 xpt_set_transfer_settings(&cts, path->device,
6816 /*async_update*/TRUE);
6817 cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
6818 xpt_set_transfer_settings(&cts, path->device,
6819 /*async_update*/TRUE);
6820 }
6821}
6822
6823static void
6824xpt_start_tags(struct cam_path *path)
6825{
6826 struct ccb_relsim crs;
6827 struct cam_ed *device;
6828 struct cam_sim *sim;
6829 int newopenings;
6830
6831 device = path->device;
6832 sim = path->bus->sim;
6833 device->flags &= ~CAM_DEV_TAG_AFTER_COUNT;
6834 xpt_freeze_devq(path, /*count*/1);
6835 device->inq_flags |= SID_CmdQue;
6836 if (device->tag_saved_openings != 0)
6837 newopenings = device->tag_saved_openings;
6838 else
6839 newopenings = min(device->quirk->maxtags,
6840 sim->max_tagged_dev_openings);
6841 xpt_dev_ccbq_resize(path, newopenings);
6842 xpt_setup_ccb(&crs.ccb_h, path, /*priority*/1);
6843 crs.ccb_h.func_code = XPT_REL_SIMQ;
6844 crs.release_flags = RELSIM_RELEASE_AFTER_QEMPTY;
6845 crs.openings
6846 = crs.release_timeout
6847 = crs.qfrozen_cnt
6848 = 0;
6849 xpt_action((union ccb *)&crs);
6850}
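/*
 * Both xpt_start_tags() and the tag-disable path in
 * xpt_set_transfer_settings() follow the same pattern: freeze the device
 * queue, resize the ccb queue to the new opening count, and then issue
 * XPT_REL_SIMQ with RELSIM_RELEASE_AFTER_QEMPTY so the frozen queue is
 * released only after the outstanding commands drain.  This keeps tagged
 * and untagged commands from overlapping during the transition.
 */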
6851
6852static int busses_to_config;
6853static int busses_to_reset;
6854
6855static int
6856xptconfigbuscountfunc(struct cam_eb *bus, void *arg)
6857{
6858
6859 sim_lock_assert_owned(bus->sim->lock);
6860
6861 if (bus->path_id != CAM_XPT_PATH_ID) {
6862 struct cam_path path;
6863 struct ccb_pathinq cpi;
6864 int can_negotiate;
6865
6866 busses_to_config++;
6867 xpt_compile_path(&path, NULL, bus->path_id,
6868 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
6869 xpt_setup_ccb(&cpi.ccb_h, &path, /*priority*/1);
6870 cpi.ccb_h.func_code = XPT_PATH_INQ;
6871 xpt_action((union ccb *)&cpi);
6872 can_negotiate = cpi.hba_inquiry;
6873 can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE);
6874 if ((cpi.hba_misc & PIM_NOBUSRESET) == 0
6875 && can_negotiate)
6876 busses_to_reset++;
6877 xpt_release_path(&path);
6878 }
6879
6880 return(1);
6881}
6882
6883static int
6884xptconfigfunc(struct cam_eb *bus, void *arg)
6885{
6886 struct cam_path *path;
6887 union ccb *work_ccb;
6888
6889 sim_lock_assert_owned(bus->sim->lock);
6890
6891 if (bus->path_id != CAM_XPT_PATH_ID) {
6892 cam_status status;
6893 int can_negotiate;
6894
6895 work_ccb = xpt_alloc_ccb();
6896 if ((status = xpt_create_path(&path, xpt_periph, bus->path_id,
6897 CAM_TARGET_WILDCARD,
6898 CAM_LUN_WILDCARD)) !=CAM_REQ_CMP){
6899 kprintf("xptconfigfunc: xpt_create_path failed with "
6900 "status %#x for bus %d\n", status, bus->path_id);
6901 kprintf("xptconfigfunc: halting bus configuration\n");
6902 xpt_free_ccb(work_ccb);
6903 busses_to_config--;
6904 xpt_finishconfig(xpt_periph, NULL);
6905 return(0);
6906 }
6907 xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
6908 work_ccb->ccb_h.func_code = XPT_PATH_INQ;
6909 xpt_action(work_ccb);
6910 if (work_ccb->ccb_h.status != CAM_REQ_CMP) {
6911 kprintf("xptconfigfunc: CPI failed on bus %d "
6912 "with status %d\n", bus->path_id,
6913 work_ccb->ccb_h.status);
6914 xpt_finishconfig(xpt_periph, work_ccb);
6915 return(1);
6916 }
6917
6918 can_negotiate = work_ccb->cpi.hba_inquiry;
6919 can_negotiate &= (PI_WIDE_32|PI_WIDE_16|PI_SDTR_ABLE);
6920 if ((work_ccb->cpi.hba_misc & PIM_NOBUSRESET) == 0
6921 && (can_negotiate != 0)) {
6922 xpt_setup_ccb(&work_ccb->ccb_h, path, /*priority*/1);
6923 work_ccb->ccb_h.func_code = XPT_RESET_BUS;
6924 work_ccb->ccb_h.cbfcnp = NULL;
6925 CAM_DEBUG(path, CAM_DEBUG_SUBTRACE,
6926 ("Resetting Bus\n"));
6927 xpt_action(work_ccb);
6928 xpt_finishconfig(xpt_periph, work_ccb);
6929 } else {
6930 /* Act as though we performed a successful BUS RESET */
6931 work_ccb->ccb_h.func_code = XPT_RESET_BUS;
6932 xpt_finishconfig(xpt_periph, work_ccb);
6933 }
6934 }
6935
6936 return(1);
6937}
6938
6939static void
6940xpt_config(void *arg)
6941{
6942 /*
6943 * Now that interrupts are enabled, go find our devices
6944 */
6945
6946#ifdef CAMDEBUG
6947 /* Setup debugging flags and path */
6948#ifdef CAM_DEBUG_FLAGS
6949 cam_dflags = CAM_DEBUG_FLAGS;
6950#else /* !CAM_DEBUG_FLAGS */
6951 cam_dflags = CAM_DEBUG_NONE;
6952#endif /* CAM_DEBUG_FLAGS */
6953#ifdef CAM_DEBUG_BUS
6954 if (cam_dflags != CAM_DEBUG_NONE) {
6955 /*
6956 * Locking is specifically omitted here. No SIMs have
6957 * registered yet, so xpt_create_path will only be searching
6958 * empty lists of targets and devices.
6959 */
6960 if (xpt_create_path(&cam_dpath, xpt_periph,
6961 CAM_DEBUG_BUS, CAM_DEBUG_TARGET,
6962 CAM_DEBUG_LUN) != CAM_REQ_CMP) {
6963 kprintf("xpt_config: xpt_create_path() failed for debug"
6964 " target %d:%d:%d, debugging disabled\n",
6965 CAM_DEBUG_BUS, CAM_DEBUG_TARGET, CAM_DEBUG_LUN);
6966 cam_dflags = CAM_DEBUG_NONE;
6967 }
6968 } else
6969 cam_dpath = NULL;
6970#else /* !CAM_DEBUG_BUS */
6971 cam_dpath = NULL;
6972#endif /* CAM_DEBUG_BUS */
6973#endif /* CAMDEBUG */
6974
6975 /*
6976 * Scan all installed busses.
6977 */
6978 xpt_for_all_busses(xptconfigbuscountfunc, NULL);
6979
6980 if (busses_to_config == 0) {
6981 /* Call manually because we don't have any busses */
6982 xpt_finishconfig(xpt_periph, NULL);
6983 } else {
6984 if (busses_to_reset > 0 && scsi_delay >= 2000) {
6985 kprintf("Waiting %d seconds for SCSI "
6986 "devices to settle\n", scsi_delay/1000);
6987 }
6988 xpt_for_all_busses(xptconfigfunc, NULL);
6989 }
6990}
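/*
 * Boot-time configuration flow: xpt_config() first counts the registered
 * busses (xptconfigbuscountfunc), then walks them with xptconfigfunc(),
 * which resets each bus that is able to negotiate and feeds the result to
 * xpt_finishconfig().  A successful reset is converted there into an
 * XPT_SCAN_BUS request whose completion comes back through
 * xpt_finishconfig() as well; each finished bus decrements
 * busses_to_config, and when the count hits zero a task is queued to
 * xpt_finishconfig_task() to register the peripheral drivers, announce
 * pass-only devices, and release the boot-time config_intrhook.
 */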
6991
6992/*
6993 * If the given device only has one peripheral attached to it, and if that
6994 * peripheral is the passthrough driver, announce it. This ensures that the
6995 * user sees some sort of announcement for every peripheral in their system.
6996 */
6997static int
6998xptpassannouncefunc(struct cam_ed *device, void *arg)
6999{
7000 struct cam_periph *periph;
7001 int i;
7002
7003 for (periph = SLIST_FIRST(&device->periphs), i = 0; periph != NULL;
7004 periph = SLIST_NEXT(periph, periph_links), i++);
7005
7006 periph = SLIST_FIRST(&device->periphs);
7007 if ((i == 1)
7008 && (strncmp(periph->periph_name, "pass", 4) == 0))
7009 xpt_announce_periph(periph, NULL);
7010
7011 return(1);
7012}
7013
7014static void
7015xpt_finishconfig_task(void *context, int pending)
7016{
7017 struct periph_driver **p_drv;
7018 int i;
7019
7020 if (busses_to_config == 0) {
7021 /* Register all the peripheral drivers */
7022 /* XXX This will have to change when we have loadable modules */
7023 p_drv = periph_drivers;
7024 for (i = 0; p_drv[i] != NULL; i++) {
7025 (*p_drv[i]->init)();
7026 }
7027
7028 /*
7029 * Check for devices with no "standard" peripheral driver
7030 * attached. For any devices like that, announce the
7031 * passthrough driver so the user will see something.
7032 */
7033 xpt_for_all_devices(xptpassannouncefunc, NULL);
7034
7035 /* Release our hook so that the boot can continue. */
7036 config_intrhook_disestablish(xsoftc.xpt_config_hook);
7037 kfree(xsoftc.xpt_config_hook, M_CAMXPT);
7038 xsoftc.xpt_config_hook = NULL;
7039 }
7040
7041 kfree(context, M_CAMXPT);
7042}
7043
7044static void
7045xpt_finishconfig(struct cam_periph *periph, union ccb *done_ccb)
7046{
7047 struct xpt_task *task;
7048
7049 if (done_ccb != NULL) {
7050 CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE,
7051 ("xpt_finishconfig\n"));
7052 switch(done_ccb->ccb_h.func_code) {
7053 case XPT_RESET_BUS:
7054 if (done_ccb->ccb_h.status == CAM_REQ_CMP) {
7055 done_ccb->ccb_h.func_code = XPT_SCAN_BUS;
7056 done_ccb->ccb_h.cbfcnp = xpt_finishconfig;
7057 done_ccb->crcn.flags = 0;
7058 xpt_action(done_ccb);
7059 return;
7060 }
7061 /* FALLTHROUGH */
7062 case XPT_SCAN_BUS:
7063 default:
7064 xpt_free_path(done_ccb->ccb_h.path);
7065 busses_to_config--;
7066 break;
7067 }
7068 }
7069
7070 if (busses_to_config == 0) {
7071 task = kmalloc(sizeof(struct xpt_task), M_CAMXPT, M_INTWAIT);
7072 TASK_INIT(&task->task, 0, xpt_finishconfig_task, task);
7073 taskqueue_enqueue(taskqueue_thread[mycpuid], &task->task);
7074 }
7075
7076 if (done_ccb != NULL)
7077 xpt_free_ccb(done_ccb);
7078}
7079
7080cam_status
7081xpt_register_async(int event, ac_callback_t *cbfunc, void *cbarg,
7082 struct cam_path *path)
7083{
7084 struct ccb_setasync csa;
7085 cam_status status;
7086 int xptpath = 0;
7087
7088 if (path == NULL) {
7089 lockmgr(&xsoftc.xpt_lock, LK_EXCLUSIVE);
7090 status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID,
7091 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
7092 if (status != CAM_REQ_CMP) {
7093 lockmgr(&xsoftc.xpt_lock, LK_RELEASE);
7094 return (status);
7095 }
7096 xptpath = 1;
7097 }
7098
7099 xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
7100 csa.ccb_h.func_code = XPT_SASYNC_CB;
7101 csa.event_enable = event;
7102 csa.callback = cbfunc;
7103 csa.callback_arg = cbarg;
7104 xpt_action((union ccb *)&csa);
7105 status = csa.ccb_h.status;
7106 if (xptpath) {
7107 xpt_free_path(path);
7108 lockmgr(&xsoftc.xpt_lock, LK_RELEASE);
7109 }
7110 return (status);
7111}
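/*
 * A minimal usage sketch, assuming a peripheral driver softc and an
 * ac_callback_t named mydriver_async (both hypothetical):
 *
 *	status = xpt_register_async(AC_FOUND_DEVICE | AC_LOST_DEVICE,
 *				    mydriver_async, softc, periph->path);
 *
 * Passing a NULL path instead registers against the wildcard XPT path,
 * with the xpt lock taken internally.
 */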
7112
7113static void
7114xptaction(struct cam_sim *sim, union ccb *work_ccb)
7115{
7116 CAM_DEBUG(work_ccb->ccb_h.path, CAM_DEBUG_TRACE, ("xptaction\n"));
7117
7118 switch (work_ccb->ccb_h.func_code) {
7119 /* Common cases first */
7120 case XPT_PATH_INQ: /* Path routing inquiry */
7121 {
7122 struct ccb_pathinq *cpi;
7123
7124 cpi = &work_ccb->cpi;
7125 cpi->version_num = 1; /* XXX??? */
7126 cpi->hba_inquiry = 0;
7127 cpi->target_sprt = 0;
7128 cpi->hba_misc = 0;
7129 cpi->hba_eng_cnt = 0;
7130 cpi->max_target = 0;
7131 cpi->max_lun = 0;
7132 cpi->initiator_id = 0;
7133 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
7134 strncpy(cpi->hba_vid, "", HBA_IDLEN);
7135 strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
7136 cpi->unit_number = sim->unit_number;
7137 cpi->bus_id = sim->bus_id;
7138 cpi->base_transfer_speed = 0;
7139 cpi->protocol = PROTO_UNSPECIFIED;
7140 cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
7141 cpi->transport = XPORT_UNSPECIFIED;
7142 cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
7143 cpi->ccb_h.status = CAM_REQ_CMP;
7144 xpt_done(work_ccb);
7145 break;
7146 }
7147 default:
7148 work_ccb->ccb_h.status = CAM_REQ_INVALID;
7149 xpt_done(work_ccb);
7150 break;
7151 }
7152}
7153
7154/*
7155 * The xpt as a "controller" has no interrupt sources, so polling
7156 * is a no-op.
7157 */
7158static void
7159xptpoll(struct cam_sim *sim)
7160{
7161}
7162
7163void
7164xpt_lock_buses(void)
7165{
7166 lockmgr(&xsoftc.xpt_topo_lock, LK_EXCLUSIVE);
7167}
7168
7169void
7170xpt_unlock_buses(void)
7171{
7172 lockmgr(&xsoftc.xpt_topo_lock, LK_RELEASE);
7173}
7174
7175
7176/*
7177 * Should only be called by the machine interrupt dispatch routines,
7178 * so put these prototypes here instead of in the header.
7179 */
7180
7181static void
7182swi_cambio(void *arg, void *frame)
7183{
7184 camisr(NULL);
7185}
7186
7187static void
7188camisr(void *dummy)
7189{
7190 cam_simq_t queue;
7191 struct cam_sim *sim;
7192
7193 spin_lock_wr(&cam_simq_spin);
7194 TAILQ_INIT(&queue);
7195 TAILQ_CONCAT(&queue, &cam_simq, links);
7196 spin_unlock_wr(&cam_simq_spin);
7197
7198 while ((sim = TAILQ_FIRST(&queue)) != NULL) {
7199 TAILQ_REMOVE(&queue, sim, links);
7200 CAM_SIM_LOCK(sim);
7201 sim->flags &= ~CAM_SIM_ON_DONEQ;
7202 camisr_runqueue(sim);
7203 CAM_SIM_UNLOCK(sim);
7204 }
7205}
7206
7207static void
7208camisr_runqueue(struct cam_sim *sim)
7209{
7210 struct ccb_hdr *ccb_h;
7211 int runq;
7212
7213 spin_lock_wr(&sim->sim_spin);
7214 while ((ccb_h = TAILQ_FIRST(&sim->sim_doneq)) != NULL) {
7215 TAILQ_REMOVE(&sim->sim_doneq, ccb_h, sim_links.tqe);
7216 spin_unlock_wr(&sim->sim_spin);
7217 ccb_h->pinfo.index = CAM_UNQUEUED_INDEX;
7218
7219 CAM_DEBUG(ccb_h->path, CAM_DEBUG_TRACE,
7220 ("camisr\n"));
7221
7222 runq = FALSE;
7223
7224 if (ccb_h->flags & CAM_HIGH_POWER) {
7225 struct highpowerlist *hphead;
7226 struct cam_ed *device;
7227 union ccb *send_ccb;
7228
7229 lockmgr(&xsoftc.xpt_lock, LK_EXCLUSIVE);
7230 hphead = &xsoftc.highpowerq;
7231
7232 send_ccb = (union ccb *)STAILQ_FIRST(hphead);
7233
7234 /*
7235 * Increment the count since this command is done.
7236 */
7237 xsoftc.num_highpower++;
7238
7239 /*
7240 * Any high powered commands queued up?
7241 */
7242 if (send_ccb != NULL) {
7243 device = send_ccb->ccb_h.path->device;
7244
7245 STAILQ_REMOVE_HEAD(hphead, xpt_links.stqe);
7246 lockmgr(&xsoftc.xpt_lock, LK_RELEASE);
7247
7248 xpt_release_devq(send_ccb->ccb_h.path,
7249 /*count*/1, /*runqueue*/TRUE);
7250 } else
7251 lockmgr(&xsoftc.xpt_lock, LK_RELEASE);
7252 }
7253
7254 if ((ccb_h->func_code & XPT_FC_USER_CCB) == 0) {
7255 struct cam_ed *dev;
7256
7257 dev = ccb_h->path->device;
7258
7259 cam_ccbq_ccb_done(&dev->ccbq, (union ccb *)ccb_h);
7260
7261 /*
7262 * devq may be NULL if this is cam_dead_sim
7263 */
7264 if (ccb_h->path->bus->sim->devq) {
7265 ccb_h->path->bus->sim->devq->send_active--;
7266 ccb_h->path->bus->sim->devq->send_openings++;
7267 }
7268
7269 if (((dev->flags & CAM_DEV_REL_ON_COMPLETE) != 0
7270 && (ccb_h->status&CAM_STATUS_MASK) != CAM_REQUEUE_REQ)
7271 || ((dev->flags & CAM_DEV_REL_ON_QUEUE_EMPTY) != 0
7272 && (dev->ccbq.dev_active == 0))) {
7273
7274 xpt_release_devq(ccb_h->path, /*count*/1,
7275 /*run_queue*/TRUE);
7276 }
7277
7278 if ((dev->flags & CAM_DEV_TAG_AFTER_COUNT) != 0
7279 && (--dev->tag_delay_count == 0))
7280 xpt_start_tags(ccb_h->path);
7281
7282 if ((dev->ccbq.queue.entries > 0)
7283 && (dev->qfrozen_cnt == 0)
7284 && (device_is_send_queued(dev) == 0)) {
7285 runq = xpt_schedule_dev_sendq(ccb_h->path->bus,
7286 dev);
7287 }
7288 }
7289
7290 if (ccb_h->status & CAM_RELEASE_SIMQ) {
7291 xpt_release_simq(ccb_h->path->bus->sim,
7292 /*run_queue*/TRUE);
7293 ccb_h->status &= ~CAM_RELEASE_SIMQ;
7294 runq = FALSE;
7295 }
7296
7297 if ((ccb_h->flags & CAM_DEV_QFRZDIS)
7298 && (ccb_h->status & CAM_DEV_QFRZN)) {
7299 xpt_release_devq(ccb_h->path, /*count*/1,
7300 /*run_queue*/TRUE);
7301 ccb_h->status &= ~CAM_DEV_QFRZN;
7302 } else if (runq) {
7303 xpt_run_dev_sendq(ccb_h->path->bus);
7304 }
7305
7306 /* Call the peripheral driver's callback */
7307 (*ccb_h->cbfcnp)(ccb_h->path->periph, (union ccb *)ccb_h);
7308 spin_lock_wr(&sim->sim_spin);
7309 }
7310 spin_unlock_wr(&sim->sim_spin);
7311}
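/*
 * Summary of the completion path above: SIMs queue finished CCBs on their
 * per-SIM doneq and are themselves placed on the global cam_simq; the
 * software interrupt (swi_cambio -> camisr) drains cam_simq and runs
 * camisr_runqueue() for each SIM, which handles high-power accounting,
 * device/SIM queue releases, the tag-enable delay countdown, and finally
 * invokes each CCB's peripheral callback.
 */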
7312
7313/*
7314 * The dead_sim isn't completely hooked into CAM, so we have to make sure
7315 * the doneq is cleared after calling xpt_done() so cam_periph_ccbwait()
7316 * doesn't block.
7317 */
7318static void
7319dead_sim_action(struct cam_sim *sim, union ccb *ccb)
7320{
7321
7322 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
7323 xpt_done(ccb);
7324 camisr_runqueue(sim);
7325}
7326
7327static void
7328dead_sim_poll(struct cam_sim *sim)
7329{
7330}