1/* $FreeBSD: src/sys/dev/iir/iir.c,v 1.2.2.3 2002/05/05 08:18:12 asmodai Exp $ */
2/* $DragonFly: src/sys/dev/raid/iir/iir.c,v 1.6 2004/03/15 03:05:10 dillon Exp $ */
3/*
4 * Copyright (c) 2000-01 Intel Corporation
5 * All Rights Reserved
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions, and the following disclaimer,
12 * without modification, immediately at the beginning of the file.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. The name of the author may not be used to endorse or promote products
17 * derived from this software without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
23 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 */
31
32/*
33 * iir.c: SCSI dependent code for the Intel Integrated RAID Controller driver
34 *
35 * Written by: Achim Leubner <achim.leubner@intel.com>
36 * Fixes/Additions: Boji Tony Kannanthanam <boji.t.kannanthanam@intel.com>
37 *
38 * credits: Niklas Hallqvist; OpenBSD driver for the ICP Controllers.
39 * Mike Smith; Some driver source code.
40 * FreeBSD.ORG; Great O/S to work on and for.
41 *
42 * TODO:
43 */
44
45#ident "$Id: iir.c 1.2 2001/06/21 20:28:32 achim Exp $"
46
47#define _IIR_C_
48
49/* #include "opt_iir.h" */
50#include <sys/param.h>
51#include <sys/systm.h>
52#include <sys/types.h>
53#include <sys/eventhandler.h>
54#include <sys/malloc.h>
55#include <sys/kernel.h>
56#include <sys/bus.h>
57
58#include <machine/bus_memio.h>
59#include <machine/bus_pio.h>
60#include <machine/bus.h>
61#include <machine/clock.h>
62#include <machine/stdarg.h>
63
64#include <bus/cam/cam.h>
65#include <bus/cam/cam_ccb.h>
66#include <bus/cam/cam_sim.h>
67#include <bus/cam/cam_xpt_sim.h>
68#include <bus/cam/cam_debug.h>
69#include <bus/cam/scsi/scsi_all.h>
70#include <bus/cam/scsi/scsi_message.h>
71
72#include <vm/vm.h>
73#include <vm/pmap.h>
74
75#include "iir.h"
76
77struct gdt_softc *gdt_wait_gdt;
78int gdt_wait_index;
79
80#ifdef GDT_DEBUG
81int gdt_debug = GDT_DEBUG;
82#ifdef __SERIAL__
83#define MAX_SERBUF 160
84static void ser_init(void);
85static void ser_puts(char *str);
86static void ser_putc(int c);
87static char strbuf[MAX_SERBUF+1];
88#ifdef __COM2__
89#define COM_BASE 0x2f8
90#else
91#define COM_BASE 0x3f8
92#endif
93static void ser_init()
94{
95 unsigned port=COM_BASE;
96
97 outb(port+3, 0x80);
98 outb(port+1, 0);
99 /* 19200 Baud, if 9600: outb(12,port) */
100 outb(port, 6);
101 outb(port+3, 3);
102 outb(port+1, 0);
103}
104
105static void ser_puts(char *str)
106{
107 char *ptr;
108
109 ser_init();
110 for (ptr=str;*ptr;++ptr)
111 ser_putc((int)(*ptr));
112}
113
114static void ser_putc(int c)
115{
116 unsigned port=COM_BASE;
117
118 while ((inb(port+5) & 0x20)==0);
119 outb(port, c);
120 if (c==0x0a)
121 {
122 while ((inb(port+5) & 0x20)==0);
123 outb(port, 0x0d);
124 }
125}
126
127int ser_printf(const char *fmt, ...)
128{
129 __va_list args;
130 int i;
131
132 __va_start(args,fmt);
133 i = vsprintf(strbuf,fmt,args);
134 ser_puts(strbuf);
135 __va_end(args);
136 return i;
137}
138#endif
139#endif
140
141/* The linked list of softc structures */
142struct gdt_softc_list gdt_softcs = TAILQ_HEAD_INITIALIZER(gdt_softcs);
143/* controller cnt. */
144int gdt_cnt = 0;
145/* event buffer */
146static gdt_evt_str ebuffer[GDT_MAX_EVENTS];
147static int elastidx, eoldidx;
148/* statistics */
149gdt_statist_t gdt_stat;
150
151/* Definitions for our use of the SIM private CCB area */
152#define ccb_sim_ptr spriv_ptr0
153#define ccb_priority spriv_field1
154
155static void iir_action(struct cam_sim *sim, union ccb *ccb);
156static void iir_poll(struct cam_sim *sim);
157static void iir_shutdown(void *arg, int howto);
158static void iir_timeout(void *arg);
159static void iir_watchdog(void *arg);
160
161static void gdt_eval_mapping(u_int32_t size, int *cyls, int *heads,
162 int *secs);
163static int gdt_internal_cmd(struct gdt_softc *gdt, struct gdt_ccb *gccb,
164 u_int8_t service, u_int16_t opcode,
165 u_int32_t arg1, u_int32_t arg2, u_int32_t arg3);
166static int gdt_wait(struct gdt_softc *gdt, struct gdt_ccb *ccb,
167 int timeout);
168
169static struct gdt_ccb *gdt_get_ccb(struct gdt_softc *gdt);
170static u_int32_t gdt_ccb_vtop(struct gdt_softc *gdt,
171 struct gdt_ccb *gccb);
172
173static int gdt_sync_event(struct gdt_softc *gdt, int service,
174 u_int8_t index, struct gdt_ccb *gccb);
175static int gdt_async_event(struct gdt_softc *gdt, int service);
176static struct gdt_ccb *gdt_raw_cmd(struct gdt_softc *gdt,
177 union ccb *ccb, int *lock);
178static struct gdt_ccb *gdt_cache_cmd(struct gdt_softc *gdt,
179 union ccb *ccb, int *lock);
180static struct gdt_ccb *gdt_ioctl_cmd(struct gdt_softc *gdt,
181 gdt_ucmd_t *ucmd, int *lock);
182static void gdt_internal_cache_cmd(struct gdt_softc *gdt,union ccb *ccb);
183
184static void gdtmapmem(void *arg, bus_dma_segment_t *dm_segs,
185 int nseg, int error);
186static void gdtexecuteccb(void *arg, bus_dma_segment_t *dm_segs,
187 int nseg, int error);
188
189int
190iir_init(struct gdt_softc *gdt)
191{
192 u_int16_t cdev_cnt;
193 int i, id, drv_cyls, drv_hds, drv_secs;
194 struct gdt_ccb *gccb;
195
196 GDT_DPRINTF(GDT_D_DEBUG, ("iir_init()\n"));
197
198 gdt->sc_state = GDT_POLLING;
199 gdt_clear_events();
200 bzero(&gdt_stat, sizeof(gdt_statist_t));
201
202 SLIST_INIT(&gdt->sc_free_gccb);
203 SLIST_INIT(&gdt->sc_pending_gccb);
204 TAILQ_INIT(&gdt->sc_ccb_queue);
205 TAILQ_INIT(&gdt->sc_ucmd_queue);
206 TAILQ_INSERT_TAIL(&gdt_softcs, gdt, links);
207
208 /* DMA tag for mapping buffers into device visible space. */
209 if (bus_dma_tag_create(gdt->sc_parent_dmat, /*alignment*/1, /*boundary*/0,
210 /*lowaddr*/BUS_SPACE_MAXADDR,
211 /*highaddr*/BUS_SPACE_MAXADDR,
212 /*filter*/NULL, /*filterarg*/NULL,
213 /*maxsize*/MAXBSIZE, /*nsegments*/GDT_MAXSG,
214 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
215 /*flags*/BUS_DMA_ALLOCNOW,
216 &gdt->sc_buffer_dmat) != 0) {
217 printf("iir%d: bus_dma_tag_create(...,gdt->sc_buffer_dmat) failed\n",
218 gdt->sc_hanum);
219 return (1);
220 }
221 gdt->sc_init_level++;
222
223 /* DMA tag for our ccb structures */
224 if (bus_dma_tag_create(gdt->sc_parent_dmat, /*alignment*/1, /*boundary*/0,
225 /*lowaddr*/BUS_SPACE_MAXADDR,
226 /*highaddr*/BUS_SPACE_MAXADDR,
227 /*filter*/NULL, /*filterarg*/NULL,
228 GDT_MAXCMDS * sizeof(struct gdt_ccb),
229 /*nsegments*/1,
230 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
231 /*flags*/0, &gdt->sc_gccb_dmat) != 0) {
232 printf("iir%d: bus_dma_tag_create(...,gdt->sc_gccb_dmat) failed\n",
233 gdt->sc_hanum);
234 return (1);
235 }
236 gdt->sc_init_level++;
237
238 /* Allocation for our ccbs */
239 if (bus_dmamem_alloc(gdt->sc_gccb_dmat, (void **)&gdt->sc_gccbs,
240 BUS_DMA_NOWAIT, &gdt->sc_gccb_dmamap) != 0) {
241 printf("iir%d: bus_dmamem_alloc(...,&gdt->sc_gccbs,...) failed\n",
242 gdt->sc_hanum);
243 return (1);
244 }
245 gdt->sc_init_level++;
246
247 /* And permanently map them */
248 bus_dmamap_load(gdt->sc_gccb_dmat, gdt->sc_gccb_dmamap,
249 gdt->sc_gccbs, GDT_MAXCMDS * sizeof(struct gdt_ccb),
250 gdtmapmem, &gdt->sc_gccb_busbase, /*flags*/0);
251 gdt->sc_init_level++;
252
253 /* Clear them out. */
254 bzero(gdt->sc_gccbs, GDT_MAXCMDS * sizeof(struct gdt_ccb));
255
256 /* Initialize the ccbs */
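    /*
     * Command indices handed to the firmware start at 2 (gc_cmd_index =
     * i + 2); iir_intr() relies on this when it maps a returned status
     * index back to its ccb via sc_gccbs[istatus - 2], with the special
     * GDT_ASYNCINDEX/GDT_SPEZINDEX values handled separately there.
     */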
257 for (i = GDT_MAXCMDS-1; i >= 0; i--) {
258 gdt->sc_gccbs[i].gc_cmd_index = i + 2;
259 gdt->sc_gccbs[i].gc_flags = GDT_GCF_UNUSED;
260 gdt->sc_gccbs[i].gc_map_flag = FALSE;
261 if (bus_dmamap_create(gdt->sc_buffer_dmat, /*flags*/0,
262 &gdt->sc_gccbs[i].gc_dmamap) != 0)
263 return(1);
264 gdt->sc_gccbs[i].gc_map_flag = TRUE;
265 SLIST_INSERT_HEAD(&gdt->sc_free_gccb, &gdt->sc_gccbs[i], sle);
266 }
267 gdt->sc_init_level++;
268
269 /* create the control device */
270 gdt->sc_dev = gdt_make_dev(gdt->sc_hanum);
271
272 /* allocate ccb for gdt_internal_cmd() */
273 gccb = gdt_get_ccb(gdt);
274 if (gccb == NULL) {
275 printf("iir%d: No free command index found\n",
276 gdt->sc_hanum);
277 return (1);
278 }
279
280 if (!gdt_internal_cmd(gdt, gccb, GDT_SCREENSERVICE, GDT_INIT,
281 0, 0, 0)) {
282 printf("iir%d: Screen service initialization error %d\n",
283 gdt->sc_hanum, gdt->sc_status);
284 gdt_free_ccb(gdt, gccb);
285 return (1);
286 }
287
288 if (!gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_INIT,
289 GDT_LINUX_OS, 0, 0)) {
290 printf("iir%d: Cache service initialization error %d\n",
291 gdt->sc_hanum, gdt->sc_status);
292 gdt_free_ccb(gdt, gccb);
293 return (1);
294 }
295 gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_UNFREEZE_IO,
296 0, 0, 0);
297
298 if (!gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_MOUNT,
299 0xffff, 1, 0)) {
300 printf("iir%d: Cache service mount error %d\n",
301 gdt->sc_hanum, gdt->sc_status);
302 gdt_free_ccb(gdt, gccb);
303 return (1);
304 }
305
306 if (!gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_INIT,
307 GDT_LINUX_OS, 0, 0)) {
308 printf("iir%d: Cache service post-mount initialization error %d\n",
309 gdt->sc_hanum, gdt->sc_status);
310 gdt_free_ccb(gdt, gccb);
311 return (1);
312 }
313 cdev_cnt = (u_int16_t)gdt->sc_info;
314 gdt->sc_fw_vers = gdt->sc_service;
315
316 /* Detect number of buses */
317 gdt_enc32(gccb->gc_scratch + GDT_IOC_VERSION, GDT_IOC_NEWEST);
318 gccb->gc_scratch[GDT_IOC_LIST_ENTRIES] = GDT_MAXBUS;
319 gccb->gc_scratch[GDT_IOC_FIRST_CHAN] = 0;
320 gccb->gc_scratch[GDT_IOC_LAST_CHAN] = GDT_MAXBUS - 1;
321 gdt_enc32(gccb->gc_scratch + GDT_IOC_LIST_OFFSET, GDT_IOC_HDR_SZ);
322 if (gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_IOCTL,
323 GDT_IOCHAN_RAW_DESC, GDT_INVALID_CHANNEL,
324 GDT_IOC_HDR_SZ + GDT_MAXBUS * GDT_RAWIOC_SZ)) {
325 gdt->sc_bus_cnt = gccb->gc_scratch[GDT_IOC_CHAN_COUNT];
326 for (i = 0; i < gdt->sc_bus_cnt; i++) {
327 id = gccb->gc_scratch[GDT_IOC_HDR_SZ +
328 i * GDT_RAWIOC_SZ + GDT_RAWIOC_PROC_ID];
329 gdt->sc_bus_id[i] = id < GDT_MAXID_FC ? id : 0xff;
330 }
331 } else {
332 /* New method failed, use fallback. */
333 for (i = 0; i < GDT_MAXBUS; i++) {
334 gdt_enc32(gccb->gc_scratch + GDT_GETCH_CHANNEL_NO, i);
335 if (!gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_IOCTL,
336 GDT_SCSI_CHAN_CNT | GDT_L_CTRL_PATTERN,
337 GDT_IO_CHANNEL | GDT_INVALID_CHANNEL,
338 GDT_GETCH_SZ)) {
339 if (i == 0) {
340 printf("iir%d: Cannot get channel count, "
341 "error %d\n", gdt->sc_hanum, gdt->sc_status);
342 gdt_free_ccb(gdt, gccb);
343 return (1);
344 }
345 break;
346 }
347 gdt->sc_bus_id[i] =
348 (gccb->gc_scratch[GDT_GETCH_SIOP_ID] < GDT_MAXID_FC) ?
349 gccb->gc_scratch[GDT_GETCH_SIOP_ID] : 0xff;
350 }
351 gdt->sc_bus_cnt = i;
352 }
353 /* add one "virtual" channel for the host drives */
354 gdt->sc_virt_bus = gdt->sc_bus_cnt;
355 gdt->sc_bus_cnt++;
356
357 if (!gdt_internal_cmd(gdt, gccb, GDT_SCSIRAWSERVICE, GDT_INIT,
358 0, 0, 0)) {
359 printf("iir%d: Raw service initialization error %d\n",
360 gdt->sc_hanum, gdt->sc_status);
361 gdt_free_ccb(gdt, gccb);
362 return (1);
363 }
364
365 /* Set/get features raw service (scatter/gather) */
366 gdt->sc_raw_feat = 0;
367 if (gdt_internal_cmd(gdt, gccb, GDT_SCSIRAWSERVICE, GDT_SET_FEAT,
368 GDT_SCATTER_GATHER, 0, 0)) {
369 if (gdt_internal_cmd(gdt, gccb, GDT_SCSIRAWSERVICE, GDT_GET_FEAT,
370 0, 0, 0)) {
371 gdt->sc_raw_feat = gdt->sc_info;
372 if (!(gdt->sc_info & GDT_SCATTER_GATHER)) {
373 panic("iir%d: Scatter/Gather Raw Service "
374 "required but not supported!\n", gdt->sc_hanum);
375 gdt_free_ccb(gdt, gccb);
376 return (1);
377 }
378 }
379 }
380
381 /* Set/get features cache service (scatter/gather) */
382 gdt->sc_cache_feat = 0;
383 if (gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_SET_FEAT,
384 0, GDT_SCATTER_GATHER, 0)) {
385 if (gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_GET_FEAT,
386 0, 0, 0)) {
387 gdt->sc_cache_feat = gdt->sc_info;
388 if (!(gdt->sc_info & GDT_SCATTER_GATHER)) {
389 panic("iir%d: Scatter/Gather Cache Service "
390 "required but not supported!\n", gdt->sc_hanum);
391 gdt_free_ccb(gdt, gccb);
392 return (1);
393 }
394 }
395 }
396
397 /* Scan for cache devices */
398 for (i = 0; i < cdev_cnt && i < GDT_MAX_HDRIVES; i++) {
399 if (gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_INFO,
400 i, 0, 0)) {
401 gdt->sc_hdr[i].hd_present = 1;
402 gdt->sc_hdr[i].hd_size = gdt->sc_info;
403
404 /*
405 * Evaluate mapping (sectors per head, heads per cyl)
406 */
407 gdt->sc_hdr[i].hd_size &= ~GDT_SECS32;
408 if (gdt->sc_info2 == 0)
409 gdt_eval_mapping(gdt->sc_hdr[i].hd_size,
410 &drv_cyls, &drv_hds, &drv_secs);
411 else {
412 drv_hds = gdt->sc_info2 & 0xff;
413 drv_secs = (gdt->sc_info2 >> 8) & 0xff;
414 drv_cyls = gdt->sc_hdr[i].hd_size / drv_hds /
415 drv_secs;
416 }
417 gdt->sc_hdr[i].hd_heads = drv_hds;
418 gdt->sc_hdr[i].hd_secs = drv_secs;
419 /* Round the size */
420 gdt->sc_hdr[i].hd_size = drv_cyls * drv_hds * drv_secs;
421
422 if (gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE,
423 GDT_DEVTYPE, i, 0, 0))
424 gdt->sc_hdr[i].hd_devtype = gdt->sc_info;
425 }
426 }
427
428 GDT_DPRINTF(GDT_D_INIT, ("dpmem %x %d-bus %d cache device%s\n",
429 gdt->sc_dpmembase,
430 gdt->sc_bus_cnt, cdev_cnt,
431 cdev_cnt == 1 ? "" : "s"));
432 gdt_free_ccb(gdt, gccb);
433
434 gdt_cnt++;
435 return (0);
436}
437
438void
439iir_free(struct gdt_softc *gdt)
440{
441 int i;
442
443 GDT_DPRINTF(GDT_D_INIT, ("iir_free()\n"));
444
445 switch (gdt->sc_init_level) {
446 default:
447 gdt_destroy_dev(gdt->sc_dev);
448 case 5:
449 for (i = GDT_MAXCMDS-1; i >= 0; i--)
450 if (gdt->sc_gccbs[i].gc_map_flag)
451 bus_dmamap_destroy(gdt->sc_buffer_dmat,
452 gdt->sc_gccbs[i].gc_dmamap);
453 bus_dmamap_unload(gdt->sc_gccb_dmat, gdt->sc_gccb_dmamap);
454 case 4:
455 bus_dmamem_free(gdt->sc_gccb_dmat, gdt->sc_gccbs, gdt->sc_gccb_dmamap);
456 case 3:
457 bus_dma_tag_destroy(gdt->sc_gccb_dmat);
458 case 2:
459 bus_dma_tag_destroy(gdt->sc_buffer_dmat);
460 case 1:
461 bus_dma_tag_destroy(gdt->sc_parent_dmat);
462 case 0:
463 break;
464 }
465 TAILQ_REMOVE(&gdt_softcs, gdt, links);
466}
467
468void
469iir_attach(struct gdt_softc *gdt)
470{
471 struct cam_devq *devq;
472 int i;
473
474 GDT_DPRINTF(GDT_D_INIT, ("iir_attach()\n"));
475
476 /*
477 * Create the device queue for our SIM.
478 */
479 devq = cam_simq_alloc(GDT_MAXCMDS);
480 if (devq == NULL)
481 return;
482
483 for (i = 0; i < gdt->sc_bus_cnt; i++) {
484 /*
485 * Construct our SIM entry
486 */
487 gdt->sims[i] = cam_sim_alloc(iir_action, iir_poll, "iir",
488 gdt, gdt->sc_hanum, /*untagged*/2,
489 /*tagged*/GDT_MAXCMDS, devq);
490 if (xpt_bus_register(gdt->sims[i], i) != CAM_SUCCESS) {
491 cam_sim_free(gdt->sims[i]);
492 break;
493 }
494
495 if (xpt_create_path(&gdt->paths[i], /*periph*/NULL,
496 cam_sim_path(gdt->sims[i]),
497 CAM_TARGET_WILDCARD,
498 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
499 xpt_bus_deregister(cam_sim_path(gdt->sims[i]));
500 cam_sim_free(gdt->sims[i]);
501 break;
502 }
503 }
504 cam_simq_release(devq);
505 if (i > 0)
506 EVENTHANDLER_REGISTER(shutdown_final, iir_shutdown,
507 gdt, SHUTDOWN_PRI_DEFAULT);
508 /* iir_watchdog(gdt); */
509 gdt->sc_state = GDT_NORMAL;
510}
511
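/*
 * Derive a logical C/H/S geometry from a host drive's capacity in sectors:
 * try 64 heads x 32 sectors first and fall back to the "medium" and "big"
 * layouts whenever the resulting cylinder count would exceed GDT_MAXCYLS.
 */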
512static void
513gdt_eval_mapping(u_int32_t size, int *cyls, int *heads, int *secs)
514{
515 *cyls = size / GDT_HEADS / GDT_SECS;
516 if (*cyls < GDT_MAXCYLS) {
517 *heads = GDT_HEADS;
518 *secs = GDT_SECS;
519 } else {
520 /* Too high for 64 * 32 */
521 *cyls = size / GDT_MEDHEADS / GDT_MEDSECS;
522 if (*cyls < GDT_MAXCYLS) {
523 *heads = GDT_MEDHEADS;
524 *secs = GDT_MEDSECS;
525 } else {
526 /* Too high for 127 * 63 */
527 *cyls = size / GDT_BIGHEADS / GDT_BIGSECS;
528 *heads = GDT_BIGHEADS;
529 *secs = GDT_BIGSECS;
530 }
531 }
532}
533
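/*
 * Busy-wait for one command to finish: call iir_intr() repeatedly until it
 * reports this gccb's command index back through gdt_wait_gdt/gdt_wait_index,
 * or until `timeout' DELAY(1) iterations have elapsed.
 */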
534static int
535gdt_wait(struct gdt_softc *gdt, struct gdt_ccb *gccb,
536 int timeout)
537{
538 int rv = 0;
539
540 GDT_DPRINTF(GDT_D_INIT,
541 ("gdt_wait(%p, %p, %d)\n", gdt, gccb, timeout));
542
543 gdt->sc_state |= GDT_POLL_WAIT;
544 do {
545 iir_intr(gdt);
546 if (gdt == gdt_wait_gdt &&
547 gccb->gc_cmd_index == gdt_wait_index) {
548 rv = 1;
549 break;
550 }
551 DELAY(1);
552 } while (--timeout);
553 gdt->sc_state &= ~GDT_POLL_WAIT;
554
555 while (gdt->sc_test_busy(gdt))
556 DELAY(1); /* XXX correct? */
557
558 return (rv);
559}
560
561static int
562gdt_internal_cmd(struct gdt_softc *gdt, struct gdt_ccb *gccb,
563 u_int8_t service, u_int16_t opcode,
564 u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
565{
566 int retries;
567
568 GDT_DPRINTF(GDT_D_CMD, ("gdt_internal_cmd(%p, %d, %d, %d, %d, %d)\n",
569 gdt, service, opcode, arg1, arg2, arg3));
570
571 bzero(gdt->sc_cmd, GDT_CMD_SZ);
572
573 for (retries = GDT_RETRIES; ; ) {
574 gccb->gc_service = service;
575 gccb->gc_flags = GDT_GCF_INTERNAL;
576
577 gdt->sc_set_sema0(gdt);
578 gdt_enc32(gdt->sc_cmd + GDT_CMD_COMMANDINDEX,
579 gccb->gc_cmd_index);
580 gdt_enc16(gdt->sc_cmd + GDT_CMD_OPCODE, opcode);
581
582 switch (service) {
583 case GDT_CACHESERVICE:
584 if (opcode == GDT_IOCTL) {
585 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION +
586 GDT_IOCTL_SUBFUNC, arg1);
587 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION +
588 GDT_IOCTL_CHANNEL, arg2);
589 gdt_enc16(gdt->sc_cmd + GDT_CMD_UNION +
590 GDT_IOCTL_PARAM_SIZE, (u_int16_t)arg3);
591 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_IOCTL_P_PARAM,
592 gdt_ccb_vtop(gdt, gccb) +
593 offsetof(struct gdt_ccb, gc_scratch[0]));
594 } else {
595 gdt_enc16(gdt->sc_cmd + GDT_CMD_UNION +
596 GDT_CACHE_DEVICENO, (u_int16_t)arg1);
597 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION +
598 GDT_CACHE_BLOCKNO, arg2);
599 }
600 break;
601
602 case GDT_SCSIRAWSERVICE:
603 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION +
604 GDT_RAW_DIRECTION, arg1);
605 gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_BUS] =
606 (u_int8_t)arg2;
607 gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_TARGET] =
608 (u_int8_t)arg3;
609 gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_LUN] =
610 (u_int8_t)(arg3 >> 8);
611 }
612
613 gdt->sc_cmd_len = GDT_CMD_SZ;
614 gdt->sc_cmd_off = 0;
615 gdt->sc_cmd_cnt = 0;
616 gdt->sc_copy_cmd(gdt, gccb);
617 gdt->sc_release_event(gdt);
618 DELAY(20);
619 if (!gdt_wait(gdt, gccb, GDT_POLL_TIMEOUT))
620 return (0);
621 if (gdt->sc_status != GDT_S_BSY || --retries == 0)
622 break;
623 DELAY(1);
624 }
625 return (gdt->sc_status == GDT_S_OK);
626}
627
628static struct gdt_ccb *
629gdt_get_ccb(struct gdt_softc *gdt)
630{
631 struct gdt_ccb *gccb;
632 int lock;
633
634 GDT_DPRINTF(GDT_D_QUEUE, ("gdt_get_ccb(%p)\n", gdt));
635
636 lock = splcam();
637 gccb = SLIST_FIRST(&gdt->sc_free_gccb);
638 if (gccb != NULL) {
639 SLIST_REMOVE_HEAD(&gdt->sc_free_gccb, sle);
640 SLIST_INSERT_HEAD(&gdt->sc_pending_gccb, gccb, sle);
641 ++gdt_stat.cmd_index_act;
642 if (gdt_stat.cmd_index_act > gdt_stat.cmd_index_max)
643 gdt_stat.cmd_index_max = gdt_stat.cmd_index_act;
644 }
645 splx(lock);
646 return (gccb);
647}
648
649void
650gdt_free_ccb(struct gdt_softc *gdt, struct gdt_ccb *gccb)
651{
652 int lock;
653
654 GDT_DPRINTF(GDT_D_QUEUE, ("gdt_free_ccb(%p, %p)\n", gdt, gccb));
655
656 lock = splcam();
657 gccb->gc_flags = GDT_GCF_UNUSED;
658 SLIST_REMOVE(&gdt->sc_pending_gccb, gccb, gdt_ccb, sle);
659 SLIST_INSERT_HEAD(&gdt->sc_free_gccb, gccb, sle);
660 --gdt_stat.cmd_index_act;
661 splx(lock);
662 if (gdt->sc_state & GDT_SHUTDOWN)
663 wakeup(gccb);
664}
665
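/*
 * Translate a gccb pointer into the bus address the controller expects.
 * The whole gccb array was loaded once via bus_dmamap_load() in iir_init(),
 * so this is simply the array's bus base plus the byte offset of the gccb
 * within sc_gccbs.
 */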
666static u_int32_t
667gdt_ccb_vtop(struct gdt_softc *gdt, struct gdt_ccb *gccb)
668{
669 return (gdt->sc_gccb_busbase
670 + (u_int32_t)((caddr_t)gccb - (caddr_t)gdt->sc_gccbs));
671}
672
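/*
 * Main dispatch loop: drain the user command (IOCTL) queue and the CAM ccb
 * queue, handing requests to the raw, cache or internal command paths.
 * Requests that cannot be started (no free command index, DPMEM full) are
 * put back at the head of their queue; in polling mode the routine waits
 * for the posted command to complete before returning.
 */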
673void
674gdt_next(struct gdt_softc *gdt)
675{
676 int lock;
677 union ccb *ccb;
678 gdt_ucmd_t *ucmd;
679 struct cam_sim *sim;
680 int bus, target, lun;
681 int next_cmd;
682
683 struct ccb_scsiio *csio;
684 struct ccb_hdr *ccbh;
685 struct gdt_ccb *gccb = NULL;
686 u_int8_t cmd;
687
688 GDT_DPRINTF(GDT_D_QUEUE, ("gdt_next(%p)\n", gdt));
689
690 lock = splcam();
691 if (gdt->sc_test_busy(gdt)) {
692 if (!(gdt->sc_state & GDT_POLLING)) {
693 splx(lock);
694 return;
695 }
696 while (gdt->sc_test_busy(gdt))
697 DELAY(1);
698 }
699
700 gdt->sc_cmd_cnt = gdt->sc_cmd_off = 0;
701 next_cmd = TRUE;
702 for (;;) {
703 /* I/Os in queue? controller ready? */
704 if (!TAILQ_FIRST(&gdt->sc_ucmd_queue) &&
705 !TAILQ_FIRST(&gdt->sc_ccb_queue))
706 break;
707
708 /* 1.: I/Os without ccb (IOCTLs) */
709 ucmd = TAILQ_FIRST(&gdt->sc_ucmd_queue);
710 if (ucmd != NULL) {
711 TAILQ_REMOVE(&gdt->sc_ucmd_queue, ucmd, links);
712 if ((gccb = gdt_ioctl_cmd(gdt, ucmd, &lock)) == NULL) {
713 TAILQ_INSERT_HEAD(&gdt->sc_ucmd_queue, ucmd, links);
714 break;
715 }
716 break;
717 /* if several commands were allowed: if (!gdt_polling) continue; */
718 }
719
720 /* 2.: I/Os with ccb */
721 ccb = (union ccb *)TAILQ_FIRST(&gdt->sc_ccb_queue);
722 /* always != NULL here, checked above */
723 sim = (struct cam_sim *)ccb->ccb_h.ccb_sim_ptr;
724 bus = cam_sim_bus(sim);
725 target = ccb->ccb_h.target_id;
726 lun = ccb->ccb_h.target_lun;
727
728 TAILQ_REMOVE(&gdt->sc_ccb_queue, &ccb->ccb_h, sim_links.tqe);
729 --gdt_stat.req_queue_act;
730 /* ccb->ccb_h.func_code is XPT_SCSI_IO */
731 GDT_DPRINTF(GDT_D_QUEUE, ("XPT_SCSI_IO flags 0x%x)\n",
732 ccb->ccb_h.flags));
733 csio = &ccb->csio;
734 ccbh = &ccb->ccb_h;
735 cmd = csio->cdb_io.cdb_bytes[0];
736 /* Max CDB length is 12 bytes */
737 if (csio->cdb_len > 12) {
738 ccbh->status = CAM_REQ_INVALID;
739 --gdt_stat.io_count_act;
740 xpt_done(ccb);
741 } else if (bus != gdt->sc_virt_bus) {
742 /* raw service command */
743 if ((gccb = gdt_raw_cmd(gdt, ccb, &lock)) == NULL) {
744 TAILQ_INSERT_HEAD(&gdt->sc_ccb_queue, &ccb->ccb_h,
745 sim_links.tqe);
746 ++gdt_stat.req_queue_act;
747 if (gdt_stat.req_queue_act > gdt_stat.req_queue_max)
748 gdt_stat.req_queue_max = gdt_stat.req_queue_act;
749 next_cmd = FALSE;
750 }
751 } else if (target >= GDT_MAX_HDRIVES ||
752 !gdt->sc_hdr[target].hd_present || lun != 0) {
753 ccbh->status = CAM_SEL_TIMEOUT;
754 --gdt_stat.io_count_act;
755 xpt_done(ccb);
756 } else {
757 /* cache service command */
758 if (cmd == READ_6 || cmd == WRITE_6 ||
759 cmd == READ_10 || cmd == WRITE_10) {
760 if ((gccb = gdt_cache_cmd(gdt, ccb, &lock)) == NULL) {
761 TAILQ_INSERT_HEAD(&gdt->sc_ccb_queue, &ccb->ccb_h,
762 sim_links.tqe);
763 ++gdt_stat.req_queue_act;
764 if (gdt_stat.req_queue_act > gdt_stat.req_queue_max)
765 gdt_stat.req_queue_max = gdt_stat.req_queue_act;
766 next_cmd = FALSE;
767 }
768 } else {
769 splx(lock);
770 gdt_internal_cache_cmd(gdt, ccb);
771 lock = splcam();
772 }
773 }
774 if ((gdt->sc_state & GDT_POLLING) || !next_cmd)
775 break;
776 }
777 if (gdt->sc_cmd_cnt > 0)
778 gdt->sc_release_event(gdt);
779
780 splx(lock);
781
782 if ((gdt->sc_state & GDT_POLLING) && gdt->sc_cmd_cnt > 0) {
783 gdt_wait(gdt, gccb, GDT_POLL_TIMEOUT);
784 }
785}
786
787static struct gdt_ccb *
788gdt_raw_cmd(struct gdt_softc *gdt, union ccb *ccb, int *lock)
789{
790 struct gdt_ccb *gccb;
791 struct cam_sim *sim;
792
793 GDT_DPRINTF(GDT_D_CMD, ("gdt_raw_cmd(%p, %p)\n", gdt, ccb));
794
795 if (roundup(GDT_CMD_UNION + GDT_RAW_SZ, sizeof(u_int32_t)) +
796 gdt->sc_cmd_off + GDT_DPMEM_COMMAND_OFFSET >
797 gdt->sc_ic_all_size) {
798 GDT_DPRINTF(GDT_D_INVALID, ("iir%d: gdt_raw_cmd(): DPMEM overflow\n",
799 gdt->sc_hanum));
800 return (NULL);
801 }
802
803 bzero(gdt->sc_cmd, GDT_CMD_SZ);
804
805 gccb = gdt_get_ccb(gdt);
806 if (gccb == NULL) {
807 GDT_DPRINTF(GDT_D_INVALID, ("iir%d: No free command index found\n",
808 gdt->sc_hanum));
809 return (gccb);
810 }
811 sim = (struct cam_sim *)ccb->ccb_h.ccb_sim_ptr;
812 gccb->gc_ccb = ccb;
813 gccb->gc_service = GDT_SCSIRAWSERVICE;
814 gccb->gc_flags = GDT_GCF_SCSI;
815
816 if (gdt->sc_cmd_cnt == 0)
817 gdt->sc_set_sema0(gdt);
818 splx(*lock);
819 gdt_enc32(gdt->sc_cmd + GDT_CMD_COMMANDINDEX,
820 gccb->gc_cmd_index);
821 gdt_enc16(gdt->sc_cmd + GDT_CMD_OPCODE, GDT_WRITE);
822
823 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_DIRECTION,
824 (ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN ?
825 GDT_DATA_IN : GDT_DATA_OUT);
826 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SDLEN,
827 ccb->csio.dxfer_len);
828 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_CLEN,
829 ccb->csio.cdb_len);
830 bcopy(ccb->csio.cdb_io.cdb_bytes, gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_CMD,
831 ccb->csio.cdb_len);
832 gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_TARGET] =
833 ccb->ccb_h.target_id;
834 gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_LUN] =
835 ccb->ccb_h.target_lun;
836 gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_BUS] =
837 cam_sim_bus(sim);
838 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SENSE_LEN,
839 sizeof(struct scsi_sense_data));
840 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SENSE_DATA,
841 gdt_ccb_vtop(gdt, gccb) +
842 offsetof(struct gdt_ccb, gc_scratch[0]));
843
844 /*
845 * If we have any data to send with this command,
846 * map it into bus space.
847 */
848 /* Only use S/G if there is a transfer */
849 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
850 if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
851 if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
852 int s;
853 int error;
854
855 /* unlock splcam() before this ??? */
856 s = splsoftvm();
857 error =
858 bus_dmamap_load(gdt->sc_buffer_dmat,
859 gccb->gc_dmamap,
860 ccb->csio.data_ptr,
861 ccb->csio.dxfer_len,
862 gdtexecuteccb,
863 gccb, /*flags*/0);
864 if (error == EINPROGRESS) {
865 xpt_freeze_simq(sim, 1);
866 gccb->gc_state |= CAM_RELEASE_SIMQ;
867 }
868 splx(s);
869 } else {
870 struct bus_dma_segment seg;
871
872 /* Pointer to physical buffer */
873 seg.ds_addr =
874 (bus_addr_t)ccb->csio.data_ptr;
875 seg.ds_len = ccb->csio.dxfer_len;
876 gdtexecuteccb(gccb, &seg, 1, 0);
877 }
878 } else {
879 struct bus_dma_segment *segs;
880
881 if ((ccb->ccb_h.flags & CAM_DATA_PHYS) != 0)
882 panic("iir%d: iir_action - Physical "
883 "segment pointers unsupported", gdt->sc_hanum);
884
885 if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS)==0)
886 panic("iir%d: iir_action - Virtual "
887 "segment addresses unsupported", gdt->sc_hanum);
888
889 /* Just use the segments provided */
890 segs = (struct bus_dma_segment *)ccb->csio.data_ptr;
891 gdtexecuteccb(gccb, segs, ccb->csio.sglist_cnt, 0);
892 }
893 } else {
894 gdtexecuteccb(gccb, NULL, 0, 0);
895 }
896
897 *lock = splcam();
898 return (gccb);
899}
900
901static struct gdt_ccb *
902gdt_cache_cmd(struct gdt_softc *gdt, union ccb *ccb, int *lock)
903{
904 struct gdt_ccb *gccb;
905 struct cam_sim *sim;
906 u_int8_t *cmdp;
907 u_int16_t opcode;
908 u_int32_t blockno, blockcnt;
909
910 GDT_DPRINTF(GDT_D_CMD, ("gdt_cache_cmd(%p, %p)\n", gdt, ccb));
911
912 if (roundup(GDT_CMD_UNION + GDT_CACHE_SZ, sizeof(u_int32_t)) +
913 gdt->sc_cmd_off + GDT_DPMEM_COMMAND_OFFSET >
914 gdt->sc_ic_all_size) {
915 GDT_DPRINTF(GDT_D_INVALID, ("iir%d: gdt_cache_cmd(): DPMEM overflow\n",
916 gdt->sc_hanum));
917 return (NULL);
918 }
919
920 bzero(gdt->sc_cmd, GDT_CMD_SZ);
921
922 gccb = gdt_get_ccb(gdt);
923 if (gccb == NULL) {
924 GDT_DPRINTF(GDT_D_DEBUG, ("iir%d: No free command index found\n",
925 gdt->sc_hanum));
926 return (gccb);
927 }
928 sim = (struct cam_sim *)ccb->ccb_h.ccb_sim_ptr;
929 gccb->gc_ccb = ccb;
930 gccb->gc_service = GDT_CACHESERVICE;
931 gccb->gc_flags = GDT_GCF_SCSI;
932
933 if (gdt->sc_cmd_cnt == 0)
934 gdt->sc_set_sema0(gdt);
935 splx(*lock);
936 gdt_enc32(gdt->sc_cmd + GDT_CMD_COMMANDINDEX,
937 gccb->gc_cmd_index);
938 cmdp = ccb->csio.cdb_io.cdb_bytes;
939 opcode = (*cmdp == WRITE_6 || *cmdp == WRITE_10) ? GDT_WRITE : GDT_READ;
940 if ((gdt->sc_state & GDT_SHUTDOWN) && opcode == GDT_WRITE)
941 opcode = GDT_WRITE_THR;
942 gdt_enc16(gdt->sc_cmd + GDT_CMD_OPCODE, opcode);
943
944 gdt_enc16(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_DEVICENO,
945 ccb->ccb_h.target_id);
946 if (ccb->csio.cdb_len == 6) {
947 struct scsi_rw_6 *rw = (struct scsi_rw_6 *)cmdp;
948 blockno = scsi_3btoul(rw->addr) & ((SRW_TOPADDR<<16) | 0xffff);
949 blockcnt = rw->length ? rw->length : 0x100;
950 } else {
951 struct scsi_rw_10 *rw = (struct scsi_rw_10 *)cmdp;
952 blockno = scsi_4btoul(rw->addr);
953 blockcnt = scsi_2btoul(rw->length);
954 }
955 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKNO,
956 blockno);
957 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKCNT,
958 blockcnt);
959
960 /*
961 * If we have any data to send with this command,
962 * map it into bus space.
963 */
964 /* Only use S/G if there is a transfer */
965 if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
966 if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
967 int s;
968 int error;
969
970 /* unlock splcam() before this ??? */
971 s = splsoftvm();
972 error =
973 bus_dmamap_load(gdt->sc_buffer_dmat,
974 gccb->gc_dmamap,
975 ccb->csio.data_ptr,
976 ccb->csio.dxfer_len,
977 gdtexecuteccb,
978 gccb, /*flags*/0);
979 if (error == EINPROGRESS) {
980 xpt_freeze_simq(sim, 1);
981 gccb->gc_state |= CAM_RELEASE_SIMQ;
982 }
983 splx(s);
984 } else {
985 struct bus_dma_segment seg;
986
987 /* Pointer to physical buffer */
988 seg.ds_addr =
989 (bus_addr_t)ccb->csio.data_ptr;
990 seg.ds_len = ccb->csio.dxfer_len;
991 gdtexecuteccb(gccb, &seg, 1, 0);
992 }
993 } else {
994 struct bus_dma_segment *segs;
995
996 if ((ccb->ccb_h.flags & CAM_DATA_PHYS) != 0)
997 panic("iir%d: iir_action - Physical "
998 "segment pointers unsupported", gdt->sc_hanum);
999
1000 if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS)==0)
1001 panic("iir%d: iir_action - Virtual "
1002 "segment addresses unsupported", gdt->sc_hanum);
1003
1004 /* Just use the segments provided */
1005 segs = (struct bus_dma_segment *)ccb->csio.data_ptr;
1006 gdtexecuteccb(gccb, segs, ccb->csio.sglist_cnt, 0);
1007 }
1008
1009 *lock = splcam();
1010 return (gccb);
1011}
1012
1013static struct gdt_ccb *
1014gdt_ioctl_cmd(struct gdt_softc *gdt, gdt_ucmd_t *ucmd, int *lock)
1015{
1016 struct gdt_ccb *gccb;
1017 u_int32_t cnt;
1018
1019 GDT_DPRINTF(GDT_D_DEBUG, ("gdt_ioctl_cmd(%p, %p)\n", gdt, ucmd));
1020
1021 bzero(gdt->sc_cmd, GDT_CMD_SZ);
1022
1023 gccb = gdt_get_ccb(gdt);
1024 if (gccb == NULL) {
1025 GDT_DPRINTF(GDT_D_DEBUG, ("iir%d: No free command index found\n",
1026 gdt->sc_hanum));
1027 return (gccb);
1028 }
1029 gccb->gc_ucmd = ucmd;
1030 gccb->gc_service = ucmd->service;
1031 gccb->gc_flags = GDT_GCF_IOCTL;
1032
1033 /* check DPMEM space, copy data buffer from user space */
1034 if (ucmd->service == GDT_CACHESERVICE) {
1035 if (ucmd->OpCode == GDT_IOCTL) {
1036 gdt->sc_cmd_len = roundup(GDT_CMD_UNION + GDT_IOCTL_SZ,
1037 sizeof(u_int32_t));
1038 cnt = ucmd->u.ioctl.param_size;
1039 if (cnt > GDT_SCRATCH_SZ) {
1040 printf("iir%d: Scratch buffer too small (%d/%d)\n",
1041 gdt->sc_hanum, GDT_SCRATCH_SZ, cnt);
1042 gdt_free_ccb(gdt, gccb);
1043 return (NULL);
1044 }
1045 } else {
1046 gdt->sc_cmd_len = roundup(GDT_CMD_UNION + GDT_CACHE_SG_LST +
1047 GDT_SG_SZ, sizeof(u_int32_t));
1048 cnt = ucmd->u.cache.BlockCnt * GDT_SECTOR_SIZE;
1049 if (cnt > GDT_SCRATCH_SZ) {
1050 printf("iir%d: Scratch buffer too small (%d/%d)\n",
1051 gdt->sc_hanum, GDT_SCRATCH_SZ, cnt);
1052 gdt_free_ccb(gdt, gccb);
1053 return (NULL);
1054 }
1055 }
1056 } else {
1057 gdt->sc_cmd_len = roundup(GDT_CMD_UNION + GDT_RAW_SG_LST +
1058 GDT_SG_SZ, sizeof(u_int32_t));
1059 cnt = ucmd->u.raw.sdlen;
1060 if (cnt + ucmd->u.raw.sense_len > GDT_SCRATCH_SZ) {
1061 printf("iir%d: Scratch buffer too small (%d/%d)\n",
1062 gdt->sc_hanum, GDT_SCRATCH_SZ, cnt + ucmd->u.raw.sense_len);
1063 gdt_free_ccb(gdt, gccb);
1064 return (NULL);
1065 }
1066 }
1067 if (cnt != 0)
1068 bcopy(ucmd->data, gccb->gc_scratch, cnt);
1069
1070 if (gdt->sc_cmd_off + gdt->sc_cmd_len + GDT_DPMEM_COMMAND_OFFSET >
1071 gdt->sc_ic_all_size) {
1072 GDT_DPRINTF(GDT_D_INVALID, ("iir%d: gdt_ioctl_cmd(): DPMEM overflow\n",
1073 gdt->sc_hanum));
1074 gdt_free_ccb(gdt, gccb);
1075 return (NULL);
1076 }
1077
1078 if (gdt->sc_cmd_cnt == 0)
1079 gdt->sc_set_sema0(gdt);
1080 splx(*lock);
1081
1082 /* fill cmd structure */
1083 gdt_enc32(gdt->sc_cmd + GDT_CMD_COMMANDINDEX,
1084 gccb->gc_cmd_index);
1085 gdt_enc16(gdt->sc_cmd + GDT_CMD_OPCODE,
1086 ucmd->OpCode);
1087
1088 if (ucmd->service == GDT_CACHESERVICE) {
1089 if (ucmd->OpCode == GDT_IOCTL) {
1090 /* IOCTL */
1091 gdt_enc16(gdt->sc_cmd + GDT_CMD_UNION + GDT_IOCTL_PARAM_SIZE,
1092 ucmd->u.ioctl.param_size);
1093 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_IOCTL_SUBFUNC,
1094 ucmd->u.ioctl.subfunc);
1095 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_IOCTL_CHANNEL,
1096 ucmd->u.ioctl.channel);
1097 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_IOCTL_P_PARAM,
1098 gdt_ccb_vtop(gdt, gccb) +
1099 offsetof(struct gdt_ccb, gc_scratch[0]));
1100 } else {
1101 /* cache service command */
1102 gdt_enc16(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_DEVICENO,
1103 ucmd->u.cache.DeviceNo);
1104 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKNO,
1105 ucmd->u.cache.BlockNo);
1106 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKCNT,
1107 ucmd->u.cache.BlockCnt);
1108 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_DESTADDR,
1109 0xffffffffUL);
1110 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_CANZ,
1111 1);
1112 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_LST +
1113 GDT_SG_PTR, gdt_ccb_vtop(gdt, gccb) +
1114 offsetof(struct gdt_ccb, gc_scratch[0]));
1115 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_LST +
1116 GDT_SG_LEN, ucmd->u.cache.BlockCnt * GDT_SECTOR_SIZE);
1117 }
1118 } else {
1119 /* raw service command */
1120 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_DIRECTION,
1121 ucmd->u.raw.direction);
1122 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SDATA,
1123 0xffffffffUL);
1124 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SDLEN,
1125 ucmd->u.raw.sdlen);
1126 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_CLEN,
1127 ucmd->u.raw.clen);
1128 bcopy(ucmd->u.raw.cmd, gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_CMD,
1129 12);
1130 gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_TARGET] =
1131 ucmd->u.raw.target;
1132 gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_LUN] =
1133 ucmd->u.raw.lun;
1134 gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_BUS] =
1135 ucmd->u.raw.bus;
1136 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SENSE_LEN,
1137 ucmd->u.raw.sense_len);
1138 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SENSE_DATA,
1139 gdt_ccb_vtop(gdt, gccb) +
1140 offsetof(struct gdt_ccb, gc_scratch[ucmd->u.raw.sdlen]));
1141 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SG_RANZ,
1142 1);
1143 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SG_LST +
1144 GDT_SG_PTR, gdt_ccb_vtop(gdt, gccb) +
1145 offsetof(struct gdt_ccb, gc_scratch[0]));
1146 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SG_LST +
1147 GDT_SG_LEN, ucmd->u.raw.sdlen);
1148 }
1149
1150 *lock = splcam();
1151 gdt_stat.sg_count_act = 1;
1152 gdt->sc_copy_cmd(gdt, gccb);
1153 return (gccb);
1154}
1155
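/*
 * Emulate a few SCSI commands for the virtual host drive bus (INQUIRY,
 * MODE SENSE, READ CAPACITY, ...) directly from the cached sc_hdr[] data;
 * the ccb is completed here with CAM_REQ_CMP and never sent to the
 * controller.
 */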
1156static void
1157gdt_internal_cache_cmd(struct gdt_softc *gdt,union ccb *ccb)
1158{
1159 int t;
1160
1161 t = ccb->ccb_h.target_id;
1162 GDT_DPRINTF(GDT_D_CMD, ("gdt_internal_cache_cmd(%p, %p, 0x%x, %d)\n",
1163 gdt, ccb, ccb->csio.cdb_io.cdb_bytes[0], t));
1164
1165 switch (ccb->csio.cdb_io.cdb_bytes[0]) {
1166 case TEST_UNIT_READY:
1167 case START_STOP:
1168 break;
1169 case REQUEST_SENSE:
1170 GDT_DPRINTF(GDT_D_MISC, ("REQUEST_SENSE\n"));
1171 break;
1172 case INQUIRY:
1173 {
1174 struct scsi_inquiry_data *inq;
1175
1176 inq = (struct scsi_inquiry_data *)ccb->csio.data_ptr;
1177 bzero(inq, sizeof(struct scsi_inquiry_data));
1178 inq->device = (gdt->sc_hdr[t].hd_devtype & 4) ?
1179 T_CDROM : T_DIRECT;
1180 inq->dev_qual2 = (gdt->sc_hdr[t].hd_devtype & 1) ? 0x80 : 0;
1181 inq->version = SCSI_REV_2;
1182 inq->response_format = 2;
1183 inq->additional_length = 32;
1184 inq->flags = SID_CmdQue | SID_Sync;
1185 strcpy(inq->vendor, "IIR ");
1186 sprintf(inq->product, "Host Drive #%02d", t);
1187 strcpy(inq->revision, " ");
1188 break;
1189 }
1190 case MODE_SENSE_6:
1191 {
1192 struct mpd_data {
1193 struct scsi_mode_hdr_6 hd;
1194 struct scsi_mode_block_descr bd;
1195 struct scsi_control_page cp;
1196 } *mpd;
1197 u_int8_t page;
1198
1199 mpd = (struct mpd_data *)ccb->csio.data_ptr;
1200 bzero(mpd, sizeof(struct mpd_data));
1201 mpd->hd.datalen = sizeof(struct scsi_mode_hdr_6) +
1202 sizeof(struct scsi_mode_block_descr);
1203 mpd->hd.dev_specific = (gdt->sc_hdr[t].hd_devtype & 2) ? 0x80 : 0;
1204 mpd->hd.block_descr_len = sizeof(struct scsi_mode_block_descr);
1205 mpd->bd.block_len[0] = (GDT_SECTOR_SIZE & 0x00ff0000) >> 16;
1206 mpd->bd.block_len[1] = (GDT_SECTOR_SIZE & 0x0000ff00) >> 8;
1207 mpd->bd.block_len[2] = (GDT_SECTOR_SIZE & 0x000000ff);
1208 page=((struct scsi_mode_sense_6 *)ccb->csio.cdb_io.cdb_bytes)->page;
1209 switch (page) {
1210 default:
1211 GDT_DPRINTF(GDT_D_MISC, ("MODE_SENSE_6: page 0x%x\n", page));
1212 break;
1213 }
1214 break;
1215 }
1216 case READ_CAPACITY:
1217 {
1218 struct scsi_read_capacity_data *rcd;
1219
1220 rcd = (struct scsi_read_capacity_data *)ccb->csio.data_ptr;
1221 bzero(rcd, sizeof(struct scsi_read_capacity_data));
1222 scsi_ulto4b(gdt->sc_hdr[t].hd_size - 1, rcd->addr);
1223 scsi_ulto4b(GDT_SECTOR_SIZE, rcd->length);
1224 break;
1225 }
1226 default:
1227 GDT_DPRINTF(GDT_D_MISC, ("gdt_internal_cache_cmd(%d) unknown\n",
1228 ccb->csio.cdb_io.cdb_bytes[0]));
1229 break;
1230 }
1231 ccb->ccb_h.status = CAM_REQ_CMP;
1232 --gdt_stat.io_count_act;
1233 xpt_done(ccb);
1234}
1235
1236static void
1237gdtmapmem(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1238{
1239 bus_addr_t *busaddrp;
1240
1241 busaddrp = (bus_addr_t *)arg;
1242 *busaddrp = dm_segs->ds_addr;
1243}
1244
1245static void
1246gdtexecuteccb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1247{
1248 struct gdt_ccb *gccb;
1249 union ccb *ccb;
1250 struct gdt_softc *gdt;
1251 int i, lock;
1252
1253 lock = splcam();
1254
1255 gccb = (struct gdt_ccb *)arg;
1256 ccb = gccb->gc_ccb;
1257 gdt = cam_sim_softc((struct cam_sim *)ccb->ccb_h.ccb_sim_ptr);
1258
1259 GDT_DPRINTF(GDT_D_CMD, ("gdtexecuteccb(%p, %p, %p, %d, %d)\n",
1260 gdt, gccb, dm_segs, nseg, error));
1261 gdt_stat.sg_count_act = nseg;
1262 if (nseg > gdt_stat.sg_count_max)
1263 gdt_stat.sg_count_max = nseg;
1264
1265 /* Copy the segments into our SG list */
1266 if (gccb->gc_service == GDT_CACHESERVICE) {
1267 for (i = 0; i < nseg; ++i) {
1268 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_LST +
1269 i * GDT_SG_SZ + GDT_SG_PTR, dm_segs->ds_addr);
1270 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_LST +
1271 i * GDT_SG_SZ + GDT_SG_LEN, dm_segs->ds_len);
1272 dm_segs++;
1273 }
1274 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_CANZ,
1275 nseg);
1276 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_DESTADDR,
1277 0xffffffffUL);
1278
1279 gdt->sc_cmd_len = roundup(GDT_CMD_UNION + GDT_CACHE_SG_LST +
1280 nseg * GDT_SG_SZ, sizeof(u_int32_t));
1281 } else {
1282 for (i = 0; i < nseg; ++i) {
1283 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SG_LST +
1284 i * GDT_SG_SZ + GDT_SG_PTR, dm_segs->ds_addr);
1285 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SG_LST +
1286 i * GDT_SG_SZ + GDT_SG_LEN, dm_segs->ds_len);
1287 dm_segs++;
1288 }
1289 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SG_RANZ,
1290 nseg);
1291 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SDATA,
1292 0xffffffffUL);
1293
1294 gdt->sc_cmd_len = roundup(GDT_CMD_UNION + GDT_RAW_SG_LST +
1295 nseg * GDT_SG_SZ, sizeof(u_int32_t));
1296 }
1297
1298 if (nseg != 0) {
1299 bus_dmasync_op_t op;
1300
1301 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
1302 op = BUS_DMASYNC_PREREAD;
1303 else
1304 op = BUS_DMASYNC_PREWRITE;
1305 bus_dmamap_sync(gdt->sc_buffer_dmat, gccb->gc_dmamap, op);
1306 }
1307
1308 /* We must NOT abort the command here if CAM_REQ_INPROG is not set,
1309 * because the command semaphore is already set!
1310 */
1311
1312 ccb->ccb_h.status |= CAM_SIM_QUEUED;
1313 /* timeout handling */
1314 ccb->ccb_h.timeout_ch =
1315 timeout(iir_timeout, (caddr_t)gccb,
1316 (ccb->ccb_h.timeout * hz) / 1000);
1317
1318 gdt->sc_copy_cmd(gdt, gccb);
1319 splx(lock);
1320}
1321
1322
1323static void
1324iir_action( struct cam_sim *sim, union ccb *ccb )
1325{
1326 struct gdt_softc *gdt;
1327 int lock, bus, target, lun;
1328
1329 gdt = (struct gdt_softc *)cam_sim_softc( sim );
1330 ccb->ccb_h.ccb_sim_ptr = sim;
1331 bus = cam_sim_bus(sim);
1332 target = ccb->ccb_h.target_id;
1333 lun = ccb->ccb_h.target_lun;
1334 GDT_DPRINTF(GDT_D_CMD,
1335 ("iir_action(%p) func 0x%x cmd 0x%x bus %d target %d lun %d\n",
1336 gdt, ccb->ccb_h.func_code, ccb->csio.cdb_io.cdb_bytes[0],
1337 bus, target, lun));
1338 ++gdt_stat.io_count_act;
1339 if (gdt_stat.io_count_act > gdt_stat.io_count_max)
1340 gdt_stat.io_count_max = gdt_stat.io_count_act;
1341
1342 switch (ccb->ccb_h.func_code) {
1343 case XPT_SCSI_IO:
1344 lock = splcam();
1345 TAILQ_INSERT_TAIL(&gdt->sc_ccb_queue, &ccb->ccb_h, sim_links.tqe);
1346 ++gdt_stat.req_queue_act;
1347 if (gdt_stat.req_queue_act > gdt_stat.req_queue_max)
1348 gdt_stat.req_queue_max = gdt_stat.req_queue_act;
1349 splx(lock);
1350 gdt_next(gdt);
1351 break;
1352 case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */
1353 case XPT_ABORT: /* Abort the specified CCB */
1354 /* XXX Implement */
1355 ccb->ccb_h.status = CAM_REQ_INVALID;
1356 --gdt_stat.io_count_act;
1357 xpt_done(ccb);
1358 break;
1359 case XPT_SET_TRAN_SETTINGS:
1360 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
1361 --gdt_stat.io_count_act;
1362 xpt_done(ccb);
1363 break;
1364 case XPT_GET_TRAN_SETTINGS:
1365 /* Get default/user set transfer settings for the target */
1366 {
1367 struct ccb_trans_settings *cts;
1368 u_int target_mask;
1369
1370 cts = &ccb->cts;
1371 target_mask = 0x01 << target;
1372 if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) {
1373 cts->flags = CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB;
1374 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
1375 cts->sync_period = 25; /* 10MHz */
1376 if (cts->sync_period != 0)
1377 cts->sync_offset = 15;
1378
1379 cts->valid = CCB_TRANS_SYNC_RATE_VALID
1380 | CCB_TRANS_SYNC_OFFSET_VALID
1381 | CCB_TRANS_BUS_WIDTH_VALID
1382 | CCB_TRANS_DISC_VALID
1383 | CCB_TRANS_TQ_VALID;
1384 ccb->ccb_h.status = CAM_REQ_CMP;
1385 } else {
1386 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
1387 }
1388 --gdt_stat.io_count_act;
1389 xpt_done(ccb);
1390 break;
1391 }
1392 case XPT_CALC_GEOMETRY:
1393 {
1394 struct ccb_calc_geometry *ccg;
1395 u_int32_t secs_per_cylinder;
1396
1397 ccg = &ccb->ccg;
1398 ccg->heads = gdt->sc_hdr[target].hd_heads;
1399 ccg->secs_per_track = gdt->sc_hdr[target].hd_secs;
1400 secs_per_cylinder = ccg->heads * ccg->secs_per_track;
1401 ccg->cylinders = ccg->volume_size / secs_per_cylinder;
1402 ccb->ccb_h.status = CAM_REQ_CMP;
1403 --gdt_stat.io_count_act;
1404 xpt_done(ccb);
1405 break;
1406 }
1407 case XPT_RESET_BUS: /* Reset the specified SCSI bus */
1408 {
1409 /* XXX Implement */
1410 ccb->ccb_h.status = CAM_REQ_CMP;
1411 --gdt_stat.io_count_act;
1412 xpt_done(ccb);
1413 break;
1414 }
1415 case XPT_TERM_IO: /* Terminate the I/O process */
1416 /* XXX Implement */
1417 ccb->ccb_h.status = CAM_REQ_INVALID;
1418 --gdt_stat.io_count_act;
1419 xpt_done(ccb);
1420 break;
1421 case XPT_PATH_INQ: /* Path routing inquiry */
1422 {
1423 struct ccb_pathinq *cpi = &ccb->cpi;
1424
1425 cpi->version_num = 1;
1426 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE;
1427 cpi->hba_inquiry |= PI_WIDE_16;
1428 cpi->target_sprt = 1;
1429 cpi->hba_misc = 0;
1430 cpi->hba_eng_cnt = 0;
1431 if (bus == gdt->sc_virt_bus)
1432 cpi->max_target = GDT_MAX_HDRIVES - 1;
1433 else if (gdt->sc_class & GDT_FC)
1434 cpi->max_target = GDT_MAXID_FC - 1;
1435 else
1436 cpi->max_target = GDT_MAXID - 1;
1437 cpi->max_lun = 7;
1438 cpi->unit_number = cam_sim_unit(sim);
1439 cpi->bus_id = bus;
1440 cpi->initiator_id =
1441 (bus == gdt->sc_virt_bus ? 127 : gdt->sc_bus_id[bus]);
1442 cpi->base_transfer_speed = 3300;
1443 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
1444 strncpy(cpi->hba_vid, "Intel Corp.", HBA_IDLEN);
1445 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
1446 cpi->ccb_h.status = CAM_REQ_CMP;
1447 --gdt_stat.io_count_act;
1448 xpt_done(ccb);
1449 break;
1450 }
1451 default:
1452 GDT_DPRINTF(GDT_D_INVALID, ("gdt_next(%p) cmd 0x%x invalid\n",
1453 gdt, ccb->ccb_h.func_code));
1454 ccb->ccb_h.status = CAM_REQ_INVALID;
1455 --gdt_stat.io_count_act;
1456 xpt_done(ccb);
1457 break;
1458 }
1459}
1460
1461static void
1462iir_poll( struct cam_sim *sim )
1463{
1464 struct gdt_softc *gdt;
1465
1466 gdt = (struct gdt_softc *)cam_sim_softc( sim );
1467 GDT_DPRINTF(GDT_D_CMD, ("iir_poll sim %p gdt %p\n", sim, gdt));
1468 iir_intr(gdt);
1469}
1470
1471static void
1472iir_timeout(void *arg)
1473{
1474 GDT_DPRINTF(GDT_D_TIMEOUT, ("iir_timeout(%p)\n", arg));
1475}
1476
1477static void
1478iir_watchdog(void *arg)
1479{
1480 struct gdt_softc *gdt;
1481
1482 gdt = (struct gdt_softc *)arg;
1483 GDT_DPRINTF(GDT_D_DEBUG, ("iir_watchdog(%p)\n", gdt));
1484
1485 {
1486 int ccbs = 0, ucmds = 0, frees = 0, pends = 0;
1487 struct gdt_ccb *p;
1488 struct ccb_hdr *h;
1489 struct gdt_ucmd *u;
1490
1491 for (h = TAILQ_FIRST(&gdt->sc_ccb_queue); h != NULL;
1492 h = TAILQ_NEXT(h, sim_links.tqe))
1493 ccbs++;
1494 for (u = TAILQ_FIRST(&gdt->sc_ucmd_queue); u != NULL;
1495 u = TAILQ_NEXT(u, links))
1496 ucmds++;
1497 for (p = SLIST_FIRST(&gdt->sc_free_gccb); p != NULL;
1498 p = SLIST_NEXT(p, sle))
1499 frees++;
1500 for (p = SLIST_FIRST(&gdt->sc_pending_gccb); p != NULL;
1501 p = SLIST_NEXT(p, sle))
1502 pends++;
1503
1504 GDT_DPRINTF(GDT_D_TIMEOUT, ("ccbs %d ucmds %d frees %d pends %d\n",
1505 ccbs, ucmds, frees, pends));
1506 }
1507
1508 timeout(iir_watchdog, (caddr_t)gdt, hz * 15);
1509}
1510
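/*
 * shutdown_final handler (registered in iir_attach): mark the controller
 * GDT_SHUTDOWN, give pending commands a chance to drain, then queue a
 * GDT_FLUSH cache service command for every present host drive so cached
 * writes reach the disks before the system goes down.
 */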
1511static void
1512iir_shutdown( void *arg, int howto )
1513{
1514 struct gdt_softc *gdt;
1515 struct gdt_ccb *gccb;
1516 gdt_ucmd_t *ucmd;
1517 int lock, i;
1518
1519 gdt = (struct gdt_softc *)arg;
1520 GDT_DPRINTF(GDT_D_CMD, ("iir_shutdown(%p, %d)\n", gdt, howto));
1521
1522 printf("iir%d: Flushing all Host Drives. Please wait ... ",
1523 gdt->sc_hanum);
1524
1525 /* allocate ucmd buffer */
1526 ucmd = malloc(sizeof(gdt_ucmd_t), M_DEVBUF, M_NOWAIT);
1527 if (ucmd == NULL) {
1528 printf("iir%d: iir_shutdown(): Cannot allocate resource\n",
1529 gdt->sc_hanum);
1530 return;
1531 }
1532 bzero(ucmd, sizeof(gdt_ucmd_t));
1533
1534 /* wait for pending IOs */
1535 lock = splcam();
1536 gdt->sc_state = GDT_SHUTDOWN;
1537 splx(lock);
1538 if ((gccb = SLIST_FIRST(&gdt->sc_pending_gccb)) != NULL)
377d4740 1539 (void) tsleep((void *)gccb, PCATCH, "iirshw", 100 * hz);
984263bc
MD
1540
1541 /* flush */
1542 for (i = 0; i < GDT_MAX_HDRIVES; ++i) {
1543 if (gdt->sc_hdr[i].hd_present) {
1544 ucmd->service = GDT_CACHESERVICE;
1545 ucmd->OpCode = GDT_FLUSH;
1546 ucmd->u.cache.DeviceNo = i;
1547 lock = splcam();
1548 TAILQ_INSERT_TAIL(&gdt->sc_ucmd_queue, ucmd, links);
1549 ucmd->complete_flag = FALSE;
1550 splx(lock);
1551 gdt_next(gdt);
1552 if (!ucmd->complete_flag)
377d4740 1553 (void) tsleep((void *)ucmd, PCATCH, "iirshw", 10*hz);
984263bc
MD
1554 }
1555 }
1556
1557 free(ucmd, M_DEVBUF);
1558 printf("Done.\n");
1559}
1560
1561void
1562iir_intr(void *arg)
1563{
1564 struct gdt_softc *gdt = arg;
1565 struct gdt_intr_ctx ctx;
1566 int lock = 0;
1567 struct gdt_ccb *gccb;
1568 gdt_ucmd_t *ucmd;
1569 u_int32_t cnt;
1570
1571 GDT_DPRINTF(GDT_D_INTR, ("gdt_intr(%p)\n", gdt));
1572
1573 /* If polling and we were not called from gdt_wait, just return */
1574 if ((gdt->sc_state & GDT_POLLING) &&
1575 !(gdt->sc_state & GDT_POLL_WAIT))
1576 return;
1577
1578 if (!(gdt->sc_state & GDT_POLLING))
1579 lock = splcam();
1580 gdt_wait_index = 0;
1581
1582 ctx.istatus = gdt->sc_get_status(gdt);
1583 if (!ctx.istatus) {
1584 if (!(gdt->sc_state & GDT_POLLING))
1585 splx(lock);
1586 gdt->sc_status = GDT_S_NO_STATUS;
1587 return;
1588 }
1589
1590 gdt->sc_intr(gdt, &ctx);
1591
1592 gdt->sc_status = ctx.cmd_status;
1593 gdt->sc_service = ctx.service;
1594 gdt->sc_info = ctx.info;
1595 gdt->sc_info2 = ctx.info2;
1596
1597 if (gdt->sc_state & GDT_POLL_WAIT) {
1598 gdt_wait_gdt = gdt;
1599 gdt_wait_index = ctx.istatus;
1600 }
1601
1602 if (ctx.istatus == GDT_ASYNCINDEX) {
1603 gdt_async_event(gdt, ctx.service);
1604 if (!(gdt->sc_state & GDT_POLLING))
1605 splx(lock);
1606 return;
1607 }
1608 if (ctx.istatus == GDT_SPEZINDEX) {
1609 GDT_DPRINTF(GDT_D_INVALID,
1610 ("iir%d: Service unknown or not initialized!\n",
1611 gdt->sc_hanum));
1612 gdt->sc_dvr.size = sizeof(gdt->sc_dvr.eu.driver);
1613 gdt->sc_dvr.eu.driver.ionode = gdt->sc_hanum;
1614 gdt_store_event(GDT_ES_DRIVER, 4, &gdt->sc_dvr);
1615 if (!(gdt->sc_state & GDT_POLLING))
1616 splx(lock);
1617 return;
1618 }
1619
1620 gccb = &gdt->sc_gccbs[ctx.istatus - 2];
1621 ctx.service = gccb->gc_service;
1622
1623 switch (gccb->gc_flags) {
1624 case GDT_GCF_UNUSED:
1625 GDT_DPRINTF(GDT_D_INVALID, ("iir%d: Index (%d) to unused command!\n",
1626 gdt->sc_hanum, ctx.istatus));
1627 gdt->sc_dvr.size = sizeof(gdt->sc_dvr.eu.driver);
1628 gdt->sc_dvr.eu.driver.ionode = gdt->sc_hanum;
1629 gdt->sc_dvr.eu.driver.index = ctx.istatus;
1630 gdt_store_event(GDT_ES_DRIVER, 1, &gdt->sc_dvr);
1631 gdt_free_ccb(gdt, gccb);
1632 /* fallthrough */
1633
1634 case GDT_GCF_INTERNAL:
1635 if (!(gdt->sc_state & GDT_POLLING))
1636 splx(lock);
1637 break;
1638
1639 case GDT_GCF_IOCTL:
1640 ucmd = gccb->gc_ucmd;
1641 if (gdt->sc_status == GDT_S_BSY) {
1642 GDT_DPRINTF(GDT_D_DEBUG, ("iir_intr(%p) ioctl: gccb %p busy\n",
1643 gdt, gccb));
1644 TAILQ_INSERT_HEAD(&gdt->sc_ucmd_queue, ucmd, links);
1645 if (!(gdt->sc_state & GDT_POLLING))
1646 splx(lock);
1647 } else {
1648 ucmd->status = gdt->sc_status;
1649 ucmd->info = gdt->sc_info;
1650 ucmd->complete_flag = TRUE;
1651 if (ucmd->service == GDT_CACHESERVICE) {
1652 if (ucmd->OpCode == GDT_IOCTL) {
1653 cnt = ucmd->u.ioctl.param_size;
1654 if (cnt != 0)
1655 bcopy(gccb->gc_scratch, ucmd->data, cnt);
1656 } else {
1657 cnt = ucmd->u.cache.BlockCnt * GDT_SECTOR_SIZE;
1658 if (cnt != 0)
1659 bcopy(gccb->gc_scratch, ucmd->data, cnt);
1660 }
1661 } else {
1662 cnt = ucmd->u.raw.sdlen;
1663 if (cnt != 0)
1664 bcopy(gccb->gc_scratch, ucmd->data, cnt);
    /*
     * XXX Looks suspect: this copies the payload again rather than the
     * sense bytes, which gdt_ioctl_cmd() placed at gc_scratch[sdlen].
     */
1665 if (ucmd->u.raw.sense_len != 0)
1666 bcopy(gccb->gc_scratch, ucmd->data, cnt);
1667 }
1668 gdt_free_ccb(gdt, gccb);
1669 if (!(gdt->sc_state & GDT_POLLING))
1670 splx(lock);
1671 /* wakeup */
1672 wakeup(ucmd);
1673 }
1674 gdt_next(gdt);
1675 break;
1676
1677 default:
1678 gdt_free_ccb(gdt, gccb);
1679 gdt_sync_event(gdt, ctx.service, ctx.istatus, gccb);
1680 if (!(gdt->sc_state & GDT_POLLING))
1681 splx(lock);
1682 gdt_next(gdt);
1683 break;
1684 }
1685}
1686
1687int
1688gdt_async_event(struct gdt_softc *gdt, int service)
1689{
1690 struct gdt_ccb *gccb;
1691
1692 GDT_DPRINTF(GDT_D_INTR, ("gdt_async_event(%p, %d)\n", gdt, service));
1693
1694 if (service == GDT_SCREENSERVICE) {
1695 if (gdt->sc_status == GDT_MSG_REQUEST) {
1696 while (gdt->sc_test_busy(gdt))
1697 DELAY(1);
1698 bzero(gdt->sc_cmd, GDT_CMD_SZ);
1699 gccb = gdt_get_ccb(gdt);
1700 if (gccb == NULL) {
1701 printf("iir%d: No free command index found\n",
1702 gdt->sc_hanum);
1703 return (1);
1704 }
1705 gccb->gc_service = service;
1706 gccb->gc_flags = GDT_GCF_SCREEN;
1707 gdt->sc_set_sema0(gdt);
1708 gdt_enc32(gdt->sc_cmd + GDT_CMD_COMMANDINDEX,
1709 gccb->gc_cmd_index);
1710 gdt_enc16(gdt->sc_cmd + GDT_CMD_OPCODE, GDT_READ);
1711 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_HANDLE,
1712 GDT_MSG_INV_HANDLE);
1713 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_ADDR,
1714 gdt_ccb_vtop(gdt, gccb) +
1715 offsetof(struct gdt_ccb, gc_scratch[0]));
1716 gdt->sc_cmd_off = 0;
1717 gdt->sc_cmd_len = roundup(GDT_CMD_UNION + GDT_SCREEN_SZ,
1718 sizeof(u_int32_t));
1719 gdt->sc_cmd_cnt = 0;
1720 gdt->sc_copy_cmd(gdt, gccb);
1721 printf("iir%d: [PCI %d/%d] ",
1722 gdt->sc_hanum,gdt->sc_bus,gdt->sc_slot);
1723 gdt->sc_release_event(gdt);
1724 }
1725
1726 } else {
1727 if ((gdt->sc_fw_vers & 0xff) >= 0x1a) {
1728 gdt->sc_dvr.size = 0;
1729 gdt->sc_dvr.eu.async.ionode = gdt->sc_hanum;
1730 gdt->sc_dvr.eu.async.status = gdt->sc_status;
1731 /* severity and event_string already set! */
1732 } else {
1733 gdt->sc_dvr.size = sizeof(gdt->sc_dvr.eu.async);
1734 gdt->sc_dvr.eu.async.ionode = gdt->sc_hanum;
1735 gdt->sc_dvr.eu.async.service = service;
1736 gdt->sc_dvr.eu.async.status = gdt->sc_status;
1737 gdt->sc_dvr.eu.async.info = gdt->sc_info;
1738 *(u_int32_t *)gdt->sc_dvr.eu.async.scsi_coord = gdt->sc_info2;
1739 }
1740 gdt_store_event(GDT_ES_ASYNC, service, &gdt->sc_dvr);
1741 printf("iir%d: %s\n", gdt->sc_hanum, gdt->sc_dvr.event_string);
1742 }
1743
1744 return (0);
1745}
1746
1747int
1748gdt_sync_event(struct gdt_softc *gdt, int service,
1749 u_int8_t index, struct gdt_ccb *gccb)
1750{
1751 union ccb *ccb;
1752 bus_dmasync_op_t op;
1753
1754 GDT_DPRINTF(GDT_D_INTR,
1755 ("gdt_sync_event(%p, %d, %d, %p)\n", gdt,service,index,gccb));
1756
1757 ccb = gccb->gc_ccb;
1758
1759 if (service == GDT_SCREENSERVICE) {
1760 u_int32_t msg_len;
1761
1762 msg_len = gdt_dec32(gccb->gc_scratch + GDT_SCR_MSG_LEN);
1763 if (msg_len)
1764 if (!(gccb->gc_scratch[GDT_SCR_MSG_ANSWER] &&
1765 gccb->gc_scratch[GDT_SCR_MSG_EXT])) {
1766 gccb->gc_scratch[GDT_SCR_MSG_TEXT + msg_len] = '\0';
1767 printf("%s",&gccb->gc_scratch[GDT_SCR_MSG_TEXT]);
1768 }
1769
1770 if (gccb->gc_scratch[GDT_SCR_MSG_EXT] &&
1771 !gccb->gc_scratch[GDT_SCR_MSG_ANSWER]) {
1772 while (gdt->sc_test_busy(gdt))
1773 DELAY(1);
1774 bzero(gdt->sc_cmd, GDT_CMD_SZ);
1775 gccb = gdt_get_ccb(gdt);
1776 if (gccb == NULL) {
1777 printf("iir%d: No free command index found\n",
1778 gdt->sc_hanum);
1779 return (1);
1780 }
1781 gccb->gc_service = service;
1782 gccb->gc_flags = GDT_GCF_SCREEN;
1783 gdt->sc_set_sema0(gdt);
1784 gdt_enc32(gdt->sc_cmd + GDT_CMD_COMMANDINDEX,
1785 gccb->gc_cmd_index);
1786 gdt_enc16(gdt->sc_cmd + GDT_CMD_OPCODE, GDT_READ);
1787 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_HANDLE,
1788 gccb->gc_scratch[GDT_SCR_MSG_HANDLE]);
1789 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_ADDR,
1790 gdt_ccb_vtop(gdt, gccb) +
1791 offsetof(struct gdt_ccb, gc_scratch[0]));
1792 gdt->sc_cmd_off = 0;
1793 gdt->sc_cmd_len = roundup(GDT_CMD_UNION + GDT_SCREEN_SZ,
1794 sizeof(u_int32_t));
1795 gdt->sc_cmd_cnt = 0;
1796 gdt->sc_copy_cmd(gdt, gccb);
1797 gdt->sc_release_event(gdt);
1798 return (0);
1799 }
1800
1801 if (gccb->gc_scratch[GDT_SCR_MSG_ANSWER] &&
1802 gdt_dec32(gccb->gc_scratch + GDT_SCR_MSG_ALEN)) {
1803 /* default answers (getchar() not possible) */
1804 if (gdt_dec32(gccb->gc_scratch + GDT_SCR_MSG_ALEN) == 1) {
1805 gdt_enc32(gccb->gc_scratch + GDT_SCR_MSG_ALEN, 0);
1806 gdt_enc32(gccb->gc_scratch + GDT_SCR_MSG_LEN, 1);
1807 gccb->gc_scratch[GDT_SCR_MSG_TEXT] = 0;
1808 } else {
1809 gdt_enc32(gccb->gc_scratch + GDT_SCR_MSG_ALEN,
1810 gdt_dec32(gccb->gc_scratch + GDT_SCR_MSG_ALEN) - 2);
1811 gdt_enc32(gccb->gc_scratch + GDT_SCR_MSG_LEN, 2);
1812 gccb->gc_scratch[GDT_SCR_MSG_TEXT] = 1;
1813 gccb->gc_scratch[GDT_SCR_MSG_TEXT + 1] = 0;
1814 }
1815 gccb->gc_scratch[GDT_SCR_MSG_EXT] = 0;
1816 gccb->gc_scratch[GDT_SCR_MSG_ANSWER] = 0;
1817 while (gdt->sc_test_busy(gdt))
1818 DELAY(1);
1819 bzero(gdt->sc_cmd, GDT_CMD_SZ);
1820 gccb = gdt_get_ccb(gdt);
1821 if (gccb == NULL) {
1822 printf("iir%d: No free command index found\n",
1823 gdt->sc_hanum);
1824 return (1);
1825 }
1826 gccb->gc_service = service;
1827 gccb->gc_flags = GDT_GCF_SCREEN;
1828 gdt->sc_set_sema0(gdt);
1829 gdt_enc32(gdt->sc_cmd + GDT_CMD_COMMANDINDEX,
1830 gccb->gc_cmd_index);
1831 gdt_enc16(gdt->sc_cmd + GDT_CMD_OPCODE, GDT_WRITE);
1832 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_HANDLE,
1833 gccb->gc_scratch[GDT_SCR_MSG_HANDLE]);
1834 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_ADDR,
1835 gdt_ccb_vtop(gdt, gccb) +
1836 offsetof(struct gdt_ccb, gc_scratch[0]));
1837 gdt->sc_cmd_off = 0;
1838 gdt->sc_cmd_len = roundup(GDT_CMD_UNION + GDT_SCREEN_SZ,
1839 sizeof(u_int32_t));
1840 gdt->sc_cmd_cnt = 0;
1841 gdt->sc_copy_cmd(gdt, gccb);
1842 gdt->sc_release_event(gdt);
1843 return (0);
1844 }
1845 printf("\n");
1846 return (0);
1847 } else {
1848 untimeout(iir_timeout, gccb, ccb->ccb_h.timeout_ch);
1849 if (gdt->sc_status == GDT_S_BSY) {
1850 GDT_DPRINTF(GDT_D_DEBUG, ("gdt_sync_event(%p) gccb %p busy\n",
1851 gdt, gccb));
1852 TAILQ_INSERT_HEAD(&gdt->sc_ccb_queue, &ccb->ccb_h, sim_links.tqe);
1853 ++gdt_stat.req_queue_act;
1854 if (gdt_stat.req_queue_act > gdt_stat.req_queue_max)
1855 gdt_stat.req_queue_max = gdt_stat.req_queue_act;
1856 return (2);
1857 }
1858
1859 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
1860 op = BUS_DMASYNC_POSTREAD;
1861 else
1862 op = BUS_DMASYNC_POSTWRITE;
1863 bus_dmamap_sync(gdt->sc_buffer_dmat, gccb->gc_dmamap, op);
1864
1865 ccb->csio.resid = 0;
1866 if (gdt->sc_status == GDT_S_OK) {
1867 ccb->ccb_h.status = CAM_REQ_CMP;
1868 } else {
1869 /* error */
1870 if (gccb->gc_service == GDT_CACHESERVICE) {
1871 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID;
1872 ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
1873 bzero(&ccb->csio.sense_data, ccb->csio.sense_len);
1874 ccb->csio.sense_data.error_code =
1875 SSD_CURRENT_ERROR | SSD_ERRCODE_VALID;
1876 ccb->csio.sense_data.flags = SSD_KEY_NOT_READY;
1877
1878 gdt->sc_dvr.size = sizeof(gdt->sc_dvr.eu.sync);
1879 gdt->sc_dvr.eu.sync.ionode = gdt->sc_hanum;
1880 gdt->sc_dvr.eu.sync.service = service;
1881 gdt->sc_dvr.eu.sync.status = gdt->sc_status;
1882 gdt->sc_dvr.eu.sync.info = gdt->sc_info;
1883 gdt->sc_dvr.eu.sync.hostdrive = ccb->ccb_h.target_id;
1884 if (gdt->sc_status >= 0x8000)
1885 gdt_store_event(GDT_ES_SYNC, 0, &gdt->sc_dvr);
1886 else
1887 gdt_store_event(GDT_ES_SYNC, service, &gdt->sc_dvr);
1888 } else {
1889 /* raw service */
1890 if (gdt->sc_status != GDT_S_RAW_SCSI || gdt->sc_info >= 0x100) {
1891 ccb->ccb_h.status = CAM_SEL_TIMEOUT;
1892 } else {
1893 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR|CAM_AUTOSNS_VALID;
1894 ccb->csio.scsi_status = gdt->sc_info;
1895 bcopy(gccb->gc_scratch, &ccb->csio.sense_data,
1896 ccb->csio.sense_len);
1897 }
1898 }
1899 }
1900 --gdt_stat.io_count_act;
1901 xpt_done(ccb);
1902 }
1903 return (0);
1904}
1905
1906/* Controller event handling functions */
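/*
 * Events are kept in the small ring buffer ebuffer[GDT_MAX_EVENTS], indexed
 * by elastidx (newest entry) and eoldidx (oldest); a repeat of the newest
 * event only bumps its same_count/last_stamp instead of using a new slot.
 */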
1907gdt_evt_str *gdt_store_event(u_int16_t source, u_int16_t idx,
1908 gdt_evt_data *evt)
1909{
1910 gdt_evt_str *e;
1911 struct timeval tv;
1912
1913 GDT_DPRINTF(GDT_D_MISC, ("gdt_store_event(%d, %d)\n", source, idx));
1914 if (source == 0) /* no source -> no event */
1915 return 0;
1916
1917 if (ebuffer[elastidx].event_source == source &&
1918 ebuffer[elastidx].event_idx == idx &&
1919 ((evt->size != 0 && ebuffer[elastidx].event_data.size != 0 &&
1920 !memcmp((char *)&ebuffer[elastidx].event_data.eu,
1921 (char *)&evt->eu, evt->size)) ||
1922 (evt->size == 0 && ebuffer[elastidx].event_data.size == 0 &&
1923 !strcmp((char *)&ebuffer[elastidx].event_data.event_string,
1924 (char *)&evt->event_string)))) {
1925 e = &ebuffer[elastidx];
1926 getmicrotime(&tv);
1927 e->last_stamp = tv.tv_sec;
1928 ++e->same_count;
1929 } else {
1930 if (ebuffer[elastidx].event_source != 0) { /* entry not free ? */
1931 ++elastidx;
1932 if (elastidx == GDT_MAX_EVENTS)
1933 elastidx = 0;
1934 if (elastidx == eoldidx) { /* reached mark ? */
1935 ++eoldidx;
1936 if (eoldidx == GDT_MAX_EVENTS)
1937 eoldidx = 0;
1938 }
1939 }
1940 e = &ebuffer[elastidx];
1941 e->event_source = source;
1942 e->event_idx = idx;
1943 getmicrotime(&tv);
1944 e->first_stamp = e->last_stamp = tv.tv_sec;
1945 e->same_count = 1;
1946 e->event_data = *evt;
1947 e->application = 0;
1948 }
1949 return e;
1950}
1951
1952int gdt_read_event(int handle, gdt_evt_str *estr)
1953{
1954 gdt_evt_str *e;
1955 int eindex, lock;
1956
1957 GDT_DPRINTF(GDT_D_MISC, ("gdt_read_event(%d)\n", handle));
1958 lock = splcam();
1959 if (handle == -1)
1960 eindex = eoldidx;
1961 else
1962 eindex = handle;
1963 estr->event_source = 0;
1964
1965 if (eindex >= GDT_MAX_EVENTS) {
1966 splx(lock);
1967 return eindex;
1968 }
1969 e = &ebuffer[eindex];
1970 if (e->event_source != 0) {
1971 if (eindex != elastidx) {
1972 if (++eindex == GDT_MAX_EVENTS)
1973 eindex = 0;
1974 } else {
1975 eindex = -1;
1976 }
1977 memcpy(estr, e, sizeof(gdt_evt_str));
1978 }
1979 splx(lock);
1980 return eindex;
1981}
1982
1983void gdt_readapp_event(u_int8_t application, gdt_evt_str *estr)
1984{
1985 gdt_evt_str *e;
1986 int found = FALSE;
1987 int eindex, lock;
1988
1989 GDT_DPRINTF(GDT_D_MISC, ("gdt_readapp_event(%d)\n", application));
1990 lock = splcam();
1991 eindex = eoldidx;
1992 for (;;) {
1993 e = &ebuffer[eindex];
1994 if (e->event_source == 0)
1995 break;
1996 if ((e->application & application) == 0) {
1997 e->application |= application;
1998 found = TRUE;
1999 break;
2000 }
2001 if (eindex == elastidx)
2002 break;
2003 if (++eindex == GDT_MAX_EVENTS)
2004 eindex = 0;
2005 }
2006 if (found)
2007 memcpy(estr, e, sizeof(gdt_evt_str));
2008 else
2009 estr->event_source = 0;
2010 splx(lock);
2011}
2012
2013void gdt_clear_events()
2014{
2015 GDT_DPRINTF(GDT_D_MISC, ("gdt_clear_events\n"));
2016
2017 eoldidx = elastidx = 0;
2018 ebuffer[0].event_source = 0;
2019}