/*-
 * Copyright (c) 2008 Yahoo!, Inc.
 * All rights reserved.
 * Written by: John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * LSI MPT-Fusion Host Adapter FreeBSD userland interface
 *
 * $FreeBSD: src/sys/dev/mpt/mpt_user.c,v 1.4 2009/05/20 17:29:21 imp Exp $
 */

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/errno.h>
#include <sys/ioccom.h>
#include <sys/device.h>
#include <sys/mpt_ioctl.h>

#include <dev/disk/mpt/mpt.h>

struct mpt_user_raid_action_result {
        uint32_t        volume_status;
        uint32_t        action_data[4];
        uint16_t        action_status;
};

struct mpt_page_memory {
        bus_dma_tag_t   tag;
        bus_dmamap_t    map;
        bus_addr_t      paddr;
        void            *vaddr;
};

static mpt_probe_handler_t      mpt_user_probe;
static mpt_attach_handler_t     mpt_user_attach;
static mpt_enable_handler_t     mpt_user_enable;
static mpt_ready_handler_t      mpt_user_ready;
static mpt_event_handler_t      mpt_user_event;
static mpt_reset_handler_t      mpt_user_reset;
static mpt_detach_handler_t     mpt_user_detach;

static struct mpt_personality mpt_user_personality = {
        .name           = "mpt_user",
        .probe          = mpt_user_probe,
        .attach         = mpt_user_attach,
        .enable         = mpt_user_enable,
        .ready          = mpt_user_ready,
        .event          = mpt_user_event,
        .reset          = mpt_user_reset,
        .detach         = mpt_user_detach,
};

DECLARE_MPT_PERSONALITY(mpt_user, SI_ORDER_SECOND);

static mpt_reply_handler_t      mpt_user_reply_handler;

static int              mpt_open(struct dev_open_args *ap);
static int              mpt_close(struct dev_close_args *ap);
static int              mpt_ioctl(struct dev_ioctl_args *ap);

static struct dev_ops mpt_cdevsw = {
        .d_open =       mpt_open,
        .d_close =      mpt_close,
        .d_ioctl =      mpt_ioctl,
};

static MALLOC_DEFINE(M_MPTUSER, "mpt_user", "Buffers for mpt(4) ioctls");

static uint32_t user_handler_id = MPT_HANDLER_ID_NONE;

int
mpt_user_probe(struct mpt_softc *mpt)
{

        /* Attach to every controller. */
        return (0);
}

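/*
 * Register the user reply handler and create the /dev/mpt%d character
 * device through which userland issues configuration page and RAID
 * action ioctls.
 */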
int
mpt_user_attach(struct mpt_softc *mpt)
{
        mpt_handler_t handler;
        int error, unit;

        MPT_LOCK(mpt);
        handler.reply_handler = mpt_user_reply_handler;
        error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
                                     &user_handler_id);
        MPT_UNLOCK(mpt);
        if (error != 0) {
                mpt_prt(mpt, "Unable to register user handler!\n");
                return (error);
        }
        unit = device_get_unit(mpt->dev);
        mpt->cdev = make_dev(&mpt_cdevsw, unit, UID_ROOT, GID_OPERATOR, 0640,
            "mpt%d", unit);
        if (mpt->cdev == NULL) {
                MPT_LOCK(mpt);
                mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
                    user_handler_id);
                MPT_UNLOCK(mpt);
                return (ENOMEM);
        }
        mpt->cdev->si_drv1 = mpt;
        return (0);
}

int
mpt_user_enable(struct mpt_softc *mpt)
{

        return (0);
}

void
mpt_user_ready(struct mpt_softc *mpt)
{
}

int
mpt_user_event(struct mpt_softc *mpt, request_t *req,
    MSG_EVENT_NOTIFY_REPLY *msg)
{

        /* Someday we may want to let a user daemon listen for events? */
        return (0);
}

void
mpt_user_reset(struct mpt_softc *mpt, int type)
{
}

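/*
 * Destroy the character device and deregister the user reply handler.
 */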
void
mpt_user_detach(struct mpt_softc *mpt)
{
        mpt_handler_t handler;

        /* XXX: do a purge of pending requests? */
        destroy_dev(mpt->cdev);

        MPT_LOCK(mpt);
        handler.reply_handler = mpt_user_reply_handler;
        mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
            user_handler_id);
        MPT_UNLOCK(mpt);
}

static int
mpt_open(struct dev_open_args *ap)
{

        return (0);
}

static int
mpt_close(struct dev_close_args *ap)
{

        return (0);
}

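/*
 * Allocate a DMA-safe bounce buffer used to stage configuration page
 * and RAID action data between userland and the controller.
 */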
static int
mpt_alloc_buffer(struct mpt_softc *mpt, struct mpt_page_memory *page_mem,
    size_t len)
{
        struct mpt_map_info mi;
        int error;

        page_mem->vaddr = NULL;

        /* Limit requests to 16M. */
        if (len > 16 * 1024 * 1024)
                return (ENOSPC);
        error = mpt_dma_tag_create(mpt, mpt->parent_dmat, 1, 0,
            BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
            len, 1, len, 0, &page_mem->tag);
        if (error)
                return (error);
        error = bus_dmamem_alloc(page_mem->tag, &page_mem->vaddr,
            BUS_DMA_NOWAIT, &page_mem->map);
        if (error) {
                bus_dma_tag_destroy(page_mem->tag);
                return (error);
        }
        mi.mpt = mpt;
        error = bus_dmamap_load(page_mem->tag, page_mem->map, page_mem->vaddr,
            len, mpt_map_rquest, &mi, BUS_DMA_NOWAIT);
        if (error == 0)
                error = mi.error;
        if (error) {
                bus_dmamem_free(page_mem->tag, page_mem->vaddr, page_mem->map);
                bus_dma_tag_destroy(page_mem->tag);
                page_mem->vaddr = NULL;
                return (error);
        }
        page_mem->paddr = mi.phys;
        return (0);
}

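/*
 * Release the DMA buffer allocated by mpt_alloc_buffer(), if any.
 */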
static void
mpt_free_buffer(struct mpt_page_memory *page_mem)
{

        if (page_mem->vaddr == NULL)
                return;
        bus_dmamap_unload(page_mem->tag, page_mem->map);
        bus_dmamem_free(page_mem->tag, page_mem->vaddr, page_mem->map);
        bus_dma_tag_destroy(page_mem->tag);
        page_mem->vaddr = NULL;
}

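/*
 * Fetch the header of the configuration page described by page_req and,
 * on success, copy it back into the request structure.
 */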
static int
mpt_user_read_cfg_header(struct mpt_softc *mpt,
    struct mpt_cfg_page_req *page_req)
{
        request_t  *req;
        cfgparms_t params;
        MSG_CONFIG *cfgp;
        int         error;

        req = mpt_get_request(mpt, TRUE);
        if (req == NULL) {
                mpt_prt(mpt, "mpt_user_read_cfg_header: Get request failed!\n");
                return (ENOMEM);
        }

        params.Action = MPI_CONFIG_ACTION_PAGE_HEADER;
        params.PageVersion = 0;
        params.PageLength = 0;
        params.PageNumber = page_req->header.PageNumber;
        params.PageType = page_req->header.PageType;
        params.PageAddress = le32toh(page_req->page_address);
        error = mpt_issue_cfg_req(mpt, req, &params, /*addr*/0, /*len*/0,
                                  TRUE, 5000);
        if (error != 0) {
                /*
                 * Leave the request. Without resetting the chip, it's
                 * still owned by it and we'll just get into trouble
                 * freeing it now. Mark it as abandoned so that if it
                 * shows up later it can be freed.
                 */
                mpt_prt(mpt, "read_cfg_header timed out\n");
                return (ETIMEDOUT);
        }

        page_req->ioc_status = htole16(req->IOCStatus);
        if ((req->IOCStatus & MPI_IOCSTATUS_MASK) == MPI_IOCSTATUS_SUCCESS) {
                cfgp = req->req_vbuf;
                bcopy(&cfgp->Header, &page_req->header,
                    sizeof(page_req->header));
        }
        mpt_free_request(mpt, req);
        return (0);
}

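/*
 * Read the current values of a configuration page into the DMA buffer.
 * The caller has already copied the page header into mpt_page->vaddr.
 */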
static int
mpt_user_read_cfg_page(struct mpt_softc *mpt, struct mpt_cfg_page_req *page_req,
    struct mpt_page_memory *mpt_page)
{
        CONFIG_PAGE_HEADER *hdr;
        request_t    *req;
        cfgparms_t    params;
        int           error;

        req = mpt_get_request(mpt, TRUE);
        if (req == NULL) {
                mpt_prt(mpt, "mpt_user_read_cfg_page: Get request failed!\n");
                return (ENOMEM);
        }

        hdr = mpt_page->vaddr;
        params.Action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
        params.PageVersion = hdr->PageVersion;
        params.PageLength = hdr->PageLength;
        params.PageNumber = hdr->PageNumber;
        params.PageType = hdr->PageType & MPI_CONFIG_PAGETYPE_MASK;
        params.PageAddress = le32toh(page_req->page_address);
        error = mpt_issue_cfg_req(mpt, req, &params, mpt_page->paddr,
            le32toh(page_req->len), TRUE, 5000);
        if (error != 0) {
                mpt_prt(mpt, "mpt_user_read_cfg_page timed out\n");
                return (ETIMEDOUT);
        }

        page_req->ioc_status = htole16(req->IOCStatus);
        if ((req->IOCStatus & MPI_IOCSTATUS_MASK) == MPI_IOCSTATUS_SUCCESS)
                bus_dmamap_sync(mpt_page->tag, mpt_page->map,
                    BUS_DMASYNC_POSTREAD);
        mpt_free_request(mpt, req);
        return (0);
}

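/*
 * Fetch the header of an extended configuration page.
 */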
static int
mpt_user_read_extcfg_header(struct mpt_softc *mpt,
    struct mpt_ext_cfg_page_req *ext_page_req)
{
        request_t  *req;
        cfgparms_t params;
        MSG_CONFIG_REPLY *cfgp;
        int         error;

        req = mpt_get_request(mpt, TRUE);
        if (req == NULL) {
                mpt_prt(mpt, "mpt_user_read_extcfg_header: Get request failed!\n");
                return (ENOMEM);
        }

        params.Action = MPI_CONFIG_ACTION_PAGE_HEADER;
        params.PageVersion = ext_page_req->header.PageVersion;
        params.PageLength = 0;
        params.PageNumber = ext_page_req->header.PageNumber;
        params.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
        params.PageAddress = le32toh(ext_page_req->page_address);
        params.ExtPageType = ext_page_req->header.ExtPageType;
        params.ExtPageLength = 0;
        error = mpt_issue_cfg_req(mpt, req, &params, /*addr*/0, /*len*/0,
                                  TRUE, 5000);
        if (error != 0) {
                /*
                 * Leave the request. Without resetting the chip, it's
                 * still owned by it and we'll just get into trouble
                 * freeing it now. Mark it as abandoned so that if it
                 * shows up later it can be freed.
                 */
                mpt_prt(mpt, "mpt_user_read_extcfg_header timed out\n");
                return (ETIMEDOUT);
        }

        ext_page_req->ioc_status = htole16(req->IOCStatus);
        if ((req->IOCStatus & MPI_IOCSTATUS_MASK) == MPI_IOCSTATUS_SUCCESS) {
                cfgp = req->req_vbuf;
                ext_page_req->header.PageVersion = cfgp->Header.PageVersion;
                ext_page_req->header.PageNumber = cfgp->Header.PageNumber;
                ext_page_req->header.PageType = cfgp->Header.PageType;
                ext_page_req->header.ExtPageLength = cfgp->ExtPageLength;
                ext_page_req->header.ExtPageType = cfgp->ExtPageType;
        }
        mpt_free_request(mpt, req);
        return (0);
}

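/*
 * Read the current values of an extended configuration page into the
 * DMA buffer.  The caller has already copied the extended page header
 * into mpt_page->vaddr.
 */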
static int
mpt_user_read_extcfg_page(struct mpt_softc *mpt,
    struct mpt_ext_cfg_page_req *ext_page_req, struct mpt_page_memory *mpt_page)
{
        CONFIG_EXTENDED_PAGE_HEADER *hdr;
        request_t    *req;
        cfgparms_t    params;
        int           error;

        req = mpt_get_request(mpt, TRUE);
        if (req == NULL) {
                mpt_prt(mpt, "mpt_user_read_extcfg_page: Get request failed!\n");
                return (ENOMEM);
        }

        hdr = mpt_page->vaddr;
        params.Action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
        params.PageVersion = hdr->PageVersion;
        params.PageLength = 0;
        params.PageNumber = hdr->PageNumber;
        params.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
        params.PageAddress = le32toh(ext_page_req->page_address);
        params.ExtPageType = hdr->ExtPageType;
        params.ExtPageLength = hdr->ExtPageLength;
        error = mpt_issue_cfg_req(mpt, req, &params, mpt_page->paddr,
            le32toh(ext_page_req->len), TRUE, 5000);
        if (error != 0) {
                mpt_prt(mpt, "mpt_user_read_extcfg_page timed out\n");
                return (ETIMEDOUT);
        }

        ext_page_req->ioc_status = htole16(req->IOCStatus);
        if ((req->IOCStatus & MPI_IOCSTATUS_MASK) == MPI_IOCSTATUS_SUCCESS)
                bus_dmamap_sync(mpt_page->tag, mpt_page->map,
                    BUS_DMASYNC_POSTREAD);
        mpt_free_request(mpt, req);
        return (0);
}

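/*
 * Write the current values of a configuration page from the DMA buffer
 * to the controller.  Only changeable or persistent pages are accepted.
 */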
static int
mpt_user_write_cfg_page(struct mpt_softc *mpt,
    struct mpt_cfg_page_req *page_req, struct mpt_page_memory *mpt_page)
{
        CONFIG_PAGE_HEADER *hdr;
        request_t    *req;
        cfgparms_t    params;
        u_int         hdr_attr;
        int           error;

        hdr = mpt_page->vaddr;
        hdr_attr = hdr->PageType & MPI_CONFIG_PAGEATTR_MASK;
        if (hdr_attr != MPI_CONFIG_PAGEATTR_CHANGEABLE &&
            hdr_attr != MPI_CONFIG_PAGEATTR_PERSISTENT) {
                mpt_prt(mpt, "page type 0x%x not changeable\n",
                        hdr->PageType & MPI_CONFIG_PAGETYPE_MASK);
                return (EINVAL);
        }

#if     0
        /*
         * We shouldn't mask off other bits here.
         */
        hdr->PageType &= ~MPI_CONFIG_PAGETYPE_MASK;
#endif

        req = mpt_get_request(mpt, TRUE);
        if (req == NULL)
                return (ENOMEM);

        bus_dmamap_sync(mpt_page->tag, mpt_page->map, BUS_DMASYNC_PREWRITE);

        /*
         * There isn't any point in restoring stripped out attributes
         * if you then mask them going down to issue the request.
         */

        params.Action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT;
        params.PageVersion = hdr->PageVersion;
        params.PageLength = hdr->PageLength;
        params.PageNumber = hdr->PageNumber;
        params.PageAddress = le32toh(page_req->page_address);
#if     0
        /* Restore stripped out attributes */
        hdr->PageType |= hdr_attr;
        params.PageType = hdr->PageType & MPI_CONFIG_PAGETYPE_MASK;
#else
        params.PageType = hdr->PageType;
#endif
        error = mpt_issue_cfg_req(mpt, req, &params, mpt_page->paddr,
            le32toh(page_req->len), TRUE, 5000);
        if (error != 0) {
                mpt_prt(mpt, "mpt_write_cfg_page timed out\n");
                return (ETIMEDOUT);
        }

        page_req->ioc_status = htole16(req->IOCStatus);
        mpt_free_request(mpt, req);
        return (0);
}

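/*
 * Reply handler for requests issued on behalf of userland.  For RAID
 * action replies, the action/volume status and action data are stashed
 * past the request frame so mpt_user_raid_action() can retrieve them.
 */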
static int
mpt_user_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
        MSG_RAID_ACTION_REPLY *reply;
        struct mpt_user_raid_action_result *res;

        if (req == NULL)
                return (TRUE);

        if (reply_frame != NULL) {
                bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
                    BUS_DMASYNC_POSTREAD);
                reply = (MSG_RAID_ACTION_REPLY *)reply_frame;
                req->IOCStatus = le16toh(reply->IOCStatus);
                res = (struct mpt_user_raid_action_result *)
                    (((uint8_t *)req->req_vbuf) + MPT_RQSL(mpt));
                res->action_status = reply->ActionStatus;
                res->volume_status = reply->VolumeStatus;
                bcopy(&reply->ActionData, res->action_data,
                    sizeof(res->action_data));
        }

        req->state &= ~REQ_STATE_QUEUED;
        req->state |= REQ_STATE_DONE;
        TAILQ_REMOVE(&mpt->request_pending_list, req, links);

        if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
                wakeup(req);
        } else if ((req->state & REQ_STATE_TIMEDOUT) != 0) {
                /*
                 * Whew- we can free this request (late completion)
                 */
                mpt_free_request(mpt, req);
        }

        return (TRUE);
}

/*
 * We use the first part of the request buffer after the request frame
 * to hold the action data and action status from the RAID reply.  The
 * rest of the request buffer is used to hold the buffer for the
 * action SGE.
 */
static int
mpt_user_raid_action(struct mpt_softc *mpt, struct mpt_raid_action *raid_act,
        struct mpt_page_memory *mpt_page)
{
        request_t *req;
        struct mpt_user_raid_action_result *res;
        MSG_RAID_ACTION_REQUEST *rap;
        SGE_SIMPLE32 *se;
        int error;

        req = mpt_get_request(mpt, TRUE);
        if (req == NULL)
                return (ENOMEM);
        rap = req->req_vbuf;
        memset(rap, 0, sizeof *rap);
        rap->Action = raid_act->action;
        rap->ActionDataWord = raid_act->action_data_word;
        rap->Function = MPI_FUNCTION_RAID_ACTION;
        rap->VolumeID = raid_act->volume_id;
        rap->VolumeBus = raid_act->volume_bus;
        rap->PhysDiskNum = raid_act->phys_disk_num;
        se = (SGE_SIMPLE32 *)&rap->ActionDataSGE;
        if (mpt_page->vaddr != NULL && raid_act->len != 0) {
                bus_dmamap_sync(mpt_page->tag, mpt_page->map,
                    BUS_DMASYNC_PREWRITE);
                se->Address = htole32(mpt_page->paddr);
                MPI_pSGE_SET_LENGTH(se, le32toh(raid_act->len));
                /* Parenthesize the ternary so only the direction flag varies. */
                MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
                    MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
                    MPI_SGE_FLAGS_END_OF_LIST |
                    (raid_act->write ? MPI_SGE_FLAGS_HOST_TO_IOC :
                    MPI_SGE_FLAGS_IOC_TO_HOST)));
        }
        se->FlagsLength = htole32(se->FlagsLength);
        rap->MsgContext = htole32(req->index | user_handler_id);

        mpt_check_doorbell(mpt);
        mpt_send_cmd(mpt, req);

        error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, TRUE,
            2000);
        if (error != 0) {
                /*
                 * Leave request so it can be cleaned up later.
                 */
                mpt_prt(mpt, "mpt_user_raid_action timed out\n");
                return (error);
        }

        raid_act->ioc_status = htole16(req->IOCStatus);
        if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
                mpt_free_request(mpt, req);
                return (0);
        }

        res = (struct mpt_user_raid_action_result *)
            (((uint8_t *)req->req_vbuf) + MPT_RQSL(mpt));
        raid_act->volume_status = res->volume_status;
        raid_act->action_status = res->action_status;
        bcopy(res->action_data, raid_act->action_data,
            sizeof(res->action_data));
        if (mpt_page->vaddr != NULL)
                bus_dmamap_sync(mpt_page->tag, mpt_page->map,
                    BUS_DMASYNC_POSTREAD);
        mpt_free_request(mpt, req);
        return (0);
}

#ifdef __x86_64__
#define PTRIN(p)                ((void *)(uintptr_t)(p))
#define PTROUT(v)               ((u_int32_t)(uintptr_t)(v))
#endif

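/*
 * ioctl entry point.  On x86_64 the 32-bit request layouts are converted
 * to the native layout on the way in and converted back before returning.
 */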
static int
mpt_ioctl(struct dev_ioctl_args *ap)
{
        struct mpt_softc *mpt;
        struct mpt_cfg_page_req *page_req;
        struct mpt_ext_cfg_page_req *ext_page_req;
        struct mpt_raid_action *raid_act;
        struct mpt_page_memory mpt_page;
#ifdef __x86_64__
        struct mpt_cfg_page_req32 *page_req32;
        struct mpt_cfg_page_req page_req_swab;
        struct mpt_ext_cfg_page_req32 *ext_page_req32;
        struct mpt_ext_cfg_page_req ext_page_req_swab;
        struct mpt_raid_action32 *raid_act32;
        struct mpt_raid_action raid_act_swab;
#endif
        u_long cmd = ap->a_cmd;
        caddr_t arg = ap->a_data;
        struct cdev *kdev = ap->a_head.a_dev;
        int error;

        mpt = kdev->si_drv1;
        page_req = (void *)arg;
        ext_page_req = (void *)arg;
        raid_act = (void *)arg;
        mpt_page.vaddr = NULL;

#ifdef __x86_64__
        /* Convert 32-bit structs to native ones. */
        page_req32 = (void *)arg;
        ext_page_req32 = (void *)arg;
        raid_act32 = (void *)arg;
        switch (cmd) {
        case MPTIO_READ_CFG_HEADER32:
        case MPTIO_READ_CFG_PAGE32:
        case MPTIO_WRITE_CFG_PAGE32:
                page_req = &page_req_swab;
                page_req->header = page_req32->header;
                page_req->page_address = page_req32->page_address;
                page_req->buf = PTRIN(page_req32->buf);
                page_req->len = page_req32->len;
                page_req->ioc_status = page_req32->ioc_status;
                break;
        case MPTIO_READ_EXT_CFG_HEADER32:
        case MPTIO_READ_EXT_CFG_PAGE32:
                ext_page_req = &ext_page_req_swab;
                ext_page_req->header = ext_page_req32->header;
                ext_page_req->page_address = ext_page_req32->page_address;
                ext_page_req->buf = PTRIN(ext_page_req32->buf);
                ext_page_req->len = ext_page_req32->len;
                ext_page_req->ioc_status = ext_page_req32->ioc_status;
                break;
        case MPTIO_RAID_ACTION32:
                raid_act = &raid_act_swab;
                raid_act->action = raid_act32->action;
                raid_act->volume_bus = raid_act32->volume_bus;
                raid_act->volume_id = raid_act32->volume_id;
                raid_act->phys_disk_num = raid_act32->phys_disk_num;
                raid_act->action_data_word = raid_act32->action_data_word;
                raid_act->buf = PTRIN(raid_act32->buf);
                raid_act->len = raid_act32->len;
                raid_act->volume_status = raid_act32->volume_status;
                bcopy(raid_act32->action_data, raid_act->action_data,
                    sizeof(raid_act->action_data));
                raid_act->action_status = raid_act32->action_status;
                raid_act->ioc_status = raid_act32->ioc_status;
                raid_act->write = raid_act32->write;
                break;
        }
#endif

        switch (cmd) {
#ifdef __x86_64__
        case MPTIO_READ_CFG_HEADER32:
#endif
        case MPTIO_READ_CFG_HEADER:
                MPT_LOCK(mpt);
                error = mpt_user_read_cfg_header(mpt, page_req);
                MPT_UNLOCK(mpt);
                break;
#ifdef __x86_64__
        case MPTIO_READ_CFG_PAGE32:
#endif
        case MPTIO_READ_CFG_PAGE:
                error = mpt_alloc_buffer(mpt, &mpt_page, page_req->len);
                if (error)
                        break;
                error = copyin(page_req->buf, mpt_page.vaddr,
                    sizeof(CONFIG_PAGE_HEADER));
                if (error)
                        break;
                MPT_LOCK(mpt);
                error = mpt_user_read_cfg_page(mpt, page_req, &mpt_page);
                MPT_UNLOCK(mpt);
                if (error)
                        break;
                error = copyout(mpt_page.vaddr, page_req->buf, page_req->len);
                break;
#ifdef __x86_64__
        case MPTIO_READ_EXT_CFG_HEADER32:
#endif
        case MPTIO_READ_EXT_CFG_HEADER:
                MPT_LOCK(mpt);
                error = mpt_user_read_extcfg_header(mpt, ext_page_req);
                MPT_UNLOCK(mpt);
                break;
#ifdef __x86_64__
        case MPTIO_READ_EXT_CFG_PAGE32:
#endif
        case MPTIO_READ_EXT_CFG_PAGE:
                error = mpt_alloc_buffer(mpt, &mpt_page, ext_page_req->len);
                if (error)
                        break;
                error = copyin(ext_page_req->buf, mpt_page.vaddr,
                    sizeof(CONFIG_EXTENDED_PAGE_HEADER));
                if (error)
                        break;
                MPT_LOCK(mpt);
                error = mpt_user_read_extcfg_page(mpt, ext_page_req, &mpt_page);
                MPT_UNLOCK(mpt);
                if (error)
                        break;
                error = copyout(mpt_page.vaddr, ext_page_req->buf,
                    ext_page_req->len);
                break;
#ifdef __x86_64__
        case MPTIO_WRITE_CFG_PAGE32:
#endif
        case MPTIO_WRITE_CFG_PAGE:
                error = mpt_alloc_buffer(mpt, &mpt_page, page_req->len);
                if (error)
                        break;
                error = copyin(page_req->buf, mpt_page.vaddr, page_req->len);
                if (error)
                        break;
                MPT_LOCK(mpt);
                error = mpt_user_write_cfg_page(mpt, page_req, &mpt_page);
                MPT_UNLOCK(mpt);
                break;
#ifdef __x86_64__
        case MPTIO_RAID_ACTION32:
#endif
        case MPTIO_RAID_ACTION:
                if (raid_act->buf != NULL) {
                        error = mpt_alloc_buffer(mpt, &mpt_page, raid_act->len);
                        if (error)
                                break;
                        error = copyin(raid_act->buf, mpt_page.vaddr,
                            raid_act->len);
                        if (error)
                                break;
                }
                MPT_LOCK(mpt);
                error = mpt_user_raid_action(mpt, raid_act, &mpt_page);
                MPT_UNLOCK(mpt);
                if (error)
                        break;
                if (raid_act->buf != NULL)
                        error = copyout(mpt_page.vaddr, raid_act->buf,
                            raid_act->len);
                break;
        default:
                error = ENOIOCTL;
                break;
        }

        mpt_free_buffer(&mpt_page);

        if (error)
                return (error);

#ifdef __x86_64__
        /* Convert native structs to 32-bit ones. */
        switch (cmd) {
        case MPTIO_READ_CFG_HEADER32:
        case MPTIO_READ_CFG_PAGE32:
        case MPTIO_WRITE_CFG_PAGE32:
                page_req32->header = page_req->header;
                page_req32->page_address = page_req->page_address;
                page_req32->buf = PTROUT(page_req->buf);
                page_req32->len = page_req->len;
                page_req32->ioc_status = page_req->ioc_status;
                break;
        case MPTIO_READ_EXT_CFG_HEADER32:
        case MPTIO_READ_EXT_CFG_PAGE32:
                ext_page_req32->header = ext_page_req->header;
                ext_page_req32->page_address = ext_page_req->page_address;
                ext_page_req32->buf = PTROUT(ext_page_req->buf);
                ext_page_req32->len = ext_page_req->len;
                ext_page_req32->ioc_status = ext_page_req->ioc_status;
                break;
        case MPTIO_RAID_ACTION32:
                raid_act32->action = raid_act->action;
                raid_act32->volume_bus = raid_act->volume_bus;
                raid_act32->volume_id = raid_act->volume_id;
                raid_act32->phys_disk_num = raid_act->phys_disk_num;
                raid_act32->action_data_word = raid_act->action_data_word;
                raid_act32->buf = PTROUT(raid_act->buf);
                raid_act32->len = raid_act->len;
                raid_act32->volume_status = raid_act->volume_status;
                bcopy(raid_act->action_data, raid_act32->action_data,
                    sizeof(raid_act->action_data));
                raid_act32->action_status = raid_act->action_status;
                raid_act32->ioc_status = raid_act->ioc_status;
                raid_act32->write = raid_act->write;
                break;
        }
#endif

        return (0);
}