ebef580d82efa3fe7091678453330b788a88073c
[dragonfly.git] / sys / dev / disk / mpt / mpt_user.c
1 /*-
2  * Copyright (c) 2008 Yahoo!, Inc.
3  * All rights reserved.
4  * Written by: John Baldwin <jhb@FreeBSD.org>
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. Neither the name of the author nor the names of any co-contributors
15  *    may be used to endorse or promote products derived from this software
16  *    without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  *
30  * LSI MPT-Fusion Host Adapter FreeBSD userland interface
31  *
32  * $FreeBSD: src/sys/dev/mpt/mpt_user.c,v 1.5 2011/03/06 12:48:15 marius Exp $
33  */
34
35 #include <sys/param.h>
36 #include <sys/conf.h>
37 #include <sys/device.h>
38 #include <sys/errno.h>
39 #include <sys/mpt_ioctl.h>
40
41 #include <dev/disk/mpt/mpt.h>
42
/*
 * Layout of the RAID action result that mpt_user_reply_handler()
 * stashes in the request buffer just past the request frame
 * (at offset MPT_RQSL(mpt)).
 */
struct mpt_user_raid_action_result {
	uint32_t	volume_status;
	uint32_t	action_data[4];
	uint16_t	action_status;
};

/*
 * Book-keeping for a DMA-able bounce buffer used to move config
 * pages and RAID action data between user space and the IOC.
 */
struct mpt_page_memory {
	bus_dma_tag_t	tag;
	bus_dmamap_t	map;
	bus_addr_t	paddr;		/* bus address of the buffer */
	void		*vaddr;		/* KVA; NULL when not allocated */
};
55
/* Hooks this personality registers with the mpt(4) core. */
static mpt_probe_handler_t	mpt_user_probe;
static mpt_attach_handler_t	mpt_user_attach;
static mpt_enable_handler_t	mpt_user_enable;
static mpt_ready_handler_t	mpt_user_ready;
static mpt_event_handler_t	mpt_user_event;
static mpt_reset_handler_t	mpt_user_reset;
static mpt_detach_handler_t	mpt_user_detach;

static struct mpt_personality mpt_user_personality = {
	.name		= "mpt_user",
	.probe		= mpt_user_probe,
	.attach		= mpt_user_attach,
	.enable		= mpt_user_enable,
	.ready		= mpt_user_ready,
	.event		= mpt_user_event,
	.reset		= mpt_user_reset,
	.detach		= mpt_user_detach,
};

DECLARE_MPT_PERSONALITY(mpt_user, SI_ORDER_SECOND);

/* Reply handler for RAID action completions (see mpt_user_reply_handler). */
static mpt_reply_handler_t	mpt_user_reply_handler;

/* Character device entry points for /dev/mpt%d. */
static d_open_t		mpt_open;
static d_close_t	mpt_close;
static d_ioctl_t	mpt_ioctl;

static struct dev_ops mpt_ops = {
	{ "mpt", 0, 0 },
	.d_open =	mpt_open,
	.d_close =	mpt_close,
	.d_ioctl =	mpt_ioctl,
};

static MALLOC_DEFINE(M_MPTUSER, "mpt_user", "Buffers for mpt(4) ioctls");

/* Reply-handler id assigned by mpt_register_handler() at attach time. */
static uint32_t user_handler_id = MPT_HANDLER_ID_NONE;
/*
 * Personality probe: accept every controller so the user ioctl
 * interface is available on all mpt(4) instances.
 */
int
mpt_user_probe(struct mpt_softc *mpt)
{

	return (0);
}
101
102 int
103 mpt_user_attach(struct mpt_softc *mpt)
104 {
105         mpt_handler_t handler;
106         int error, unit;
107
108         MPT_LOCK(mpt);
109         handler.reply_handler = mpt_user_reply_handler;
110         error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
111                                      &user_handler_id);
112         MPT_UNLOCK(mpt);
113         if (error != 0) {
114                 mpt_prt(mpt, "Unable to register user handler!\n");
115                 return (error);
116         }
117         unit = device_get_unit(mpt->dev);
118         mpt->cdev = make_dev(&mpt_ops, unit, UID_ROOT, GID_OPERATOR, 0640,
119             "mpt%d", unit);
120         if (mpt->cdev == NULL) {
121                 MPT_LOCK(mpt);
122                 mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
123                     user_handler_id);
124                 MPT_UNLOCK(mpt);
125                 return (ENOMEM);
126         }
127         mpt->cdev->si_drv1 = mpt;
128         return (0);
129 }
130
/*
 * Personality "enable" hook: no extra setup is needed once
 * attached, so this always succeeds.
 */
int
mpt_user_enable(struct mpt_softc *mpt)
{

	return (0);
}
137
/* Personality "ready" hook: nothing to do for the user interface. */
void
mpt_user_ready(struct mpt_softc *mpt)
{
}
142
143 int
144 mpt_user_event(struct mpt_softc *mpt, request_t *req,
145     MSG_EVENT_NOTIFY_REPLY *msg)
146 {
147
148         /* Someday we may want to let a user daemon listen for events? */
149         return (0);
150 }
151
/* Personality reset hook: no per-reset state to clean up here. */
void
mpt_user_reset(struct mpt_softc *mpt, int type)
{
}
156
157 void
158 mpt_user_detach(struct mpt_softc *mpt)
159 {
160         mpt_handler_t handler;
161
162         /* XXX: do a purge of pending requests? */
163         destroy_dev(mpt->cdev);
164
165         MPT_LOCK(mpt);
166         handler.reply_handler = mpt_user_reply_handler;
167         mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
168             user_handler_id);
169         MPT_UNLOCK(mpt);
170 }
171
/* Device open: no per-open state; access control is via node perms. */
static int
mpt_open(struct dev_open_args *ap)
{

	return (0);
}
178
/* Device close: nothing to release. */
static int
mpt_close(struct dev_close_args *ap)
{

	return (0);
}
185
186 static int
187 mpt_alloc_buffer(struct mpt_softc *mpt, struct mpt_page_memory *page_mem,
188     size_t len)
189 {
190         struct mpt_map_info mi;
191         int error;
192
193         page_mem->vaddr = NULL;
194
195         /* Limit requests to 16M. */
196         if (len > 16 * 1024 * 1024)
197                 return (ENOSPC);
198         error = mpt_dma_tag_create(mpt, mpt->parent_dmat, 1, 0,
199             BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
200             len, 1, len, 0, &page_mem->tag);
201         if (error)
202                 return (error);
203         error = bus_dmamem_alloc(page_mem->tag, &page_mem->vaddr,
204             BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &page_mem->map);
205         if (error) {
206                 bus_dma_tag_destroy(page_mem->tag);
207                 return (error);
208         }
209         mi.mpt = mpt;
210         error = bus_dmamap_load(page_mem->tag, page_mem->map, page_mem->vaddr,
211             len, mpt_map_rquest, &mi, BUS_DMA_NOWAIT);
212         if (error == 0)
213                 error = mi.error;
214         if (error) {
215                 bus_dmamem_free(page_mem->tag, page_mem->vaddr, page_mem->map);
216                 bus_dma_tag_destroy(page_mem->tag);
217                 page_mem->vaddr = NULL;
218                 return (error);
219         }
220         page_mem->paddr = mi.phys;
221         return (0);
222 }
223
224 static void
225 mpt_free_buffer(struct mpt_page_memory *page_mem)
226 {
227
228         if (page_mem->vaddr == NULL)
229                 return;
230         bus_dmamap_unload(page_mem->tag, page_mem->map);
231         bus_dmamem_free(page_mem->tag, page_mem->vaddr, page_mem->map);
232         bus_dma_tag_destroy(page_mem->tag);
233         page_mem->vaddr = NULL;
234 }
235
/*
 * MPTIO_READ_CFG_HEADER backend: issue a CONFIG PAGE_HEADER action
 * and copy the returned page header back into the ioctl request.
 * Called with the mpt lock held.  Returns 0 with the IOC status
 * stored in page_req->ioc_status, or ENOMEM/ETIMEDOUT on failure.
 */
static int
mpt_user_read_cfg_header(struct mpt_softc *mpt,
    struct mpt_cfg_page_req *page_req)
{
	request_t  *req;
	cfgparms_t params;
	MSG_CONFIG *cfgp;
	int         error;

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL) {
		mpt_prt(mpt, "mpt_user_read_cfg_header: Get request failed!\n");
		return (ENOMEM);
	}

	/* Header query: version/length are filled in by the IOC. */
	params.Action = MPI_CONFIG_ACTION_PAGE_HEADER;
	params.PageVersion = 0;
	params.PageLength = 0;
	params.PageNumber = page_req->header.PageNumber;
	params.PageType = page_req->header.PageType;
	params.PageAddress = le32toh(page_req->page_address);
	error = mpt_issue_cfg_req(mpt, req, &params, /*addr*/0, /*len*/0,
				  TRUE, 5000);
	if (error != 0) {
		/*
		 * Leave the request. Without resetting the chip, it's
		 * still owned by it and we'll just get into trouble
		 * freeing it now. Mark it as abandoned so that if it
		 * shows up later it can be freed.
		 */
		mpt_prt(mpt, "read_cfg_header timed out\n");
		return (ETIMEDOUT);
	}

	page_req->ioc_status = htole16(req->IOCStatus);
	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) == MPI_IOCSTATUS_SUCCESS) {
		/* The reply frame carries the header we were asked for. */
		cfgp = req->req_vbuf;
		bcopy(&cfgp->Header, &page_req->header,
		    sizeof(page_req->header));
	}
	mpt_free_request(mpt, req);
	return (0);
}
279
/*
 * MPTIO_READ_CFG_PAGE backend: read the current values of a config
 * page into the DMA bounce buffer.  The caller has already copied
 * the page header into mpt_page->vaddr, which supplies the page
 * version/length/number/type parameters.  Called with the mpt lock
 * held.
 */
static int
mpt_user_read_cfg_page(struct mpt_softc *mpt, struct mpt_cfg_page_req *page_req,
    struct mpt_page_memory *mpt_page)
{
	CONFIG_PAGE_HEADER *hdr;
	request_t    *req;
	cfgparms_t    params;
	int           error;

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL) {
		mpt_prt(mpt, "mpt_user_read_cfg_page: Get request failed!\n");
		return (ENOMEM);
	}

	hdr = mpt_page->vaddr;
	params.Action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
	params.PageVersion = hdr->PageVersion;
	params.PageLength = hdr->PageLength;
	params.PageNumber = hdr->PageNumber;
	params.PageType = hdr->PageType & MPI_CONFIG_PAGETYPE_MASK;
	params.PageAddress = le32toh(page_req->page_address);
	/* Hand the bounce buffer to the IOC for the page data. */
	bus_dmamap_sync(mpt_page->tag, mpt_page->map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	error = mpt_issue_cfg_req(mpt, req, &params, mpt_page->paddr,
	    le32toh(page_req->len), TRUE, 5000);
	if (error != 0) {
		/* Request deliberately not freed: the chip still owns it. */
		mpt_prt(mpt, "mpt_user_read_cfg_page timed out\n");
		return (ETIMEDOUT);
	}

	page_req->ioc_status = htole16(req->IOCStatus);
	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) == MPI_IOCSTATUS_SUCCESS)
		bus_dmamap_sync(mpt_page->tag, mpt_page->map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	mpt_free_request(mpt, req);
	return (0);
}
318
/*
 * MPTIO_READ_EXT_CFG_HEADER backend: like mpt_user_read_cfg_header()
 * but for extended config pages, so the extended page type/length
 * fields are carried as well.  Called with the mpt lock held.
 */
static int
mpt_user_read_extcfg_header(struct mpt_softc *mpt,
    struct mpt_ext_cfg_page_req *ext_page_req)
{
	request_t  *req;
	cfgparms_t params;
	MSG_CONFIG_REPLY *cfgp;
	int         error;

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL) {
		mpt_prt(mpt, "mpt_user_read_extcfg_header: Get request failed!\n");
		return (ENOMEM);
	}

	params.Action = MPI_CONFIG_ACTION_PAGE_HEADER;
	params.PageVersion = ext_page_req->header.PageVersion;
	params.PageLength = 0;
	params.PageNumber = ext_page_req->header.PageNumber;
	params.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
	params.PageAddress = le32toh(ext_page_req->page_address);
	params.ExtPageType = ext_page_req->header.ExtPageType;
	params.ExtPageLength = 0;
	error = mpt_issue_cfg_req(mpt, req, &params, /*addr*/0, /*len*/0,
				  TRUE, 5000);
	if (error != 0) {
		/*
		 * Leave the request. Without resetting the chip, it's
		 * still owned by it and we'll just get into trouble
		 * freeing it now. Mark it as abandoned so that if it
		 * shows up later it can be freed.
		 */
		mpt_prt(mpt, "mpt_user_read_extcfg_header timed out\n");
		return (ETIMEDOUT);
	}

	ext_page_req->ioc_status = htole16(req->IOCStatus);
	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) == MPI_IOCSTATUS_SUCCESS) {
		/* Copy the extended header fields out of the reply frame. */
		cfgp = req->req_vbuf;
		ext_page_req->header.PageVersion = cfgp->Header.PageVersion;
		ext_page_req->header.PageNumber = cfgp->Header.PageNumber;
		ext_page_req->header.PageType = cfgp->Header.PageType;
		ext_page_req->header.ExtPageLength = cfgp->ExtPageLength;
		ext_page_req->header.ExtPageType = cfgp->ExtPageType;
	}
	mpt_free_request(mpt, req);
	return (0);
}
367
/*
 * MPTIO_READ_EXT_CFG_PAGE backend: read an extended config page into
 * the DMA bounce buffer.  The caller has already copied the extended
 * page header into mpt_page->vaddr to supply the request parameters.
 * Called with the mpt lock held.
 */
static int
mpt_user_read_extcfg_page(struct mpt_softc *mpt,
    struct mpt_ext_cfg_page_req *ext_page_req, struct mpt_page_memory *mpt_page)
{
	CONFIG_EXTENDED_PAGE_HEADER *hdr;
	request_t    *req;
	cfgparms_t    params;
	int           error;

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL) {
		mpt_prt(mpt, "mpt_user_read_extcfg_page: Get request failed!\n");
		return (ENOMEM);
	}

	hdr = mpt_page->vaddr;
	params.Action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
	params.PageVersion = hdr->PageVersion;
	params.PageLength = 0;
	params.PageNumber = hdr->PageNumber;
	params.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
	params.PageAddress = le32toh(ext_page_req->page_address);
	params.ExtPageType = hdr->ExtPageType;
	params.ExtPageLength = hdr->ExtPageLength;
	/* Hand the bounce buffer to the IOC for the page data. */
	bus_dmamap_sync(mpt_page->tag, mpt_page->map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	error = mpt_issue_cfg_req(mpt, req, &params, mpt_page->paddr,
	    le32toh(ext_page_req->len), TRUE, 5000);
	if (error != 0) {
		/* Request deliberately not freed: the chip still owns it. */
		mpt_prt(mpt, "mpt_user_read_extcfg_page timed out\n");
		return (ETIMEDOUT);
	}

	ext_page_req->ioc_status = htole16(req->IOCStatus);
	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) == MPI_IOCSTATUS_SUCCESS)
		bus_dmamap_sync(mpt_page->tag, mpt_page->map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	mpt_free_request(mpt, req);
	return (0);
}
408
/*
 * MPTIO_WRITE_CFG_PAGE backend: write the page image the user placed
 * in the bounce buffer back to the IOC with PAGE_WRITE_CURRENT.
 * Only pages whose attribute marks them changeable or persistent may
 * be written.  Called with the mpt lock held.
 */
static int
mpt_user_write_cfg_page(struct mpt_softc *mpt,
    struct mpt_cfg_page_req *page_req, struct mpt_page_memory *mpt_page)
{
	CONFIG_PAGE_HEADER *hdr;
	request_t    *req;
	cfgparms_t    params;
	u_int         hdr_attr;
	int           error;

	hdr = mpt_page->vaddr;
	hdr_attr = hdr->PageType & MPI_CONFIG_PAGEATTR_MASK;
	if (hdr_attr != MPI_CONFIG_PAGEATTR_CHANGEABLE &&
	    hdr_attr != MPI_CONFIG_PAGEATTR_PERSISTENT) {
		mpt_prt(mpt, "page type 0x%x not changeable\n",
			hdr->PageType & MPI_CONFIG_PAGETYPE_MASK);
		return (EINVAL);
	}

#if	0
	/*
	 * We shouldn't mask off other bits here.
	 */
	hdr->PageType &= ~MPI_CONFIG_PAGETYPE_MASK;
#endif

	req = mpt_get_request(mpt, TRUE);
	if (req == NULL)
		return (ENOMEM);

	bus_dmamap_sync(mpt_page->tag, mpt_page->map, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);

	/*
	 * There isn't any point in restoring stripped out attributes
	 * if you then mask them going down to issue the request.
	 */

	params.Action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT;
	params.PageVersion = hdr->PageVersion;
	params.PageLength = hdr->PageLength;
	params.PageNumber = hdr->PageNumber;
	params.PageAddress = le32toh(page_req->page_address);
#if	0
	/* Restore stripped out attributes */
	hdr->PageType |= hdr_attr;
	params.PageType = hdr->PageType & MPI_CONFIG_PAGETYPE_MASK;
#else
	/* Page type is passed through unmasked (see comments above). */
	params.PageType = hdr->PageType;
#endif
	error = mpt_issue_cfg_req(mpt, req, &params, mpt_page->paddr,
	    le32toh(page_req->len), TRUE, 5000);
	if (error != 0) {
		/* Request deliberately not freed: the chip still owns it. */
		mpt_prt(mpt, "mpt_write_cfg_page timed out\n");
		return (ETIMEDOUT);
	}

	page_req->ioc_status = htole16(req->IOCStatus);
	bus_dmamap_sync(mpt_page->tag, mpt_page->map, BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);
	mpt_free_request(mpt, req);
	return (0);
}
472
/*
 * Reply handler for requests issued through user_handler_id (RAID
 * actions).  Copies the action/volume status and action data from
 * the reply frame into the scratch area after the request frame,
 * marks the request done, and wakes any thread sleeping on it.
 * A request that already timed out (REQ_STATE_TIMEDOUT) is freed
 * here on late completion.  Runs in the driver's reply context.
 */
static int
mpt_user_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
	MSG_RAID_ACTION_REPLY *reply;
	struct mpt_user_raid_action_result *res;

	if (req == NULL)
		return (TRUE);

	if (reply_frame != NULL) {
		/* Stash the result where mpt_user_raid_action() looks. */
		reply = (MSG_RAID_ACTION_REPLY *)reply_frame;
		req->IOCStatus = le16toh(reply->IOCStatus);
		res = (struct mpt_user_raid_action_result *)
		    (((uint8_t *)req->req_vbuf) + MPT_RQSL(mpt));
		res->action_status = reply->ActionStatus;
		res->volume_status = reply->VolumeStatus;
		bcopy(&reply->ActionData, res->action_data,
		    sizeof(res->action_data));
	}

	req->state &= ~REQ_STATE_QUEUED;
	req->state |= REQ_STATE_DONE;
	TAILQ_REMOVE(&mpt->request_pending_list, req, links);

	if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
		wakeup(req);
	} else if ((req->state & REQ_STATE_TIMEDOUT) != 0) {
		/*
		 * Whew- we can free this request (late completion)
		 */
		mpt_free_request(mpt, req);
	}

	return (TRUE);
}
509
510 /*
511  * We use the first part of the request buffer after the request frame
512  * to hold the action data and action status from the RAID reply.  The
513  * rest of the request buffer is used to hold the buffer for the
514  * action SGE.
515  */
516 static int
517 mpt_user_raid_action(struct mpt_softc *mpt, struct mpt_raid_action *raid_act,
518         struct mpt_page_memory *mpt_page)
519 {
520         request_t *req;
521         struct mpt_user_raid_action_result *res;
522         MSG_RAID_ACTION_REQUEST *rap;
523         SGE_SIMPLE32 *se;
524         int error;
525
526         req = mpt_get_request(mpt, TRUE);
527         if (req == NULL)
528                 return (ENOMEM);
529         rap = req->req_vbuf;
530         memset(rap, 0, sizeof *rap);
531         rap->Action = raid_act->action;
532         rap->ActionDataWord = raid_act->action_data_word;
533         rap->Function = MPI_FUNCTION_RAID_ACTION;
534         rap->VolumeID = raid_act->volume_id;
535         rap->VolumeBus = raid_act->volume_bus;
536         rap->PhysDiskNum = raid_act->phys_disk_num;
537         se = (SGE_SIMPLE32 *)&rap->ActionDataSGE;
538         if (mpt_page->vaddr != NULL && raid_act->len != 0) {
539                 bus_dmamap_sync(mpt_page->tag, mpt_page->map,
540                     BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
541                 se->Address = htole32(mpt_page->paddr);
542                 MPI_pSGE_SET_LENGTH(se, le32toh(raid_act->len));
543                 MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
544                     MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
545                     MPI_SGE_FLAGS_END_OF_LIST |
546                     raid_act->write ? MPI_SGE_FLAGS_HOST_TO_IOC :
547                     MPI_SGE_FLAGS_IOC_TO_HOST));
548         }
549         se->FlagsLength = htole32(se->FlagsLength);
550         rap->MsgContext = htole32(req->index | user_handler_id);
551
552         mpt_check_doorbell(mpt);
553         mpt_send_cmd(mpt, req);
554
555         error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, TRUE,
556             2000);
557         if (error != 0) {
558                 /*
559                  * Leave request so it can be cleaned up later.
560                  */
561                 mpt_prt(mpt, "mpt_user_raid_action timed out\n");
562                 return (error);
563         }
564
565         raid_act->ioc_status = htole16(req->IOCStatus);
566         if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
567                 mpt_free_request(mpt, req);
568                 return (0);
569         }
570
571         res = (struct mpt_user_raid_action_result *)
572             (((uint8_t *)req->req_vbuf) + MPT_RQSL(mpt));
573         raid_act->volume_status = res->volume_status;
574         raid_act->action_status = res->action_status;
575         bcopy(res->action_data, raid_act->action_data,
576             sizeof(res->action_data));
577         if (mpt_page->vaddr != NULL)
578                 bus_dmamap_sync(mpt_page->tag, mpt_page->map,
579                     BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
580         mpt_free_request(mpt, req);
581         return (0);
582 }
583
#ifdef __x86_64__
/* Convert 32-bit user pointers to/from native kernel pointers. */
#define PTRIN(p)		((void *)(uintptr_t)(p))
#define PTROUT(v)		((u_int32_t)(uintptr_t)(v))
#endif
588
/*
 * Character-device ioctl entry point for /dev/mpt%d.
 *
 * Dispatches the MPTIO_* config-page and RAID-action commands,
 * bouncing user data through a DMA-safe buffer (mpt_page).  On
 * x86_64, the 32-bit compat ioctl variants are converted to the
 * native struct layout on entry and converted back on success.
 */
static int
mpt_ioctl(struct dev_ioctl_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	u_long cmd = ap->a_cmd;
	caddr_t arg = ap->a_data;
	struct mpt_softc *mpt;
	struct mpt_cfg_page_req *page_req;
	struct mpt_ext_cfg_page_req *ext_page_req;
	struct mpt_raid_action *raid_act;
	struct mpt_page_memory mpt_page;
#ifdef __x86_64__
	struct mpt_cfg_page_req32 *page_req32;
	struct mpt_cfg_page_req page_req_swab;
	struct mpt_ext_cfg_page_req32 *ext_page_req32;
	struct mpt_ext_cfg_page_req ext_page_req_swab;
	struct mpt_raid_action32 *raid_act32;
	struct mpt_raid_action raid_act_swab;
#endif
	int error;

	mpt = dev->si_drv1;
	/* The three request layouts all alias the same ioctl argument. */
	page_req = (void *)arg;
	ext_page_req = (void *)arg;
	raid_act = (void *)arg;
	/* NULL vaddr keeps mpt_free_buffer() a no-op if never allocated. */
	mpt_page.vaddr = NULL;

#ifdef __x86_64__
	/* Convert 32-bit structs to native ones. */
	page_req32 = (void *)arg;
	ext_page_req32 = (void *)arg;
	raid_act32 = (void *)arg;
	switch (cmd) {
	case MPTIO_READ_CFG_HEADER32:
	case MPTIO_READ_CFG_PAGE32:
	case MPTIO_WRITE_CFG_PAGE32:
		page_req = &page_req_swab;
		page_req->header = page_req32->header;
		page_req->page_address = page_req32->page_address;
		page_req->buf = PTRIN(page_req32->buf);
		page_req->len = page_req32->len;
		page_req->ioc_status = page_req32->ioc_status;
		break;
	case MPTIO_READ_EXT_CFG_HEADER32:
	case MPTIO_READ_EXT_CFG_PAGE32:
		ext_page_req = &ext_page_req_swab;
		ext_page_req->header = ext_page_req32->header;
		ext_page_req->page_address = ext_page_req32->page_address;
		ext_page_req->buf = PTRIN(ext_page_req32->buf);
		ext_page_req->len = ext_page_req32->len;
		ext_page_req->ioc_status = ext_page_req32->ioc_status;
		break;
	case MPTIO_RAID_ACTION32:
		raid_act = &raid_act_swab;
		raid_act->action = raid_act32->action;
		raid_act->volume_bus = raid_act32->volume_bus;
		raid_act->volume_id = raid_act32->volume_id;
		raid_act->phys_disk_num = raid_act32->phys_disk_num;
		raid_act->action_data_word = raid_act32->action_data_word;
		raid_act->buf = PTRIN(raid_act32->buf);
		raid_act->len = raid_act32->len;
		raid_act->volume_status = raid_act32->volume_status;
		bcopy(raid_act32->action_data, raid_act->action_data,
		    sizeof(raid_act->action_data));
		raid_act->action_status = raid_act32->action_status;
		raid_act->ioc_status = raid_act32->ioc_status;
		raid_act->write = raid_act32->write;
		break;
	}
#endif

	switch (cmd) {
#ifdef __x86_64__
	case MPTIO_READ_CFG_HEADER32:
#endif
	case MPTIO_READ_CFG_HEADER:
		MPT_LOCK(mpt);
		error = mpt_user_read_cfg_header(mpt, page_req);
		MPT_UNLOCK(mpt);
		break;
#ifdef __x86_64__
	case MPTIO_READ_CFG_PAGE32:
#endif
	case MPTIO_READ_CFG_PAGE:
		/*
		 * NOTE(review): len is not validated to be at least
		 * sizeof(CONFIG_PAGE_HEADER) before the copyin below —
		 * assumes the caller supplies a large enough buffer;
		 * confirm against mpt_ioctl(4) users.
		 */
		error = mpt_alloc_buffer(mpt, &mpt_page, page_req->len);
		if (error)
			break;
		error = copyin(page_req->buf, mpt_page.vaddr,
		    sizeof(CONFIG_PAGE_HEADER));
		if (error)
			break;
		MPT_LOCK(mpt);
		error = mpt_user_read_cfg_page(mpt, page_req, &mpt_page);
		MPT_UNLOCK(mpt);
		if (error)
			break;
		error = copyout(mpt_page.vaddr, page_req->buf, page_req->len);
		break;
#ifdef __x86_64__
	case MPTIO_READ_EXT_CFG_HEADER32:
#endif
	case MPTIO_READ_EXT_CFG_HEADER:
		MPT_LOCK(mpt);
		error = mpt_user_read_extcfg_header(mpt, ext_page_req);
		MPT_UNLOCK(mpt);
		break;
#ifdef __x86_64__
	case MPTIO_READ_EXT_CFG_PAGE32:
#endif
	case MPTIO_READ_EXT_CFG_PAGE:
		error = mpt_alloc_buffer(mpt, &mpt_page, ext_page_req->len);
		if (error)
			break;
		error = copyin(ext_page_req->buf, mpt_page.vaddr,
		    sizeof(CONFIG_EXTENDED_PAGE_HEADER));
		if (error)
			break;
		MPT_LOCK(mpt);
		error = mpt_user_read_extcfg_page(mpt, ext_page_req, &mpt_page);
		MPT_UNLOCK(mpt);
		if (error)
			break;
		error = copyout(mpt_page.vaddr, ext_page_req->buf,
		    ext_page_req->len);
		break;
#ifdef __x86_64__
	case MPTIO_WRITE_CFG_PAGE32:
#endif
	case MPTIO_WRITE_CFG_PAGE:
		error = mpt_alloc_buffer(mpt, &mpt_page, page_req->len);
		if (error)
			break;
		error = copyin(page_req->buf, mpt_page.vaddr, page_req->len);
		if (error)
			break;
		MPT_LOCK(mpt);
		error = mpt_user_write_cfg_page(mpt, page_req, &mpt_page);
		MPT_UNLOCK(mpt);
		break;
#ifdef __x86_64__
	case MPTIO_RAID_ACTION32:
#endif
	case MPTIO_RAID_ACTION:
		/* A data buffer is optional for RAID actions. */
		if (raid_act->buf != NULL) {
			error = mpt_alloc_buffer(mpt, &mpt_page, raid_act->len);
			if (error)
				break;
			error = copyin(raid_act->buf, mpt_page.vaddr,
			    raid_act->len);
			if (error)
				break;
		}
		MPT_LOCK(mpt);
		error = mpt_user_raid_action(mpt, raid_act, &mpt_page);
		MPT_UNLOCK(mpt);
		if (error)
			break;
		if (raid_act->buf != NULL)
			error = copyout(mpt_page.vaddr, raid_act->buf,
			    raid_act->len);
		break;
	default:
		error = ENOIOCTL;
		break;
	}

	/* Release the bounce buffer on every exit path. */
	mpt_free_buffer(&mpt_page);

	if (error)
		return (error);

#ifdef __x86_64__
	/* Convert native structs to 32-bit ones. */
	switch (cmd) {
	case MPTIO_READ_CFG_HEADER32:
	case MPTIO_READ_CFG_PAGE32:
	case MPTIO_WRITE_CFG_PAGE32:
		page_req32->header = page_req->header;
		page_req32->page_address = page_req->page_address;
		page_req32->buf = PTROUT(page_req->buf);
		page_req32->len = page_req->len;
		page_req32->ioc_status = page_req->ioc_status;
		break;
	case MPTIO_READ_EXT_CFG_HEADER32:
	case MPTIO_READ_EXT_CFG_PAGE32:
		ext_page_req32->header = ext_page_req->header;
		ext_page_req32->page_address = ext_page_req->page_address;
		ext_page_req32->buf = PTROUT(ext_page_req->buf);
		ext_page_req32->len = ext_page_req->len;
		ext_page_req32->ioc_status = ext_page_req->ioc_status;
		break;
	case MPTIO_RAID_ACTION32:
		raid_act32->action = raid_act->action;
		raid_act32->volume_bus = raid_act->volume_bus;
		raid_act32->volume_id = raid_act->volume_id;
		raid_act32->phys_disk_num = raid_act->phys_disk_num;
		raid_act32->action_data_word = raid_act->action_data_word;
		raid_act32->buf = PTROUT(raid_act->buf);
		raid_act32->len = raid_act->len;
		raid_act32->volume_status = raid_act->volume_status;
		bcopy(raid_act->action_data, raid_act32->action_data,
		    sizeof(raid_act->action_data));
		raid_act32->action_status = raid_act->action_status;
		raid_act32->ioc_status = raid_act->ioc_status;
		raid_act32->write = raid_act->write;
		break;
	}
#endif

	return (0);
}