mpt(4): Sync with FreeBSD.
sys/dev/disk/mpt/mpt_user.c
/*-
 * Copyright (c) 2008 Yahoo!, Inc.
 * All rights reserved.
 * Written by: John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * LSI MPT-Fusion Host Adapter FreeBSD userland interface
 *
 * $FreeBSD: src/sys/dev/mpt/mpt_user.c,v 1.6 2011/07/29 18:35:10 marius Exp $
 */

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/errno.h>
#include <sys/mpt_ioctl.h>

#include <dev/disk/mpt/mpt.h>

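/*
 * RAID action results are stashed by the reply handler in the request
 * buffer just past the request frame (at req->req_vbuf + MPT_RQSL(mpt))
 * so the ioctl path sleeping in mpt_user_raid_action() can copy them
 * back out to userland.
 */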
struct mpt_user_raid_action_result {
        uint32_t        volume_status;
        uint32_t        action_data[4];
        uint16_t        action_status;
};

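/*
 * A single bus_dma buffer used to stage configuration pages and RAID
 * action data between userland and the IOC.
 */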
struct mpt_page_memory {
        bus_dma_tag_t   tag;
        bus_dmamap_t    map;
        bus_addr_t      paddr;
        void            *vaddr;
};

static mpt_probe_handler_t      mpt_user_probe;
static mpt_attach_handler_t     mpt_user_attach;
static mpt_enable_handler_t     mpt_user_enable;
static mpt_ready_handler_t      mpt_user_ready;
static mpt_event_handler_t      mpt_user_event;
static mpt_reset_handler_t      mpt_user_reset;
static mpt_detach_handler_t     mpt_user_detach;

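/*
 * The user interface registers as an MPT personality: it attaches to
 * every controller and exposes it to userland as /dev/mpt<unit>.
 */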
static struct mpt_personality mpt_user_personality = {
        .name           = "mpt_user",
        .probe          = mpt_user_probe,
        .attach         = mpt_user_attach,
        .enable         = mpt_user_enable,
        .ready          = mpt_user_ready,
        .event          = mpt_user_event,
        .reset          = mpt_user_reset,
        .detach         = mpt_user_detach,
};

DECLARE_MPT_PERSONALITY(mpt_user, SI_ORDER_SECOND);

static mpt_reply_handler_t      mpt_user_reply_handler;

static d_open_t         mpt_open;
static d_close_t        mpt_close;
static d_ioctl_t        mpt_ioctl;

static struct dev_ops mpt_ops = {
        { "mpt", 0, 0 },
        .d_open =       mpt_open,
        .d_close =      mpt_close,
        .d_ioctl =      mpt_ioctl,
};

static MALLOC_DEFINE(M_MPTUSER, "mpt_user", "Buffers for mpt(4) ioctls");

static uint32_t user_handler_id = MPT_HANDLER_ID_NONE;

static int
mpt_user_probe(struct mpt_softc *mpt)
{

        /* Attach to every controller. */
        return (0);
}

static int
mpt_user_attach(struct mpt_softc *mpt)
{
        mpt_handler_t handler;
        int error, unit;

        MPT_LOCK(mpt);
        handler.reply_handler = mpt_user_reply_handler;
        error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
                                     &user_handler_id);
        MPT_UNLOCK(mpt);
        if (error != 0) {
                mpt_prt(mpt, "Unable to register user handler!\n");
                return (error);
        }
        unit = device_get_unit(mpt->dev);
        mpt->cdev = make_dev(&mpt_ops, unit, UID_ROOT, GID_OPERATOR, 0640,
            "mpt%d", unit);
        if (mpt->cdev == NULL) {
                MPT_LOCK(mpt);
                mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
                    user_handler_id);
                MPT_UNLOCK(mpt);
                return (ENOMEM);
        }
        mpt->cdev->si_drv1 = mpt;
        return (0);
}

static int
mpt_user_enable(struct mpt_softc *mpt)
{

        return (0);
}

static void
mpt_user_ready(struct mpt_softc *mpt)
{

}

static int
mpt_user_event(struct mpt_softc *mpt, request_t *req,
    MSG_EVENT_NOTIFY_REPLY *msg)
{

        /* Someday we may want to let a user daemon listen for events? */
        return (0);
}

static void
mpt_user_reset(struct mpt_softc *mpt, int type)
{

}

static void
mpt_user_detach(struct mpt_softc *mpt)
{
        mpt_handler_t handler;

        /* XXX: do a purge of pending requests? */
        destroy_dev(mpt->cdev);

        MPT_LOCK(mpt);
        handler.reply_handler = mpt_user_reply_handler;
        mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
            user_handler_id);
        MPT_UNLOCK(mpt);
}

static int
mpt_open(struct dev_open_args *ap)
{

        return (0);
}

static int
mpt_close(struct dev_close_args *ap)
{

        return (0);
}

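/*
 * Allocate a DMA-safe bounce buffer (32-bit addressable, capped at 16MB)
 * that the ioctl handlers use to stage page and RAID action data for the
 * IOC.
 */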
static int
mpt_alloc_buffer(struct mpt_softc *mpt, struct mpt_page_memory *page_mem,
    size_t len)
{
        struct mpt_map_info mi;
        int error;

        page_mem->vaddr = NULL;

        /* Limit requests to 16M. */
        if (len > 16 * 1024 * 1024)
                return (ENOSPC);
        error = mpt_dma_tag_create(mpt, mpt->parent_dmat, 1, 0,
            BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
            len, 1, len, 0, &page_mem->tag);
        if (error)
                return (error);
        error = bus_dmamem_alloc(page_mem->tag, &page_mem->vaddr,
            BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &page_mem->map);
        if (error) {
                bus_dma_tag_destroy(page_mem->tag);
                return (error);
        }
        mi.mpt = mpt;
        error = bus_dmamap_load(page_mem->tag, page_mem->map, page_mem->vaddr,
            len, mpt_map_rquest, &mi, BUS_DMA_NOWAIT);
        if (error == 0)
                error = mi.error;
        if (error) {
                bus_dmamem_free(page_mem->tag, page_mem->vaddr, page_mem->map);
                bus_dma_tag_destroy(page_mem->tag);
                page_mem->vaddr = NULL;
                return (error);
        }
        page_mem->paddr = mi.phys;
        return (0);
}

static void
mpt_free_buffer(struct mpt_page_memory *page_mem)
{

        if (page_mem->vaddr == NULL)
                return;
        bus_dmamap_unload(page_mem->tag, page_mem->map);
        bus_dmamem_free(page_mem->tag, page_mem->vaddr, page_mem->map);
        bus_dma_tag_destroy(page_mem->tag);
        page_mem->vaddr = NULL;
}

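/*
 * Config page access is a two-step MPI exchange: a PAGE_HEADER action
 * first returns the page's version and length, then a READ/WRITE_CURRENT
 * action transfers the page itself through the DMA buffer.
 */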
static int
mpt_user_read_cfg_header(struct mpt_softc *mpt,
    struct mpt_cfg_page_req *page_req)
{
        request_t  *req;
        cfgparms_t params;
        MSG_CONFIG *cfgp;
        int         error;

        req = mpt_get_request(mpt, TRUE);
        if (req == NULL) {
                mpt_prt(mpt, "mpt_user_read_cfg_header: Get request failed!\n");
                return (ENOMEM);
        }

        params.Action = MPI_CONFIG_ACTION_PAGE_HEADER;
        params.PageVersion = 0;
        params.PageLength = 0;
        params.PageNumber = page_req->header.PageNumber;
        params.PageType = page_req->header.PageType;
        params.PageAddress = le32toh(page_req->page_address);
        error = mpt_issue_cfg_req(mpt, req, &params, /*addr*/0, /*len*/0,
                                  TRUE, 5000);
        if (error != 0) {
                /*
                 * Leave the request. Without resetting the chip, it's
                 * still owned by it and we'll just get into trouble
                 * freeing it now. Mark it as abandoned so that if it
                 * shows up later it can be freed.
                 */
                mpt_prt(mpt, "read_cfg_header timed out\n");
                return (ETIMEDOUT);
        }

        page_req->ioc_status = htole16(req->IOCStatus);
        if ((req->IOCStatus & MPI_IOCSTATUS_MASK) == MPI_IOCSTATUS_SUCCESS) {
                cfgp = req->req_vbuf;
                bcopy(&cfgp->Header, &page_req->header,
                    sizeof(page_req->header));
        }
        mpt_free_request(mpt, req);
        return (0);
}

static int
mpt_user_read_cfg_page(struct mpt_softc *mpt, struct mpt_cfg_page_req *page_req,
    struct mpt_page_memory *mpt_page)
{
        CONFIG_PAGE_HEADER *hdr;
        request_t    *req;
        cfgparms_t    params;
        int           error;

        req = mpt_get_request(mpt, TRUE);
        if (req == NULL) {
                mpt_prt(mpt, "mpt_user_read_cfg_page: Get request failed!\n");
                return (ENOMEM);
        }

        hdr = mpt_page->vaddr;
        params.Action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
        params.PageVersion = hdr->PageVersion;
        params.PageLength = hdr->PageLength;
        params.PageNumber = hdr->PageNumber;
        params.PageType = hdr->PageType & MPI_CONFIG_PAGETYPE_MASK;
        params.PageAddress = le32toh(page_req->page_address);
        bus_dmamap_sync(mpt_page->tag, mpt_page->map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
        error = mpt_issue_cfg_req(mpt, req, &params, mpt_page->paddr,
            le32toh(page_req->len), TRUE, 5000);
        if (error != 0) {
                mpt_prt(mpt, "mpt_user_read_cfg_page timed out\n");
                return (ETIMEDOUT);
        }

        page_req->ioc_status = htole16(req->IOCStatus);
        if ((req->IOCStatus & MPI_IOCSTATUS_MASK) == MPI_IOCSTATUS_SUCCESS)
                bus_dmamap_sync(mpt_page->tag, mpt_page->map,
                    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
        mpt_free_request(mpt, req);
        return (0);
}

static int
mpt_user_read_extcfg_header(struct mpt_softc *mpt,
    struct mpt_ext_cfg_page_req *ext_page_req)
{
        request_t  *req;
        cfgparms_t params;
        MSG_CONFIG_REPLY *cfgp;
        int         error;

        req = mpt_get_request(mpt, TRUE);
        if (req == NULL) {
                mpt_prt(mpt, "mpt_user_read_extcfg_header: Get request failed!\n");
                return (ENOMEM);
        }

        params.Action = MPI_CONFIG_ACTION_PAGE_HEADER;
        params.PageVersion = ext_page_req->header.PageVersion;
        params.PageLength = 0;
        params.PageNumber = ext_page_req->header.PageNumber;
        params.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
        params.PageAddress = le32toh(ext_page_req->page_address);
        params.ExtPageType = ext_page_req->header.ExtPageType;
        params.ExtPageLength = 0;
        error = mpt_issue_cfg_req(mpt, req, &params, /*addr*/0, /*len*/0,
                                  TRUE, 5000);
        if (error != 0) {
                /*
                 * Leave the request. Without resetting the chip, it's
                 * still owned by it and we'll just get into trouble
                 * freeing it now. Mark it as abandoned so that if it
                 * shows up later it can be freed.
                 */
                mpt_prt(mpt, "mpt_user_read_extcfg_header timed out\n");
                return (ETIMEDOUT);
        }

        ext_page_req->ioc_status = htole16(req->IOCStatus);
        if ((req->IOCStatus & MPI_IOCSTATUS_MASK) == MPI_IOCSTATUS_SUCCESS) {
                cfgp = req->req_vbuf;
                ext_page_req->header.PageVersion = cfgp->Header.PageVersion;
                ext_page_req->header.PageNumber = cfgp->Header.PageNumber;
                ext_page_req->header.PageType = cfgp->Header.PageType;
                ext_page_req->header.ExtPageLength = cfgp->ExtPageLength;
                ext_page_req->header.ExtPageType = cfgp->ExtPageType;
        }
        mpt_free_request(mpt, req);
        return (0);
}

static int
mpt_user_read_extcfg_page(struct mpt_softc *mpt,
    struct mpt_ext_cfg_page_req *ext_page_req, struct mpt_page_memory *mpt_page)
{
        CONFIG_EXTENDED_PAGE_HEADER *hdr;
        request_t    *req;
        cfgparms_t    params;
        int           error;

        req = mpt_get_request(mpt, TRUE);
        if (req == NULL) {
                mpt_prt(mpt, "mpt_user_read_extcfg_page: Get request failed!\n");
                return (ENOMEM);
        }

        hdr = mpt_page->vaddr;
        params.Action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
        params.PageVersion = hdr->PageVersion;
        params.PageLength = 0;
        params.PageNumber = hdr->PageNumber;
        params.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
        params.PageAddress = le32toh(ext_page_req->page_address);
        params.ExtPageType = hdr->ExtPageType;
        params.ExtPageLength = hdr->ExtPageLength;
        bus_dmamap_sync(mpt_page->tag, mpt_page->map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
        error = mpt_issue_cfg_req(mpt, req, &params, mpt_page->paddr,
            le32toh(ext_page_req->len), TRUE, 5000);
        if (error != 0) {
                mpt_prt(mpt, "mpt_user_read_extcfg_page timed out\n");
                return (ETIMEDOUT);
        }

        ext_page_req->ioc_status = htole16(req->IOCStatus);
        if ((req->IOCStatus & MPI_IOCSTATUS_MASK) == MPI_IOCSTATUS_SUCCESS)
                bus_dmamap_sync(mpt_page->tag, mpt_page->map,
                    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
        mpt_free_request(mpt, req);
        return (0);
}

static int
mpt_user_write_cfg_page(struct mpt_softc *mpt,
    struct mpt_cfg_page_req *page_req, struct mpt_page_memory *mpt_page)
{
        CONFIG_PAGE_HEADER *hdr;
        request_t    *req;
        cfgparms_t    params;
        u_int         hdr_attr;
        int           error;

        hdr = mpt_page->vaddr;
        hdr_attr = hdr->PageType & MPI_CONFIG_PAGEATTR_MASK;
        if (hdr_attr != MPI_CONFIG_PAGEATTR_CHANGEABLE &&
            hdr_attr != MPI_CONFIG_PAGEATTR_PERSISTENT) {
                mpt_prt(mpt, "page type 0x%x not changeable\n",
                        hdr->PageType & MPI_CONFIG_PAGETYPE_MASK);
                return (EINVAL);
        }

#if     0
        /*
         * We shouldn't mask off other bits here.
         */
        hdr->PageType &= ~MPI_CONFIG_PAGETYPE_MASK;
#endif

        req = mpt_get_request(mpt, TRUE);
        if (req == NULL)
                return (ENOMEM);

        bus_dmamap_sync(mpt_page->tag, mpt_page->map, BUS_DMASYNC_PREREAD |
            BUS_DMASYNC_PREWRITE);

        /*
         * There isn't any point in restoring stripped out attributes
         * if you then mask them going down to issue the request.
         */

        params.Action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT;
        params.PageVersion = hdr->PageVersion;
        params.PageLength = hdr->PageLength;
        params.PageNumber = hdr->PageNumber;
        params.PageAddress = le32toh(page_req->page_address);
#if     0
        /* Restore stripped out attributes */
        hdr->PageType |= hdr_attr;
        params.PageType = hdr->PageType & MPI_CONFIG_PAGETYPE_MASK;
#else
        params.PageType = hdr->PageType;
#endif
        error = mpt_issue_cfg_req(mpt, req, &params, mpt_page->paddr,
            le32toh(page_req->len), TRUE, 5000);
        if (error != 0) {
                mpt_prt(mpt, "mpt_user_write_cfg_page timed out\n");
                return (ETIMEDOUT);
        }

        page_req->ioc_status = htole16(req->IOCStatus);
        bus_dmamap_sync(mpt_page->tag, mpt_page->map, BUS_DMASYNC_POSTREAD |
            BUS_DMASYNC_POSTWRITE);
        mpt_free_request(mpt, req);
        return (0);
}

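/*
 * Completion handler for RAID action requests submitted on behalf of
 * userland: it latches IOCStatus and the RAID action results into the
 * request buffer and wakes the ioctl thread sleeping in
 * mpt_user_raid_action() (or frees the request if it already timed out).
 */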
static int
mpt_user_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
        MSG_RAID_ACTION_REPLY *reply;
        struct mpt_user_raid_action_result *res;

        if (req == NULL)
                return (TRUE);

        if (reply_frame != NULL) {
                reply = (MSG_RAID_ACTION_REPLY *)reply_frame;
                req->IOCStatus = le16toh(reply->IOCStatus);
                res = (struct mpt_user_raid_action_result *)
                    (((uint8_t *)req->req_vbuf) + MPT_RQSL(mpt));
                res->action_status = reply->ActionStatus;
                res->volume_status = reply->VolumeStatus;
                bcopy(&reply->ActionData, res->action_data,
                    sizeof(res->action_data));
        }

        req->state &= ~REQ_STATE_QUEUED;
        req->state |= REQ_STATE_DONE;
        TAILQ_REMOVE(&mpt->request_pending_list, req, links);

        if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
                wakeup(req);
        } else if ((req->state & REQ_STATE_TIMEDOUT) != 0) {
                /*
                 * Whew- we can free this request (late completion)
                 */
                mpt_free_request(mpt, req);
        }

        return (TRUE);
}

/*
 * We use the first part of the request buffer after the request frame
 * to hold the action data and action status from the RAID reply.  The
 * rest of the request buffer is used to hold the buffer for the
 * action SGE.
 */
static int
mpt_user_raid_action(struct mpt_softc *mpt, struct mpt_raid_action *raid_act,
        struct mpt_page_memory *mpt_page)
{
        request_t *req;
        struct mpt_user_raid_action_result *res;
        MSG_RAID_ACTION_REQUEST *rap;
        SGE_SIMPLE32 *se;
        int error;

        req = mpt_get_request(mpt, TRUE);
        if (req == NULL)
                return (ENOMEM);
        rap = req->req_vbuf;
        memset(rap, 0, sizeof *rap);
        rap->Action = raid_act->action;
        rap->ActionDataWord = raid_act->action_data_word;
        rap->Function = MPI_FUNCTION_RAID_ACTION;
        rap->VolumeID = raid_act->volume_id;
        rap->VolumeBus = raid_act->volume_bus;
        rap->PhysDiskNum = raid_act->phys_disk_num;
        se = (SGE_SIMPLE32 *)&rap->ActionDataSGE;
        if (mpt_page->vaddr != NULL && raid_act->len != 0) {
                bus_dmamap_sync(mpt_page->tag, mpt_page->map,
                    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
                se->Address = htole32(mpt_page->paddr);
                MPI_pSGE_SET_LENGTH(se, le32toh(raid_act->len));
                /*
                 * Parenthesize the conditional so the transfer-direction
                 * flag is OR'd in with the other SGE flags; | binds
                 * tighter than ?:, so without the parentheses the whole
                 * expression collapses into the conditional.
                 */
                MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
                    MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
                    MPI_SGE_FLAGS_END_OF_LIST |
                    (raid_act->write ? MPI_SGE_FLAGS_HOST_TO_IOC :
                    MPI_SGE_FLAGS_IOC_TO_HOST)));
        }
        se->FlagsLength = htole32(se->FlagsLength);
        rap->MsgContext = htole32(req->index | user_handler_id);

        mpt_check_doorbell(mpt);
        mpt_send_cmd(mpt, req);

        error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, TRUE,
            2000);
        if (error != 0) {
                /*
                 * Leave request so it can be cleaned up later.
                 */
                mpt_prt(mpt, "mpt_user_raid_action timed out\n");
                return (error);
        }

        raid_act->ioc_status = htole16(req->IOCStatus);
        if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
                mpt_free_request(mpt, req);
                return (0);
        }

        res = (struct mpt_user_raid_action_result *)
            (((uint8_t *)req->req_vbuf) + MPT_RQSL(mpt));
        raid_act->volume_status = res->volume_status;
        raid_act->action_status = res->action_status;
        bcopy(res->action_data, raid_act->action_data,
            sizeof(res->action_data));
        if (mpt_page->vaddr != NULL)
                bus_dmamap_sync(mpt_page->tag, mpt_page->map,
                    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
        mpt_free_request(mpt, req);
        return (0);
}

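/*
 * On x86_64 the ioctl handler also accepts the 32-bit (MPTIO_*32)
 * layouts of the request structures; PTRIN/PTROUT convert the user
 * buffer pointers between the 32-bit and native representations.
 */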
#ifdef __x86_64__
#define PTRIN(p)                ((void *)(uintptr_t)(p))
#define PTROUT(v)               ((u_int32_t)(uintptr_t)(v))
#endif

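/*
 * Rough userland usage sketch (illustrative only; assumes the request
 * structures and MPTIO_* ioctls exposed by <sys/mpt_ioctl.h>, and an IOC
 * page chosen by the caller):
 *
 *      struct mpt_cfg_page_req req;
 *      int fd = open("/dev/mpt0", O_RDWR);
 *
 *      memset(&req, 0, sizeof(req));
 *      req.header.PageType = MPI_CONFIG_PAGETYPE_IOC;
 *      req.header.PageNumber = 2;
 *      if (ioctl(fd, MPTIO_READ_CFG_HEADER, &req) == 0 &&
 *          req.ioc_status == MPI_IOCSTATUS_SUCCESS) {
 *              req.len = req.header.PageLength * 4;
 *              req.buf = calloc(1, req.len);
 *              memcpy(req.buf, &req.header, sizeof(req.header));
 *              (void)ioctl(fd, MPTIO_READ_CFG_PAGE, &req);
 *      }
 */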
static int
mpt_ioctl(struct dev_ioctl_args *ap)
{
        cdev_t dev = ap->a_head.a_dev;
        u_long cmd = ap->a_cmd;
        caddr_t arg = ap->a_data;
        struct mpt_softc *mpt;
        struct mpt_cfg_page_req *page_req;
        struct mpt_ext_cfg_page_req *ext_page_req;
        struct mpt_raid_action *raid_act;
        struct mpt_page_memory mpt_page;
#ifdef __x86_64__
        struct mpt_cfg_page_req32 *page_req32;
        struct mpt_cfg_page_req page_req_swab;
        struct mpt_ext_cfg_page_req32 *ext_page_req32;
        struct mpt_ext_cfg_page_req ext_page_req_swab;
        struct mpt_raid_action32 *raid_act32;
        struct mpt_raid_action raid_act_swab;
#endif
        int error;

        mpt = dev->si_drv1;
        page_req = (void *)arg;
        ext_page_req = (void *)arg;
        raid_act = (void *)arg;
        mpt_page.vaddr = NULL;

#ifdef __x86_64__
        /* Convert 32-bit structs to native ones. */
        page_req32 = (void *)arg;
        ext_page_req32 = (void *)arg;
        raid_act32 = (void *)arg;
        switch (cmd) {
        case MPTIO_READ_CFG_HEADER32:
        case MPTIO_READ_CFG_PAGE32:
        case MPTIO_WRITE_CFG_PAGE32:
                page_req = &page_req_swab;
                page_req->header = page_req32->header;
                page_req->page_address = page_req32->page_address;
                page_req->buf = PTRIN(page_req32->buf);
                page_req->len = page_req32->len;
                page_req->ioc_status = page_req32->ioc_status;
                break;
        case MPTIO_READ_EXT_CFG_HEADER32:
        case MPTIO_READ_EXT_CFG_PAGE32:
                ext_page_req = &ext_page_req_swab;
                ext_page_req->header = ext_page_req32->header;
                ext_page_req->page_address = ext_page_req32->page_address;
                ext_page_req->buf = PTRIN(ext_page_req32->buf);
                ext_page_req->len = ext_page_req32->len;
                ext_page_req->ioc_status = ext_page_req32->ioc_status;
                break;
        case MPTIO_RAID_ACTION32:
                raid_act = &raid_act_swab;
                raid_act->action = raid_act32->action;
                raid_act->volume_bus = raid_act32->volume_bus;
                raid_act->volume_id = raid_act32->volume_id;
                raid_act->phys_disk_num = raid_act32->phys_disk_num;
                raid_act->action_data_word = raid_act32->action_data_word;
                raid_act->buf = PTRIN(raid_act32->buf);
                raid_act->len = raid_act32->len;
                raid_act->volume_status = raid_act32->volume_status;
                bcopy(raid_act32->action_data, raid_act->action_data,
                    sizeof(raid_act->action_data));
                raid_act->action_status = raid_act32->action_status;
                raid_act->ioc_status = raid_act32->ioc_status;
                raid_act->write = raid_act32->write;
                break;
        }
#endif

        switch (cmd) {
#ifdef __x86_64__
        case MPTIO_READ_CFG_HEADER32:
#endif
        case MPTIO_READ_CFG_HEADER:
                MPT_LOCK(mpt);
                error = mpt_user_read_cfg_header(mpt, page_req);
                MPT_UNLOCK(mpt);
                break;
#ifdef __x86_64__
        case MPTIO_READ_CFG_PAGE32:
#endif
        case MPTIO_READ_CFG_PAGE:
                error = mpt_alloc_buffer(mpt, &mpt_page, page_req->len);
                if (error)
                        break;
                error = copyin(page_req->buf, mpt_page.vaddr,
                    sizeof(CONFIG_PAGE_HEADER));
                if (error)
                        break;
                MPT_LOCK(mpt);
                error = mpt_user_read_cfg_page(mpt, page_req, &mpt_page);
                MPT_UNLOCK(mpt);
                if (error)
                        break;
                error = copyout(mpt_page.vaddr, page_req->buf, page_req->len);
                break;
#ifdef __x86_64__
        case MPTIO_READ_EXT_CFG_HEADER32:
#endif
        case MPTIO_READ_EXT_CFG_HEADER:
                MPT_LOCK(mpt);
                error = mpt_user_read_extcfg_header(mpt, ext_page_req);
                MPT_UNLOCK(mpt);
                break;
#ifdef __x86_64__
        case MPTIO_READ_EXT_CFG_PAGE32:
#endif
        case MPTIO_READ_EXT_CFG_PAGE:
                error = mpt_alloc_buffer(mpt, &mpt_page, ext_page_req->len);
                if (error)
                        break;
                error = copyin(ext_page_req->buf, mpt_page.vaddr,
                    sizeof(CONFIG_EXTENDED_PAGE_HEADER));
                if (error)
                        break;
                MPT_LOCK(mpt);
                error = mpt_user_read_extcfg_page(mpt, ext_page_req, &mpt_page);
                MPT_UNLOCK(mpt);
                if (error)
                        break;
                error = copyout(mpt_page.vaddr, ext_page_req->buf,
                    ext_page_req->len);
                break;
#ifdef __x86_64__
        case MPTIO_WRITE_CFG_PAGE32:
#endif
        case MPTIO_WRITE_CFG_PAGE:
                error = mpt_alloc_buffer(mpt, &mpt_page, page_req->len);
                if (error)
                        break;
                error = copyin(page_req->buf, mpt_page.vaddr, page_req->len);
                if (error)
                        break;
                MPT_LOCK(mpt);
                error = mpt_user_write_cfg_page(mpt, page_req, &mpt_page);
                MPT_UNLOCK(mpt);
                break;
#ifdef __x86_64__
        case MPTIO_RAID_ACTION32:
#endif
        case MPTIO_RAID_ACTION:
                if (raid_act->buf != NULL) {
                        error = mpt_alloc_buffer(mpt, &mpt_page, raid_act->len);
                        if (error)
                                break;
                        error = copyin(raid_act->buf, mpt_page.vaddr,
                            raid_act->len);
                        if (error)
                                break;
                }
                MPT_LOCK(mpt);
                error = mpt_user_raid_action(mpt, raid_act, &mpt_page);
                MPT_UNLOCK(mpt);
                if (error)
                        break;
                if (raid_act->buf != NULL)
                        error = copyout(mpt_page.vaddr, raid_act->buf,
                            raid_act->len);
                break;
        default:
                error = ENOIOCTL;
                break;
        }

        mpt_free_buffer(&mpt_page);

        if (error)
                return (error);

#ifdef __x86_64__
        /* Convert native structs to 32-bit ones. */
        switch (cmd) {
        case MPTIO_READ_CFG_HEADER32:
        case MPTIO_READ_CFG_PAGE32:
        case MPTIO_WRITE_CFG_PAGE32:
                page_req32->header = page_req->header;
                page_req32->page_address = page_req->page_address;
                page_req32->buf = PTROUT(page_req->buf);
                page_req32->len = page_req->len;
                page_req32->ioc_status = page_req->ioc_status;
                break;
        case MPTIO_READ_EXT_CFG_HEADER32:
        case MPTIO_READ_EXT_CFG_PAGE32:
                ext_page_req32->header = ext_page_req->header;
                ext_page_req32->page_address = ext_page_req->page_address;
                ext_page_req32->buf = PTROUT(ext_page_req->buf);
                ext_page_req32->len = ext_page_req->len;
                ext_page_req32->ioc_status = ext_page_req->ioc_status;
                break;
        case MPTIO_RAID_ACTION32:
                raid_act32->action = raid_act->action;
                raid_act32->volume_bus = raid_act->volume_bus;
                raid_act32->volume_id = raid_act->volume_id;
                raid_act32->phys_disk_num = raid_act->phys_disk_num;
                raid_act32->action_data_word = raid_act->action_data_word;
                raid_act32->buf = PTROUT(raid_act->buf);
                raid_act32->len = raid_act->len;
                raid_act32->volume_status = raid_act->volume_status;
                bcopy(raid_act->action_data, raid_act32->action_data,
                    sizeof(raid_act->action_data));
                raid_act32->action_status = raid_act->action_status;
                raid_act32->ioc_status = raid_act->ioc_status;
                raid_act32->write = raid_act->write;
                break;
        }
#endif

        return (0);
}