X-Git-Url: https://gitweb.dragonflybsd.org/dragonfly.git/blobdiff_plain/45c6d8c4132b612b445b858c6ed1d930140bc840..e0fb398bfbef1fb6d12dfb8308cdc83ce663cbc2:/sys/dev/disk/ahci/ahci_cam.c

diff --git a/sys/dev/disk/ahci/ahci_cam.c b/sys/dev/disk/ahci/ahci_cam.c
index d5f524da0e..8e61fbf4f7 100644
--- a/sys/dev/disk/ahci/ahci_cam.c
+++ b/sys/dev/disk/ahci/ahci_cam.c
@@ -1,4 +1,6 @@
 /*
+ * (MPSAFE)
+ *
  * Copyright (c) 2009 The DragonFly Project.  All rights reserved.
  *
  * This code is derived from software contributed to The DragonFly Project
@@ -78,6 +80,7 @@ static void ahci_ata_atapi_sense(struct ata_fis_d2h *rfis,
 static int ahci_cam_probe_disk(struct ahci_port *ap, struct ata_port *at);
 static int ahci_cam_probe_atapi(struct ahci_port *ap, struct ata_port *at);
+static int ahci_set_xfer(struct ahci_port *ap, struct ata_port *atx);
 static void ahci_ata_dummy_done(struct ata_xfer *xa);
 static void ata_fix_identify(struct ata_identify *id);
 static void ahci_cam_rescan(struct ahci_port *ap);
@@ -103,15 +106,23 @@ ahci_cam_attach(struct ahci_port *ap)
 	if (devq == NULL) {
 		return (ENOMEM);
 	}
+
+	/*
+	 * Give the devq enough room to run with 32 max_dev_transactions,
+	 * but set the overall max tags to 1 until NCQ is negotiated.
+	 */
 	sim = cam_sim_alloc(ahci_xpt_action, ahci_xpt_poll, "ahci",
-			    (void *)ap, unit, &sim_mplock, 1, 1, devq);
+			    (void *)ap, unit, &ap->ap_sim_lock,
+			    32, 1, devq);
 	cam_simq_release(devq);
 	if (sim == NULL) {
 		return (ENOMEM);
 	}
 	ap->ap_sim = sim;
 	ahci_os_unlock_port(ap);
+	lockmgr(&ap->ap_sim_lock, LK_EXCLUSIVE);
 	error = xpt_bus_register(ap->ap_sim, ap->ap_num);
+	lockmgr(&ap->ap_sim_lock, LK_RELEASE);
 	ahci_os_lock_port(ap);
 	if (error != CAM_SUCCESS) {
 		ahci_cam_detach(ap);
@@ -189,7 +200,7 @@ ahci_cam_detach(struct ahci_port *ap)
 	if ((ap->ap_flags & AP_F_CAM_ATTACHED) == 0)
 		return;
-	get_mplock();
+	lockmgr(&ap->ap_sim_lock, LK_EXCLUSIVE);
 	if (ap->ap_sim) {
 		xpt_freeze_simq(ap->ap_sim, 1);
 	}
@@ -202,7 +213,7 @@ ahci_cam_detach(struct ahci_port *ap)
 		cam_sim_free(ap->ap_sim);
 		ap->ap_sim = NULL;
 	}
-	rel_mplock();
+	lockmgr(&ap->ap_sim_lock, LK_RELEASE);
 	ap->ap_flags &= ~AP_F_CAM_ATTACHED;
 }
@@ -367,8 +378,8 @@ ahci_cam_probe(struct ahci_port *ap, struct ata_port *atx)
 				}
 			}
 			if (at->at_ncqdepth >= ap->ap_sc->sc_ncmds) {
-				cam_devq_resize(ap->ap_sim->devq,
-						at->at_ncqdepth - 1);
+				cam_sim_set_max_tags(ap->ap_sim,
+						     at->at_ncqdepth - 1);
 			}
 		}
 	} else {
@@ -494,6 +505,11 @@ ahci_cam_probe_disk(struct ahci_port *ap, struct ata_port *atx)
 	at = atx ? atx : ap->ap_ata[0];
 
+	/*
+	 * Set dummy xfer mode
+	 */
+	ahci_set_xfer(ap, atx);
+
 	/*
 	 * Enable write cache if supported
 	 *
@@ -580,6 +596,68 @@ ahci_cam_probe_disk(struct ahci_port *ap, struct ata_port *atx)
 static int
 ahci_cam_probe_atapi(struct ahci_port *ap, struct ata_port *atx)
 {
+	ahci_set_xfer(ap, atx);
+	return(0);
+}
+
+/*
+ * Setting the transfer mode is irrelevant for the SATA transport
+ * but some (atapi) devices seem to need it anyway.  In addition
+ * if we are running through a SATA->PATA converter for some reason
+ * beyond my comprehension we might have to set the mode.
+ *
+ * We only support DMA modes for SATA attached devices, so don't bother
+ * with legacy modes.
+ */
+static int
+ahci_set_xfer(struct ahci_port *ap, struct ata_port *atx)
+{
+	struct ata_port *at;
+	struct ata_xfer *xa;
+	u_int16_t mode;
+	u_int16_t mask;
+
+	at = atx ? atx : ap->ap_ata[0];
+
+	/*
+	 * Figure out the supported UDMA mode.  Ignore other legacy modes.
+ */ + mask = le16toh(at->at_identify.ultradma); + if ((mask & 0xFF) == 0 || mask == 0xFFFF) + return(0); + mask &= 0xFF; + mode = 0x4F; + while ((mask & 0x8000) == 0) { + mask <<= 1; + --mode; + } + + /* + * SATA atapi devices often still report a dma mode, even though + * it is irrelevant for SATA transport. It is also possible that + * we are running through a SATA->PATA converter and seeing the + * PATA dma mode. + * + * In this case the device may require a (dummy) SETXFER to be + * sent before it will work properly. + */ + xa = ahci_ata_get_xfer(ap, atx); + xa->complete = ahci_ata_dummy_done; + xa->fis->command = ATA_C_SET_FEATURES; + xa->fis->features = ATA_SF_SETXFER; + xa->fis->flags = ATA_H2D_FLAGS_CMD | at->at_target; + xa->fis->sector_count = mode; + xa->flags = ATA_F_PIO | ATA_F_POLL; + xa->timeout = 1000; + xa->datalen = 0; + if (ahci_ata_cmd(xa) != ATA_S_COMPLETE) { + kprintf("%s: Unable to set dummy xfer mode \n", + ATANAME(ap, atx)); + } else if (bootverbose) { + kprintf("%s: Set dummy xfer mode to %02x\n", + ATANAME(ap, atx), mode); + } + ahci_ata_put_xfer(xa); return(0); } @@ -804,6 +882,9 @@ ahci_xpt_action(struct cam_sim *sim, union ccb *ccb) case AHCI_PREG_SSTS_SPD_GEN2: ccb->cpi.base_transfer_speed = 300000; break; + case AHCI_PREG_SSTS_SPD_GEN3: + ccb->cpi.base_transfer_speed = 600000; + break; default: /* unknown */ ccb->cpi.base_transfer_speed = 1000; @@ -877,6 +958,17 @@ ahci_xpt_action(struct cam_sim *sim, union ccb *ccb) break; } break; + case XPT_TRIM: + { + scsi_cdb_t cdb; + struct ccb_scsiio *csio; + csio = &ccb->csio; + cdb = (void *)((ccbh->flags & CAM_CDB_POINTER) ? + csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes); + cdb->generic.opcode = TRIM; + ahci_xpt_scsi_disk_io(ap, atx, ccb); + break; + } default: ccbh->status = CAM_REQ_INVALID; xpt_done(ccb); @@ -986,6 +1078,17 @@ ahci_xpt_scsi_disk_io(struct ahci_port *ap, struct ata_port *atx, sizeof(rdata->inquiry_data.revision)); ccbh->status = CAM_REQ_CMP; } + + /* + * Use the vendor specific area to set the TRIM status + * for scsi_da + */ + if (at->at_identify.support_dsm) { + rdata->inquiry_data.vendor_specific1[0] = + at->at_identify.support_dsm &ATA_SUPPORT_DSM_TRIM; + rdata->inquiry_data.vendor_specific1[1] = + at->at_identify.max_dsm_blocks; + } break; case READ_CAPACITY_16: if (cdb->read_capacity_16.service_action != SRC16_SERVICE_ACTION) { @@ -1038,6 +1141,36 @@ ahci_xpt_scsi_disk_io(struct ahci_port *ap, struct ata_port *atx, xa->flags = 0; xa->complete = ahci_ata_complete_disk_synchronize_cache; break; + case TRIM: + fis = xa->fis; + fis->command = ATA_C_DATA_SET_MANAGEMENT; + fis->features = (u_int8_t)ATA_SF_DSM_TRIM; + fis->features_exp = (u_int8_t)(ATA_SF_DSM_TRIM>> 8); + + xa->flags = ATA_F_WRITE; + fis->flags = ATA_H2D_FLAGS_CMD; + + xa->data = csio->data_ptr; + xa->datalen = csio->dxfer_len; + xa->timeout = ccbh->timeout*50; /* milliseconds */ + + fis->sector_count =(u_int8_t)(xa->datalen/512); + fis->sector_count_exp =(u_int8_t)((xa->datalen/512)>>8); + + lba = 0; + fis->lba_low = (u_int8_t)lba; + fis->lba_mid = (u_int8_t)(lba >> 8); + fis->lba_high = (u_int8_t)(lba >> 16); + fis->lba_low_exp = (u_int8_t)(lba >> 24); + fis->lba_mid_exp = (u_int8_t)(lba >> 32); + fis->lba_high_exp = (u_int8_t)(lba >> 40); + + fis->device = ATA_H2D_DEVICE_LBA; + xa->data = csio->data_ptr; + + xa->complete = ahci_ata_complete_disk_rw; + ccbh->status = CAM_REQ_INPROG; + break; case TEST_UNIT_READY: case START_STOP_UNIT: case PREVENT_ALLOW: @@ -1064,7 +1197,7 @@ ahci_xpt_scsi_disk_io(struct ahci_port *ap, struct 
 	default:
 		xa->flags = 0;
 	}
-	xa->flags |= ATA_F_POLL;
+	xa->flags |= ATA_F_POLL | ATA_F_EXCLUSIVE;
 	xa->data = csio->data_ptr;
 	xa->datalen = csio->dxfer_len;
 	xa->complete = ahci_ata_complete_disk_rw;
@@ -1106,7 +1239,7 @@ ahci_xpt_scsi_disk_io(struct ahci_port *ap, struct ata_port *atx,
 	default:
 		xa->flags = 0;
 	}
-	xa->flags |= ATA_F_POLL;
+	xa->flags |= ATA_F_POLL | ATA_F_EXCLUSIVE;
 	xa->data = csio->data_ptr;
 	xa->datalen = csio->dxfer_len;
 	xa->complete = ahci_ata_complete_disk_rw;
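
The ahci_set_xfer() function added above derives its SETXFER argument from ATA IDENTIFY word 88 (the "ultradma" field): the low byte is a bitmap of supported Ultra DMA modes (bit n set means UDMA mode n is supported), and the SET FEATURES/SETXFER encoding for UDMA mode n is 0x40 | n, which is what the mask/mode loop in the patch computes. A minimal standalone sketch of the same computation, with illustrative names that are not part of the driver:

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative only: map IDENTIFY word 88 ("ultradma") to the SETXFER
 * sector-count argument, the same result ahci_set_xfer()'s shift loop
 * produces.  Returns -1 if no usable UDMA information is reported.
 */
static int
udma_setxfer_mode(uint16_t ultradma)
{
	uint16_t mask = ultradma & 0xFF;	/* supported-mode bitmap */
	int mode;

	if (mask == 0 || ultradma == 0xFFFF)
		return (-1);
	for (mode = 7; mode >= 0; --mode) {
		if (mask & (1 << mode))
			return (0x40 | mode);	/* UDMA mode n -> 0x40 | n */
	}
	return (-1);
}

int
main(void)
{
	/* 0x007F: UDMA modes 0-6 supported -> expect 0x46 (UDMA6) */
	printf("0x007f -> 0x%02x\n", (unsigned)udma_setxfer_mode(0x007F));
	/* 0x0001: only UDMA0 supported -> expect 0x40 */
	printf("0x0001 -> 0x%02x\n", (unsigned)udma_setxfer_mode(0x0001));
	return (0);
}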
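
The TRIM case added to ahci_xpt_scsi_disk_io() issues ATA DATA SET MANAGEMENT with the TRIM feature bit and sets fis->sector_count to the number of 512-byte payload blocks (xa->datalen / 512). The payload itself arrives prebuilt in csio->data_ptr from whichever layer issued the XPT_TRIM ccb; per the ATA command set it is an array of 8-byte range entries, each a little-endian 64-bit value carrying a 48-bit starting LBA and a 16-bit sector count, with zero-length entries ignored. A minimal sketch of packing such a payload, using illustrative helper names that are not DragonFly APIs:

#include <stdint.h>
#include <string.h>

#define TRIM_BLOCK_SIZE	512

/*
 * Illustrative only: pack one DATA SET MANAGEMENT (TRIM) range entry.
 * Each entry is a little-endian 64-bit value with the starting LBA in
 * bits 0-47 and the range length in sectors in bits 48-63.
 */
static void
trim_pack_entry(uint8_t *block, int index, uint64_t lba, uint16_t nsectors)
{
	uint64_t ent;
	int i;

	ent = (lba & 0x0000FFFFFFFFFFFFULL) | ((uint64_t)nsectors << 48);
	for (i = 0; i < 8; ++i)
		block[index * 8 + i] = (uint8_t)(ent >> (i * 8));
}

/*
 * Build a single-range TRIM payload.  The resulting 512-byte block would
 * be passed as csio->data_ptr with dxfer_len = 512, which the TRIM case
 * above turns into fis->sector_count = 1.
 */
static void
trim_build_payload(uint8_t block[TRIM_BLOCK_SIZE], uint64_t lba,
		   uint16_t nsectors)
{
	memset(block, 0, TRIM_BLOCK_SIZE);	/* zero = unused entries */
	trim_pack_entry(block, 0, lba, nsectors);
}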