Diffstat (limited to 'drivers/scsi/aacraid')

 drivers/scsi/aacraid/aachba.c   | 410
 drivers/scsi/aacraid/aacraid.h  | 106
 drivers/scsi/aacraid/commctrl.c |  10
 drivers/scsi/aacraid/comminit.c | 106
 drivers/scsi/aacraid/commsup.c  |  96
 drivers/scsi/aacraid/dpcsup.c   |  13
 drivers/scsi/aacraid/linit.c    |  61
 drivers/scsi/aacraid/rx.c       |  14
 drivers/scsi/aacraid/src.c      | 438

 9 files changed, 1012 insertions(+), 242 deletions(-)
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index b32e77db0c48..9b3dd6ef6a0b 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c @@ -111,6 +111,41 @@ #define BYTE2(x) (unsigned char)((x) >> 16) #define BYTE3(x) (unsigned char)((x) >> 24) +/* MODE_SENSE data format */ +typedef struct { + struct { + u8 data_length; + u8 med_type; + u8 dev_par; + u8 bd_length; + } __attribute__((packed)) hd; + struct { + u8 dens_code; + u8 block_count[3]; + u8 reserved; + u8 block_length[3]; + } __attribute__((packed)) bd; + u8 mpc_buf[3]; +} __attribute__((packed)) aac_modep_data; + +/* MODE_SENSE_10 data format */ +typedef struct { + struct { + u8 data_length[2]; + u8 med_type; + u8 dev_par; + u8 rsrvd[2]; + u8 bd_length[2]; + } __attribute__((packed)) hd; + struct { + u8 dens_code; + u8 block_count[3]; + u8 reserved; + u8 block_length[3]; + } __attribute__((packed)) bd; + u8 mpc_buf[3]; +} __attribute__((packed)) aac_modep10_data; + /*------------------------------------------------------------------------------ * S T R U C T S / T Y P E D E F S *----------------------------------------------------------------------------*/ @@ -128,6 +163,48 @@ struct inquiry_data { u8 inqd_prl[4]; /* Product Revision Level */ }; +/* Added for VPD 0x83 */ +typedef struct { + u8 CodeSet:4; /* VPD_CODE_SET */ + u8 Reserved:4; + u8 IdentifierType:4; /* VPD_IDENTIFIER_TYPE */ + u8 Reserved2:4; + u8 Reserved3; + u8 IdentifierLength; + u8 VendId[8]; + u8 ProductId[16]; + u8 SerialNumber[8]; /* SN in ASCII */ + +} TVPD_ID_Descriptor_Type_1; + +typedef struct { + u8 CodeSet:4; /* VPD_CODE_SET */ + u8 Reserved:4; + u8 IdentifierType:4; /* VPD_IDENTIFIER_TYPE */ + u8 Reserved2:4; + u8 Reserved3; + u8 IdentifierLength; + struct TEU64Id { + u32 Serial; + /* The serial number supposed to be 40 bits, + * bit we only support 32, so make the last byte zero. 
*/ + u8 Reserved; + u8 VendId[3]; + } EU64Id; + +} TVPD_ID_Descriptor_Type_2; + +typedef struct { + u8 DeviceType:5; + u8 DeviceTypeQualifier:3; + u8 PageCode; + u8 Reserved; + u8 PageLength; + TVPD_ID_Descriptor_Type_1 IdDescriptorType1; + TVPD_ID_Descriptor_Type_2 IdDescriptorType2; + +} TVPD_Page83; + /* * M O D U L E G L O B A L S */ @@ -385,6 +462,11 @@ int aac_get_containers(struct aac_dev *dev) if (status >= 0) { dresp = (struct aac_get_container_count_resp *)fib_data(fibptr); maximum_num_containers = le32_to_cpu(dresp->ContainerSwitchEntries); + if (fibptr->dev->supplement_adapter_info.SupportedOptions2 & + AAC_OPTION_SUPPORTED_240_VOLUMES) { + maximum_num_containers = + le32_to_cpu(dresp->MaxSimpleVolumes); + } aac_fib_complete(fibptr); } /* FIB should be freed only after getting the response from the F/W */ @@ -438,7 +520,7 @@ static void get_container_name_callback(void *context, struct fib * fibptr) if ((le32_to_cpu(get_name_reply->status) == CT_OK) && (get_name_reply->data[0] != '\0')) { char *sp = get_name_reply->data; - sp[sizeof(((struct aac_get_name_resp *)NULL)->data)-1] = '\0'; + sp[sizeof(((struct aac_get_name_resp *)NULL)->data)] = '\0'; while (*sp == ' ') ++sp; if (*sp) { @@ -539,6 +621,14 @@ static void _aac_probe_container2(void * context, struct fib * fibptr) if ((le32_to_cpu(dresp->status) == ST_OK) && (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) && (le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) { + if (!(fibptr->dev->supplement_adapter_info.SupportedOptions2 & + AAC_OPTION_VARIABLE_BLOCK_SIZE)) { + dresp->mnt[0].fileinfo.bdevinfo.block_size = 0x200; + fsa_dev_ptr->block_size = 0x200; + } else { + fsa_dev_ptr->block_size = + le32_to_cpu(dresp->mnt[0].fileinfo.bdevinfo.block_size); + } fsa_dev_ptr->valid = 1; /* sense_key holds the current state of the spin-up */ if (dresp->mnt[0].state & cpu_to_le32(FSCS_NOT_READY)) @@ -571,7 +661,9 @@ static void _aac_probe_container1(void * context, struct fib * fibptr) int status; dresp = (struct aac_mount *) fib_data(fibptr); - dresp->mnt[0].capacityhigh = 0; + if (!(fibptr->dev->supplement_adapter_info.SupportedOptions2 & + AAC_OPTION_VARIABLE_BLOCK_SIZE)) + dresp->mnt[0].capacityhigh = 0; if ((le32_to_cpu(dresp->status) != ST_OK) || (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE)) { _aac_probe_container2(context, fibptr); @@ -586,7 +678,12 @@ static void _aac_probe_container1(void * context, struct fib * fibptr) dinfo = (struct aac_query_mount *)fib_data(fibptr); - dinfo->command = cpu_to_le32(VM_NameServe64); + if (fibptr->dev->supplement_adapter_info.SupportedOptions2 & + AAC_OPTION_VARIABLE_BLOCK_SIZE) + dinfo->command = cpu_to_le32(VM_NameServeAllBlk); + else + dinfo->command = cpu_to_le32(VM_NameServe64); + dinfo->count = cpu_to_le32(scmd_id(scsicmd)); dinfo->type = cpu_to_le32(FT_FILESYS); @@ -621,7 +718,12 @@ static int _aac_probe_container(struct scsi_cmnd * scsicmd, int (*callback)(stru dinfo = (struct aac_query_mount *)fib_data(fibptr); - dinfo->command = cpu_to_le32(VM_NameServe); + if (fibptr->dev->supplement_adapter_info.SupportedOptions2 & + AAC_OPTION_VARIABLE_BLOCK_SIZE) + dinfo->command = cpu_to_le32(VM_NameServeAllBlk); + else + dinfo->command = cpu_to_le32(VM_NameServe); + dinfo->count = cpu_to_le32(scmd_id(scsicmd)); dinfo->type = cpu_to_le32(FT_FILESYS); scsicmd->SCp.ptr = (char *)callback; @@ -835,14 +937,88 @@ static void get_container_serial_callback(void *context, struct fib * fibptr) get_serial_reply = (struct aac_get_serial_resp *) fib_data(fibptr); /* Failure is irrelevant, using default value 
instead */ if (le32_to_cpu(get_serial_reply->status) == CT_OK) { - char sp[13]; - /* EVPD bit set */ - sp[0] = INQD_PDT_DA; - sp[1] = scsicmd->cmnd[2]; - sp[2] = 0; - sp[3] = snprintf(sp+4, sizeof(sp)-4, "%08X", - le32_to_cpu(get_serial_reply->uid)); - scsi_sg_copy_from_buffer(scsicmd, sp, sizeof(sp)); + /*Check to see if it's for VPD 0x83 or 0x80 */ + if (scsicmd->cmnd[2] == 0x83) { + /* vpd page 0x83 - Device Identification Page */ + int i; + TVPD_Page83 VPDPage83Data; + + memset(((u8 *)&VPDPage83Data), 0, + sizeof(VPDPage83Data)); + + /* DIRECT_ACCESS_DEVIC */ + VPDPage83Data.DeviceType = 0; + /* DEVICE_CONNECTED */ + VPDPage83Data.DeviceTypeQualifier = 0; + /* VPD_DEVICE_IDENTIFIERS */ + VPDPage83Data.PageCode = 0x83; + VPDPage83Data.Reserved = 0; + VPDPage83Data.PageLength = + sizeof(VPDPage83Data.IdDescriptorType1) + + sizeof(VPDPage83Data.IdDescriptorType2); + + /* T10 Vendor Identifier Field Format */ + /* VpdCodeSetAscii */ + VPDPage83Data.IdDescriptorType1.CodeSet = 2; + /* VpdIdentifierTypeVendorId */ + VPDPage83Data.IdDescriptorType1.IdentifierType = 1; + VPDPage83Data.IdDescriptorType1.IdentifierLength = + sizeof(VPDPage83Data.IdDescriptorType1) - 4; + + /* "ADAPTEC " for adaptec */ + memcpy(VPDPage83Data.IdDescriptorType1.VendId, + "ADAPTEC ", + sizeof(VPDPage83Data.IdDescriptorType1.VendId)); + memcpy(VPDPage83Data.IdDescriptorType1.ProductId, + "ARRAY ", + sizeof( + VPDPage83Data.IdDescriptorType1.ProductId)); + + /* Convert to ascii based serial number. + * The LSB is the the end. + */ + for (i = 0; i < 8; i++) { + u8 temp = + (u8)((get_serial_reply->uid >> ((7 - i) * 4)) & 0xF); + if (temp > 0x9) { + VPDPage83Data.IdDescriptorType1.SerialNumber[i] = + 'A' + (temp - 0xA); + } else { + VPDPage83Data.IdDescriptorType1.SerialNumber[i] = + '0' + temp; + } + } + + /* VpdCodeSetBinary */ + VPDPage83Data.IdDescriptorType2.CodeSet = 1; + /* VpdIdentifierTypeEUI64 */ + VPDPage83Data.IdDescriptorType2.IdentifierType = 2; + VPDPage83Data.IdDescriptorType2.IdentifierLength = + sizeof(VPDPage83Data.IdDescriptorType2) - 4; + + VPDPage83Data.IdDescriptorType2.EU64Id.VendId[0] = 0xD0; + VPDPage83Data.IdDescriptorType2.EU64Id.VendId[1] = 0; + VPDPage83Data.IdDescriptorType2.EU64Id.VendId[2] = 0; + + VPDPage83Data.IdDescriptorType2.EU64Id.Serial = + get_serial_reply->uid; + VPDPage83Data.IdDescriptorType2.EU64Id.Reserved = 0; + + /* Move the inquiry data to the response buffer. 
*/ + scsi_sg_copy_from_buffer(scsicmd, &VPDPage83Data, + sizeof(VPDPage83Data)); + } else { + /* It must be for VPD 0x80 */ + char sp[13]; + /* EVPD bit set */ + sp[0] = INQD_PDT_DA; + sp[1] = scsicmd->cmnd[2]; + sp[2] = 0; + sp[3] = snprintf(sp+4, sizeof(sp)-4, "%08X", + le32_to_cpu(get_serial_reply->uid)); + scsi_sg_copy_from_buffer(scsicmd, sp, + sizeof(sp)); + } } scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; @@ -982,7 +1158,8 @@ static int aac_read_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u3 memset(readcmd2, 0, sizeof(struct aac_raw_io2)); readcmd2->blockLow = cpu_to_le32((u32)(lba&0xffffffff)); readcmd2->blockHigh = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32)); - readcmd2->byteCount = cpu_to_le32(count<<9); + readcmd2->byteCount = cpu_to_le32(count * + dev->fsa_dev[scmd_id(cmd)].block_size); readcmd2->cid = cpu_to_le16(scmd_id(cmd)); readcmd2->flags = cpu_to_le16(RIO2_IO_TYPE_READ); ret = aac_build_sgraw2(cmd, readcmd2, @@ -997,7 +1174,8 @@ static int aac_read_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u3 readcmd = (struct aac_raw_io *) fib_data(fib); readcmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff)); readcmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32)); - readcmd->count = cpu_to_le32(count<<9); + readcmd->count = cpu_to_le32(count * + dev->fsa_dev[scmd_id(cmd)].block_size); readcmd->cid = cpu_to_le16(scmd_id(cmd)); readcmd->flags = cpu_to_le16(RIO_TYPE_READ); readcmd->bpTotal = 0; @@ -1062,6 +1240,7 @@ static int aac_read_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 { u16 fibsize; struct aac_read *readcmd; + struct aac_dev *dev = fib->dev; long ret; aac_fib_init(fib); @@ -1069,7 +1248,8 @@ static int aac_read_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 readcmd->command = cpu_to_le32(VM_CtBlockRead); readcmd->cid = cpu_to_le32(scmd_id(cmd)); readcmd->block = cpu_to_le32((u32)(lba&0xffffffff)); - readcmd->count = cpu_to_le32(count * 512); + readcmd->count = cpu_to_le32(count * + dev->fsa_dev[scmd_id(cmd)].block_size); ret = aac_build_sg(cmd, &readcmd->sg); if (ret < 0) @@ -1104,7 +1284,8 @@ static int aac_write_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u memset(writecmd2, 0, sizeof(struct aac_raw_io2)); writecmd2->blockLow = cpu_to_le32((u32)(lba&0xffffffff)); writecmd2->blockHigh = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32)); - writecmd2->byteCount = cpu_to_le32(count<<9); + writecmd2->byteCount = cpu_to_le32(count * + dev->fsa_dev[scmd_id(cmd)].block_size); writecmd2->cid = cpu_to_le16(scmd_id(cmd)); writecmd2->flags = (fua && ((aac_cache & 5) != 1) && (((aac_cache & 5) != 5) || !fib->dev->cache_protected)) ? @@ -1122,7 +1303,8 @@ static int aac_write_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u writecmd = (struct aac_raw_io *) fib_data(fib); writecmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff)); writecmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32)); - writecmd->count = cpu_to_le32(count<<9); + writecmd->count = cpu_to_le32(count * + dev->fsa_dev[scmd_id(cmd)].block_size); writecmd->cid = cpu_to_le16(scmd_id(cmd)); writecmd->flags = (fua && ((aac_cache & 5) != 1) && (((aac_cache & 5) != 5) || !fib->dev->cache_protected)) ? 
@@ -1190,6 +1372,7 @@ static int aac_write_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u3 { u16 fibsize; struct aac_write *writecmd; + struct aac_dev *dev = fib->dev; long ret; aac_fib_init(fib); @@ -1197,7 +1380,8 @@ static int aac_write_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u3 writecmd->command = cpu_to_le32(VM_CtBlockWrite); writecmd->cid = cpu_to_le32(scmd_id(cmd)); writecmd->block = cpu_to_le32((u32)(lba&0xffffffff)); - writecmd->count = cpu_to_le32(count * 512); + writecmd->count = cpu_to_le32(count * + dev->fsa_dev[scmd_id(cmd)].block_size); writecmd->sg.count = cpu_to_le32(1); /* ->stable is not used - it did mean which type of write */ @@ -2246,9 +2430,10 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) INQD_PDT_PROC : INQD_PDT_DA; if (scsicmd->cmnd[2] == 0) { /* supported vital product data pages */ - arr[3] = 2; + arr[3] = 3; arr[4] = 0x0; arr[5] = 0x80; + arr[6] = 0x83; arr[1] = scsicmd->cmnd[2]; scsi_sg_copy_from_buffer(scsicmd, &inq_data, sizeof(inq_data)); @@ -2264,7 +2449,16 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) if (aac_wwn != 2) return aac_get_container_serial( scsicmd); - /* SLES 10 SP1 special */ + scsicmd->result = DID_OK << 16 | + COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; + } else if (scsicmd->cmnd[2] == 0x83) { + /* vpd page 0x83 - Device Identification Page */ + char *sno = (char *)&inq_data; + sno[3] = setinqserial(dev, &sno[4], + scmd_id(scsicmd)); + if (aac_wwn != 2) + return aac_get_container_serial( + scsicmd); scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; } else { @@ -2329,10 +2523,10 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) cp[5] = (capacity >> 16) & 0xff; cp[6] = (capacity >> 8) & 0xff; cp[7] = (capacity >> 0) & 0xff; - cp[8] = 0; - cp[9] = 0; - cp[10] = 2; - cp[11] = 0; + cp[8] = (fsa_dev_ptr[cid].block_size >> 24) & 0xff; + cp[9] = (fsa_dev_ptr[cid].block_size >> 16) & 0xff; + cp[10] = (fsa_dev_ptr[cid].block_size >> 8) & 0xff; + cp[11] = (fsa_dev_ptr[cid].block_size) & 0xff; cp[12] = 0; alloc_len = ((scsicmd->cmnd[10] << 24) @@ -2369,10 +2563,10 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) cp[1] = (capacity >> 16) & 0xff; cp[2] = (capacity >> 8) & 0xff; cp[3] = (capacity >> 0) & 0xff; - cp[4] = 0; - cp[5] = 0; - cp[6] = 2; - cp[7] = 0; + cp[4] = (fsa_dev_ptr[cid].block_size >> 24) & 0xff; + cp[5] = (fsa_dev_ptr[cid].block_size >> 16) & 0xff; + cp[6] = (fsa_dev_ptr[cid].block_size >> 8) & 0xff; + cp[7] = (fsa_dev_ptr[cid].block_size) & 0xff; scsi_sg_copy_from_buffer(scsicmd, cp, sizeof(cp)); /* Do not cache partition table for arrays */ scsicmd->device->removable = 1; @@ -2385,30 +2579,79 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) case MODE_SENSE: { - char mode_buf[7]; int mode_buf_length = 4; + u32 capacity; + aac_modep_data mpd; + + if (fsa_dev_ptr[cid].size <= 0x100000000ULL) + capacity = fsa_dev_ptr[cid].size - 1; + else + capacity = (u32)-1; dprintk((KERN_DEBUG "MODE SENSE command.\n")); - mode_buf[0] = 3; /* Mode data length */ - mode_buf[1] = 0; /* Medium type - default */ - mode_buf[2] = 0; /* Device-specific param, - bit 8: 0/1 = write enabled/protected - bit 4: 0/1 = FUA enabled */ + memset((char *)&mpd, 0, sizeof(aac_modep_data)); + + /* Mode data length */ + mpd.hd.data_length = sizeof(mpd.hd) - 1; + /* Medium type - default */ + mpd.hd.med_type = 0; + /* Device-specific param, + bit 8: 0/1 = write enabled/protected + bit 4: 0/1 = FUA enabled */ + mpd.hd.dev_par = 0; + if (dev->raw_io_interface && ((aac_cache & 5) != 1)) - mode_buf[2] = 0x10; - mode_buf[3] = 0; /* 
Block descriptor length */ + mpd.hd.dev_par = 0x10; + if (scsicmd->cmnd[1] & 0x8) + mpd.hd.bd_length = 0; /* Block descriptor length */ + else { + mpd.hd.bd_length = sizeof(mpd.bd); + mpd.hd.data_length += mpd.hd.bd_length; + mpd.bd.block_length[0] = + (fsa_dev_ptr[cid].block_size >> 16) & 0xff; + mpd.bd.block_length[1] = + (fsa_dev_ptr[cid].block_size >> 8) & 0xff; + mpd.bd.block_length[2] = + fsa_dev_ptr[cid].block_size & 0xff; + + mpd.mpc_buf[0] = scsicmd->cmnd[2]; + if (scsicmd->cmnd[2] == 0x1C) { + /* page length */ + mpd.mpc_buf[1] = 0xa; + /* Mode data length */ + mpd.hd.data_length = 23; + } else { + /* Mode data length */ + mpd.hd.data_length = 15; + } + + if (capacity > 0xffffff) { + mpd.bd.block_count[0] = 0xff; + mpd.bd.block_count[1] = 0xff; + mpd.bd.block_count[2] = 0xff; + } else { + mpd.bd.block_count[0] = (capacity >> 16) & 0xff; + mpd.bd.block_count[1] = (capacity >> 8) & 0xff; + mpd.bd.block_count[2] = capacity & 0xff; + } + } if (((scsicmd->cmnd[2] & 0x3f) == 8) || ((scsicmd->cmnd[2] & 0x3f) == 0x3f)) { - mode_buf[0] = 6; - mode_buf[4] = 8; - mode_buf[5] = 1; - mode_buf[6] = ((aac_cache & 6) == 2) + mpd.hd.data_length += 3; + mpd.mpc_buf[0] = 8; + mpd.mpc_buf[1] = 1; + mpd.mpc_buf[2] = ((aac_cache & 6) == 2) ? 0 : 0x04; /* WCE */ - mode_buf_length = 7; - if (mode_buf_length > scsicmd->cmnd[4]) - mode_buf_length = scsicmd->cmnd[4]; + mode_buf_length = sizeof(mpd); } - scsi_sg_copy_from_buffer(scsicmd, mode_buf, mode_buf_length); + + if (mode_buf_length > scsicmd->cmnd[4]) + mode_buf_length = scsicmd->cmnd[4]; + else + mode_buf_length = sizeof(mpd); + scsi_sg_copy_from_buffer(scsicmd, + (char *)&mpd, + mode_buf_length); scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; scsicmd->scsi_done(scsicmd); @@ -2416,34 +2659,77 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) } case MODE_SENSE_10: { - char mode_buf[11]; + u32 capacity; int mode_buf_length = 8; + aac_modep10_data mpd10; + + if (fsa_dev_ptr[cid].size <= 0x100000000ULL) + capacity = fsa_dev_ptr[cid].size - 1; + else + capacity = (u32)-1; dprintk((KERN_DEBUG "MODE SENSE 10 byte command.\n")); - mode_buf[0] = 0; /* Mode data length (MSB) */ - mode_buf[1] = 6; /* Mode data length (LSB) */ - mode_buf[2] = 0; /* Medium type - default */ - mode_buf[3] = 0; /* Device-specific param, - bit 8: 0/1 = write enabled/protected - bit 4: 0/1 = FUA enabled */ + memset((char *)&mpd10, 0, sizeof(aac_modep10_data)); + /* Mode data length (MSB) */ + mpd10.hd.data_length[0] = 0; + /* Mode data length (LSB) */ + mpd10.hd.data_length[1] = sizeof(mpd10.hd) - 1; + /* Medium type - default */ + mpd10.hd.med_type = 0; + /* Device-specific param, + bit 8: 0/1 = write enabled/protected + bit 4: 0/1 = FUA enabled */ + mpd10.hd.dev_par = 0; + if (dev->raw_io_interface && ((aac_cache & 5) != 1)) - mode_buf[3] = 0x10; - mode_buf[4] = 0; /* reserved */ - mode_buf[5] = 0; /* reserved */ - mode_buf[6] = 0; /* Block descriptor length (MSB) */ - mode_buf[7] = 0; /* Block descriptor length (LSB) */ + mpd10.hd.dev_par = 0x10; + mpd10.hd.rsrvd[0] = 0; /* reserved */ + mpd10.hd.rsrvd[1] = 0; /* reserved */ + if (scsicmd->cmnd[1] & 0x8) { + /* Block descriptor length (MSB) */ + mpd10.hd.bd_length[0] = 0; + /* Block descriptor length (LSB) */ + mpd10.hd.bd_length[1] = 0; + } else { + mpd10.hd.bd_length[0] = 0; + mpd10.hd.bd_length[1] = sizeof(mpd10.bd); + + mpd10.hd.data_length[1] += mpd10.hd.bd_length[1]; + + mpd10.bd.block_length[0] = + (fsa_dev_ptr[cid].block_size >> 16) & 0xff; + mpd10.bd.block_length[1] = + (fsa_dev_ptr[cid].block_size 
>> 8) & 0xff; + mpd10.bd.block_length[2] = + fsa_dev_ptr[cid].block_size & 0xff; + + if (capacity > 0xffffff) { + mpd10.bd.block_count[0] = 0xff; + mpd10.bd.block_count[1] = 0xff; + mpd10.bd.block_count[2] = 0xff; + } else { + mpd10.bd.block_count[0] = + (capacity >> 16) & 0xff; + mpd10.bd.block_count[1] = + (capacity >> 8) & 0xff; + mpd10.bd.block_count[2] = + capacity & 0xff; + } + } if (((scsicmd->cmnd[2] & 0x3f) == 8) || ((scsicmd->cmnd[2] & 0x3f) == 0x3f)) { - mode_buf[1] = 9; - mode_buf[8] = 8; - mode_buf[9] = 1; - mode_buf[10] = ((aac_cache & 6) == 2) + mpd10.hd.data_length[1] += 3; + mpd10.mpc_buf[0] = 8; + mpd10.mpc_buf[1] = 1; + mpd10.mpc_buf[2] = ((aac_cache & 6) == 2) ? 0 : 0x04; /* WCE */ - mode_buf_length = 11; + mode_buf_length = sizeof(mpd10); if (mode_buf_length > scsicmd->cmnd[8]) mode_buf_length = scsicmd->cmnd[8]; } - scsi_sg_copy_from_buffer(scsicmd, mode_buf, mode_buf_length); + scsi_sg_copy_from_buffer(scsicmd, + (char *)&mpd10, + mode_buf_length); scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; scsicmd->scsi_done(scsicmd); diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index eaaf8705a5f4..40fe65c91b41 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h @@ -6,13 +6,63 @@ #define nblank(x) _nblank(x)[0] #include <linux/interrupt.h> +#include <linux/pci.h> /*------------------------------------------------------------------------------ * D E F I N E S *----------------------------------------------------------------------------*/ +#define AAC_MAX_MSIX 8 /* vectors */ +#define AAC_PCI_MSI_ENABLE 0x8000 + +enum { + AAC_ENABLE_INTERRUPT = 0x0, + AAC_DISABLE_INTERRUPT, + AAC_ENABLE_MSIX, + AAC_DISABLE_MSIX, + AAC_CLEAR_AIF_BIT, + AAC_CLEAR_SYNC_BIT, + AAC_ENABLE_INTX +}; + +#define AAC_INT_MODE_INTX (1<<0) +#define AAC_INT_MODE_MSI (1<<1) +#define AAC_INT_MODE_AIF (1<<2) +#define AAC_INT_MODE_SYNC (1<<3) + +#define AAC_INT_ENABLE_TYPE1_INTX 0xfffffffb +#define AAC_INT_ENABLE_TYPE1_MSIX 0xfffffffa +#define AAC_INT_DISABLE_ALL 0xffffffff + +/* Bit definitions in IOA->Host Interrupt Register */ +#define PMC_TRANSITION_TO_OPERATIONAL (1<<31) +#define PMC_IOARCB_TRANSFER_FAILED (1<<28) +#define PMC_IOA_UNIT_CHECK (1<<27) +#define PMC_NO_HOST_RRQ_FOR_CMD_RESPONSE (1<<26) +#define PMC_CRITICAL_IOA_OP_IN_PROGRESS (1<<25) +#define PMC_IOARRIN_LOST (1<<4) +#define PMC_SYSTEM_BUS_MMIO_ERROR (1<<3) +#define PMC_IOA_PROCESSOR_IN_ERROR_STATE (1<<2) +#define PMC_HOST_RRQ_VALID (1<<1) +#define PMC_OPERATIONAL_STATUS (1<<31) +#define PMC_ALLOW_MSIX_VECTOR0 (1<<0) + +#define PMC_IOA_ERROR_INTERRUPTS (PMC_IOARCB_TRANSFER_FAILED | \ + PMC_IOA_UNIT_CHECK | \ + PMC_NO_HOST_RRQ_FOR_CMD_RESPONSE | \ + PMC_IOARRIN_LOST | \ + PMC_SYSTEM_BUS_MMIO_ERROR | \ + PMC_IOA_PROCESSOR_IN_ERROR_STATE) + +#define PMC_ALL_INTERRUPT_BITS (PMC_IOA_ERROR_INTERRUPTS | \ + PMC_HOST_RRQ_VALID | \ + PMC_TRANSITION_TO_OPERATIONAL | \ + PMC_ALLOW_MSIX_VECTOR0) +#define PMC_GLOBAL_INT_BIT2 0x00000004 +#define PMC_GLOBAL_INT_BIT0 0x00000001 + #ifndef AAC_DRIVER_BUILD -# define AAC_DRIVER_BUILD 30300 +# define AAC_DRIVER_BUILD 40709 # define AAC_DRIVER_BRANCH "-ms" #endif #define MAXIMUM_NUM_CONTAINERS 32 @@ -36,6 +86,7 @@ #define CONTAINER_TO_ID(cont) (cont) #define CONTAINER_TO_LUN(cont) (0) +#define PMC_DEVICE_S6 0x28b #define PMC_DEVICE_S7 0x28c #define PMC_DEVICE_S8 0x28d #define PMC_DEVICE_S9 0x28f @@ -434,7 +485,7 @@ enum fib_xfer_state { struct aac_init { __le32 InitStructRevision; - __le32 MiniPortRevision; + __le32 Sa_MSIXVectors; 
__le32 fsrev; __le32 CommHeaderAddress; __le32 FastIoCommAreaAddress; @@ -582,7 +633,8 @@ struct aac_queue { spinlock_t lockdata; /* Actual lock (used only on one side of the lock) */ struct list_head cmdq; /* A queue of FIBs which need to be prcessed by the FS thread. This is */ /* only valid for command queues which receive entries from the adapter. */ - u32 numpending; /* Number of entries on outstanding queue. */ + /* Number of entries on outstanding queue. */ + atomic_t numpending; struct aac_dev * dev; /* Back pointer to adapter structure */ }; @@ -755,7 +807,8 @@ struct rkt_registers { struct src_mu_registers { /* PCI*| Name */ - __le32 reserved0[8]; /* 00h | Reserved */ + __le32 reserved0[6]; /* 00h | Reserved */ + __le32 IOAR[2]; /* 18h | IOA->host interrupt register */ __le32 IDR; /* 20h | Inbound Doorbell Register */ __le32 IISR; /* 24h | Inbound Int. Status Register */ __le32 reserved1[3]; /* 28h | Reserved */ @@ -767,17 +820,18 @@ struct src_mu_registers { __le32 OMR; /* bch | Outbound Message Register */ __le32 IQ_L; /* c0h | Inbound Queue (Low address) */ __le32 IQ_H; /* c4h | Inbound Queue (High address) */ + __le32 ODR_MSI; /* c8h | MSI register for sync./AIF */ }; struct src_registers { - struct src_mu_registers MUnit; /* 00h - c7h */ + struct src_mu_registers MUnit; /* 00h - cbh */ union { struct { - __le32 reserved1[130790]; /* c8h - 7fc5fh */ + __le32 reserved1[130789]; /* cch - 7fc5fh */ struct src_inbound IndexRegs; /* 7fc60h */ } tupelo; struct { - __le32 reserved1[974]; /* c8h - fffh */ + __le32 reserved1[973]; /* cch - fffh */ struct src_inbound IndexRegs; /* 1000h */ } denali; } u; @@ -857,6 +911,7 @@ struct fsa_dev_info { u8 deleted; char devname[8]; struct sense_data sense_data; + u32 block_size; }; struct fib { @@ -960,6 +1015,10 @@ struct aac_supplement_adapter_info #define AAC_OPTION_IGNORE_RESET cpu_to_le32(0x00000002) #define AAC_OPTION_POWER_MANAGEMENT cpu_to_le32(0x00000004) #define AAC_OPTION_DOORBELL_RESET cpu_to_le32(0x00004000) +/* 4KB sector size */ +#define AAC_OPTION_VARIABLE_BLOCK_SIZE cpu_to_le32(0x00040000) +/* 240 simple volume support */ +#define AAC_OPTION_SUPPORTED_240_VOLUMES cpu_to_le32(0x10000000) #define AAC_SIS_VERSION_V3 3 #define AAC_SIS_SLOT_UNKNOWN 0xFF @@ -1026,6 +1085,11 @@ struct aac_bus_info_response { #define AAC_OPT_NEW_COMM_TYPE3 cpu_to_le32(1<<30) #define AAC_OPT_NEW_COMM_TYPE4 cpu_to_le32(1<<31) +/* MSIX context */ +struct aac_msix_ctx { + int vector_no; + struct aac_dev *dev; +}; struct aac_dev { @@ -1081,8 +1145,10 @@ struct aac_dev * if AAC_COMM_MESSAGE_TYPE1 */ dma_addr_t host_rrq_pa; /* phys. address */ - u32 host_rrq_idx; /* index into rrq buffer */ - + /* index into rrq buffer */ + u32 host_rrq_idx[AAC_MAX_MSIX]; + atomic_t rrq_outstanding[AAC_MAX_MSIX]; + u32 fibs_pushed_no; struct pci_dev *pdev; /* Our PCI interface */ void * printfbuf; /* pointer to buffer used for printf's from the adapter */ void * comm_addr; /* Base address of Comm area */ @@ -1151,6 +1217,13 @@ struct aac_dev int sync_mode; struct fib *sync_fib; struct list_head sync_fib_list; + u32 doorbell_mask; + u32 max_msix; /* max. 
MSI-X vectors */ + u32 vector_cap; /* MSI-X vector capab.*/ + int msi_enabled; /* MSI/MSI-X enabled */ + struct msix_entry msixentry[AAC_MAX_MSIX]; + struct aac_msix_ctx aac_msix[AAC_MAX_MSIX]; /* context */ + u8 adapter_shutdown; }; #define aac_adapter_interrupt(dev) \ @@ -1589,6 +1662,7 @@ struct aac_srb_reply #define VM_CtHostWrite64 20 #define VM_DrvErrTblLog 21 #define VM_NameServe64 22 +#define VM_NameServeAllBlk 30 #define MAX_VMCOMMAND_NUM 23 /* used for sizing stats array - leave last */ @@ -1611,8 +1685,13 @@ struct aac_fsinfo { __le32 fsInodeDensity; }; /* valid iff ObjType == FT_FILESYS && !(ContentState & FSCS_NOTCLEAN) */ +struct aac_blockdevinfo { + __le32 block_size; +}; + union aac_contentinfo { - struct aac_fsinfo filesys; /* valid iff ObjType == FT_FILESYS && !(ContentState & FSCS_NOTCLEAN) */ + struct aac_fsinfo filesys; + struct aac_blockdevinfo bdevinfo; }; /* @@ -1677,6 +1756,7 @@ struct aac_get_container_count_resp { __le32 MaxContainers; __le32 ContainerSwitchEntries; __le32 MaxPartitions; + __le32 MaxSimpleVolumes; }; @@ -1951,6 +2031,8 @@ extern struct aac_common aac_config; #define AifEnEnclosureManagement 13 /* EM_DRIVE_* */ #define EM_DRIVE_INSERTION 31 #define EM_DRIVE_REMOVAL 32 +#define EM_SES_DRIVE_INSERTION 33 +#define EM_SES_DRIVE_REMOVAL 26 #define AifEnBatteryEvent 14 /* Change in Battery State */ #define AifEnAddContainer 15 /* A new array was created */ #define AifEnDeleteContainer 16 /* A container was deleted */ @@ -1983,6 +2065,9 @@ extern struct aac_common aac_config; /* PMC NEW COMM: Request the event data */ #define AifReqEvent 200 +/* RAW device deleted */ +#define AifRawDeviceRemove 203 + /* * Adapter Initiated FIB command structures. Start with the adapter * initiated FIBs that really come from the adapter, and get responded @@ -2025,6 +2110,7 @@ void aac_consumer_free(struct aac_dev * dev, struct aac_queue * q, u32 qnum); int aac_fib_complete(struct fib * context); #define fib_data(fibctx) ((void *)(fibctx)->hw_fib_va->data) struct aac_dev *aac_init_adapter(struct aac_dev *dev); +void aac_src_access_devreg(struct aac_dev *dev, int mode); int aac_get_config_status(struct aac_dev *dev, int commit_flag); int aac_get_containers(struct aac_dev *dev); int aac_scsi_cmd(struct scsi_cmnd *cmd); diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c index fbcd48d0bfc3..54195a117f72 100644 --- a/drivers/scsi/aacraid/commctrl.c +++ b/drivers/scsi/aacraid/commctrl.c @@ -689,7 +689,10 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) kfree (usg); } srbcmd->count = cpu_to_le32(byte_count); - psg->count = cpu_to_le32(sg_indx+1); + if (user_srbcmd->sg.count) + psg->count = cpu_to_le32(sg_indx+1); + else + psg->count = 0; status = aac_fib_send(ScsiPortCommand64, srbfib, actual_fibsize, FsaNormal, 1, 1,NULL,NULL); } else { struct user_sgmap* upsg = &user_srbcmd->sg; @@ -775,7 +778,10 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg) } } srbcmd->count = cpu_to_le32(byte_count); - psg->count = cpu_to_le32(sg_indx+1); + if (user_srbcmd->sg.count) + psg->count = cpu_to_le32(sg_indx+1); + else + psg->count = 0; status = aac_fib_send(ScsiPortCommand, srbfib, actual_fibsize, FsaNormal, 1, 1, NULL, NULL); } if (status == -ERESTARTSYS) { diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c index 177b094c7792..45db84ad322f 100644 --- a/drivers/scsi/aacraid/comminit.c +++ b/drivers/scsi/aacraid/comminit.c @@ -43,6 +43,8 @@ #include "aacraid.h" +static void 
aac_define_int_mode(struct aac_dev *dev); + struct aac_common aac_config = { .irq_mod = 1 }; @@ -51,7 +53,7 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co { unsigned char *base; unsigned long size, align; - const unsigned long fibsize = 4096; + const unsigned long fibsize = dev->max_fib_size; const unsigned long printfbufsiz = 256; unsigned long host_rrq_size = 0; struct aac_init *init; @@ -91,7 +93,7 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION); if (dev->max_fib_size != sizeof(struct hw_fib)) init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_4); - init->MiniPortRevision = cpu_to_le32(Sa_MINIPORT_REVISION); + init->Sa_MSIXVectors = cpu_to_le32(Sa_MINIPORT_REVISION); init->fsrev = cpu_to_le32(dev->fsrev); /* @@ -140,7 +142,8 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co INITFLAGS_NEW_COMM_TYPE2_SUPPORTED | INITFLAGS_FAST_JBOD_SUPPORTED); init->HostRRQ_AddrHigh = cpu_to_le32((u32)((u64)dev->host_rrq_pa >> 32)); init->HostRRQ_AddrLow = cpu_to_le32((u32)(dev->host_rrq_pa & 0xffffffff)); - init->MiniPortRevision = cpu_to_le32(0L); /* number of MSI-X */ + /* number of MSI-X */ + init->Sa_MSIXVectors = cpu_to_le32(dev->max_msix); dprintk((KERN_WARNING"aacraid: New Comm Interface type2 enabled\n")); } @@ -179,7 +182,7 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co static void aac_queue_init(struct aac_dev * dev, struct aac_queue * q, u32 *mem, int qsize) { - q->numpending = 0; + atomic_set(&q->numpending, 0); q->dev = dev; init_waitqueue_head(&q->cmdready); INIT_LIST_HEAD(&q->cmdq); @@ -228,6 +231,12 @@ int aac_send_shutdown(struct aac_dev * dev) /* FIB should be freed only after getting the response from the F/W */ if (status != -ERESTARTSYS) aac_fib_free(fibctx); + dev->adapter_shutdown = 1; + if ((dev->pdev->device == PMC_DEVICE_S7 || + dev->pdev->device == PMC_DEVICE_S8 || + dev->pdev->device == PMC_DEVICE_S9) && + dev->msi_enabled) + aac_src_access_devreg(dev, AAC_ENABLE_INTX); return status; } @@ -350,8 +359,10 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev) dev->raw_io_interface = dev->raw_io_64 = 0; if ((!aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES, - 0, 0, 0, 0, 0, 0, status+0, status+1, status+2, NULL, NULL)) && + 0, 0, 0, 0, 0, 0, + status+0, status+1, status+2, status+3, NULL)) && (status[0] == 0x00000001)) { + dev->doorbell_mask = status[3]; if (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_64)) dev->raw_io_64 = 1; dev->sync_mode = aac_sync_mode; @@ -388,6 +399,9 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev) } } } + dev->max_msix = 0; + dev->msi_enabled = 0; + dev->adapter_shutdown = 0; if ((!aac_adapter_sync_cmd(dev, GET_COMM_PREFERRED_SETTINGS, 0, 0, 0, 0, 0, 0, status+0, status+1, status+2, status+3, status+4)) @@ -461,6 +475,11 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev) if (host->can_queue > AAC_NUM_IO_FIB) host->can_queue = AAC_NUM_IO_FIB; + if (dev->pdev->device == PMC_DEVICE_S6 || + dev->pdev->device == PMC_DEVICE_S7 || + dev->pdev->device == PMC_DEVICE_S8 || + dev->pdev->device == PMC_DEVICE_S9) + aac_define_int_mode(dev); /* * Ok now init the communication subsystem */ @@ -489,4 +508,79 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev) return dev; } - +static void aac_define_int_mode(struct aac_dev *dev) +{ + + int i, msi_count; + + msi_count = i = 0; + /* max. 
vectors from GET_COMM_PREFERRED_SETTINGS */ + if (dev->max_msix == 0 || + dev->pdev->device == PMC_DEVICE_S6 || + dev->sync_mode) { + dev->max_msix = 1; + dev->vector_cap = + dev->scsi_host_ptr->can_queue + + AAC_NUM_MGT_FIB; + return; + } + + msi_count = min(dev->max_msix, + (unsigned int)num_online_cpus()); + + dev->max_msix = msi_count; + + if (msi_count > AAC_MAX_MSIX) + msi_count = AAC_MAX_MSIX; + + for (i = 0; i < msi_count; i++) + dev->msixentry[i].entry = i; + + if (msi_count > 1 && + pci_find_capability(dev->pdev, PCI_CAP_ID_MSIX)) { + i = pci_enable_msix(dev->pdev, + dev->msixentry, + msi_count); + /* Check how many MSIX vectors are allocated */ + if (i >= 0) { + dev->msi_enabled = 1; + if (i) { + msi_count = i; + if (pci_enable_msix(dev->pdev, + dev->msixentry, + msi_count)) { + dev->msi_enabled = 0; + printk(KERN_ERR "%s%d: MSIX not supported!! Will try MSI 0x%x.\n", + dev->name, dev->id, i); + } + } + } else { + dev->msi_enabled = 0; + printk(KERN_ERR "%s%d: MSIX not supported!! Will try MSI 0x%x.\n", + dev->name, dev->id, i); + } + } + + if (!dev->msi_enabled) { + msi_count = 1; + i = pci_enable_msi(dev->pdev); + + if (!i) { + dev->msi_enabled = 1; + dev->msi = 1; + } else { + printk(KERN_ERR "%s%d: MSI not supported!! Will try INTx 0x%x.\n", + dev->name, dev->id, i); + } + } + + if (!dev->msi_enabled) + dev->max_msix = msi_count = 1; + else { + if (dev->max_msix > msi_count) + dev->max_msix = msi_count; + } + dev->vector_cap = + (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) / + msi_count; +} diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index cab190af6345..4da574925284 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c @@ -208,14 +208,10 @@ struct fib *aac_fib_alloc(struct aac_dev *dev) void aac_fib_free(struct fib *fibptr) { - unsigned long flags, flagsv; + unsigned long flags; - spin_lock_irqsave(&fibptr->event_lock, flagsv); - if (fibptr->done == 2) { - spin_unlock_irqrestore(&fibptr->event_lock, flagsv); + if (fibptr->done == 2) return; - } - spin_unlock_irqrestore(&fibptr->event_lock, flagsv); spin_lock_irqsave(&fibptr->dev->fib_lock, flags); if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) @@ -321,7 +317,7 @@ static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entr /* Queue is full */ if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) { printk(KERN_WARNING "Queue %d full, %u outstanding.\n", - qid, q->numpending); + qid, atomic_read(&q->numpending)); return 0; } else { *entry = q->base + *index; @@ -414,7 +410,6 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size, struct aac_dev * dev = fibptr->dev; struct hw_fib * hw_fib = fibptr->hw_fib_va; unsigned long flags = 0; - unsigned long qflags; unsigned long mflags = 0; unsigned long sflags = 0; @@ -568,9 +563,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size, int blink; if (time_is_before_eq_jiffies(timeout)) { struct aac_queue * q = &dev->queues->queue[AdapNormCmdQueue]; - spin_lock_irqsave(q->lock, qflags); - q->numpending--; - spin_unlock_irqrestore(q->lock, qflags); + atomic_dec(&q->numpending); if (wait == -1) { printk(KERN_ERR "aacraid: aac_fib_send: first asynchronous command timed out.\n" "Usually a result of a PCI interrupt routing problem;\n" @@ -775,7 +768,6 @@ int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size) int aac_fib_complete(struct fib *fibptr) { - unsigned long flags; struct hw_fib * hw_fib = fibptr->hw_fib_va; /* @@ -798,12 +790,6 @@ int 
aac_fib_complete(struct fib *fibptr) * command is complete that we had sent to the adapter and this * cdb could be reused. */ - spin_lock_irqsave(&fibptr->event_lock, flags); - if (fibptr->done == 2) { - spin_unlock_irqrestore(&fibptr->event_lock, flags); - return 0; - } - spin_unlock_irqrestore(&fibptr->event_lock, flags); if((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) && (hw_fib->header.XferState & cpu_to_le32(AdapterProcessed))) @@ -868,7 +854,7 @@ void aac_printf(struct aac_dev *dev, u32 val) * dispatches it to the appropriate routine for handling. */ -#define AIF_SNIFF_TIMEOUT (30*HZ) +#define AIF_SNIFF_TIMEOUT (500*HZ) static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr) { struct hw_fib * hw_fib = fibptr->hw_fib_va; @@ -897,6 +883,39 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr) switch (le32_to_cpu(aifcmd->command)) { case AifCmdDriverNotify: switch (le32_to_cpu(((__le32 *)aifcmd->data)[0])) { + case AifRawDeviceRemove: + container = le32_to_cpu(((__le32 *)aifcmd->data)[1]); + if ((container >> 28)) { + container = (u32)-1; + break; + } + channel = (container >> 24) & 0xF; + if (channel >= dev->maximum_num_channels) { + container = (u32)-1; + break; + } + id = container & 0xFFFF; + if (id >= dev->maximum_num_physicals) { + container = (u32)-1; + break; + } + lun = (container >> 16) & 0xFF; + container = (u32)-1; + channel = aac_phys_to_logical(channel); + device_config_needed = + (((__le32 *)aifcmd->data)[0] == + cpu_to_le32(AifRawDeviceRemove)) ? DELETE : ADD; + + if (device_config_needed == ADD) { + device = scsi_device_lookup( + dev->scsi_host_ptr, + channel, id, lun); + if (device) { + scsi_remove_device(device); + scsi_device_put(device); + } + } + break; /* * Morph or Expand complete */ @@ -1044,6 +1063,8 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr) switch (le32_to_cpu(((__le32 *)aifcmd->data)[3])) { case EM_DRIVE_INSERTION: case EM_DRIVE_REMOVAL: + case EM_SES_DRIVE_INSERTION: + case EM_SES_DRIVE_REMOVAL: container = le32_to_cpu( ((__le32 *)aifcmd->data)[2]); if ((container >> 28)) { @@ -1069,8 +1090,10 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr) } channel = aac_phys_to_logical(channel); device_config_needed = - (((__le32 *)aifcmd->data)[3] - == cpu_to_le32(EM_DRIVE_INSERTION)) ? + ((((__le32 *)aifcmd->data)[3] + == cpu_to_le32(EM_DRIVE_INSERTION)) || + (((__le32 *)aifcmd->data)[3] + == cpu_to_le32(EM_SES_DRIVE_INSERTION))) ? 
ADD : DELETE; break; } @@ -1247,12 +1270,13 @@ retry_next: static int _aac_reset_adapter(struct aac_dev *aac, int forced) { int index, quirks; - int retval; + int retval, i; struct Scsi_Host *host; struct scsi_device *dev; struct scsi_cmnd *command; struct scsi_cmnd *command_list; int jafo = 0; + int cpu; /* * Assumptions: @@ -1315,7 +1339,33 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced) aac->comm_phys = 0; kfree(aac->queues); aac->queues = NULL; - free_irq(aac->pdev->irq, aac); + cpu = cpumask_first(cpu_online_mask); + if (aac->pdev->device == PMC_DEVICE_S6 || + aac->pdev->device == PMC_DEVICE_S7 || + aac->pdev->device == PMC_DEVICE_S8 || + aac->pdev->device == PMC_DEVICE_S9) { + if (aac->max_msix > 1) { + for (i = 0; i < aac->max_msix; i++) { + if (irq_set_affinity_hint( + aac->msixentry[i].vector, + NULL)) { + printk(KERN_ERR "%s%d: Failed to reset IRQ affinity for cpu %d\n", + aac->name, + aac->id, + cpu); + } + cpu = cpumask_next(cpu, + cpu_online_mask); + free_irq(aac->msixentry[i].vector, + &(aac->aac_msix[i])); + } + pci_disable_msix(aac->pdev); + } else { + free_irq(aac->pdev->irq, &(aac->aac_msix[0])); + } + } else { + free_irq(aac->pdev->irq, aac); + } if (aac->msi) pci_disable_msi(aac->pdev); kfree(aac->fsa_dev); diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c index d81b2810f0f7..da9d9936e995 100644 --- a/drivers/scsi/aacraid/dpcsup.c +++ b/drivers/scsi/aacraid/dpcsup.c @@ -84,7 +84,7 @@ unsigned int aac_response_normal(struct aac_queue * q) * continue. The caller has already been notified that * the fib timed out. */ - dev->queues->queue[AdapNormCmdQueue].numpending--; + atomic_dec(&dev->queues->queue[AdapNormCmdQueue].numpending); if (unlikely(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) { spin_unlock_irqrestore(q->lock, flags); @@ -354,7 +354,7 @@ unsigned int aac_intr_normal(struct aac_dev *dev, u32 index, * continue. The caller has already been notified that * the fib timed out. */ - dev->queues->queue[AdapNormCmdQueue].numpending--; + atomic_dec(&dev->queues->queue[AdapNormCmdQueue].numpending); if (unlikely(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) { aac_fib_complete(fib); @@ -389,8 +389,13 @@ unsigned int aac_intr_normal(struct aac_dev *dev, u32 index, * NOTE: we cannot touch the fib after this * call, because it may have been deallocated. */ - fib->flags &= FIB_CONTEXT_FLAG_FASTRESP; - fib->callback(fib->callback_data, fib); + if (likely(fib->callback && fib->callback_data)) { + fib->flags &= FIB_CONTEXT_FLAG_FASTRESP; + fib->callback(fib->callback_data, fib); + } else { + aac_fib_complete(fib); + aac_fib_free(fib); + } } else { unsigned long flagv; dprintk((KERN_INFO "event_wait up\n")); diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index fdcdf9f781bc..9eec02733c86 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -56,7 +56,7 @@ #include "aacraid.h" -#define AAC_DRIVER_VERSION "1.2-0" +#define AAC_DRIVER_VERSION "1.2-1" #ifndef AAC_DRIVER_BRANCH #define AAC_DRIVER_BRANCH "" #endif @@ -251,27 +251,15 @@ static struct aac_driver_ident aac_drivers[] = { * TODO: unify with aac_scsi_cmd(). 
*/ -static int aac_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) +static int aac_queuecommand(struct Scsi_Host *shost, + struct scsi_cmnd *cmd) { - struct Scsi_Host *host = cmd->device->host; - struct aac_dev *dev = (struct aac_dev *)host->hostdata; - u32 count = 0; - cmd->scsi_done = done; - for (; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) { - struct fib * fib = &dev->fibs[count]; - struct scsi_cmnd * command; - if (fib->hw_fib_va->header.XferState && - ((command = fib->callback_data)) && - (command == cmd) && - (cmd->SCp.phase == AAC_OWNER_FIRMWARE)) - return 0; /* Already owned by Adapter */ - } + int r = 0; cmd->SCp.phase = AAC_OWNER_LOWLEVEL; - return (aac_scsi_cmd(cmd) ? FAILED : 0); + r = (aac_scsi_cmd(cmd) ? FAILED : 0); + return r; } -static DEF_SCSI_QCMD(aac_queuecommand) - /** * aac_info - Returns the host adapter name * @shost: Scsi host to report on @@ -713,7 +701,9 @@ static long aac_cfg_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int ret; - if (!capable(CAP_SYS_RAWIO)) + struct aac_dev *aac; + aac = (struct aac_dev *)file->private_data; + if (!capable(CAP_SYS_RAWIO) || aac->adapter_shutdown) return -EPERM; mutex_lock(&aac_mutex); ret = aac_do_ioctl(file->private_data, cmd, (void __user *)arg); @@ -1082,6 +1072,9 @@ static struct scsi_host_template aac_driver_template = { static void __aac_shutdown(struct aac_dev * aac) { + int i; + int cpu; + if (aac->aif_thread) { int i; /* Clear out events first */ @@ -1095,9 +1088,37 @@ static void __aac_shutdown(struct aac_dev * aac) } aac_send_shutdown(aac); aac_adapter_disable_int(aac); - free_irq(aac->pdev->irq, aac); + cpu = cpumask_first(cpu_online_mask); + if (aac->pdev->device == PMC_DEVICE_S6 || + aac->pdev->device == PMC_DEVICE_S7 || + aac->pdev->device == PMC_DEVICE_S8 || + aac->pdev->device == PMC_DEVICE_S9) { + if (aac->max_msix > 1) { + for (i = 0; i < aac->max_msix; i++) { + if (irq_set_affinity_hint( + aac->msixentry[i].vector, + NULL)) { + printk(KERN_ERR "%s%d: Failed to reset IRQ affinity for cpu %d\n", + aac->name, + aac->id, + cpu); + } + cpu = cpumask_next(cpu, + cpu_online_mask); + free_irq(aac->msixentry[i].vector, + &(aac->aac_msix[i])); + } + } else { + free_irq(aac->pdev->irq, + &(aac->aac_msix[0])); + } + } else { + free_irq(aac->pdev->irq, aac); + } if (aac->msi) pci_disable_msi(aac->pdev); + else if (aac->max_msix > 1) + pci_disable_msix(aac->pdev); } static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c index 5c6a8703f535..9570612b80ce 100644 --- a/drivers/scsi/aacraid/rx.c +++ b/drivers/scsi/aacraid/rx.c @@ -400,16 +400,13 @@ int aac_rx_deliver_producer(struct fib * fib) { struct aac_dev *dev = fib->dev; struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue]; - unsigned long qflags; u32 Index; unsigned long nointr = 0; - spin_lock_irqsave(q->lock, qflags); aac_queue_get( dev, &Index, AdapNormCmdQueue, fib->hw_fib_va, 1, fib, &nointr); - q->numpending++; + atomic_inc(&q->numpending); *(q->headers.producer) = cpu_to_le32(Index + 1); - spin_unlock_irqrestore(q->lock, qflags); if (!(nointr & aac_config.irq_mod)) aac_adapter_notify(dev, AdapNormCmdQueue); @@ -426,15 +423,12 @@ static int aac_rx_deliver_message(struct fib * fib) { struct aac_dev *dev = fib->dev; struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue]; - unsigned long qflags; u32 Index; u64 addr; volatile void __iomem *device; unsigned long count = 10000000L; /* 50 seconds */ - 
spin_lock_irqsave(q->lock, qflags); - q->numpending++; - spin_unlock_irqrestore(q->lock, qflags); + atomic_inc(&q->numpending); for(;;) { Index = rx_readl(dev, MUnit.InboundQueue); if (unlikely(Index == 0xFFFFFFFFL)) @@ -442,9 +436,7 @@ static int aac_rx_deliver_message(struct fib * fib) if (likely(Index != 0xFFFFFFFFL)) break; if (--count == 0) { - spin_lock_irqsave(q->lock, qflags); - q->numpending--; - spin_unlock_irqrestore(q->lock, qflags); + atomic_dec(&q->numpending); return -ETIMEDOUT; } udelay(5); diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c index 9c65aed26212..4596e9dd757c 100644 --- a/drivers/scsi/aacraid/src.c +++ b/drivers/scsi/aacraid/src.c @@ -44,98 +44,128 @@ #include "aacraid.h" -static irqreturn_t aac_src_intr_message(int irq, void *dev_id) +static int aac_src_get_sync_status(struct aac_dev *dev); + +irqreturn_t aac_src_intr_message(int irq, void *dev_id) { - struct aac_dev *dev = dev_id; + struct aac_msix_ctx *ctx; + struct aac_dev *dev; unsigned long bellbits, bellbits_shifted; - int our_interrupt = 0; - int isFastResponse; + int vector_no; + int isFastResponse, mode; u32 index, handle; - bellbits = src_readl(dev, MUnit.ODR_R); - if (bellbits & PmDoorBellResponseSent) { - bellbits = PmDoorBellResponseSent; - /* handle async. status */ - src_writel(dev, MUnit.ODR_C, bellbits); - src_readl(dev, MUnit.ODR_C); - our_interrupt = 1; - index = dev->host_rrq_idx; - for (;;) { - isFastResponse = 0; - /* remove toggle bit (31) */ - handle = le32_to_cpu(dev->host_rrq[index]) & 0x7fffffff; - /* check fast response bit (30) */ - if (handle & 0x40000000) - isFastResponse = 1; - handle &= 0x0000ffff; - if (handle == 0) - break; - - aac_intr_normal(dev, handle-1, 0, isFastResponse, NULL); - - dev->host_rrq[index++] = 0; - if (index == dev->scsi_host_ptr->can_queue + - AAC_NUM_MGT_FIB) - index = 0; - dev->host_rrq_idx = index; + ctx = (struct aac_msix_ctx *)dev_id; + dev = ctx->dev; + vector_no = ctx->vector_no; + + if (dev->msi_enabled) { + mode = AAC_INT_MODE_MSI; + if (vector_no == 0) { + bellbits = src_readl(dev, MUnit.ODR_MSI); + if (bellbits & 0x40000) + mode |= AAC_INT_MODE_AIF; + if (bellbits & 0x1000) + mode |= AAC_INT_MODE_SYNC; } } else { - bellbits_shifted = (bellbits >> SRC_ODR_SHIFT); - if (bellbits_shifted & DoorBellAifPending) { + mode = AAC_INT_MODE_INTX; + bellbits = src_readl(dev, MUnit.ODR_R); + if (bellbits & PmDoorBellResponseSent) { + bellbits = PmDoorBellResponseSent; + src_writel(dev, MUnit.ODR_C, bellbits); + src_readl(dev, MUnit.ODR_C); + } else { + bellbits_shifted = (bellbits >> SRC_ODR_SHIFT); src_writel(dev, MUnit.ODR_C, bellbits); src_readl(dev, MUnit.ODR_C); - our_interrupt = 1; - /* handle AIF */ - aac_intr_normal(dev, 0, 2, 0, NULL); - } else if (bellbits_shifted & OUTBOUNDDOORBELL_0) { - unsigned long sflags; - struct list_head *entry; - int send_it = 0; - extern int aac_sync_mode; + if (bellbits_shifted & DoorBellAifPending) + mode |= AAC_INT_MODE_AIF; + else if (bellbits_shifted & OUTBOUNDDOORBELL_0) + mode |= AAC_INT_MODE_SYNC; + } + } + + if (mode & AAC_INT_MODE_SYNC) { + unsigned long sflags; + struct list_head *entry; + int send_it = 0; + extern int aac_sync_mode; + + if (!aac_sync_mode && !dev->msi_enabled) { src_writel(dev, MUnit.ODR_C, bellbits); src_readl(dev, MUnit.ODR_C); + } - if (!aac_sync_mode) { - src_writel(dev, MUnit.ODR_C, bellbits); - src_readl(dev, MUnit.ODR_C); - our_interrupt = 1; + if (dev->sync_fib) { + if (dev->sync_fib->callback) + dev->sync_fib->callback(dev->sync_fib->callback_data, + 
dev->sync_fib); + spin_lock_irqsave(&dev->sync_fib->event_lock, sflags); + if (dev->sync_fib->flags & FIB_CONTEXT_FLAG_WAIT) { + dev->management_fib_count--; + up(&dev->sync_fib->event_wait); } - - if (dev->sync_fib) { - our_interrupt = 1; - if (dev->sync_fib->callback) - dev->sync_fib->callback(dev->sync_fib->callback_data, - dev->sync_fib); - spin_lock_irqsave(&dev->sync_fib->event_lock, sflags); - if (dev->sync_fib->flags & FIB_CONTEXT_FLAG_WAIT) { - dev->management_fib_count--; - up(&dev->sync_fib->event_wait); - } - spin_unlock_irqrestore(&dev->sync_fib->event_lock, sflags); - spin_lock_irqsave(&dev->sync_lock, sflags); - if (!list_empty(&dev->sync_fib_list)) { - entry = dev->sync_fib_list.next; - dev->sync_fib = list_entry(entry, struct fib, fiblink); - list_del(entry); - send_it = 1; - } else { - dev->sync_fib = NULL; - } - spin_unlock_irqrestore(&dev->sync_lock, sflags); - if (send_it) { - aac_adapter_sync_cmd(dev, SEND_SYNCHRONOUS_FIB, - (u32)dev->sync_fib->hw_fib_pa, 0, 0, 0, 0, 0, - NULL, NULL, NULL, NULL, NULL); - } + spin_unlock_irqrestore(&dev->sync_fib->event_lock, + sflags); + spin_lock_irqsave(&dev->sync_lock, sflags); + if (!list_empty(&dev->sync_fib_list)) { + entry = dev->sync_fib_list.next; + dev->sync_fib = list_entry(entry, + struct fib, + fiblink); + list_del(entry); + send_it = 1; + } else { + dev->sync_fib = NULL; + } + spin_unlock_irqrestore(&dev->sync_lock, sflags); + if (send_it) { + aac_adapter_sync_cmd(dev, SEND_SYNCHRONOUS_FIB, + (u32)dev->sync_fib->hw_fib_pa, + 0, 0, 0, 0, 0, + NULL, NULL, NULL, NULL, NULL); } } + if (!dev->msi_enabled) + mode = 0; + + } + + if (mode & AAC_INT_MODE_AIF) { + /* handle AIF */ + aac_intr_normal(dev, 0, 2, 0, NULL); + if (dev->msi_enabled) + aac_src_access_devreg(dev, AAC_CLEAR_AIF_BIT); + mode = 0; } - if (our_interrupt) { - return IRQ_HANDLED; + if (mode) { + index = dev->host_rrq_idx[vector_no]; + + for (;;) { + isFastResponse = 0; + /* remove toggle bit (31) */ + handle = (dev->host_rrq[index] & 0x7fffffff); + /* check fast response bit (30) */ + if (handle & 0x40000000) + isFastResponse = 1; + handle &= 0x0000ffff; + if (handle == 0) + break; + if (dev->msi_enabled && dev->max_msix > 1) + atomic_dec(&dev->rrq_outstanding[vector_no]); + aac_intr_normal(dev, handle-1, 0, isFastResponse, NULL); + dev->host_rrq[index++] = 0; + if (index == (vector_no + 1) * dev->vector_cap) + index = vector_no * dev->vector_cap; + dev->host_rrq_idx[vector_no] = index; + } + mode = 0; } - return IRQ_NONE; + + return IRQ_HANDLED; } /** @@ -155,7 +185,7 @@ static void aac_src_disable_interrupt(struct aac_dev *dev) static void aac_src_enable_interrupt_message(struct aac_dev *dev) { - src_writel(dev, MUnit.OIMR, dev->OIMR = 0xfffffff8); + aac_src_access_devreg(dev, AAC_ENABLE_INTERRUPT); } /** @@ -174,6 +204,7 @@ static int src_sync_cmd(struct aac_dev *dev, u32 command, u32 *status, u32 * r1, u32 * r2, u32 * r3, u32 * r4) { unsigned long start; + unsigned long delay; int ok; /* @@ -191,7 +222,10 @@ static int src_sync_cmd(struct aac_dev *dev, u32 command, /* * Clear the synch command doorbell to start on a clean slate. 
*/ - src_writel(dev, MUnit.ODR_C, OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT); + if (!dev->msi_enabled) + src_writel(dev, + MUnit.ODR_C, + OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT); /* * Disable doorbell interrupts @@ -213,19 +247,29 @@ static int src_sync_cmd(struct aac_dev *dev, u32 command, ok = 0; start = jiffies; - /* - * Wait up to 5 minutes - */ - while (time_before(jiffies, start+300*HZ)) { + if (command == IOP_RESET_ALWAYS) { + /* Wait up to 10 sec */ + delay = 10*HZ; + } else { + /* Wait up to 5 minutes */ + delay = 300*HZ; + } + while (time_before(jiffies, start+delay)) { udelay(5); /* Delay 5 microseconds to let Mon960 get info. */ /* * Mon960 will set doorbell0 bit when it has completed the command. */ - if ((src_readl(dev, MUnit.ODR_R) >> SRC_ODR_SHIFT) & OUTBOUNDDOORBELL_0) { + if (aac_src_get_sync_status(dev) & OUTBOUNDDOORBELL_0) { /* * Clear the doorbell. */ - src_writel(dev, MUnit.ODR_C, OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT); + if (dev->msi_enabled) + aac_src_access_devreg(dev, + AAC_CLEAR_SYNC_BIT); + else + src_writel(dev, + MUnit.ODR_C, + OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT); ok = 1; break; } @@ -254,11 +298,16 @@ static int src_sync_cmd(struct aac_dev *dev, u32 command, *r3 = readl(&dev->IndexRegs->Mailbox[3]); if (r4) *r4 = readl(&dev->IndexRegs->Mailbox[4]); - + if (command == GET_COMM_PREFERRED_SETTINGS) + dev->max_msix = + readl(&dev->IndexRegs->Mailbox[5]) & 0xFFFF; /* * Clear the synch command doorbell. */ - src_writel(dev, MUnit.ODR_C, OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT); + if (!dev->msi_enabled) + src_writel(dev, + MUnit.ODR_C, + OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT); } /* @@ -335,9 +384,14 @@ static void aac_src_notify_adapter(struct aac_dev *dev, u32 event) static void aac_src_start_adapter(struct aac_dev *dev) { struct aac_init *init; + int i; /* reset host_rrq_idx first */ - dev->host_rrq_idx = 0; + for (i = 0; i < dev->max_msix; i++) { + dev->host_rrq_idx[i] = i * dev->vector_cap; + atomic_set(&dev->rrq_outstanding[i], 0); + } + dev->fibs_pushed_no = 0; init = dev->init; init->HostElapsedSeconds = cpu_to_le32(get_seconds()); @@ -390,15 +444,39 @@ static int aac_src_deliver_message(struct fib *fib) { struct aac_dev *dev = fib->dev; struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue]; - unsigned long qflags; u32 fibsize; dma_addr_t address; struct aac_fib_xporthdr *pFibX; u16 hdr_size = le16_to_cpu(fib->hw_fib_va->header.Size); - spin_lock_irqsave(q->lock, qflags); - q->numpending++; - spin_unlock_irqrestore(q->lock, qflags); + atomic_inc(&q->numpending); + + if (dev->msi_enabled && fib->hw_fib_va->header.Command != AifRequest && + dev->max_msix > 1) { + u_int16_t vector_no, first_choice = 0xffff; + + vector_no = dev->fibs_pushed_no % dev->max_msix; + do { + vector_no += 1; + if (vector_no == dev->max_msix) + vector_no = 1; + if (atomic_read(&dev->rrq_outstanding[vector_no]) < + dev->vector_cap) + break; + if (0xffff == first_choice) + first_choice = vector_no; + else if (vector_no == first_choice) + break; + } while (1); + if (vector_no == first_choice) + vector_no = 0; + atomic_inc(&dev->rrq_outstanding[vector_no]); + if (dev->fibs_pushed_no == 0xffffffff) + dev->fibs_pushed_no = 0; + else + dev->fibs_pushed_no++; + fib->hw_fib_va->header.Handle += (vector_no << 16); + } if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) { /* Calculate the amount to the fibsize bits */ @@ -498,15 +576,34 @@ static int aac_src_restart_adapter(struct aac_dev *dev, int bled) if (bled) printk(KERN_ERR "%s%d: adapter kernel panic'd %x.\n", dev->name, dev->id, bled); + 
dev->a_ops.adapter_enable_int = aac_src_disable_interrupt; bled = aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS, 0, 0, 0, 0, 0, 0, &var, &reset_mask, NULL, NULL, NULL); - if (bled || (var != 0x00000001)) - return -EINVAL; - if (dev->supplement_adapter_info.SupportedOptions2 & - AAC_OPTION_DOORBELL_RESET) { - src_writel(dev, MUnit.IDR, reset_mask); + if ((bled || (var != 0x00000001)) && + !dev->doorbell_mask) + return -EINVAL; + else if (dev->doorbell_mask) { + reset_mask = dev->doorbell_mask; + bled = 0; + var = 0x00000001; + } + + if ((dev->pdev->device == PMC_DEVICE_S7 || + dev->pdev->device == PMC_DEVICE_S8 || + dev->pdev->device == PMC_DEVICE_S9) && dev->msi_enabled) { + aac_src_access_devreg(dev, AAC_ENABLE_INTX); + dev->msi_enabled = 0; msleep(5000); /* Delay 5 seconds */ } + + if (!bled && (dev->supplement_adapter_info.SupportedOptions2 & + AAC_OPTION_DOORBELL_RESET)) { + src_writel(dev, MUnit.IDR, reset_mask); + ssleep(45); + } else { + src_writel(dev, MUnit.IDR, 0x100); + ssleep(45); + } } if (src_readl(dev, MUnit.OMR) & KERNEL_PANIC) @@ -527,7 +624,6 @@ int aac_src_select_comm(struct aac_dev *dev, int comm) { switch (comm) { case AAC_COMM_MESSAGE: - dev->a_ops.adapter_enable_int = aac_src_enable_interrupt_message; dev->a_ops.adapter_intr = aac_src_intr_message; dev->a_ops.adapter_deliver = aac_src_deliver_message; break; @@ -625,6 +721,7 @@ int aac_src_init(struct aac_dev *dev) */ dev->a_ops.adapter_interrupt = aac_src_interrupt_adapter; dev->a_ops.adapter_disable_int = aac_src_disable_interrupt; + dev->a_ops.adapter_enable_int = aac_src_disable_interrupt; dev->a_ops.adapter_notify = aac_src_notify_adapter; dev->a_ops.adapter_sync_cmd = src_sync_cmd; dev->a_ops.adapter_check_health = aac_src_check_health; @@ -646,8 +743,11 @@ int aac_src_init(struct aac_dev *dev) dev->msi = aac_msi && !pci_enable_msi(dev->pdev); + dev->aac_msix[0].vector_no = 0; + dev->aac_msix[0].dev = dev; + if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr, - IRQF_SHARED, "aacraid", dev) < 0) { + IRQF_SHARED, "aacraid", &(dev->aac_msix[0])) < 0) { if (dev->msi) pci_disable_msi(dev->pdev); @@ -659,6 +759,7 @@ int aac_src_init(struct aac_dev *dev) dev->dbg_base = pci_resource_start(dev->pdev, 2); dev->dbg_base_mapped = dev->regs.src.bar1; dev->dbg_size = AAC_MIN_SRC_BAR1_SIZE; + dev->a_ops.adapter_enable_int = aac_src_enable_interrupt_message; aac_adapter_enable_int(dev); @@ -688,7 +789,9 @@ int aac_srcv_init(struct aac_dev *dev) unsigned long status; int restart = 0; int instance = dev->id; + int i, j; const char *name = dev->name; + int cpu; dev->a_ops.adapter_ioremap = aac_srcv_ioremap; dev->a_ops.adapter_comm = aac_src_select_comm; @@ -784,6 +887,7 @@ int aac_srcv_init(struct aac_dev *dev) */ dev->a_ops.adapter_interrupt = aac_src_interrupt_adapter; dev->a_ops.adapter_disable_int = aac_src_disable_interrupt; + dev->a_ops.adapter_enable_int = aac_src_disable_interrupt; dev->a_ops.adapter_notify = aac_src_notify_adapter; dev->a_ops.adapter_sync_cmd = src_sync_cmd; dev->a_ops.adapter_check_health = aac_src_check_health; @@ -802,18 +906,54 @@ int aac_srcv_init(struct aac_dev *dev) goto error_iounmap; if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE2) goto error_iounmap; - dev->msi = aac_msi && !pci_enable_msi(dev->pdev); - if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr, - IRQF_SHARED, "aacraid", dev) < 0) { - if (dev->msi) - pci_disable_msi(dev->pdev); - printk(KERN_ERR "%s%d: Interrupt unavailable.\n", - name, instance); - goto error_iounmap; + if (dev->msi_enabled) + aac_src_access_devreg(dev, 
AAC_ENABLE_MSIX); + if (!dev->sync_mode && dev->msi_enabled && dev->max_msix > 1) { + cpu = cpumask_first(cpu_online_mask); + for (i = 0; i < dev->max_msix; i++) { + dev->aac_msix[i].vector_no = i; + dev->aac_msix[i].dev = dev; + + if (request_irq(dev->msixentry[i].vector, + dev->a_ops.adapter_intr, + 0, + "aacraid", + &(dev->aac_msix[i]))) { + printk(KERN_ERR "%s%d: Failed to register IRQ for vector %d.\n", + name, instance, i); + for (j = 0 ; j < i ; j++) + free_irq(dev->msixentry[j].vector, + &(dev->aac_msix[j])); + pci_disable_msix(dev->pdev); + goto error_iounmap; + } + if (irq_set_affinity_hint( + dev->msixentry[i].vector, + get_cpu_mask(cpu))) { + printk(KERN_ERR "%s%d: Failed to set IRQ affinity for cpu %d\n", + name, instance, cpu); + } + cpu = cpumask_next(cpu, cpu_online_mask); + } + } else { + dev->aac_msix[0].vector_no = 0; + dev->aac_msix[0].dev = dev; + + if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr, + IRQF_SHARED, + "aacraid", + &(dev->aac_msix[0])) < 0) { + if (dev->msi) + pci_disable_msi(dev->pdev); + printk(KERN_ERR "%s%d: Interrupt unavailable.\n", + name, instance); + goto error_iounmap; + } } dev->dbg_base = dev->base_start; dev->dbg_base_mapped = dev->base; dev->dbg_size = dev->base_size; + dev->a_ops.adapter_enable_int = aac_src_enable_interrupt_message; aac_adapter_enable_int(dev); @@ -831,3 +971,93 @@ error_iounmap: return -1; } +void aac_src_access_devreg(struct aac_dev *dev, int mode) +{ + u_int32_t val; + + switch (mode) { + case AAC_ENABLE_INTERRUPT: + src_writel(dev, + MUnit.OIMR, + dev->OIMR = (dev->msi_enabled ? + AAC_INT_ENABLE_TYPE1_MSIX : + AAC_INT_ENABLE_TYPE1_INTX)); + break; + + case AAC_DISABLE_INTERRUPT: + src_writel(dev, + MUnit.OIMR, + dev->OIMR = AAC_INT_DISABLE_ALL); + break; + + case AAC_ENABLE_MSIX: + /* set bit 6 */ + val = src_readl(dev, MUnit.IDR); + val |= 0x40; + src_writel(dev, MUnit.IDR, val); + src_readl(dev, MUnit.IDR); + /* unmask int. */ + val = PMC_ALL_INTERRUPT_BITS; + src_writel(dev, MUnit.IOAR, val); + val = src_readl(dev, MUnit.OIMR); + src_writel(dev, + MUnit.OIMR, + val & (~(PMC_GLOBAL_INT_BIT2 | PMC_GLOBAL_INT_BIT0))); + break; + + case AAC_DISABLE_MSIX: + /* reset bit 6 */ + val = src_readl(dev, MUnit.IDR); + val &= ~0x40; + src_writel(dev, MUnit.IDR, val); + src_readl(dev, MUnit.IDR); + break; + + case AAC_CLEAR_AIF_BIT: + /* set bit 5 */ + val = src_readl(dev, MUnit.IDR); + val |= 0x20; + src_writel(dev, MUnit.IDR, val); + src_readl(dev, MUnit.IDR); + break; + + case AAC_CLEAR_SYNC_BIT: + /* set bit 4 */ + val = src_readl(dev, MUnit.IDR); + val |= 0x10; + src_writel(dev, MUnit.IDR, val); + src_readl(dev, MUnit.IDR); + break; + + case AAC_ENABLE_INTX: + /* set bit 7 */ + val = src_readl(dev, MUnit.IDR); + val |= 0x80; + src_writel(dev, MUnit.IDR, val); + src_readl(dev, MUnit.IDR); + /* unmask int. */ + val = PMC_ALL_INTERRUPT_BITS; + src_writel(dev, MUnit.IOAR, val); + src_readl(dev, MUnit.IOAR); + val = src_readl(dev, MUnit.OIMR); + src_writel(dev, MUnit.OIMR, + val & (~(PMC_GLOBAL_INT_BIT2))); + break; + + default: + break; + } +} + +static int aac_src_get_sync_status(struct aac_dev *dev) +{ + + int val; + + if (dev->msi_enabled) + val = src_readl(dev, MUnit.ODR_MSI) & 0x1000 ? 1 : 0; + else + val = src_readl(dev, MUnit.ODR_R) >> SRC_ODR_SHIFT; + + return val; +} |
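
Note on the VPD page 0x83 change in aachba.c: the callback builds the T10 vendor-ID descriptor by expanding the 32-bit container UID into eight ASCII hex digits, most significant nibble first. A minimal standalone sketch of that conversion follows; the function and variable names here are illustrative, not the driver's own.

#include <stdio.h>
#include <stdint.h>

/* Expand a 32-bit UID into 8 ASCII hex digits, MSB nibble first,
 * mirroring the loop added to get_container_serial_callback(). */
static void uid_to_ascii_hex(uint32_t uid, char out[9])
{
	int i;

	for (i = 0; i < 8; i++) {
		uint8_t nibble = (uid >> ((7 - i) * 4)) & 0xF;

		out[i] = (nibble > 0x9) ? 'A' + (nibble - 0xA)
					: '0' + nibble;
	}
	out[8] = '\0';
}

int main(void)
{
	char sn[9];

	uid_to_ascii_hex(0x0012ABCDu, sn);
	printf("%s\n", sn);	/* prints 0012ABCD */
	return 0;
}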
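
Note on the variable block size support in aachba.c: the places that assumed 512-byte sectors (count << 9 and the fixed 00 00 02 00 block-length bytes in READ CAPACITY and the MODE SENSE block descriptor) now use the per-container block_size reported by the firmware when AAC_OPTION_VARIABLE_BLOCK_SIZE is set. A small standalone illustration of the big-endian packing of that field, with an assumed 4 KB block size:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t block_size = 4096;	/* assumed value; the driver reads fsa_dev[cid].block_size */
	uint8_t cp[4];

	/* Block length in bytes, big-endian, replacing the fixed 00 00 02 00. */
	cp[0] = (block_size >> 24) & 0xff;
	cp[1] = (block_size >> 16) & 0xff;
	cp[2] = (block_size >> 8) & 0xff;
	cp[3] = block_size & 0xff;

	printf("%02X %02X %02X %02X\n", cp[0], cp[1], cp[2], cp[3]);	/* 00 00 10 00 */
	return 0;
}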
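
Note on the MSI-X changes in src.c: aac_src_deliver_message() now spreads FIBs across vectors 1..max_msix-1 in round-robin fashion, skipping any vector that already has vector_cap commands outstanding, and falls back to vector 0 only when every other vector is full (vector 0 also carries AIF and sync events). Below is a simplified user-space model of that selection logic, assuming max_msix > 1; the helper name and parameters are hypothetical stand-ins for the driver's per-vector state.

#include <stdio.h>

#define MAX_MSIX 8

/* Pick a completion vector the way aac_src_deliver_message() does:
 * start after (pushed % max_msix), walk vectors 1..max_msix-1, take
 * the first one below vector_cap, and fall back to vector 0 if the
 * walk wraps around without finding room. */
static int pick_vector(unsigned int pushed, int max_msix,
		       const int outstanding[], int vector_cap)
{
	int vector_no = pushed % max_msix;
	int first_choice = 0xffff;

	do {
		vector_no += 1;
		if (vector_no == max_msix)
			vector_no = 1;
		if (outstanding[vector_no] < vector_cap)
			break;
		if (first_choice == 0xffff)
			first_choice = vector_no;
		else if (vector_no == first_choice)
			break;	/* wrapped: every I/O vector is full */
	} while (1);

	if (vector_no == first_choice)
		vector_no = 0;	/* all full, fall back to vector 0 */
	return vector_no;
}

int main(void)
{
	int outstanding[MAX_MSIX] = { 0, 4, 4, 1, 0, 4, 4, 4 };

	printf("%d\n", pick_vector(10, MAX_MSIX, outstanding, 4));	/* prints 3 */
	return 0;
}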