author:    Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 15:20:36 -0700
committer: Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 15:20:36 -0700
commit:    1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree:      0bba044c4ce775e45a88a51686b5d9f90697ea9d /drivers/scsi/megaraid
download:  lwn-1da177e4c3f41524e886b7f1b8a0c1fc7321cac2.tar.gz
           lwn-1da177e4c3f41524e886b7f1b8a0c1fc7321cac2.zip
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'drivers/scsi/megaraid')
-rw-r--r--  drivers/scsi/megaraid/Kconfig.megaraid  |   78
-rw-r--r--  drivers/scsi/megaraid/Makefile          |    2
-rw-r--r--  drivers/scsi/megaraid/mbox_defs.h       |  790
-rw-r--r--  drivers/scsi/megaraid/mega_common.h     |  286
-rw-r--r--  drivers/scsi/megaraid/megaraid_ioctl.h  |  296
-rw-r--r--  drivers/scsi/megaraid/megaraid_mbox.c   | 4276
-rw-r--r--  drivers/scsi/megaraid/megaraid_mbox.h   |  288
-rw-r--r--  drivers/scsi/megaraid/megaraid_mm.c     | 1255
-rw-r--r--  drivers/scsi/megaraid/megaraid_mm.h     |  102
9 files changed, 7373 insertions, 0 deletions
diff --git a/drivers/scsi/megaraid/Kconfig.megaraid b/drivers/scsi/megaraid/Kconfig.megaraid new file mode 100644 index 000000000000..917d591d90b2 --- /dev/null +++ b/drivers/scsi/megaraid/Kconfig.megaraid @@ -0,0 +1,78 @@ +config MEGARAID_NEWGEN + bool "LSI Logic New Generation RAID Device Drivers" + depends on PCI && SCSI + help + LSI Logic RAID Device Drivers + +config MEGARAID_MM + tristate "LSI Logic Management Module (New Driver)" + depends on PCI && SCSI && MEGARAID_NEWGEN + help + Management Module provides ioctl, sysfs support for LSI Logic + RAID controllers. + To compile this driver as a module, choose M here: the + module will be called megaraid_mm + + +config MEGARAID_MAILBOX + tristate "LSI Logic MegaRAID Driver (New Driver)" + depends on PCI && SCSI && MEGARAID_MM + help + List of supported controllers + + OEM Product Name VID :DID :SVID:SSID + --- ------------ ---- ---- ---- ---- + Dell PERC3/QC 101E:1960:1028:0471 + Dell PERC3/DC 101E:1960:1028:0493 + Dell PERC3/SC 101E:1960:1028:0475 + Dell PERC3/Di 1028:000E:1028:0123 + Dell PERC4/SC 1000:1960:1028:0520 + Dell PERC4/DC 1000:1960:1028:0518 + Dell PERC4/QC 1000:0407:1028:0531 + Dell PERC4/Di 1028:000F:1028:014A + Dell PERC 4e/Si 1028:0013:1028:016c + Dell PERC 4e/Di 1028:0013:1028:016d + Dell PERC 4e/Di 1028:0013:1028:016e + Dell PERC 4e/Di 1028:0013:1028:016f + Dell PERC 4e/Di 1028:0013:1028:0170 + Dell PERC 4e/DC 1000:0408:1028:0002 + Dell PERC 4e/SC 1000:0408:1028:0001 + LSI MegaRAID SCSI 320-0 1000:1960:1000:A520 + LSI MegaRAID SCSI 320-1 1000:1960:1000:0520 + LSI MegaRAID SCSI 320-2 1000:1960:1000:0518 + LSI MegaRAID SCSI 320-0X 1000:0407:1000:0530 + LSI MegaRAID SCSI 320-2X 1000:0407:1000:0532 + LSI MegaRAID SCSI 320-4X 1000:0407:1000:0531 + LSI MegaRAID SCSI 320-1E 1000:0408:1000:0001 + LSI MegaRAID SCSI 320-2E 1000:0408:1000:0002 + LSI MegaRAID SATA 150-4 1000:1960:1000:4523 + LSI MegaRAID SATA 150-6 1000:1960:1000:0523 + LSI MegaRAID SATA 300-4X 1000:0409:1000:3004 + LSI MegaRAID SATA 300-8X 1000:0409:1000:3008 + INTEL RAID Controller SRCU42X 1000:0407:8086:0532 + INTEL RAID Controller SRCS16 1000:1960:8086:0523 + INTEL RAID Controller SRCU42E 1000:0408:8086:0002 + INTEL RAID Controller SRCZCRX 1000:0407:8086:0530 + INTEL RAID Controller SRCS28X 1000:0409:8086:3008 + INTEL RAID Controller SROMBU42E 1000:0408:8086:3431 + INTEL RAID Controller SROMBU42E 1000:0408:8086:3499 + INTEL RAID Controller SRCU51L 1000:1960:8086:0520 + FSC MegaRAID PCI Express ROMB 1000:0408:1734:1065 + ACER MegaRAID ROMB-2E 1000:0408:1025:004D + NEC MegaRAID PCI Express ROMB 1000:0408:1033:8287 + + To compile this driver as a module, choose M here: the + module will be called megaraid_mbox + +if MEGARAID_NEWGEN=n +config MEGARAID_LEGACY + tristate "LSI Logic Legacy MegaRAID Driver" + depends on PCI && SCSI + help + This driver supports the LSI MegaRAID 418, 428, 438, 466, 762, 490 + and 467 SCSI host adapters. 
This driver also support the all U320 + RAID controllers + + To compile this driver as a module, choose M here: the + module will be called megaraid +endif diff --git a/drivers/scsi/megaraid/Makefile b/drivers/scsi/megaraid/Makefile new file mode 100644 index 000000000000..6dd99f275722 --- /dev/null +++ b/drivers/scsi/megaraid/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_MEGARAID_MM) += megaraid_mm.o +obj-$(CONFIG_MEGARAID_MAILBOX) += megaraid_mbox.o diff --git a/drivers/scsi/megaraid/mbox_defs.h b/drivers/scsi/megaraid/mbox_defs.h new file mode 100644 index 000000000000..3052869f51f4 --- /dev/null +++ b/drivers/scsi/megaraid/mbox_defs.h @@ -0,0 +1,790 @@ +/* + * + * Linux MegaRAID Unified device driver + * + * Copyright (c) 2003-2004 LSI Logic Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * FILE : mbox_defs.h + * + */ +#ifndef _MRAID_MBOX_DEFS_H_ +#define _MRAID_MBOX_DEFS_H_ + +#include <linux/types.h> + +/* + * Commands and states for mailbox based controllers + */ + +#define MBOXCMD_LREAD 0x01 +#define MBOXCMD_LWRITE 0x02 +#define MBOXCMD_PASSTHRU 0x03 +#define MBOXCMD_ADPEXTINQ 0x04 +#define MBOXCMD_ADAPTERINQ 0x05 +#define MBOXCMD_LREAD64 0xA7 +#define MBOXCMD_LWRITE64 0xA8 +#define MBOXCMD_PASSTHRU64 0xC3 +#define MBOXCMD_EXTPTHRU 0xE3 + +#define MAIN_MISC_OPCODE 0xA4 +#define GET_MAX_SG_SUPPORT 0x01 +#define SUPPORT_EXT_CDB 0x16 + +#define FC_NEW_CONFIG 0xA1 +#define NC_SUBOP_PRODUCT_INFO 0x0E +#define NC_SUBOP_ENQUIRY3 0x0F +#define ENQ3_GET_SOLICITED_FULL 0x02 +#define OP_DCMD_READ_CONFIG 0x04 +#define NEW_READ_CONFIG_8LD 0x67 +#define READ_CONFIG_8LD 0x07 +#define FLUSH_ADAPTER 0x0A +#define FLUSH_SYSTEM 0xFE + +/* + * Command for random deletion of logical drives + */ +#define FC_DEL_LOGDRV 0xA4 +#define OP_SUP_DEL_LOGDRV 0x2A +#define OP_GET_LDID_MAP 0x18 +#define OP_DEL_LOGDRV 0x1C + +/* + * BIOS commands + */ +#define IS_BIOS_ENABLED 0x62 +#define GET_BIOS 0x01 +#define CHNL_CLASS 0xA9 +#define GET_CHNL_CLASS 0x00 +#define SET_CHNL_CLASS 0x01 +#define CH_RAID 0x01 +#define CH_SCSI 0x00 +#define BIOS_PVT_DATA 0x40 +#define GET_BIOS_PVT_DATA 0x00 + + +/* + * Commands to support clustering + */ +#define GET_TARGET_ID 0x7D +#define CLUSTER_OP 0x70 +#define GET_CLUSTER_MODE 0x02 +#define CLUSTER_CMD 0x6E +#define RESERVE_LD 0x01 +#define RELEASE_LD 0x02 +#define RESET_RESERVATIONS 0x03 +#define RESERVATION_STATUS 0x04 +#define RESERVE_PD 0x05 +#define RELEASE_PD 0x06 + + +/* + * Module battery status + */ +#define BATTERY_MODULE_MISSING 0x01 +#define BATTERY_LOW_VOLTAGE 0x02 +#define BATTERY_TEMP_HIGH 0x04 +#define BATTERY_PACK_MISSING 0x08 +#define BATTERY_CHARGE_MASK 0x30 +#define BATTERY_CHARGE_DONE 0x00 +#define BATTERY_CHARGE_INPROG 0x10 +#define BATTERY_CHARGE_FAIL 0x20 +#define BATTERY_CYCLES_EXCEEDED 0x40 + +/* + * Physical drive states. + */ +#define PDRV_UNCNF 0 +#define PDRV_ONLINE 3 +#define PDRV_FAILED 4 +#define PDRV_RBLD 5 +#define PDRV_HOTSPARE 6 + + +/* + * Raid logical drive states. 
+ */ +#define RDRV_OFFLINE 0 +#define RDRV_DEGRADED 1 +#define RDRV_OPTIMAL 2 +#define RDRV_DELETED 3 + +/* + * Read, write and cache policies + */ +#define NO_READ_AHEAD 0 +#define READ_AHEAD 1 +#define ADAP_READ_AHEAD 2 +#define WRMODE_WRITE_THRU 0 +#define WRMODE_WRITE_BACK 1 +#define CACHED_IO 0 +#define DIRECT_IO 1 + +#define MAX_LOGICAL_DRIVES_8LD 8 +#define MAX_LOGICAL_DRIVES_40LD 40 +#define FC_MAX_PHYSICAL_DEVICES 256 +#define MAX_MBOX_CHANNELS 5 +#define MAX_MBOX_TARGET 15 +#define MBOX_MAX_PHYSICAL_DRIVES MAX_MBOX_CHANNELS*MAX_MBOX_TARGET +#define MAX_ROW_SIZE_40LD 32 +#define MAX_ROW_SIZE_8LD 8 +#define SPAN_DEPTH_8_SPANS 8 +#define SPAN_DEPTH_4_SPANS 4 +#define MAX_REQ_SENSE_LEN 0x20 + + + +/** + * struct mbox_t - Driver and f/w handshake structure. + * @cmd : firmware command + * @cmdid : command id + * @numsectors : number of sectors to be transferred + * @lba : Logical Block Address on LD + * @xferaddr : DMA address for data transfer + * @logdrv : logical drive number + * @numsge : number of scatter gather elements in sg list + * @resvd : reserved + * @busy : f/w busy, must wait to issue more commands. + * @numstatus : number of commands completed. + * @status : status of the commands completed + * @completed : array of completed command ids. + * @poll : poll and ack sequence + * @ack : poll and ack sequence + * + * The central handshake structure between the driver and the firmware. This + * structure must be allocated by the driver and aligned at 8-byte boundary. + */ +#define MBOX_MAX_FIRMWARE_STATUS 46 +typedef struct { + uint8_t cmd; + uint8_t cmdid; + uint16_t numsectors; + uint32_t lba; + uint32_t xferaddr; + uint8_t logdrv; + uint8_t numsge; + uint8_t resvd; + uint8_t busy; + uint8_t numstatus; + uint8_t status; + uint8_t completed[MBOX_MAX_FIRMWARE_STATUS]; + uint8_t poll; + uint8_t ack; +} __attribute__ ((packed)) mbox_t; + + +/** + * mbox64_t - 64-bit extension for the mailbox + * @segment_lo : the low 32-bits of the address of the scatter-gather list + * @segment_hi : the upper 32-bits of the address of the scatter-gather list + * @mbox : 32-bit mailbox, whose xferadder field must be set to + * 0xFFFFFFFF + * + * This is the extension of the 32-bit mailbox to be able to perform DMA + * beyond 4GB address range. + */ +typedef struct { + uint32_t xferaddr_lo; + uint32_t xferaddr_hi; + mbox_t mbox32; +} __attribute__ ((packed)) mbox64_t; + +/* + * mailbox structure used for internal commands + */ +typedef struct { + u8 cmd; + u8 cmdid; + u8 opcode; + u8 subopcode; + u32 lba; + u32 xferaddr; + u8 logdrv; + u8 rsvd[3]; + u8 numstatus; + u8 status; +} __attribute__ ((packed)) int_mbox_t; + +/** + * mraid_passthru_t - passthru structure to issue commands to physical devices + * @timeout : command timeout, 0=6sec, 1=60sec, 2=10min, 3=3hr + * @ars : set if ARS required after check condition + * @islogical : set if command meant for logical devices + * @logdrv : logical drive number if command for LD + * @channel : Channel on which physical device is located + * @target : SCSI target of the device + * @queuetag : unused + * @queueaction : unused + * @cdb : SCSI CDB + * @cdblen : length of the CDB + * @reqsenselen : amount of request sense data to be returned + * @reqsensearea : Sense information buffer + * @numsge : number of scatter-gather elements in the sg list + * @scsistatus : SCSI status of the command completed. + * @dataxferaddr : DMA data transfer address + * @dataxferlen : amount of the data to be transferred. 
+ */ +typedef struct { + uint8_t timeout :3; + uint8_t ars :1; + uint8_t reserved :3; + uint8_t islogical :1; + uint8_t logdrv; + uint8_t channel; + uint8_t target; + uint8_t queuetag; + uint8_t queueaction; + uint8_t cdb[10]; + uint8_t cdblen; + uint8_t reqsenselen; + uint8_t reqsensearea[MAX_REQ_SENSE_LEN]; + uint8_t numsge; + uint8_t scsistatus; + uint32_t dataxferaddr; + uint32_t dataxferlen; +} __attribute__ ((packed)) mraid_passthru_t; + +typedef struct { + + uint32_t dataxferaddr_lo; + uint32_t dataxferaddr_hi; + mraid_passthru_t pthru32; + +} __attribute__ ((packed)) mega_passthru64_t; + +/** + * mraid_epassthru_t - passthru structure to issue commands to physical devices + * @timeout : command timeout, 0=6sec, 1=60sec, 2=10min, 3=3hr + * @ars : set if ARS required after check condition + * @rsvd1 : reserved field + * @cd_rom : (?) + * @rsvd2 : reserved field + * @islogical : set if command meant for logical devices + * @logdrv : logical drive number if command for LD + * @channel : Channel on which physical device is located + * @target : SCSI target of the device + * @queuetag : unused + * @queueaction : unused + * @cdblen : length of the CDB + * @rsvd3 : reserved field + * @cdb : SCSI CDB + * @numsge : number of scatter-gather elements in the sg list + * @status : SCSI status of the command completed. + * @reqsenselen : amount of request sense data to be returned + * @reqsensearea : Sense information buffer + * @rsvd4 : reserved field + * @dataxferaddr : DMA data transfer address + * @dataxferlen : amount of the data to be transferred. + */ +typedef struct { + uint8_t timeout :3; + uint8_t ars :1; + uint8_t rsvd1 :1; + uint8_t cd_rom :1; + uint8_t rsvd2 :1; + uint8_t islogical :1; + uint8_t logdrv; + uint8_t channel; + uint8_t target; + uint8_t queuetag; + uint8_t queueaction; + uint8_t cdblen; + uint8_t rsvd3; + uint8_t cdb[16]; + uint8_t numsge; + uint8_t status; + uint8_t reqsenselen; + uint8_t reqsensearea[MAX_REQ_SENSE_LEN]; + uint8_t rsvd4; + uint32_t dataxferaddr; + uint32_t dataxferlen; +} __attribute__ ((packed)) mraid_epassthru_t; + + +/** + * mraid_pinfo_t - product info, static information about the controller + * @data_size : current size in bytes (not including resvd) + * @config_signature : Current value is 0x00282008 + * @fw_version : Firmware version + * @bios_version : version of the BIOS + * @product_name : Name given to the controller + * @max_commands : Maximum concurrent commands supported + * @nchannels : Number of SCSI Channels detected + * @fc_loop_present : Number of Fibre Loops detected + * @mem_type : EDO, FPM, SDRAM etc + * @signature : + * @dram_size : In terms of MB + * @subsysid : device PCI subsystem ID + * @subsysvid : device PCI subsystem vendor ID + * @notify_counters : + * @pad1k : 135 + 889 resvd = 1024 total size + * + * This structures holds the information about the controller which is not + * expected to change dynamically. 
+ * + * The current value of config signature is 0x00282008: + * 0x28 = MAX_LOGICAL_DRIVES, + * 0x20 = Number of stripes and + * 0x08 = Number of spans + */ +typedef struct { + uint32_t data_size; + uint32_t config_signature; + uint8_t fw_version[16]; + uint8_t bios_version[16]; + uint8_t product_name[80]; + uint8_t max_commands; + uint8_t nchannels; + uint8_t fc_loop_present; + uint8_t mem_type; + uint32_t signature; + uint16_t dram_size; + uint16_t subsysid; + uint16_t subsysvid; + uint8_t notify_counters; + uint8_t pad1k[889]; +} __attribute__ ((packed)) mraid_pinfo_t; + + +/** + * mraid_notify_t - the notification structure + * @global_counter : Any change increments this counter + * @param_counter : Indicates any params changed + * @param_id : Param modified - defined below + * @param_val : New val of last param modified + * @write_config_counter : write config occurred + * @write_config_rsvd : + * @ldrv_op_counter : Indicates ldrv op started/completed + * @ldrv_opid : ldrv num + * @ldrv_opcmd : ldrv operation - defined below + * @ldrv_opstatus : status of the operation + * @ldrv_state_counter : Indicates change of ldrv state + * @ldrv_state_id : ldrv num + * @ldrv_state_new : New state + * @ldrv_state_old : old state + * @pdrv_state_counter : Indicates change of ldrv state + * @pdrv_state_id : pdrv id + * @pdrv_state_new : New state + * @pdrv_state_old : old state + * @pdrv_fmt_counter : Indicates pdrv format started/over + * @pdrv_fmt_id : pdrv id + * @pdrv_fmt_val : format started/over + * @pdrv_fmt_rsvd : + * @targ_xfer_counter : Indicates SCSI-2 Xfer rate change + * @targ_xfer_id : pdrv Id + * @targ_xfer_val : new Xfer params of last pdrv + * @targ_xfer_rsvd : + * @fcloop_id_chg_counter : Indicates loopid changed + * @fcloopid_pdrvid : pdrv id + * @fcloop_id0 : loopid on fc loop 0 + * @fcloop_id1 : loopid on fc loop 1 + * @fcloop_state_counter : Indicates loop state changed + * @fcloop_state0 : state of fc loop 0 + * @fcloop_state1 : state of fc loop 1 + * @fcloop_state_rsvd : + */ +typedef struct { + uint32_t global_counter; + uint8_t param_counter; + uint8_t param_id; + uint16_t param_val; + uint8_t write_config_counter; + uint8_t write_config_rsvd[3]; + uint8_t ldrv_op_counter; + uint8_t ldrv_opid; + uint8_t ldrv_opcmd; + uint8_t ldrv_opstatus; + uint8_t ldrv_state_counter; + uint8_t ldrv_state_id; + uint8_t ldrv_state_new; + uint8_t ldrv_state_old; + uint8_t pdrv_state_counter; + uint8_t pdrv_state_id; + uint8_t pdrv_state_new; + uint8_t pdrv_state_old; + uint8_t pdrv_fmt_counter; + uint8_t pdrv_fmt_id; + uint8_t pdrv_fmt_val; + uint8_t pdrv_fmt_rsvd; + uint8_t targ_xfer_counter; + uint8_t targ_xfer_id; + uint8_t targ_xfer_val; + uint8_t targ_xfer_rsvd; + uint8_t fcloop_id_chg_counter; + uint8_t fcloopid_pdrvid; + uint8_t fcloop_id0; + uint8_t fcloop_id1; + uint8_t fcloop_state_counter; + uint8_t fcloop_state0; + uint8_t fcloop_state1; + uint8_t fcloop_state_rsvd; +} __attribute__ ((packed)) mraid_notify_t; + + +/** + * mraid_inquiry3_t - enquiry for device information + * + * @data_size : current size in bytes (not including resvd) + * @notify : + * @notify_rsvd : + * @rebuild_rate : rebuild rate (0% - 100%) + * @cache_flush_int : cache flush interval in seconds + * @sense_alert : + * @drive_insert_count : drive insertion count + * @battery_status : + * @num_ldrv : no. 
of Log Drives configured + * @recon_state : state of reconstruct + * @ldrv_op_status : logdrv Status + * @ldrv_size : size of each log drv + * @ldrv_prop : + * @ldrv_state : state of log drives + * @pdrv_state : state of phys drvs. + * @pdrv_format : + * @targ_xfer : phys device transfer rate + * @pad1k : 761 + 263reserved = 1024 bytes total size + */ +#define MAX_NOTIFY_SIZE 0x80 +#define CUR_NOTIFY_SIZE sizeof(mraid_notify_t) + +typedef struct { + uint32_t data_size; + + mraid_notify_t notify; + + uint8_t notify_rsvd[MAX_NOTIFY_SIZE - CUR_NOTIFY_SIZE]; + + uint8_t rebuild_rate; + uint8_t cache_flush_int; + uint8_t sense_alert; + uint8_t drive_insert_count; + + uint8_t battery_status; + uint8_t num_ldrv; + uint8_t recon_state[MAX_LOGICAL_DRIVES_40LD / 8]; + uint16_t ldrv_op_status[MAX_LOGICAL_DRIVES_40LD / 8]; + + uint32_t ldrv_size[MAX_LOGICAL_DRIVES_40LD]; + uint8_t ldrv_prop[MAX_LOGICAL_DRIVES_40LD]; + uint8_t ldrv_state[MAX_LOGICAL_DRIVES_40LD]; + uint8_t pdrv_state[FC_MAX_PHYSICAL_DEVICES]; + uint16_t pdrv_format[FC_MAX_PHYSICAL_DEVICES / 16]; + + uint8_t targ_xfer[80]; + uint8_t pad1k[263]; +} __attribute__ ((packed)) mraid_inquiry3_t; + + +/** + * mraid_adapinfo_t - information about the adapter + * @max_commands : max concurrent commands supported + * @rebuild_rate : rebuild rate - 0% thru 100% + * @max_targ_per_chan : max targ per channel + * @nchannels : number of channels on HBA + * @fw_version : firmware version + * @age_of_flash : number of times FW has been flashed + * @chip_set_value : contents of 0xC0000832 + * @dram_size : in MB + * @cache_flush_interval : in seconds + * @bios_version : + * @board_type : + * @sense_alert : + * @write_config_count : increase with every configuration change + * @drive_inserted_count : increase with every drive inserted + * @inserted_drive : channel:Id of inserted drive + * @battery_status : bit 0: battery module missing + * bit 1: VBAD + * bit 2: temprature high + * bit 3: battery pack missing + * bit 4,5: + * 00 - charge complete + * 01 - fast charge in progress + * 10 - fast charge fail + * 11 - undefined + * bit 6: counter > 1000 + * bit 7: Undefined + * @dec_fault_bus_info : + */ +typedef struct { + uint8_t max_commands; + uint8_t rebuild_rate; + uint8_t max_targ_per_chan; + uint8_t nchannels; + uint8_t fw_version[4]; + uint16_t age_of_flash; + uint8_t chip_set_value; + uint8_t dram_size; + uint8_t cache_flush_interval; + uint8_t bios_version[4]; + uint8_t board_type; + uint8_t sense_alert; + uint8_t write_config_count; + uint8_t battery_status; + uint8_t dec_fault_bus_info; +} __attribute__ ((packed)) mraid_adapinfo_t; + + +/** + * mraid_ldrv_info_t - information about the logical drives + * @nldrv : Number of logical drives configured + * @rsvd : + * @size : size of each logical drive + * @prop : + * @state : state of each logical drive + */ +typedef struct { + uint8_t nldrv; + uint8_t rsvd[3]; + uint32_t size[MAX_LOGICAL_DRIVES_8LD]; + uint8_t prop[MAX_LOGICAL_DRIVES_8LD]; + uint8_t state[MAX_LOGICAL_DRIVES_8LD]; +} __attribute__ ((packed)) mraid_ldrv_info_t; + + +/** + * mraid_pdrv_info_t - information about the physical drives + * @pdrv_state : state of each physical drive + */ +typedef struct { + uint8_t pdrv_state[MBOX_MAX_PHYSICAL_DRIVES]; + uint8_t rsvd; +} __attribute__ ((packed)) mraid_pdrv_info_t; + + +/** + * mraid_inquiry_t - RAID inquiry, mailbox command 0x05 + * @mraid_adapinfo_t : adapter information + * @mraid_ldrv_info_t : logical drives information + * @mraid_pdrv_info_t : physical drives information + */ +typedef 
struct { + mraid_adapinfo_t adapter_info; + mraid_ldrv_info_t logdrv_info; + mraid_pdrv_info_t pdrv_info; +} __attribute__ ((packed)) mraid_inquiry_t; + + +/** + * mraid_extinq_t - RAID extended inquiry, mailbox command 0x04 + * + * @raid_inq : raid inquiry + * @phys_drv_format : + * @stack_attn : + * @modem_status : + * @rsvd : + */ +typedef struct { + mraid_inquiry_t raid_inq; + uint16_t phys_drv_format[MAX_MBOX_CHANNELS]; + uint8_t stack_attn; + uint8_t modem_status; + uint8_t rsvd[2]; +} __attribute__ ((packed)) mraid_extinq_t; + + +/** + * adap_device_t - device information + * @channel : channel fpor the device + * @target : target ID of the device + */ +typedef struct { + uint8_t channel; + uint8_t target; +}__attribute__ ((packed)) adap_device_t; + + +/** + * adap_span_40ld_t - 40LD span + * @start_blk : starting block + * @num_blks : number of blocks + */ +typedef struct { + uint32_t start_blk; + uint32_t num_blks; + adap_device_t device[MAX_ROW_SIZE_40LD]; +}__attribute__ ((packed)) adap_span_40ld_t; + + +/** + * adap_span_8ld_t - 8LD span + * @start_blk : starting block + * @num_blks : number of blocks + */ +typedef struct { + uint32_t start_blk; + uint32_t num_blks; + adap_device_t device[MAX_ROW_SIZE_8LD]; +}__attribute__ ((packed)) adap_span_8ld_t; + + +/** + * logdrv_param_t - logical drives parameters + * + * @span_depth : total number of spans + * @level : RAID level + * @read_ahead : read ahead, no read ahead, adaptive read ahead + * @stripe_sz : encoded stripe size + * @status : status of the logical drive + * @write_mode : write mode, write_through/write_back + * @direct_io : direct io or through cache + * @row_size : number of stripes in a row + */ +typedef struct { + uint8_t span_depth; + uint8_t level; + uint8_t read_ahead; + uint8_t stripe_sz; + uint8_t status; + uint8_t write_mode; + uint8_t direct_io; + uint8_t row_size; +} __attribute__ ((packed)) logdrv_param_t; + + +/** + * logdrv_40ld_t - logical drive definition for 40LD controllers + * @lparam : logical drives parameters + * @span : span + */ +typedef struct { + logdrv_param_t lparam; + adap_span_40ld_t span[SPAN_DEPTH_8_SPANS]; +}__attribute__ ((packed)) logdrv_40ld_t; + + +/** + * logdrv_8ld_span8_t - logical drive definition for 8LD controllers + * @lparam : logical drives parameters + * @span : span + * + * 8-LD logical drive with upto 8 spans + */ +typedef struct { + logdrv_param_t lparam; + adap_span_8ld_t span[SPAN_DEPTH_8_SPANS]; +}__attribute__ ((packed)) logdrv_8ld_span8_t; + + +/** + * logdrv_8ld_span4_t - logical drive definition for 8LD controllers + * @lparam : logical drives parameters + * @span : span + * + * 8-LD logical drive with upto 4 spans + */ +typedef struct { + logdrv_param_t lparam; + adap_span_8ld_t span[SPAN_DEPTH_4_SPANS]; +}__attribute__ ((packed)) logdrv_8ld_span4_t; + + +/** + * phys_drive_t - physical device information + * @type : Type of the device + * @cur_status : current status of the device + * @tag_depth : Level of tagging + * @sync_neg : sync negotiation - ENABLE or DISBALE + * @size : configurable size in terms of 512 byte + */ +typedef struct { + uint8_t type; + uint8_t cur_status; + uint8_t tag_depth; + uint8_t sync_neg; + uint32_t size; +}__attribute__ ((packed)) phys_drive_t; + + +/** + * disk_array_40ld_t - disk array for 40LD controllers + * @numldrv : number of logical drives + * @resvd : + * @ldrv : logical drives information + * @pdrv : physical drives information + */ +typedef struct { + uint8_t numldrv; + uint8_t resvd[3]; + logdrv_40ld_t 
ldrv[MAX_LOGICAL_DRIVES_40LD]; + phys_drive_t pdrv[MBOX_MAX_PHYSICAL_DRIVES]; +}__attribute__ ((packed)) disk_array_40ld_t; + + +/** + * disk_array_8ld_span8_t - disk array for 8LD controllers + * @numldrv : number of logical drives + * @resvd : + * @ldrv : logical drives information + * @pdrv : physical drives information + * + * Disk array for 8LD logical drives with upto 8 spans + */ +typedef struct { + uint8_t numldrv; + uint8_t resvd[3]; + logdrv_8ld_span8_t ldrv[MAX_LOGICAL_DRIVES_8LD]; + phys_drive_t pdrv[MBOX_MAX_PHYSICAL_DRIVES]; +}__attribute__ ((packed)) disk_array_8ld_span8_t; + + +/** + * disk_array_8ld_span4_t - disk array for 8LD controllers + * @numldrv : number of logical drives + * @resvd : + * @ldrv : logical drives information + * @pdrv : physical drives information + * + * Disk array for 8LD logical drives with upto 4 spans + */ +typedef struct { + uint8_t numldrv; + uint8_t resvd[3]; + logdrv_8ld_span4_t ldrv[MAX_LOGICAL_DRIVES_8LD]; + phys_drive_t pdrv[MBOX_MAX_PHYSICAL_DRIVES]; +}__attribute__ ((packed)) disk_array_8ld_span4_t; + + +/** + * private_bios_data - bios private data for boot devices + * @geometry : bits 0-3 - BIOS geometry, 0x0001 - 1GB, 0x0010 - 2GB, + * 0x1000 - 8GB, Others values are invalid + * @unused : bits 4-7 are unused + * @boot_drv : logical drive set as boot drive, 0..7 - for 8LD cards, + * 0..39 - for 40LD cards + * @cksum : 0-(sum of first 13 bytes of this structure) + */ +struct private_bios_data { + uint8_t geometry :4; + uint8_t unused :4; + uint8_t boot_drv; + uint8_t rsvd[12]; + uint16_t cksum; +} __attribute__ ((packed)); + + +/** + * mbox_sgl64 - 64-bit scatter list for mailbox based controllers + * @address : address of the buffer + * @length : data transfer length + */ +typedef struct { + uint64_t address; + uint32_t length; +} __attribute__ ((packed)) mbox_sgl64; + +/** + * mbox_sgl32 - 32-bit scatter list for mailbox based controllers + * @address : address of the buffer + * @length : data transfer length + */ +typedef struct { + uint32_t address; + uint32_t length; +} __attribute__ ((packed)) mbox_sgl32; + +#endif // _MRAID_MBOX_DEFS_H_ + +/* vim: set ts=8 sw=8 tw=78: */ diff --git a/drivers/scsi/megaraid/mega_common.h b/drivers/scsi/megaraid/mega_common.h new file mode 100644 index 000000000000..18969a4946b7 --- /dev/null +++ b/drivers/scsi/megaraid/mega_common.h @@ -0,0 +1,286 @@ +/* + * + * Linux MegaRAID device driver + * + * Copyright (c) 2003-2004 LSI Logic Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ * + * FILE : mega_common.h + * + * Libaray of common routine used by all low-level megaraid drivers + */ + +#ifndef _MEGA_COMMON_H_ +#define _MEGA_COMMON_H_ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/pci.h> +#include <linux/spinlock.h> +#include <linux/interrupt.h> +#include <linux/delay.h> +#include <linux/blkdev.h> +#include <linux/list.h> +#include <linux/version.h> +#include <linux/moduleparam.h> +#include <asm/semaphore.h> +#include <scsi/scsi.h> +#include <scsi/scsi_cmnd.h> +#include <scsi/scsi_device.h> +#include <scsi/scsi_host.h> + + +#define LSI_MAX_CHANNELS 16 +#define LSI_MAX_LOGICAL_DRIVES_64LD (64+1) + + +/** + * scb_t - scsi command control block + * @param ccb : command control block for individual driver + * @param list : list of control blocks + * @param gp : general purpose field for LLDs + * @param sno : all SCBs have a serial number + * @param scp : associated scsi command + * @param state : current state of scb + * @param dma_dir : direction of data transfer + * @param dma_type : transfer with sg list, buffer, or no data transfer + * @param dev_channel : actual channel on the device + * @param dev_target : actual target on the device + * @param status : completion status + * + * This is our central data structure to issue commands the each driver. + * Driver specific data structures are maintained in the ccb field. + * scb provides a field 'gp', which can be used by LLD for its own purposes + * + * dev_channel and dev_target must be initialized with the actual channel and + * target on the controller. + */ +typedef struct { + caddr_t ccb; + struct list_head list; + unsigned long gp; + unsigned int sno; + struct scsi_cmnd *scp; + uint32_t state; + uint32_t dma_direction; + uint32_t dma_type; + uint16_t dev_channel; + uint16_t dev_target; + uint32_t status; +} scb_t; + +/* + * SCB states as it transitions from one state to another + */ +#define SCB_FREE 0x0000 /* on the free list */ +#define SCB_ACTIVE 0x0001 /* off the free list */ +#define SCB_PENDQ 0x0002 /* on the pending queue */ +#define SCB_ISSUED 0x0004 /* issued - owner f/w */ +#define SCB_ABORT 0x0008 /* Got an abort for this one */ +#define SCB_RESET 0x0010 /* Got a reset for this one */ + +/* + * DMA types for scb + */ +#define MRAID_DMA_NONE 0x0000 /* no data transfer for this command */ +#define MRAID_DMA_WSG 0x0001 /* data transfer using a sg list */ +#define MRAID_DMA_WBUF 0x0002 /* data transfer using a contiguous buffer */ + + +/** + * struct adapter_t - driver's initialization structure + * @param dpc_h : tasklet handle + * @param pdev : pci configuration pointer for kernel + * @param host : pointer to host structure of mid-layer + * @param host_lock : pointer to appropriate lock + * @param lock : synchronization lock for mid-layer and driver + * @param quiescent : driver is quiescent for now. + * @param outstanding_cmds : number of commands pending in the driver + * @param kscb_list : pointer to the bulk of SCBs pointers for IO + * @param kscb_pool : pool of free scbs for IO + * @param kscb_pool_lock : lock for pool of free scbs + * @param pend_list : pending commands list + * @param pend_list_lock : exlusion lock for pending commands list + * @param completed_list : list of completed commands + * @param completed_list_lock : exclusion lock for list of completed commands + * @param sglen : max sg elements supported + * @param device_ids : to convert kernel device addr to our devices. 
+ * @param raid_device : raid adapter specific pointer + * @param max_channel : maximum channel number supported - inclusive + * @param max_target : max target supported - inclusive + * @param max_lun : max lun supported - inclusive + * @param unique_id : unique identifier for each adapter + * @param irq : IRQ for this adapter + * @param ito : internal timeout value, (-1) means no timeout + * @param ibuf : buffer to issue internal commands + * @param ibuf_dma_h : dma handle for the above buffer + * @param uscb_list : SCB pointers for user cmds, common mgmt module + * @param uscb_pool : pool of SCBs for user commands + * @param uscb_pool_lock : exclusion lock for these SCBs + * @param max_cmds : max outstanding commands + * @param fw_version : firmware version + * @param bios_version : bios version + * @param max_cdb_sz : biggest CDB size supported. + * @param ha : is high availability present - clustering + * @param init_id : initiator ID, the default value should be 7 + * @param max_sectors : max sectors per request + * @param cmd_per_lun : max outstanding commands per LUN + * @param being_detached : set when unloading, no more mgmt calls + * + * + * mraid_setup_device_map() can be called anytime after the device map is + * available and MRAID_GET_DEVICE_MAP() can be called whenever the mapping is + * required, usually from LLD's queue entry point. The formar API sets up the + * MRAID_IS_LOGICAL(adapter_t *, struct scsi_cmnd *) to find out if the + * device in question is a logical drive. + * + * quiescent flag should be set by the driver if it is not accepting more + * commands + * + * NOTE: The fields of this structures are placed to minimize cache misses + */ + +// amount of space required to store the bios and firmware version strings +#define VERSION_SIZE 16 + +typedef struct { + struct tasklet_struct dpc_h; + struct pci_dev *pdev; + struct Scsi_Host *host; + spinlock_t *host_lock; + spinlock_t lock; + uint8_t quiescent; + int outstanding_cmds; + scb_t *kscb_list; + struct list_head kscb_pool; + spinlock_t kscb_pool_lock; + struct list_head pend_list; + spinlock_t pend_list_lock; + struct list_head completed_list; + spinlock_t completed_list_lock; + uint16_t sglen; + int device_ids[LSI_MAX_CHANNELS] + [LSI_MAX_LOGICAL_DRIVES_64LD]; + caddr_t raid_device; + uint8_t max_channel; + uint16_t max_target; + uint8_t max_lun; + + uint32_t unique_id; + uint8_t irq; + uint8_t ito; + caddr_t ibuf; + dma_addr_t ibuf_dma_h; + scb_t *uscb_list; + struct list_head uscb_pool; + spinlock_t uscb_pool_lock; + int max_cmds; + uint8_t fw_version[VERSION_SIZE]; + uint8_t bios_version[VERSION_SIZE]; + uint8_t max_cdb_sz; + uint8_t ha; + uint16_t init_id; + uint16_t max_sectors; + uint16_t cmd_per_lun; + atomic_t being_detached; +} adapter_t; + +#define SCSI_FREE_LIST_LOCK(adapter) (&adapter->kscb_pool_lock) +#define USER_FREE_LIST_LOCK(adapter) (&adapter->uscb_pool_lock) +#define PENDING_LIST_LOCK(adapter) (&adapter->pend_list_lock) +#define COMPLETED_LIST_LOCK(adapter) (&adapter->completed_list_lock) + + +// conversion from scsi command +#define SCP2HOST(scp) (scp)->device->host // to host +#define SCP2HOSTDATA(scp) SCP2HOST(scp)->hostdata // to soft state +#define SCP2CHANNEL(scp) (scp)->device->channel // to channel +#define SCP2TARGET(scp) (scp)->device->id // to target +#define SCP2LUN(scp) (scp)->device->lun // to LUN + +// generic macro to convert scsi command and host to controller's soft state +#define SCSIHOST2ADAP(host) (((caddr_t *)(host->hostdata))[0]) +#define SCP2ADAPTER(scp) (adapter_t 
*)SCSIHOST2ADAP(SCP2HOST(scp)) + + +/** + * MRAID_GET_DEVICE_MAP - device ids + * @param adp - Adapter's soft state + * @param scp - mid-layer scsi command pointer + * @param p_chan - physical channel on the controller + * @param target - target id of the device or logical drive number + * @param islogical - set if the command is for the logical drive + * + * Macro to retrieve information about device class, logical or physical and + * the corresponding physical channel and target or logical drive number + **/ +#define MRAID_IS_LOGICAL(adp, scp) \ + (SCP2CHANNEL(scp) == (adp)->max_channel) ? 1 : 0 + +#define MRAID_IS_LOGICAL_SDEV(adp, sdev) \ + (sdev->channel == (adp)->max_channel) ? 1 : 0 + +#define MRAID_GET_DEVICE_MAP(adp, scp, p_chan, target, islogical) \ + /* \ + * Is the request coming for the virtual channel \ + */ \ + islogical = MRAID_IS_LOGICAL(adp, scp); \ + \ + /* \ + * Get an index into our table of drive ids mapping \ + */ \ + if (islogical) { \ + p_chan = 0xFF; \ + target = \ + (adp)->device_ids[(adp)->max_channel][SCP2TARGET(scp)]; \ + } \ + else { \ + p_chan = ((adp)->device_ids[SCP2CHANNEL(scp)] \ + [SCP2TARGET(scp)] >> 8) & 0xFF; \ + target = ((adp)->device_ids[SCP2CHANNEL(scp)] \ + [SCP2TARGET(scp)] & 0xFF); \ + } + +/* + * ### Helper routines ### + */ +#define LSI_DBGLVL mraid_debug_level // each LLD must define a global + // mraid_debug_level + +#ifdef DEBUG +#if defined (_ASSERT_PANIC) +#define ASSERT_ACTION panic +#else +#define ASSERT_ACTION printk +#endif + +#define ASSERT(expression) \ + if (!(expression)) { \ + ASSERT_ACTION("assertion failed:(%s), file: %s, line: %d:%s\n", \ + #expression, __FILE__, __LINE__, __FUNCTION__); \ + } +#else +#define ASSERT(expression) +#endif + +/* + * struct mraid_pci_blk - structure holds DMA memory block info + * @param vaddr : virtual address to a memory block + * @param dma_addr : DMA handle to a memory block + * + * This structure is filled up for the caller. It is the responsibilty of the + * caller to allocate this array big enough to store addresses for all + * requested elements + */ +struct mraid_pci_blk { + caddr_t vaddr; + dma_addr_t dma_addr; +}; + +#endif // _MEGA_COMMON_H_ + +// vim: set ts=8 sw=8 tw=78: diff --git a/drivers/scsi/megaraid/megaraid_ioctl.h b/drivers/scsi/megaraid/megaraid_ioctl.h new file mode 100644 index 000000000000..bdaee144a1c3 --- /dev/null +++ b/drivers/scsi/megaraid/megaraid_ioctl.h @@ -0,0 +1,296 @@ +/* + * + * Linux MegaRAID device driver + * + * Copyright (c) 2003-2004 LSI Logic Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * FILE : megaraid_ioctl.h + * + * Definitions to interface with user level applications + */ + +#ifndef _MEGARAID_IOCTL_H_ +#define _MEGARAID_IOCTL_H_ + +#include <linux/types.h> +#include <asm/semaphore.h> + +#include "mbox_defs.h" + +/** + * con_log() - console log routine + * @param level : indicates the severity of the message. + * @fparam mt : format string + * + * con_log displays the error messages on the console based on the current + * debug level. Also it attaches the appropriate kernel severity level with + * the message. 
+ * + * + * consolge messages debug levels + */ +#define CL_ANN 0 /* print unconditionally, announcements */ +#define CL_DLEVEL1 1 /* debug level 1, informative */ +#define CL_DLEVEL2 2 /* debug level 2, verbose */ +#define CL_DLEVEL3 3 /* debug level 3, very verbose */ + +#define con_log(level, fmt) if (LSI_DBGLVL >= level) printk fmt; + +/* + * Definitions & Declarations needed to use common management module + */ + +#define MEGAIOC_MAGIC 'm' +#define MEGAIOCCMD _IOWR(MEGAIOC_MAGIC, 0, mimd_t) + +#define MEGAIOC_QNADAP 'm' /* Query # of adapters */ +#define MEGAIOC_QDRVRVER 'e' /* Query driver version */ +#define MEGAIOC_QADAPINFO 'g' /* Query adapter information */ + +#define USCSICMD 0x80 +#define UIOC_RD 0x00001 +#define UIOC_WR 0x00002 + +#define MBOX_CMD 0x00000 +#define GET_DRIVER_VER 0x10000 +#define GET_N_ADAP 0x20000 +#define GET_ADAP_INFO 0x30000 +#define GET_CAP 0x40000 +#define GET_STATS 0x50000 +#define GET_IOCTL_VERSION 0x01 + +#define EXT_IOCTL_SIGN_SZ 16 +#define EXT_IOCTL_SIGN "$$_EXTD_IOCTL_$$" + +#define MBOX_LEGACY 0x00 /* ioctl has legacy mbox*/ +#define MBOX_HPE 0x01 /* ioctl has hpe mbox */ + +#define APPTYPE_MIMD 0x00 /* old existing apps */ +#define APPTYPE_UIOC 0x01 /* new apps using uioc */ + +#define IOCTL_ISSUE 0x00000001 /* Issue ioctl */ +#define IOCTL_ABORT 0x00000002 /* Abort previous ioctl */ + +#define DRVRTYPE_MBOX 0x00000001 /* regular mbox driver */ +#define DRVRTYPE_HPE 0x00000002 /* new hpe driver */ + +#define MKADAP(adapno) (MEGAIOC_MAGIC << 8 | (adapno) ) +#define GETADAP(mkadap) ((mkadap) ^ MEGAIOC_MAGIC << 8) + +#define MAX_DMA_POOLS 5 /* 4k, 8k, 16k, 32k, 64k*/ + + +/** + * struct uioc_t - the common ioctl packet structure + * + * @signature : Must be "$$_EXTD_IOCTL_$$" + * @mb_type : Type of the mail box (MB_LEGACY or MB_HPE) + * @app_type : Type of the issuing application (existing or new) + * @opcode : Opcode of the command + * @adapno : Adapter number + * @cmdbuf : Pointer to buffer - can point to mbox or plain data buffer + * @xferlen : xferlen for DCMD and non mailbox commands + * @data_dir : Direction of the data transfer + * @status : Status from the driver + * @reserved : reserved bytes for future expansion + * + * @user_data : user data transfer address is saved in this + * @user_data_len: length of the data buffer sent by user app + * @user_pthru : user passthru address is saves in this (null if DCMD) + * @pthru32 : kernel address passthru (allocated per kioc) + * @pthru32_h : physicall address of @pthru32 + * @list : for kioc free pool list maintenance + * @done : call back routine for llds to call when kioc is completed + * @buf_vaddr : dma pool buffer attached to kioc for data transfer + * @buf_paddr : physical address of the dma pool buffer + * @pool_index : index of the dma pool that @buf_vaddr is taken from + * @free_buf : indicates if buffer needs to be freed after kioc completes + * + * Note : All LSI drivers understand only this packet. Any other + * : format sent by applications would be converted to this. 
+ */ +typedef struct uioc { + +/* User Apps: */ + + uint8_t signature[EXT_IOCTL_SIGN_SZ]; + uint16_t mb_type; + uint16_t app_type; + uint32_t opcode; + uint32_t adapno; + uint64_t cmdbuf; + uint32_t xferlen; + uint32_t data_dir; + int32_t status; + uint8_t reserved[128]; + +/* Driver Data: */ + void __user * user_data; + uint32_t user_data_len; + mraid_passthru_t __user *user_pthru; + + mraid_passthru_t *pthru32; + dma_addr_t pthru32_h; + + struct list_head list; + void (*done)(struct uioc*); + + caddr_t buf_vaddr; + dma_addr_t buf_paddr; + int8_t pool_index; + uint8_t free_buf; + + uint8_t timedout; + +} __attribute__ ((aligned(1024),packed)) uioc_t; + + +/** + * struct mraid_hba_info - information about the controller + * + * @param pci_vendor_id : PCI vendor id + * @param pci_device_id : PCI device id + * @param subsystem_vendor_id : PCI subsystem vendor id + * @param subsystem_device_id : PCI subsystem device id + * @param baseport : base port of hba memory + * @param pci_bus : PCI bus + * @param pci_dev_fn : PCI device/function values + * @param irq : interrupt vector for the device + * + * Extended information of 256 bytes about the controller. Align on the single + * byte boundary so that 32-bit applications can be run on 64-bit platform + * drivers withoug re-compilation. + * NOTE: reduce the number of reserved bytes whenever new field are added, so + * that total size of the structure remains 256 bytes. + */ +typedef struct mraid_hba_info { + + uint16_t pci_vendor_id; + uint16_t pci_device_id; + uint16_t subsys_vendor_id; + uint16_t subsys_device_id; + + uint64_t baseport; + uint8_t pci_bus; + uint8_t pci_dev_fn; + uint8_t pci_slot; + uint8_t irq; + + uint32_t unique_id; + uint32_t host_no; + + uint8_t num_ldrv; +} __attribute__ ((aligned(256), packed)) mraid_hba_info_t; + + +/** + * mcontroller : adapter info structure for old mimd_t apps + * + * @base : base address + * @irq : irq number + * @numldrv : number of logical drives + * @pcibus : pci bus + * @pcidev : pci device + * @pcifun : pci function + * @pciid : pci id + * @pcivendor : vendor id + * @pcislot : slot number + * @uid : unique id + */ +typedef struct mcontroller { + + uint64_t base; + uint8_t irq; + uint8_t numldrv; + uint8_t pcibus; + uint16_t pcidev; + uint8_t pcifun; + uint16_t pciid; + uint16_t pcivendor; + uint8_t pcislot; + uint32_t uid; + +} __attribute__ ((packed)) mcontroller_t; + + +/** + * mm_dmapool_t : Represents one dma pool with just one buffer + * + * @vaddr : Virtual address + * @paddr : DMA physicall address + * @bufsize : In KB - 4 = 4k, 8 = 8k etc. 
+ * @handle : Handle to the dma pool + * @lock : lock to synchronize access to the pool + * @in_use : If pool already in use, attach new block + */ +typedef struct mm_dmapool { + caddr_t vaddr; + dma_addr_t paddr; + uint32_t buf_size; + struct dma_pool *handle; + spinlock_t lock; + uint8_t in_use; +} mm_dmapool_t; + + +/** + * mraid_mmadp_t: Structure that drivers pass during (un)registration + * + * @unique_id : Any unique id (usually PCI bus+dev+fn) + * @drvr_type : megaraid or hpe (DRVRTYPE_MBOX or DRVRTYPE_HPE) + * @drv_data : Driver specific; not touched by the common module + * @timeout : timeout for issued kiocs + * @max_kioc : Maximum ioctl packets acceptable by the lld + * @pdev : pci dev; used for allocating dma'ble memory + * @issue_uioc : Driver supplied routine to issue uioc_t commands + * : issue_uioc(drvr_data, kioc, ISSUE/ABORT, uioc_done) + * @quiescent : flag to indicate if ioctl can be issued to this adp + * @list : attach with the global list of adapters + * @kioc_list : block of mem for @max_kioc number of kiocs + * @kioc_pool : pool of free kiocs + * @kioc_pool_lock : protection for free pool + * @kioc_semaphore : so as not to exceed @max_kioc parallel ioctls + * @mbox_list : block of mem for @max_kioc number of mboxes + * @pthru_dma_pool : DMA pool to allocate passthru packets + * @dma_pool_list : array of dma pools + */ + +typedef struct mraid_mmadp { + +/* Filled by driver */ + + uint32_t unique_id; + uint32_t drvr_type; + unsigned long drvr_data; + uint16_t timeout; + uint8_t max_kioc; + + struct pci_dev *pdev; + + int(*issue_uioc)(unsigned long, uioc_t *, uint32_t); + +/* Maintained by common module */ + uint32_t quiescent; + + struct list_head list; + uioc_t *kioc_list; + struct list_head kioc_pool; + spinlock_t kioc_pool_lock; + struct semaphore kioc_semaphore; + + mbox64_t *mbox_list; + struct dma_pool *pthru_dma_pool; + mm_dmapool_t dma_pool_list[MAX_DMA_POOLS]; + +} mraid_mmadp_t; + +int mraid_mm_register_adp(mraid_mmadp_t *); +int mraid_mm_unregister_adp(uint32_t); +uint32_t mraid_mm_adapter_app_handle(uint32_t); + +#endif /* _MEGARAID_IOCTL_H_ */ diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c new file mode 100644 index 000000000000..138fa4815833 --- /dev/null +++ b/drivers/scsi/megaraid/megaraid_mbox.c @@ -0,0 +1,4276 @@ +/* + * + * Linux MegaRAID device driver + * + * Copyright (c) 2003-2004 LSI Logic Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ * + * FILE : megaraid_mbox.c + * Version : v2.20.4.5 (Feb 03 2005) + * + * Authors: + * Atul Mukker <Atul.Mukker@lsil.com> + * Sreenivas Bagalkote <Sreenivas.Bagalkote@lsil.com> + * Manoj Jose <Manoj.Jose@lsil.com> + * + * List of supported controllers + * + * OEM Product Name VID DID SSVID SSID + * --- ------------ --- --- ---- ---- + * Dell PERC3/QC 101E 1960 1028 0471 + * Dell PERC3/DC 101E 1960 1028 0493 + * Dell PERC3/SC 101E 1960 1028 0475 + * Dell PERC3/Di 1028 1960 1028 0123 + * Dell PERC4/SC 1000 1960 1028 0520 + * Dell PERC4/DC 1000 1960 1028 0518 + * Dell PERC4/QC 1000 0407 1028 0531 + * Dell PERC4/Di 1028 000F 1028 014A + * Dell PERC 4e/Si 1028 0013 1028 016c + * Dell PERC 4e/Di 1028 0013 1028 016d + * Dell PERC 4e/Di 1028 0013 1028 016e + * Dell PERC 4e/Di 1028 0013 1028 016f + * Dell PERC 4e/Di 1028 0013 1028 0170 + * Dell PERC 4e/DC 1000 0408 1028 0002 + * Dell PERC 4e/SC 1000 0408 1028 0001 + * + * + * LSI MegaRAID SCSI 320-0 1000 1960 1000 A520 + * LSI MegaRAID SCSI 320-1 1000 1960 1000 0520 + * LSI MegaRAID SCSI 320-2 1000 1960 1000 0518 + * LSI MegaRAID SCSI 320-0X 1000 0407 1000 0530 + * LSI MegaRAID SCSI 320-2X 1000 0407 1000 0532 + * LSI MegaRAID SCSI 320-4X 1000 0407 1000 0531 + * LSI MegaRAID SCSI 320-1E 1000 0408 1000 0001 + * LSI MegaRAID SCSI 320-2E 1000 0408 1000 0002 + * LSI MegaRAID SATA 150-4 1000 1960 1000 4523 + * LSI MegaRAID SATA 150-6 1000 1960 1000 0523 + * LSI MegaRAID SATA 300-4X 1000 0409 1000 3004 + * LSI MegaRAID SATA 300-8X 1000 0409 1000 3008 + * + * INTEL RAID Controller SRCU42X 1000 0407 8086 0532 + * INTEL RAID Controller SRCS16 1000 1960 8086 0523 + * INTEL RAID Controller SRCU42E 1000 0408 8086 0002 + * INTEL RAID Controller SRCZCRX 1000 0407 8086 0530 + * INTEL RAID Controller SRCS28X 1000 0409 8086 3008 + * INTEL RAID Controller SROMBU42E 1000 0408 8086 3431 + * INTEL RAID Controller SROMBU42E 1000 0408 8086 3499 + * INTEL RAID Controller SRCU51L 1000 1960 8086 0520 + * + * FSC MegaRAID PCI Express ROMB 1000 0408 1734 1065 + * + * ACER MegaRAID ROMB-2E 1000 0408 1025 004D + * + * NEC MegaRAID PCI Express ROMB 1000 0408 1033 8287 + * + * For history of changes, see Documentation/ChangeLog.megaraid + */ + +#include "megaraid_mbox.h" + +static int megaraid_init(void); +static void megaraid_exit(void); + +static int megaraid_probe_one(struct pci_dev*, const struct pci_device_id *); +static void megaraid_detach_one(struct pci_dev *); +static void megaraid_mbox_shutdown(struct device *); + +static int megaraid_io_attach(adapter_t *); +static void megaraid_io_detach(adapter_t *); + +static int megaraid_init_mbox(adapter_t *); +static void megaraid_fini_mbox(adapter_t *); + +static int megaraid_alloc_cmd_packets(adapter_t *); +static void megaraid_free_cmd_packets(adapter_t *); + +static int megaraid_mbox_setup_dma_pools(adapter_t *); +static void megaraid_mbox_teardown_dma_pools(adapter_t *); + +static int megaraid_sysfs_alloc_resources(adapter_t *); +static void megaraid_sysfs_free_resources(adapter_t *); + +static int megaraid_abort_handler(struct scsi_cmnd *); +static int megaraid_reset_handler(struct scsi_cmnd *); + +static int mbox_post_sync_cmd(adapter_t *, uint8_t []); +static int mbox_post_sync_cmd_fast(adapter_t *, uint8_t []); +static int megaraid_busywait_mbox(mraid_device_t *); +static int megaraid_mbox_product_info(adapter_t *); +static int megaraid_mbox_extended_cdb(adapter_t *); +static int megaraid_mbox_support_ha(adapter_t *, uint16_t *); +static int megaraid_mbox_support_random_del(adapter_t *); +static int 
megaraid_mbox_get_max_sg(adapter_t *); +static void megaraid_mbox_enum_raid_scsi(adapter_t *); +static void megaraid_mbox_flush_cache(adapter_t *); + +static void megaraid_mbox_display_scb(adapter_t *, scb_t *); +static void megaraid_mbox_setup_device_map(adapter_t *); + +static int megaraid_queue_command(struct scsi_cmnd *, + void (*)(struct scsi_cmnd *)); +static scb_t *megaraid_mbox_build_cmd(adapter_t *, struct scsi_cmnd *, int *); +static void megaraid_mbox_runpendq(adapter_t *, scb_t *); +static void megaraid_mbox_prepare_pthru(adapter_t *, scb_t *, + struct scsi_cmnd *); +static void megaraid_mbox_prepare_epthru(adapter_t *, scb_t *, + struct scsi_cmnd *); + +static irqreturn_t megaraid_isr(int, void *, struct pt_regs *); + +static void megaraid_mbox_dpc(unsigned long); + +static ssize_t megaraid_sysfs_show_app_hndl(struct class_device *, char *); +static ssize_t megaraid_sysfs_show_ldnum(struct device *, char *); + +static int megaraid_cmm_register(adapter_t *); +static int megaraid_cmm_unregister(adapter_t *); +static int megaraid_mbox_mm_handler(unsigned long, uioc_t *, uint32_t); +static int megaraid_mbox_mm_command(adapter_t *, uioc_t *); +static void megaraid_mbox_mm_done(adapter_t *, scb_t *); +static int gather_hbainfo(adapter_t *, mraid_hba_info_t *); +static int wait_till_fw_empty(adapter_t *); + + + +MODULE_AUTHOR("LSI Logic Corporation"); +MODULE_DESCRIPTION("LSI Logic MegaRAID Mailbox Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(MEGARAID_VERSION); + +/* + * ### modules parameters for driver ### + */ + +/** + * Set to enable driver to expose unconfigured disk to kernel + */ +static int megaraid_expose_unconf_disks = 0; +module_param_named(unconf_disks, megaraid_expose_unconf_disks, int, 0); +MODULE_PARM_DESC(unconf_disks, + "Set to expose unconfigured disks to kernel (default=0)"); + +/** + * driver wait time if the adapter's mailbox is busy + */ +static unsigned int max_mbox_busy_wait = MBOX_BUSY_WAIT; +module_param_named(busy_wait, max_mbox_busy_wait, int, 0); +MODULE_PARM_DESC(busy_wait, + "Max wait for mailbox in microseconds if busy (default=10)"); + +/** + * number of sectors per IO command + */ +static unsigned int megaraid_max_sectors = MBOX_MAX_SECTORS; +module_param_named(max_sectors, megaraid_max_sectors, int, 0); +MODULE_PARM_DESC(max_sectors, + "Maximum number of sectors per IO command (default=128)"); + +/** + * number of commands per logical unit + */ +static unsigned int megaraid_cmd_per_lun = MBOX_DEF_CMD_PER_LUN; +module_param_named(cmd_per_lun, megaraid_cmd_per_lun, int, 0); +MODULE_PARM_DESC(cmd_per_lun, + "Maximum number of commands per logical unit (default=64)"); + + +/** + * Fast driver load option, skip scanning for physical devices during load. + * This would result in non-disk devices being skipped during driver load + * time. These can be later added though, using /proc/scsi/scsi + */ +static unsigned int megaraid_fast_load = 0; +module_param_named(fast_load, megaraid_fast_load, int, 0); +MODULE_PARM_DESC(fast_load, + "Faster loading of the driver, skips physical devices! (default=0)"); + + +/** + * mraid_debug level - threshold for amount of information to be displayed by + * the driver. This level can be changed through modules parameters, ioctl or + * sysfs/proc interface. By default, print the announcement messages only. 
+ */ +int mraid_debug_level = CL_ANN; +module_param_named(debug_level, mraid_debug_level, int, 0); +MODULE_PARM_DESC(debug_level, "Debug level for driver (default=0)"); + +/* + * ### global data ### + */ +static uint8_t megaraid_mbox_version[8] = + { 0x02, 0x20, 0x04, 0x05, 2, 3, 20, 5 }; + + +/* + * PCI table for all supported controllers. + */ +static struct pci_device_id pci_id_table_g[] = { + { + PCI_VENDOR_ID_DELL, + PCI_DEVICE_ID_PERC4_DI_DISCOVERY, + PCI_VENDOR_ID_DELL, + PCI_SUBSYS_ID_PERC4_DI_DISCOVERY, + }, + { + PCI_VENDOR_ID_LSI_LOGIC, + PCI_DEVICE_ID_PERC4_SC, + PCI_VENDOR_ID_DELL, + PCI_SUBSYS_ID_PERC4_SC, + }, + { + PCI_VENDOR_ID_LSI_LOGIC, + PCI_DEVICE_ID_PERC4_DC, + PCI_VENDOR_ID_DELL, + PCI_SUBSYS_ID_PERC4_DC, + }, + { + PCI_VENDOR_ID_LSI_LOGIC, + PCI_DEVICE_ID_PERC4_QC, + PCI_VENDOR_ID_DELL, + PCI_SUBSYS_ID_PERC4_QC, + }, + { + PCI_VENDOR_ID_DELL, + PCI_DEVICE_ID_PERC4_DI_EVERGLADES, + PCI_VENDOR_ID_DELL, + PCI_SUBSYS_ID_PERC4_DI_EVERGLADES, + }, + { + PCI_VENDOR_ID_DELL, + PCI_DEVICE_ID_PERC4E_SI_BIGBEND, + PCI_VENDOR_ID_DELL, + PCI_SUBSYS_ID_PERC4E_SI_BIGBEND, + }, + { + PCI_VENDOR_ID_DELL, + PCI_DEVICE_ID_PERC4E_DI_KOBUK, + PCI_VENDOR_ID_DELL, + PCI_SUBSYS_ID_PERC4E_DI_KOBUK, + }, + { + PCI_VENDOR_ID_DELL, + PCI_DEVICE_ID_PERC4E_DI_CORVETTE, + PCI_VENDOR_ID_DELL, + PCI_SUBSYS_ID_PERC4E_DI_CORVETTE, + }, + { + PCI_VENDOR_ID_DELL, + PCI_DEVICE_ID_PERC4E_DI_EXPEDITION, + PCI_VENDOR_ID_DELL, + PCI_SUBSYS_ID_PERC4E_DI_EXPEDITION, + }, + { + PCI_VENDOR_ID_DELL, + PCI_DEVICE_ID_PERC4E_DI_GUADALUPE, + PCI_VENDOR_ID_DELL, + PCI_SUBSYS_ID_PERC4E_DI_GUADALUPE, + }, + { + PCI_VENDOR_ID_LSI_LOGIC, + PCI_DEVICE_ID_PERC4E_DC_320_2E, + PCI_VENDOR_ID_DELL, + PCI_SUBSYS_ID_PERC4E_DC_320_2E, + }, + { + PCI_VENDOR_ID_LSI_LOGIC, + PCI_DEVICE_ID_PERC4E_SC_320_1E, + PCI_VENDOR_ID_DELL, + PCI_SUBSYS_ID_PERC4E_SC_320_1E, + }, + { + PCI_VENDOR_ID_AMI, + PCI_DEVICE_ID_AMI_MEGARAID3, + PCI_VENDOR_ID_DELL, + PCI_SUBSYS_ID_PERC3_QC, + }, + { + PCI_VENDOR_ID_AMI, + PCI_DEVICE_ID_AMI_MEGARAID3, + PCI_VENDOR_ID_DELL, + PCI_SUBSYS_ID_PERC3_DC, + }, + { + PCI_VENDOR_ID_AMI, + PCI_DEVICE_ID_AMI_MEGARAID3, + PCI_VENDOR_ID_DELL, + PCI_SUBSYS_ID_PERC3_SC, + }, + { + PCI_VENDOR_ID_AMI, + PCI_DEVICE_ID_AMI_MEGARAID3, + PCI_VENDOR_ID_AMI, + PCI_SUBSYS_ID_PERC3_SC, + }, + { + PCI_VENDOR_ID_AMI, + PCI_DEVICE_ID_AMI_MEGARAID3, + PCI_VENDOR_ID_AMI, + PCI_SUBSYS_ID_PERC3_DC, + }, + { + PCI_VENDOR_ID_LSI_LOGIC, + PCI_DEVICE_ID_MEGARAID_SCSI_320_0, + PCI_VENDOR_ID_LSI_LOGIC, + PCI_SUBSYS_ID_MEGARAID_SCSI_320_0, + }, + { + PCI_VENDOR_ID_LSI_LOGIC, + PCI_DEVICE_ID_MEGARAID_SCSI_320_1, + PCI_VENDOR_ID_LSI_LOGIC, + PCI_SUBSYS_ID_MEGARAID_SCSI_320_1, + }, + { + PCI_VENDOR_ID_LSI_LOGIC, + PCI_DEVICE_ID_MEGARAID_SCSI_320_2, + PCI_VENDOR_ID_LSI_LOGIC, + PCI_SUBSYS_ID_MEGARAID_SCSI_320_2, + }, + { + PCI_VENDOR_ID_LSI_LOGIC, + PCI_DEVICE_ID_MEGARAID_SCSI_320_0x, + PCI_VENDOR_ID_LSI_LOGIC, + PCI_SUBSYS_ID_MEGARAID_SCSI_320_0x, + }, + { + PCI_VENDOR_ID_LSI_LOGIC, + PCI_DEVICE_ID_MEGARAID_SCSI_320_2x, + PCI_VENDOR_ID_LSI_LOGIC, + PCI_SUBSYS_ID_MEGARAID_SCSI_320_2x, + }, + { + PCI_VENDOR_ID_LSI_LOGIC, + PCI_DEVICE_ID_MEGARAID_SCSI_320_4x, + PCI_VENDOR_ID_LSI_LOGIC, + PCI_SUBSYS_ID_MEGARAID_SCSI_320_4x, + }, + { + PCI_VENDOR_ID_LSI_LOGIC, + PCI_DEVICE_ID_MEGARAID_SCSI_320_1E, + PCI_VENDOR_ID_LSI_LOGIC, + PCI_SUBSYS_ID_MEGARAID_SCSI_320_1E, + }, + { + PCI_VENDOR_ID_LSI_LOGIC, + PCI_DEVICE_ID_MEGARAID_SCSI_320_2E, + PCI_VENDOR_ID_LSI_LOGIC, + PCI_SUBSYS_ID_MEGARAID_SCSI_320_2E, + }, + { + PCI_VENDOR_ID_LSI_LOGIC, + 
PCI_DEVICE_ID_MEGARAID_I4_133_RAID, + PCI_VENDOR_ID_LSI_LOGIC, + PCI_SUBSYS_ID_MEGARAID_I4_133_RAID, + }, + { + PCI_VENDOR_ID_LSI_LOGIC, + PCI_DEVICE_ID_MEGARAID_SATA_150_4, + PCI_VENDOR_ID_LSI_LOGIC, + PCI_SUBSYS_ID_MEGARAID_SATA_150_4, + }, + { + PCI_VENDOR_ID_LSI_LOGIC, + PCI_DEVICE_ID_MEGARAID_SATA_150_6, + PCI_VENDOR_ID_LSI_LOGIC, + PCI_SUBSYS_ID_MEGARAID_SATA_150_6, + }, + { + PCI_VENDOR_ID_LSI_LOGIC, + PCI_DEVICE_ID_MEGARAID_SATA_300_4x, + PCI_VENDOR_ID_LSI_LOGIC, + PCI_SUBSYS_ID_MEGARAID_SATA_300_4x, + }, + { + PCI_VENDOR_ID_LSI_LOGIC, + PCI_DEVICE_ID_MEGARAID_SATA_300_8x, + PCI_VENDOR_ID_LSI_LOGIC, + PCI_SUBSYS_ID_MEGARAID_SATA_300_8x, + }, + { + PCI_VENDOR_ID_LSI_LOGIC, + PCI_DEVICE_ID_INTEL_RAID_SRCU42X, + PCI_VENDOR_ID_INTEL, + PCI_SUBSYS_ID_INTEL_RAID_SRCU42X, + }, + { + PCI_VENDOR_ID_LSI_LOGIC, + PCI_DEVICE_ID_INTEL_RAID_SRCS16, + PCI_VENDOR_ID_INTEL, + PCI_SUBSYS_ID_INTEL_RAID_SRCS16, + }, + { + PCI_VENDOR_ID_LSI_LOGIC, + PCI_DEVICE_ID_INTEL_RAID_SRCU42E, + PCI_VENDOR_ID_INTEL, + PCI_SUBSYS_ID_INTEL_RAID_SRCU42E, + }, + { + PCI_VENDOR_ID_LSI_LOGIC, + PCI_DEVICE_ID_INTEL_RAID_SRCZCRX, + PCI_VENDOR_ID_INTEL, + PCI_SUBSYS_ID_INTEL_RAID_SRCZCRX, + }, + { + PCI_VENDOR_ID_LSI_LOGIC, + PCI_DEVICE_ID_INTEL_RAID_SRCS28X, + PCI_VENDOR_ID_INTEL, + PCI_SUBSYS_ID_INTEL_RAID_SRCS28X, + }, + { + PCI_VENDOR_ID_LSI_LOGIC, + PCI_DEVICE_ID_INTEL_RAID_SROMBU42E_ALIEF, + PCI_VENDOR_ID_INTEL, + PCI_SUBSYS_ID_INTEL_RAID_SROMBU42E_ALIEF, + }, + { + PCI_VENDOR_ID_LSI_LOGIC, + PCI_DEVICE_ID_INTEL_RAID_SROMBU42E_HARWICH, + PCI_VENDOR_ID_INTEL, + PCI_SUBSYS_ID_INTEL_RAID_SROMBU42E_HARWICH, + }, + { + PCI_VENDOR_ID_LSI_LOGIC, + PCI_DEVICE_ID_INTEL_RAID_SRCU41L_LAKE_SHETEK, + PCI_VENDOR_ID_INTEL, + PCI_SUBSYS_ID_INTEL_RAID_SRCU41L_LAKE_SHETEK, + }, + { + PCI_VENDOR_ID_LSI_LOGIC, + PCI_DEVICE_ID_FSC_MEGARAID_PCI_EXPRESS_ROMB, + PCI_SUBSYS_ID_FSC, + PCI_SUBSYS_ID_FSC_MEGARAID_PCI_EXPRESS_ROMB, + }, + { + PCI_VENDOR_ID_LSI_LOGIC, + PCI_DEVICE_ID_MEGARAID_ACER_ROMB_2E, + PCI_VENDOR_ID_AI, + PCI_SUBSYS_ID_MEGARAID_ACER_ROMB_2E, + }, + { + PCI_VENDOR_ID_LSI_LOGIC, + PCI_DEVICE_ID_MEGARAID_NEC_ROMB_2E, + PCI_VENDOR_ID_NEC, + PCI_SUBSYS_ID_MEGARAID_NEC_ROMB_2E, + }, + {0} /* Terminating entry */ +}; +MODULE_DEVICE_TABLE(pci, pci_id_table_g); + + +static struct pci_driver megaraid_pci_driver_g = { + .name = "megaraid", + .id_table = pci_id_table_g, + .probe = megaraid_probe_one, + .remove = __devexit_p(megaraid_detach_one), + .driver = { + .shutdown = megaraid_mbox_shutdown, + } +}; + + + +// definitions for the device attributes for exporting logical drive number +// for a scsi address (Host, Channel, Id, Lun) + +CLASS_DEVICE_ATTR(megaraid_mbox_app_hndl, S_IRUSR, megaraid_sysfs_show_app_hndl, + NULL); + +// Host template initializer for megaraid mbox sysfs device attributes +static struct class_device_attribute *megaraid_shost_attrs[] = { + &class_device_attr_megaraid_mbox_app_hndl, + NULL, +}; + + +DEVICE_ATTR(megaraid_mbox_ld, S_IRUSR, megaraid_sysfs_show_ldnum, NULL); + +// Host template initializer for megaraid mbox sysfs device attributes +static struct device_attribute *megaraid_sdev_attrs[] = { + &dev_attr_megaraid_mbox_ld, + NULL, +}; + + +/* + * Scsi host template for megaraid unified driver + */ +static struct scsi_host_template megaraid_template_g = { + .module = THIS_MODULE, + .name = "LSI Logic MegaRAID driver", + .proc_name = "megaraid", + .queuecommand = megaraid_queue_command, + .eh_abort_handler = megaraid_abort_handler, + .eh_device_reset_handler = megaraid_reset_handler, + 
.eh_bus_reset_handler = megaraid_reset_handler, + .eh_host_reset_handler = megaraid_reset_handler, + .use_clustering = ENABLE_CLUSTERING, + .sdev_attrs = megaraid_sdev_attrs, + .shost_attrs = megaraid_shost_attrs, +}; + + +/** + * megaraid_init - module load hook + * + * We register ourselves as hotplug enabled module and let PCI subsystem + * discover our adaters + **/ +static int __init +megaraid_init(void) +{ + int rval; + + // Announce the driver version + con_log(CL_ANN, (KERN_INFO "megaraid: %s %s\n", MEGARAID_VERSION, + MEGARAID_EXT_VERSION)); + + // check validity of module parameters + if (megaraid_cmd_per_lun > MBOX_MAX_SCSI_CMDS) { + + con_log(CL_ANN, (KERN_WARNING + "megaraid mailbox: max commands per lun reset to %d\n", + MBOX_MAX_SCSI_CMDS)); + + megaraid_cmd_per_lun = MBOX_MAX_SCSI_CMDS; + } + + + // register as a PCI hot-plug driver module + if ((rval = pci_module_init(&megaraid_pci_driver_g))) { + con_log(CL_ANN, (KERN_WARNING + "megaraid: could not register hotplug support.\n")); + } + + return rval; +} + + +/** + * megaraid_exit - driver unload entry point + * + * We simply unwrap the megaraid_init routine here + */ +static void __exit +megaraid_exit(void) +{ + con_log(CL_DLEVEL1, (KERN_NOTICE "megaraid: unloading framework\n")); + + // unregister as PCI hotplug driver + pci_unregister_driver(&megaraid_pci_driver_g); + + return; +} + + +/** + * megaraid_probe_one - PCI hotplug entry point + * @param pdev : handle to this controller's PCI configuration space + * @param id : pci device id of the class of controllers + * + * This routine should be called whenever a new adapter is detected by the + * PCI hotplug susbsytem. + **/ +static int __devinit +megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) +{ + adapter_t *adapter; + + + // detected a new controller + con_log(CL_ANN, (KERN_INFO + "megaraid: probe new device %#4.04x:%#4.04x:%#4.04x:%#4.04x: ", + pdev->vendor, pdev->device, pdev->subsystem_vendor, + pdev->subsystem_device)); + + con_log(CL_ANN, ("bus %d:slot %d:func %d\n", pdev->bus->number, + PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn))); + + if (pci_enable_device(pdev)) { + con_log(CL_ANN, (KERN_WARNING + "megaraid: pci_enable_device failed\n")); + + return -ENODEV; + } + + // Enable bus-mastering on this controller + pci_set_master(pdev); + + // Allocate the per driver initialization structure + adapter = kmalloc(sizeof(adapter_t), GFP_KERNEL); + + if (adapter == NULL) { + con_log(CL_ANN, (KERN_WARNING + "megaraid: out of memory, %s %d.\n", __FUNCTION__, __LINE__)); + + goto out_probe_one; + } + memset(adapter, 0, sizeof(adapter_t)); + + + // set up PCI related soft state and other pre-known parameters + adapter->unique_id = pdev->bus->number << 8 | pdev->devfn; + adapter->irq = pdev->irq; + adapter->pdev = pdev; + + atomic_set(&adapter->being_detached, 0); + + // Setup the default DMA mask. This would be changed later on + // depending on hardware capabilities + if (pci_set_dma_mask(adapter->pdev, 0xFFFFFFFF) != 0) { + + con_log(CL_ANN, (KERN_WARNING + "megaraid: pci_set_dma_mask failed:%d\n", __LINE__)); + + goto out_free_adapter; + } + + + // Initialize the synchronization lock for kernel and LLD + spin_lock_init(&adapter->lock); + adapter->host_lock = &adapter->lock; + + + // Initialize the command queues: the list of free SCBs and the list + // of pending SCBs. 
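+	// Editor's note (illustrative summary, not part of the original
+	// source): the driver keeps three SCB lists, each guarded by its
+	// own spinlock --
+	//
+	//	kscb_pool       free SCBs            SCSI_FREE_LIST_LOCK(adapter)
+	//	pend_list       built, not issued    PENDING_LIST_LOCK(adapter)
+	//	completed_list  done by firmware     COMPLETED_LIST_LOCK(adapter)
+	//
+	// An SCB moves kscb_pool -> pend_list -> firmware -> completed_list
+	// and back to kscb_pool over its lifetime.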
+ INIT_LIST_HEAD(&adapter->kscb_pool); + spin_lock_init(SCSI_FREE_LIST_LOCK(adapter)); + + INIT_LIST_HEAD(&adapter->pend_list); + spin_lock_init(PENDING_LIST_LOCK(adapter)); + + INIT_LIST_HEAD(&adapter->completed_list); + spin_lock_init(COMPLETED_LIST_LOCK(adapter)); + + + // Start the mailbox based controller + if (megaraid_init_mbox(adapter) != 0) { + con_log(CL_ANN, (KERN_WARNING + "megaraid: maibox adapter did not initialize\n")); + + goto out_free_adapter; + } + + // Register with LSI Common Management Module + if (megaraid_cmm_register(adapter) != 0) { + + con_log(CL_ANN, (KERN_WARNING + "megaraid: could not register with management module\n")); + + goto out_fini_mbox; + } + + // setup adapter handle in PCI soft state + pci_set_drvdata(pdev, adapter); + + // attach with scsi mid-layer + if (megaraid_io_attach(adapter) != 0) { + + con_log(CL_ANN, (KERN_WARNING "megaraid: io attach failed\n")); + + goto out_cmm_unreg; + } + + return 0; + +out_cmm_unreg: + pci_set_drvdata(pdev, NULL); + megaraid_cmm_unregister(adapter); +out_fini_mbox: + megaraid_fini_mbox(adapter); +out_free_adapter: + kfree(adapter); +out_probe_one: + pci_disable_device(pdev); + + return -ENODEV; +} + + +/** + * megaraid_detach_one - release the framework resources and call LLD release + * routine + * @param pdev : handle for our PCI cofiguration space + * + * This routine is called during driver unload. We free all the allocated + * resources and call the corresponding LLD so that it can also release all + * its resources. + * + * This routine is also called from the PCI hotplug system + **/ +static void +megaraid_detach_one(struct pci_dev *pdev) +{ + adapter_t *adapter; + struct Scsi_Host *host; + + + // Start a rollback on this adapter + adapter = pci_get_drvdata(pdev); + + if (!adapter) { + con_log(CL_ANN, (KERN_CRIT + "megaraid: Invalid detach on %#4.04x:%#4.04x:%#4.04x:%#4.04x\n", + pdev->vendor, pdev->device, pdev->subsystem_vendor, + pdev->subsystem_device)); + + return; + } + else { + con_log(CL_ANN, (KERN_NOTICE + "megaraid: detaching device %#4.04x:%#4.04x:%#4.04x:%#4.04x\n", + pdev->vendor, pdev->device, pdev->subsystem_vendor, + pdev->subsystem_device)); + } + + + host = adapter->host; + + // do not allow any more requests from the management module for this + // adapter. + // FIXME: How do we account for the request which might still be + // pending with us? + atomic_set(&adapter->being_detached, 1); + + // detach from the IO sub-system + megaraid_io_detach(adapter); + + // reset the device state in the PCI structure. We check this + // condition when we enter here. If the device state is NULL, + // that would mean the device has already been removed + pci_set_drvdata(pdev, NULL); + + // Unregister from common management module + // + // FIXME: this must return success or failure for conditions if there + // is a command pending with LLD or not. 
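+	// Editor's note: the teardown below mirrors the probe path in
+	// reverse order -- unregister from the common management module,
+	// finalize the mailbox hardware, free the adapter soft state, drop
+	// the host reference and finally disable the PCI device.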
+ megaraid_cmm_unregister(adapter); + + // finalize the mailbox based controller and release all resources + megaraid_fini_mbox(adapter); + + kfree(adapter); + + scsi_host_put(host); + + pci_disable_device(pdev); + + return; +} + + +/** + * megaraid_mbox_shutdown - PCI shutdown for megaraid HBA + * @param device : generice driver model device + * + * Shutdown notification, perform flush cache + */ +static void +megaraid_mbox_shutdown(struct device *device) +{ + adapter_t *adapter = pci_get_drvdata(to_pci_dev(device)); + static int counter; + + if (!adapter) { + con_log(CL_ANN, (KERN_WARNING + "megaraid: null device in shutdown\n")); + return; + } + + // flush caches now + con_log(CL_ANN, (KERN_INFO "megaraid: flushing adapter %d...", + counter++)); + + megaraid_mbox_flush_cache(adapter); + + con_log(CL_ANN, ("done\n")); +} + + +/** + * megaraid_io_attach - attach a device with the IO subsystem + * @param adapter : controller's soft state + * + * Attach this device with the IO subsystem + **/ +static int +megaraid_io_attach(adapter_t *adapter) +{ + struct Scsi_Host *host; + + // Initialize SCSI Host structure + host = scsi_host_alloc(&megaraid_template_g, 8); + if (!host) { + con_log(CL_ANN, (KERN_WARNING + "megaraid mbox: scsi_register failed\n")); + + return -1; + } + + SCSIHOST2ADAP(host) = (caddr_t)adapter; + adapter->host = host; + + // export the parameters required by the mid-layer + scsi_assign_lock(host, adapter->host_lock); + scsi_set_device(host, &adapter->pdev->dev); + + host->irq = adapter->irq; + host->unique_id = adapter->unique_id; + host->can_queue = adapter->max_cmds; + host->this_id = adapter->init_id; + host->sg_tablesize = adapter->sglen; + host->max_sectors = adapter->max_sectors; + host->cmd_per_lun = adapter->cmd_per_lun; + host->max_channel = adapter->max_channel; + host->max_id = adapter->max_target; + host->max_lun = adapter->max_lun; + + + // notify mid-layer about the new controller + if (scsi_add_host(host, &adapter->pdev->dev)) { + + con_log(CL_ANN, (KERN_WARNING + "megaraid mbox: scsi_add_host failed\n")); + + scsi_host_put(host); + + return -1; + } + + scsi_scan_host(host); + + return 0; +} + + +/** + * megaraid_io_detach - detach a device from the IO subsystem + * @param adapter : controller's soft state + * + * Detach this device from the IO subsystem + **/ +static void +megaraid_io_detach(adapter_t *adapter) +{ + struct Scsi_Host *host; + + con_log(CL_DLEVEL1, (KERN_INFO "megaraid: io detach\n")); + + host = adapter->host; + + scsi_remove_host(host); + + return; +} + + +/* + * START: Mailbox Low Level Driver + * + * This is section specific to the single mailbox based controllers + */ + +/** + * megaraid_init_mbox - initialize controller + * @param adapter - our soft state + * + * . Allocate 16-byte aligned mailbox memory for firmware handshake + * . Allocate controller's memory resources + * . Find out all initialization data + * . Allocate memory required for all the commands + * . 
Use internal library of FW routines, build up complete soft state + */ +static int __init +megaraid_init_mbox(adapter_t *adapter) +{ + struct pci_dev *pdev; + mraid_device_t *raid_dev; + int i; + + + adapter->ito = MBOX_TIMEOUT; + pdev = adapter->pdev; + + /* + * Allocate and initialize the init data structure for mailbox + * controllers + */ + raid_dev = kmalloc(sizeof(mraid_device_t), GFP_KERNEL); + if (raid_dev == NULL) return -1; + + memset(raid_dev, 0, sizeof(mraid_device_t)); + + /* + * Attach the adapter soft state to raid device soft state + */ + adapter->raid_device = (caddr_t)raid_dev; + raid_dev->fast_load = megaraid_fast_load; + + + // our baseport + raid_dev->baseport = pci_resource_start(pdev, 0); + + if (pci_request_regions(pdev, "MegaRAID: LSI Logic Corporation") != 0) { + + con_log(CL_ANN, (KERN_WARNING + "megaraid: mem region busy\n")); + + goto out_free_raid_dev; + } + + raid_dev->baseaddr = ioremap_nocache(raid_dev->baseport, 128); + + if (!raid_dev->baseaddr) { + + con_log(CL_ANN, (KERN_WARNING + "megaraid: could not map hba memory\n") ); + + goto out_release_regions; + } + + // + // Setup the rest of the soft state using the library of FW routines + // + + // request IRQ and register the interrupt service routine + if (request_irq(adapter->irq, megaraid_isr, SA_SHIRQ, "megaraid", + adapter)) { + + con_log(CL_ANN, (KERN_WARNING + "megaraid: Couldn't register IRQ %d!\n", adapter->irq)); + + goto out_iounmap; + } + + + // initialize the mutual exclusion lock for the mailbox + spin_lock_init(&raid_dev->mailbox_lock); + + // allocate memory required for commands + if (megaraid_alloc_cmd_packets(adapter) != 0) { + goto out_free_irq; + } + + // Product info + if (megaraid_mbox_product_info(adapter) != 0) { + goto out_alloc_cmds; + } + + // Do we support extended CDBs + adapter->max_cdb_sz = 10; + if (megaraid_mbox_extended_cdb(adapter) == 0) { + adapter->max_cdb_sz = 16; + } + + /* + * Do we support cluster environment, if we do, what is the initiator + * id. + * NOTE: In a non-cluster aware firmware environment, the LLD should + * return 7 as initiator id. + */ + adapter->ha = 0; + adapter->init_id = -1; + if (megaraid_mbox_support_ha(adapter, &adapter->init_id) == 0) { + adapter->ha = 1; + } + + /* + * Prepare the device ids array to have the mapping between the kernel + * device address and megaraid device address. + * We export the physical devices on their actual addresses. 
The + * logical drives are exported on a virtual SCSI channel + */ + megaraid_mbox_setup_device_map(adapter); + + // If the firmware supports random deletion, update the device id map + if (megaraid_mbox_support_random_del(adapter)) { + + // Change the logical drives numbers in device_ids array one + // slot in device_ids is reserved for target id, that's why + // "<=" below + for (i = 0; i <= MAX_LOGICAL_DRIVES_40LD; i++) { + adapter->device_ids[adapter->max_channel][i] += 0x80; + } + adapter->device_ids[adapter->max_channel][adapter->init_id] = + 0xFF; + + raid_dev->random_del_supported = 1; + } + + /* + * find out the maximum number of scatter-gather elements supported by + * this firmware + */ + adapter->sglen = megaraid_mbox_get_max_sg(adapter); + + // enumerate RAID and SCSI channels so that all devices on SCSI + // channels can later be exported, including disk devices + megaraid_mbox_enum_raid_scsi(adapter); + + /* + * Other parameters required by upper layer + * + * maximum number of sectors per IO command + */ + adapter->max_sectors = megaraid_max_sectors; + + /* + * number of queued commands per LUN. + */ + adapter->cmd_per_lun = megaraid_cmd_per_lun; + + /* + * Allocate resources required to issue FW calls, when sysfs is + * accessed + */ + if (megaraid_sysfs_alloc_resources(adapter) != 0) { + goto out_alloc_cmds; + } + + // Set the DMA mask to 64-bit. All supported controllers as capable of + // DMA in this range + if (pci_set_dma_mask(adapter->pdev, 0xFFFFFFFFFFFFFFFFULL) != 0) { + + con_log(CL_ANN, (KERN_WARNING + "megaraid: could not set DMA mask for 64-bit.\n")); + + goto out_free_sysfs_res; + } + + // setup tasklet for DPC + tasklet_init(&adapter->dpc_h, megaraid_mbox_dpc, + (unsigned long)adapter); + + con_log(CL_DLEVEL1, (KERN_INFO + "megaraid mbox hba successfully initialized\n")); + + return 0; + +out_free_sysfs_res: + megaraid_sysfs_free_resources(adapter); +out_alloc_cmds: + megaraid_free_cmd_packets(adapter); +out_free_irq: + free_irq(adapter->irq, adapter); +out_iounmap: + iounmap(raid_dev->baseaddr); +out_release_regions: + pci_release_regions(pdev); +out_free_raid_dev: + kfree(raid_dev); + + return -1; +} + + +/** + * megaraid_fini_mbox - undo controller initialization + * @param adapter : our soft state + */ +static void +megaraid_fini_mbox(adapter_t *adapter) +{ + mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); + + // flush all caches + megaraid_mbox_flush_cache(adapter); + + tasklet_kill(&adapter->dpc_h); + + megaraid_sysfs_free_resources(adapter); + + megaraid_free_cmd_packets(adapter); + + free_irq(adapter->irq, adapter); + + iounmap(raid_dev->baseaddr); + + pci_release_regions(adapter->pdev); + + kfree(raid_dev); + + return; +} + + +/** + * megaraid_alloc_cmd_packets - allocate shared mailbox + * @param adapter : soft state of the raid controller + * + * Allocate and align the shared mailbox. This maibox is used to issue + * all the commands. For IO based controllers, the mailbox is also regsitered + * with the FW. Allocate memory for all commands as well. + * This is our big allocator + */ +static int +megaraid_alloc_cmd_packets(adapter_t *adapter) +{ + mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); + struct pci_dev *pdev; + unsigned long align; + scb_t *scb; + mbox_ccb_t *ccb; + struct mraid_pci_blk *epthru_pci_blk; + struct mraid_pci_blk *sg_pci_blk; + struct mraid_pci_blk *mbox_pci_blk; + int i; + + pdev = adapter->pdev; + + /* + * Setup the mailbox + * Allocate the common 16-byte aligned memory for the handshake + * mailbox. 
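+	 * Editor's note -- the pointer arithmetic that follows is roughly
+	 * equivalent to this sketch (illustrative only, not driver code):
+	 *
+	 *	mbox     = round &una_mbox64->mbox32 up to a 16-byte boundary;
+	 *	mbox64   = (mbox64_t *)((caddr_t)mbox - 8);
+	 *	mbox_dma = una_mbox64_dma + 8 + (mbox - &una_mbox64->mbox32);
+	 *
+	 * i.e. the same alignment offset is applied to the DMA handle so
+	 * that the CPU and bus views of the mailbox stay consistent.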
+ */ + raid_dev->una_mbox64 = pci_alloc_consistent(adapter->pdev, + sizeof(mbox64_t), &raid_dev->una_mbox64_dma); + + if (!raid_dev->una_mbox64) { + con_log(CL_ANN, (KERN_WARNING + "megaraid: out of memory, %s %d\n", __FUNCTION__, + __LINE__)); + return -1; + } + memset(raid_dev->una_mbox64, 0, sizeof(mbox64_t)); + + /* + * Align the mailbox at 16-byte boundary + */ + raid_dev->mbox = &raid_dev->una_mbox64->mbox32; + + raid_dev->mbox = (mbox_t *)((((unsigned long)raid_dev->mbox) + 15) & + (~0UL ^ 0xFUL)); + + raid_dev->mbox64 = (mbox64_t *)(((unsigned long)raid_dev->mbox) - 8); + + align = ((void *)raid_dev->mbox - + ((void *)&raid_dev->una_mbox64->mbox32)); + + raid_dev->mbox_dma = (unsigned long)raid_dev->una_mbox64_dma + 8 + + align; + + // Allocate memory for commands issued internally + adapter->ibuf = pci_alloc_consistent(pdev, MBOX_IBUF_SIZE, + &adapter->ibuf_dma_h); + if (!adapter->ibuf) { + + con_log(CL_ANN, (KERN_WARNING + "megaraid: out of memory, %s %d\n", __FUNCTION__, + __LINE__)); + + goto out_free_common_mbox; + } + memset(adapter->ibuf, 0, MBOX_IBUF_SIZE); + + // Allocate memory for our SCSI Command Blocks and their associated + // memory + + /* + * Allocate memory for the base list of scb. Later allocate memory for + * CCBs and embedded components of each CCB and point the pointers in + * scb to the allocated components + * NOTE: The code to allocate SCB will be duplicated in all the LLD + * since the calling routine does not yet know the number of available + * commands. + */ + adapter->kscb_list = kmalloc(sizeof(scb_t) * MBOX_MAX_SCSI_CMDS, + GFP_KERNEL); + + if (adapter->kscb_list == NULL) { + con_log(CL_ANN, (KERN_WARNING + "megaraid: out of memory, %s %d\n", __FUNCTION__, + __LINE__)); + goto out_free_ibuf; + } + memset(adapter->kscb_list, 0, sizeof(scb_t) * MBOX_MAX_SCSI_CMDS); + + // memory allocation for our command packets + if (megaraid_mbox_setup_dma_pools(adapter) != 0) { + con_log(CL_ANN, (KERN_WARNING + "megaraid: out of memory, %s %d\n", __FUNCTION__, + __LINE__)); + goto out_free_scb_list; + } + + // Adjust the scb pointers and link in the free pool + epthru_pci_blk = raid_dev->epthru_pool; + sg_pci_blk = raid_dev->sg_pool; + mbox_pci_blk = raid_dev->mbox_pool; + + for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) { + scb = adapter->kscb_list + i; + ccb = raid_dev->ccb_list + i; + + ccb->mbox = (mbox_t *)(mbox_pci_blk[i].vaddr + 16); + ccb->raw_mbox = (uint8_t *)ccb->mbox; + ccb->mbox64 = (mbox64_t *)(mbox_pci_blk[i].vaddr + 8); + ccb->mbox_dma_h = (unsigned long)mbox_pci_blk[i].dma_addr + 16; + + // make sure the mailbox is aligned properly + if (ccb->mbox_dma_h & 0x0F) { + con_log(CL_ANN, (KERN_CRIT + "megaraid mbox: not aligned on 16-bytes\n")); + + goto out_teardown_dma_pools; + } + + ccb->epthru = (mraid_epassthru_t *) + epthru_pci_blk[i].vaddr; + ccb->epthru_dma_h = epthru_pci_blk[i].dma_addr; + ccb->pthru = (mraid_passthru_t *)ccb->epthru; + ccb->pthru_dma_h = ccb->epthru_dma_h; + + + ccb->sgl64 = (mbox_sgl64 *)sg_pci_blk[i].vaddr; + ccb->sgl_dma_h = sg_pci_blk[i].dma_addr; + ccb->sgl32 = (mbox_sgl32 *)ccb->sgl64; + + scb->ccb = (caddr_t)ccb; + scb->gp = 0; + + scb->sno = i; // command index + + scb->scp = NULL; + scb->state = SCB_FREE; + scb->dma_direction = PCI_DMA_NONE; + scb->dma_type = MRAID_DMA_NONE; + scb->dev_channel = -1; + scb->dev_target = -1; + + // put scb in the free pool + list_add_tail(&scb->list, &adapter->kscb_pool); + } + + return 0; + +out_teardown_dma_pools: + megaraid_mbox_teardown_dma_pools(adapter); +out_free_scb_list: + 
kfree(adapter->kscb_list); +out_free_ibuf: + pci_free_consistent(pdev, MBOX_IBUF_SIZE, (void *)adapter->ibuf, + adapter->ibuf_dma_h); +out_free_common_mbox: + pci_free_consistent(adapter->pdev, sizeof(mbox64_t), + (caddr_t)raid_dev->una_mbox64, raid_dev->una_mbox64_dma); + + return -1; +} + + +/** + * megaraid_free_cmd_packets - free memory + * @param adapter : soft state of the raid controller + * + * Release memory resources allocated for commands + */ +static void +megaraid_free_cmd_packets(adapter_t *adapter) +{ + mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); + + megaraid_mbox_teardown_dma_pools(adapter); + + kfree(adapter->kscb_list); + + pci_free_consistent(adapter->pdev, MBOX_IBUF_SIZE, + (void *)adapter->ibuf, adapter->ibuf_dma_h); + + pci_free_consistent(adapter->pdev, sizeof(mbox64_t), + (caddr_t)raid_dev->una_mbox64, raid_dev->una_mbox64_dma); + return; +} + + +/** + * megaraid_mbox_setup_dma_pools - setup dma pool for command packets + * @param adapter : HBA soft state + * + * setup the dma pools for mailbox, passthru and extended passthru structures, + * and scatter-gather lists + */ +static int +megaraid_mbox_setup_dma_pools(adapter_t *adapter) +{ + mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); + struct mraid_pci_blk *epthru_pci_blk; + struct mraid_pci_blk *sg_pci_blk; + struct mraid_pci_blk *mbox_pci_blk; + int i; + + + + // Allocate memory for 16-bytes aligned mailboxes + raid_dev->mbox_pool_handle = pci_pool_create("megaraid mbox pool", + adapter->pdev, + sizeof(mbox64_t) + 16, + 16, 0); + + if (raid_dev->mbox_pool_handle == NULL) { + goto fail_setup_dma_pool; + } + + mbox_pci_blk = raid_dev->mbox_pool; + for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) { + mbox_pci_blk[i].vaddr = pci_pool_alloc( + raid_dev->mbox_pool_handle, + GFP_KERNEL, + &mbox_pci_blk[i].dma_addr); + if (!mbox_pci_blk[i].vaddr) { + goto fail_setup_dma_pool; + } + } + + /* + * Allocate memory for each embedded passthru strucuture pointer + * Request for a 128 bytes aligned structure for each passthru command + * structure + * Since passthru and extended passthru commands are exclusive, they + * share common memory pool. Passthru structures piggyback on memory + * allocted to extended passthru since passthru is smaller of the two + */ + raid_dev->epthru_pool_handle = pci_pool_create("megaraid mbox pthru", + adapter->pdev, sizeof(mraid_epassthru_t), 128, 0); + + if (raid_dev->epthru_pool_handle == NULL) { + goto fail_setup_dma_pool; + } + + epthru_pci_blk = raid_dev->epthru_pool; + for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) { + epthru_pci_blk[i].vaddr = pci_pool_alloc( + raid_dev->epthru_pool_handle, + GFP_KERNEL, + &epthru_pci_blk[i].dma_addr); + if (!epthru_pci_blk[i].vaddr) { + goto fail_setup_dma_pool; + } + } + + + // Allocate memory for each scatter-gather list. 
Request for 512 bytes + // alignment for each sg list + raid_dev->sg_pool_handle = pci_pool_create("megaraid mbox sg", + adapter->pdev, + sizeof(mbox_sgl64) * MBOX_MAX_SG_SIZE, + 512, 0); + + if (raid_dev->sg_pool_handle == NULL) { + goto fail_setup_dma_pool; + } + + sg_pci_blk = raid_dev->sg_pool; + for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) { + sg_pci_blk[i].vaddr = pci_pool_alloc( + raid_dev->sg_pool_handle, + GFP_KERNEL, + &sg_pci_blk[i].dma_addr); + if (!sg_pci_blk[i].vaddr) { + goto fail_setup_dma_pool; + } + } + + return 0; + +fail_setup_dma_pool: + megaraid_mbox_teardown_dma_pools(adapter); + return -1; +} + + +/** + * megaraid_mbox_teardown_dma_pools - teardown dma pools for command packets + * @param adapter : HBA soft state + * + * teardown the dma pool for mailbox, passthru and extended passthru + * structures, and scatter-gather lists + */ +static void +megaraid_mbox_teardown_dma_pools(adapter_t *adapter) +{ + mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); + struct mraid_pci_blk *epthru_pci_blk; + struct mraid_pci_blk *sg_pci_blk; + struct mraid_pci_blk *mbox_pci_blk; + int i; + + + sg_pci_blk = raid_dev->sg_pool; + for (i = 0; i < MBOX_MAX_SCSI_CMDS && sg_pci_blk[i].vaddr; i++) { + pci_pool_free(raid_dev->sg_pool_handle, sg_pci_blk[i].vaddr, + sg_pci_blk[i].dma_addr); + } + if (raid_dev->sg_pool_handle) + pci_pool_destroy(raid_dev->sg_pool_handle); + + + epthru_pci_blk = raid_dev->epthru_pool; + for (i = 0; i < MBOX_MAX_SCSI_CMDS && epthru_pci_blk[i].vaddr; i++) { + pci_pool_free(raid_dev->epthru_pool_handle, + epthru_pci_blk[i].vaddr, epthru_pci_blk[i].dma_addr); + } + if (raid_dev->epthru_pool_handle) + pci_pool_destroy(raid_dev->epthru_pool_handle); + + + mbox_pci_blk = raid_dev->mbox_pool; + for (i = 0; i < MBOX_MAX_SCSI_CMDS && mbox_pci_blk[i].vaddr; i++) { + pci_pool_free(raid_dev->mbox_pool_handle, + mbox_pci_blk[i].vaddr, mbox_pci_blk[i].dma_addr); + } + if (raid_dev->mbox_pool_handle) + pci_pool_destroy(raid_dev->mbox_pool_handle); + + return; +} + + +/** + * megaraid_alloc_scb - detach and return a scb from the free list + * @adapter : controller's soft state + * + * return the scb from the head of the free list. NULL if there are none + * available + **/ +static inline scb_t * +megaraid_alloc_scb(adapter_t *adapter, struct scsi_cmnd *scp) +{ + struct list_head *head = &adapter->kscb_pool; + scb_t *scb = NULL; + unsigned long flags; + + // detach scb from free pool + spin_lock_irqsave(SCSI_FREE_LIST_LOCK(adapter), flags); + + if (list_empty(head)) { + spin_unlock_irqrestore(SCSI_FREE_LIST_LOCK(adapter), flags); + return NULL; + } + + scb = list_entry(head->next, scb_t, list); + list_del_init(&scb->list); + + spin_unlock_irqrestore(SCSI_FREE_LIST_LOCK(adapter), flags); + + scb->state = SCB_ACTIVE; + scb->scp = scp; + scb->dma_type = MRAID_DMA_NONE; + + return scb; +} + + +/** + * megaraid_dealloc_scb - return the scb to the free pool + * @adapter : controller's soft state + * @scb : scb to be freed + * + * return the scb back to the free list of scbs. The caller must 'flush' the + * SCB before calling us. E.g., performing pci_unamp and/or pci_sync etc. + * NOTE NOTE: Make sure the scb is not on any list before calling this + * routine. 
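+ * Editor's note -- a typical caller in this file therefore does
+ * (illustrative sequence only):
+ *
+ *	megaraid_mbox_sync_scb(adapter, scb);	// unmap / sync DMA buffers
+ *	list_del_init(&scb->list);		// take it off any local list
+ *	megaraid_dealloc_scb(adapter, scb);	// return it to kscb_pool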
+ **/ +static inline void +megaraid_dealloc_scb(adapter_t *adapter, scb_t *scb) +{ + unsigned long flags; + + // put scb in the free pool + scb->state = SCB_FREE; + scb->scp = NULL; + spin_lock_irqsave(SCSI_FREE_LIST_LOCK(adapter), flags); + + list_add(&scb->list, &adapter->kscb_pool); + + spin_unlock_irqrestore(SCSI_FREE_LIST_LOCK(adapter), flags); + + return; +} + + +/** + * megaraid_mbox_mksgl - make the scatter-gather list + * @adapter - controller's soft state + * @scb - scsi control block + * + * prepare the scatter-gather list + */ +static inline int +megaraid_mbox_mksgl(adapter_t *adapter, scb_t *scb) +{ + struct scatterlist *sgl; + mbox_ccb_t *ccb; + struct page *page; + unsigned long offset; + struct scsi_cmnd *scp; + int sgcnt; + int i; + + + scp = scb->scp; + ccb = (mbox_ccb_t *)scb->ccb; + + // no mapping required if no data to be transferred + if (!scp->request_buffer || !scp->request_bufflen) + return 0; + + if (!scp->use_sg) { /* scatter-gather list not used */ + + page = virt_to_page(scp->request_buffer); + + offset = ((unsigned long)scp->request_buffer & ~PAGE_MASK); + + ccb->buf_dma_h = pci_map_page(adapter->pdev, page, offset, + scp->request_bufflen, + scb->dma_direction); + scb->dma_type = MRAID_DMA_WBUF; + + /* + * We need to handle special 64-bit commands that need a + * minimum of 1 SG + */ + sgcnt = 1; + ccb->sgl64[0].address = ccb->buf_dma_h; + ccb->sgl64[0].length = scp->request_bufflen; + + return sgcnt; + } + + sgl = (struct scatterlist *)scp->request_buffer; + + // The number of sg elements returned must not exceed our limit + sgcnt = pci_map_sg(adapter->pdev, sgl, scp->use_sg, + scb->dma_direction); + + if (sgcnt > adapter->sglen) { + con_log(CL_ANN, (KERN_CRIT + "megaraid critical: too many sg elements:%d\n", + sgcnt)); + BUG(); + } + + scb->dma_type = MRAID_DMA_WSG; + + for (i = 0; i < sgcnt; i++, sgl++) { + ccb->sgl64[i].address = sg_dma_address(sgl); + ccb->sgl64[i].length = sg_dma_len(sgl); + } + + // Return count of SG nodes + return sgcnt; +} + + +/** + * mbox_post_cmd - issue a mailbox command + * @adapter - controller's soft state + * @scb - command to be issued + * + * post the command to the controller if mailbox is availble. + */ +static inline int +mbox_post_cmd(adapter_t *adapter, scb_t *scb) +{ + mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); + mbox64_t *mbox64; + mbox_t *mbox; + mbox_ccb_t *ccb; + unsigned long flags; + unsigned int i = 0; + + + ccb = (mbox_ccb_t *)scb->ccb; + mbox = raid_dev->mbox; + mbox64 = raid_dev->mbox64; + + /* + * Check for busy mailbox. If it is, return failure - the caller + * should retry later. 
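+	 * Editor's note: the wait below is a bounded busy-wait -- udelay(1)
+	 * per iteration, up to max_mbox_busy_wait iterations, all under the
+	 * mailbox lock. On timeout -1 is returned and the caller
+	 * (megaraid_mbox_runpendq) puts the SCB back at the head of the
+	 * pending list for a later retry.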
+ */ + spin_lock_irqsave(MAILBOX_LOCK(raid_dev), flags); + + if (unlikely(mbox->busy)) { + do { + udelay(1); + i++; + rmb(); + } while(mbox->busy && (i < max_mbox_busy_wait)); + + if (mbox->busy) { + + spin_unlock_irqrestore(MAILBOX_LOCK(raid_dev), flags); + + return -1; + } + } + + + // Copy this command's mailbox data into "adapter's" mailbox + memcpy((caddr_t)mbox64, (caddr_t)ccb->mbox64, 22); + mbox->cmdid = scb->sno; + + adapter->outstanding_cmds++; + + if (scb->dma_direction == PCI_DMA_TODEVICE) { + if (!scb->scp->use_sg) { // sg list not used + pci_dma_sync_single_for_device(adapter->pdev, + ccb->buf_dma_h, + scb->scp->request_bufflen, + PCI_DMA_TODEVICE); + } + else { + pci_dma_sync_sg_for_device(adapter->pdev, + scb->scp->request_buffer, + scb->scp->use_sg, PCI_DMA_TODEVICE); + } + } + + mbox->busy = 1; // Set busy + mbox->poll = 0; + mbox->ack = 0; + wmb(); + + WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x1); + + spin_unlock_irqrestore(MAILBOX_LOCK(raid_dev), flags); + + return 0; +} + + +/** + * megaraid_queue_command - generic queue entry point for all LLDs + * @scp : pointer to the scsi command to be executed + * @done : callback routine to be called after the cmd has be completed + * + * Queue entry point for mailbox based controllers. + */ +static int +megaraid_queue_command(struct scsi_cmnd *scp, void (* done)(struct scsi_cmnd *)) +{ + adapter_t *adapter; + scb_t *scb; + int if_busy; + + adapter = SCP2ADAPTER(scp); + scp->scsi_done = done; + scp->result = 0; + + assert_spin_locked(adapter->host_lock); + + spin_unlock(adapter->host_lock); + + /* + * Allocate and build a SCB request + * if_busy flag will be set if megaraid_mbox_build_cmd() command could + * not allocate scb. We will return non-zero status in that case. + * NOTE: scb can be null even though certain commands completed + * successfully, e.g., MODE_SENSE and TEST_UNIT_READY, it would + * return 0 in that case, and we would do the callback right away. + */ + if_busy = 0; + scb = megaraid_mbox_build_cmd(adapter, scp, &if_busy); + + if (scb) { + megaraid_mbox_runpendq(adapter, scb); + } + + spin_lock(adapter->host_lock); + + if (!scb) { // command already completed + done(scp); + return 0; + } + + return if_busy; +} + + +/** + * megaraid_mbox_build_cmd - transform the mid-layer scsi command to megaraid + * firmware lingua + * @adapter - controller's soft state + * @scp - mid-layer scsi command pointer + * @busy - set if request could not be completed because of lack of + * resources + * + * convert the command issued by mid-layer to format understood by megaraid + * firmware. 
We also complete certain command without sending them to firmware + */ +static scb_t * +megaraid_mbox_build_cmd(adapter_t *adapter, struct scsi_cmnd *scp, int *busy) +{ + mraid_device_t *rdev = ADAP2RAIDDEV(adapter); + int channel; + int target; + int islogical; + mbox_ccb_t *ccb; + mraid_passthru_t *pthru; + mbox64_t *mbox64; + mbox_t *mbox; + scb_t *scb; + char skip[] = "skipping"; + char scan[] = "scanning"; + char *ss; + + + /* + * Get the appropriate device map for the device this command is + * intended for + */ + MRAID_GET_DEVICE_MAP(adapter, scp, channel, target, islogical); + + /* + * Logical drive commands + */ + if (islogical) { + switch (scp->cmnd[0]) { + case TEST_UNIT_READY: + /* + * Do we support clustering and is the support enabled + * If no, return success always + */ + if (!adapter->ha) { + scp->result = (DID_OK << 16); + return NULL; + } + + if (!(scb = megaraid_alloc_scb(adapter, scp))) { + scp->result = (DID_ERROR << 16); + *busy = 1; + return NULL; + } + + scb->dma_direction = scp->sc_data_direction; + scb->dev_channel = 0xFF; + scb->dev_target = target; + ccb = (mbox_ccb_t *)scb->ccb; + + /* + * The command id will be provided by the command + * issuance routine + */ + ccb->raw_mbox[0] = CLUSTER_CMD; + ccb->raw_mbox[2] = RESERVATION_STATUS; + ccb->raw_mbox[3] = target; + + return scb; + + case MODE_SENSE: + if (scp->use_sg) { + struct scatterlist *sgl; + caddr_t vaddr; + + sgl = (struct scatterlist *)scp->request_buffer; + if (sgl->page) { + vaddr = (caddr_t) + (page_address((&sgl[0])->page) + + (&sgl[0])->offset); + + memset(vaddr, 0, scp->cmnd[4]); + } + else { + con_log(CL_ANN, (KERN_WARNING + "megaraid mailbox: invalid sg:%d\n", + __LINE__)); + } + } + else { + memset(scp->request_buffer, 0, scp->cmnd[4]); + } + scp->result = (DID_OK << 16); + return NULL; + + case INQUIRY: + /* + * Display the channel scan for logical drives + * Do not display scan for a channel if already done. 
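+	 * Editor's note: rdev->last_disp is used as a per-channel
+	 * "message already printed" bitmask, so the log line below is
+	 * emitted only for the first INQUIRY seen on each virtual channel.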
+ */ + if (!(rdev->last_disp & (1L << SCP2CHANNEL(scp)))) { + + con_log(CL_ANN, (KERN_INFO + "scsi[%d]: scanning scsi channel %d", + adapter->host->host_no, + SCP2CHANNEL(scp))); + + con_log(CL_ANN, ( + " [virtual] for logical drives\n")); + + rdev->last_disp |= (1L << SCP2CHANNEL(scp)); + } + + /* Fall through */ + + case READ_CAPACITY: + /* + * Do not allow LUN > 0 for logical drives and + * requests for more than 40 logical drives + */ + if (SCP2LUN(scp)) { + scp->result = (DID_BAD_TARGET << 16); + return NULL; + } + if ((target % 0x80) >= MAX_LOGICAL_DRIVES_40LD) { + scp->result = (DID_BAD_TARGET << 16); + return NULL; + } + + + /* Allocate a SCB and initialize passthru */ + if (!(scb = megaraid_alloc_scb(adapter, scp))) { + scp->result = (DID_ERROR << 16); + *busy = 1; + return NULL; + } + + ccb = (mbox_ccb_t *)scb->ccb; + scb->dev_channel = 0xFF; + scb->dev_target = target; + pthru = ccb->pthru; + mbox = ccb->mbox; + mbox64 = ccb->mbox64; + + pthru->timeout = 0; + pthru->ars = 1; + pthru->reqsenselen = 14; + pthru->islogical = 1; + pthru->logdrv = target; + pthru->cdblen = scp->cmd_len; + memcpy(pthru->cdb, scp->cmnd, scp->cmd_len); + + mbox->cmd = MBOXCMD_PASSTHRU64; + scb->dma_direction = scp->sc_data_direction; + + pthru->dataxferlen = scp->request_bufflen; + pthru->dataxferaddr = ccb->sgl_dma_h; + pthru->numsge = megaraid_mbox_mksgl(adapter, + scb); + + mbox->xferaddr = 0xFFFFFFFF; + mbox64->xferaddr_lo = (uint32_t )ccb->pthru_dma_h; + mbox64->xferaddr_hi = 0; + + return scb; + + case READ_6: + case WRITE_6: + case READ_10: + case WRITE_10: + case READ_12: + case WRITE_12: + + /* + * Allocate a SCB and initialize mailbox + */ + if (!(scb = megaraid_alloc_scb(adapter, scp))) { + scp->result = (DID_ERROR << 16); + *busy = 1; + return NULL; + } + ccb = (mbox_ccb_t *)scb->ccb; + scb->dev_channel = 0xFF; + scb->dev_target = target; + mbox = ccb->mbox; + mbox64 = ccb->mbox64; + mbox->logdrv = target; + + /* + * A little HACK: 2nd bit is zero for all scsi read + * commands and is set for all scsi write commands + */ + mbox->cmd = (scp->cmnd[0] & 0x02) ? 
MBOXCMD_LWRITE64: + MBOXCMD_LREAD64 ; + + /* + * 6-byte READ(0x08) or WRITE(0x0A) cdb + */ + if (scp->cmd_len == 6) { + mbox->numsectors = (uint32_t)scp->cmnd[4]; + mbox->lba = + ((uint32_t)scp->cmnd[1] << 16) | + ((uint32_t)scp->cmnd[2] << 8) | + (uint32_t)scp->cmnd[3]; + + mbox->lba &= 0x1FFFFF; + } + + /* + * 10-byte READ(0x28) or WRITE(0x2A) cdb + */ + else if (scp->cmd_len == 10) { + mbox->numsectors = + (uint32_t)scp->cmnd[8] | + ((uint32_t)scp->cmnd[7] << 8); + mbox->lba = + ((uint32_t)scp->cmnd[2] << 24) | + ((uint32_t)scp->cmnd[3] << 16) | + ((uint32_t)scp->cmnd[4] << 8) | + (uint32_t)scp->cmnd[5]; + } + + /* + * 12-byte READ(0xA8) or WRITE(0xAA) cdb + */ + else if (scp->cmd_len == 12) { + mbox->lba = + ((uint32_t)scp->cmnd[2] << 24) | + ((uint32_t)scp->cmnd[3] << 16) | + ((uint32_t)scp->cmnd[4] << 8) | + (uint32_t)scp->cmnd[5]; + + mbox->numsectors = + ((uint32_t)scp->cmnd[6] << 24) | + ((uint32_t)scp->cmnd[7] << 16) | + ((uint32_t)scp->cmnd[8] << 8) | + (uint32_t)scp->cmnd[9]; + } + else { + con_log(CL_ANN, (KERN_WARNING + "megaraid: unsupported CDB length\n")); + + megaraid_dealloc_scb(adapter, scb); + + scp->result = (DID_ERROR << 16); + return NULL; + } + + scb->dma_direction = scp->sc_data_direction; + + // Calculate Scatter-Gather info + mbox64->xferaddr_lo = (uint32_t )ccb->sgl_dma_h; + mbox->numsge = megaraid_mbox_mksgl(adapter, + scb); + mbox->xferaddr = 0xFFFFFFFF; + mbox64->xferaddr_hi = 0; + + return scb; + + case RESERVE: + case RELEASE: + /* + * Do we support clustering and is the support enabled + */ + if (!adapter->ha) { + scp->result = (DID_BAD_TARGET << 16); + return NULL; + } + + /* + * Allocate a SCB and initialize mailbox + */ + if (!(scb = megaraid_alloc_scb(adapter, scp))) { + scp->result = (DID_ERROR << 16); + *busy = 1; + return NULL; + } + + ccb = (mbox_ccb_t *)scb->ccb; + scb->dev_channel = 0xFF; + scb->dev_target = target; + ccb->raw_mbox[0] = CLUSTER_CMD; + ccb->raw_mbox[2] = (scp->cmnd[0] == RESERVE) ? + RESERVE_LD : RELEASE_LD; + + ccb->raw_mbox[3] = target; + scb->dma_direction = scp->sc_data_direction; + + return scb; + + default: + scp->result = (DID_BAD_TARGET << 16); + return NULL; + } + } + else { // Passthru device commands + + // Do not allow access to target id > 15 or LUN > 7 + if (target > 15 || SCP2LUN(scp) > 7) { + scp->result = (DID_BAD_TARGET << 16); + return NULL; + } + + // if fast load option was set and scan for last device is + // over, reset the fast_load flag so that during a possible + // next scan, devices can be made available + if (rdev->fast_load && (target == 15) && + (SCP2CHANNEL(scp) == adapter->max_channel -1)) { + + con_log(CL_ANN, (KERN_INFO + "megaraid[%d]: physical device scan re-enabled\n", + adapter->host->host_no)); + rdev->fast_load = 0; + } + + /* + * Display the channel scan for physical devices + */ + if (!(rdev->last_disp & (1L << SCP2CHANNEL(scp)))) { + + ss = rdev->fast_load ? 
skip : scan; + + con_log(CL_ANN, (KERN_INFO + "scsi[%d]: %s scsi channel %d [Phy %d]", + adapter->host->host_no, ss, SCP2CHANNEL(scp), + channel)); + + con_log(CL_ANN, ( + " for non-raid devices\n")); + + rdev->last_disp |= (1L << SCP2CHANNEL(scp)); + } + + // disable channel sweep if fast load option given + if (rdev->fast_load) { + scp->result = (DID_BAD_TARGET << 16); + return NULL; + } + + // Allocate a SCB and initialize passthru + if (!(scb = megaraid_alloc_scb(adapter, scp))) { + scp->result = (DID_ERROR << 16); + *busy = 1; + return NULL; + } + + ccb = (mbox_ccb_t *)scb->ccb; + scb->dev_channel = channel; + scb->dev_target = target; + scb->dma_direction = scp->sc_data_direction; + mbox = ccb->mbox; + mbox64 = ccb->mbox64; + + // Does this firmware support extended CDBs + if (adapter->max_cdb_sz == 16) { + mbox->cmd = MBOXCMD_EXTPTHRU; + + megaraid_mbox_prepare_epthru(adapter, scb, scp); + + mbox64->xferaddr_lo = (uint32_t)ccb->epthru_dma_h; + mbox64->xferaddr_hi = 0; + mbox->xferaddr = 0xFFFFFFFF; + } + else { + mbox->cmd = MBOXCMD_PASSTHRU64; + + megaraid_mbox_prepare_pthru(adapter, scb, scp); + + mbox64->xferaddr_lo = (uint32_t)ccb->pthru_dma_h; + mbox64->xferaddr_hi = 0; + mbox->xferaddr = 0xFFFFFFFF; + } + return scb; + } + + // NOT REACHED +} + + +/** + * megaraid_mbox_runpendq - execute commands queued in the pending queue + * @adapter : controller's soft state + * @scb : SCB to be queued in the pending list + * + * scan the pending list for commands which are not yet issued and try to + * post to the controller. The SCB can be a null pointer, which would indicate + * no SCB to be queue, just try to execute the ones in the pending list. + * + * NOTE: We do not actually traverse the pending list. The SCBs are plucked + * out from the head of the pending list. If it is successfully issued, the + * next SCB is at the head now. + */ +static void +megaraid_mbox_runpendq(adapter_t *adapter, scb_t *scb_q) +{ + scb_t *scb; + unsigned long flags; + + spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags); + + if (scb_q) { + scb_q->state = SCB_PENDQ; + list_add_tail(&scb_q->list, &adapter->pend_list); + } + + // if the adapter in not in quiescent mode, post the commands to FW + if (adapter->quiescent) { + spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags); + return; + } + + while (!list_empty(&adapter->pend_list)) { + + assert_spin_locked(PENDING_LIST_LOCK(adapter)); + + scb = list_entry(adapter->pend_list.next, scb_t, list); + + // remove the scb from the pending list and try to + // issue. If we are unable to issue it, put back in + // the pending list and return + + list_del_init(&scb->list); + + spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags); + + // if mailbox was busy, return SCB back to pending + // list. 
Make sure to add at the head, since that's + // where it would have been removed from + + scb->state = SCB_ISSUED; + + if (mbox_post_cmd(adapter, scb) != 0) { + + spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags); + + scb->state = SCB_PENDQ; + + list_add(&scb->list, &adapter->pend_list); + + spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), + flags); + + return; + } + + spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags); + } + + spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags); + + + return; +} + + +/** + * megaraid_mbox_prepare_pthru - prepare a command for physical devices + * @adapter - pointer to controller's soft state + * @scb - scsi control block + * @scp - scsi command from the mid-layer + * + * prepare a command for the scsi physical devices + */ +static void +megaraid_mbox_prepare_pthru(adapter_t *adapter, scb_t *scb, + struct scsi_cmnd *scp) +{ + mbox_ccb_t *ccb; + mraid_passthru_t *pthru; + uint8_t channel; + uint8_t target; + + ccb = (mbox_ccb_t *)scb->ccb; + pthru = ccb->pthru; + channel = scb->dev_channel; + target = scb->dev_target; + + // 0=6sec, 1=60sec, 2=10min, 3=3hrs, 4=NO timeout + pthru->timeout = 4; + pthru->ars = 1; + pthru->islogical = 0; + pthru->channel = 0; + pthru->target = (channel << 4) | target; + pthru->logdrv = SCP2LUN(scp); + pthru->reqsenselen = 14; + pthru->cdblen = scp->cmd_len; + + memcpy(pthru->cdb, scp->cmnd, scp->cmd_len); + + if (scp->request_bufflen) { + pthru->dataxferlen = scp->request_bufflen; + pthru->dataxferaddr = ccb->sgl_dma_h; + pthru->numsge = megaraid_mbox_mksgl(adapter, scb); + } + else { + pthru->dataxferaddr = 0; + pthru->dataxferlen = 0; + pthru->numsge = 0; + } + return; +} + + +/** + * megaraid_mbox_prepare_epthru - prepare a command for physical devices + * @adapter - pointer to controller's soft state + * @scb - scsi control block + * @scp - scsi command from the mid-layer + * + * prepare a command for the scsi physical devices. This rountine prepares + * commands for devices which can take extended CDBs (>10 bytes) + */ +static void +megaraid_mbox_prepare_epthru(adapter_t *adapter, scb_t *scb, + struct scsi_cmnd *scp) +{ + mbox_ccb_t *ccb; + mraid_epassthru_t *epthru; + uint8_t channel; + uint8_t target; + + ccb = (mbox_ccb_t *)scb->ccb; + epthru = ccb->epthru; + channel = scb->dev_channel; + target = scb->dev_target; + + // 0=6sec, 1=60sec, 2=10min, 3=3hrs, 4=NO timeout + epthru->timeout = 4; + epthru->ars = 1; + epthru->islogical = 0; + epthru->channel = 0; + epthru->target = (channel << 4) | target; + epthru->logdrv = SCP2LUN(scp); + epthru->reqsenselen = 14; + epthru->cdblen = scp->cmd_len; + + memcpy(epthru->cdb, scp->cmnd, scp->cmd_len); + + if (scp->request_bufflen) { + epthru->dataxferlen = scp->request_bufflen; + epthru->dataxferaddr = ccb->sgl_dma_h; + epthru->numsge = megaraid_mbox_mksgl(adapter, scb); + } + else { + epthru->dataxferaddr = 0; + epthru->dataxferlen = 0; + epthru->numsge = 0; + } + return; +} + + +/** + * megaraid_ack_sequence - interrupt ack sequence for memory mapped HBAs + * @adapter - controller's soft state + * + * Interrupt ackrowledgement sequence for memory mapped HBAs. Find out the + * completed command and put them on the completed list for later processing. 
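+ *
+ * Editor's note -- in outline, the handshake with the firmware below is
+ * (illustrative pseudo-code only):
+ *
+ *	while (RDOUTDOOR(raid_dev) == 0x10001234) {	// interrupt pending
+ *		WROUTDOOR(raid_dev, 0x10001234);	// force the line low
+ *		read mbox->numstatus and mbox->completed[] into a local list
+ *		WRINDOOR(raid_dev, 0x02);		// acknowledge the batch
+ *	}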
+ * + * Returns: 1 if the interrupt is valid, 0 otherwise + */ +static inline int +megaraid_ack_sequence(adapter_t *adapter) +{ + mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); + mbox_t *mbox; + scb_t *scb; + uint8_t nstatus; + uint8_t completed[MBOX_MAX_FIRMWARE_STATUS]; + struct list_head clist; + int handled; + uint32_t dword; + unsigned long flags; + int i, j; + + + mbox = raid_dev->mbox; + + // move the SCBs from the firmware completed array to our local list + INIT_LIST_HEAD(&clist); + + // loop till F/W has more commands for us to complete + handled = 0; + spin_lock_irqsave(MAILBOX_LOCK(raid_dev), flags); + do { + /* + * Check if a valid interrupt is pending. If found, force the + * interrupt line low. + */ + dword = RDOUTDOOR(raid_dev); + if (dword != 0x10001234) break; + + handled = 1; + + WROUTDOOR(raid_dev, 0x10001234); + + nstatus = 0; + // wait for valid numstatus to post + for (i = 0; i < 0xFFFFF; i++) { + if (mbox->numstatus != 0xFF) { + nstatus = mbox->numstatus; + break; + } + rmb(); + } + mbox->numstatus = 0xFF; + + adapter->outstanding_cmds -= nstatus; + + for (i = 0; i < nstatus; i++) { + + // wait for valid command index to post + for (j = 0; j < 0xFFFFF; j++) { + if (mbox->completed[i] != 0xFF) break; + rmb(); + } + completed[i] = mbox->completed[i]; + mbox->completed[i] = 0xFF; + + if (completed[i] == 0xFF) { + con_log(CL_ANN, (KERN_CRIT + "megaraid: command posting timed out\n")); + + BUG(); + continue; + } + + // Get SCB associated with this command id + if (completed[i] >= MBOX_MAX_SCSI_CMDS) { + // a cmm command + scb = adapter->uscb_list + (completed[i] - + MBOX_MAX_SCSI_CMDS); + } + else { + // an os command + scb = adapter->kscb_list + completed[i]; + } + + scb->status = mbox->status; + list_add_tail(&scb->list, &clist); + } + + // Acknowledge interrupt + WRINDOOR(raid_dev, 0x02); + + } while(1); + + spin_unlock_irqrestore(MAILBOX_LOCK(raid_dev), flags); + + + // put the completed commands in the completed list. DPC would + // complete these commands later + spin_lock_irqsave(COMPLETED_LIST_LOCK(adapter), flags); + + list_splice(&clist, &adapter->completed_list); + + spin_unlock_irqrestore(COMPLETED_LIST_LOCK(adapter), flags); + + + // schedule the DPC if there is some work for it + if (handled) + tasklet_schedule(&adapter->dpc_h); + + return handled; +} + + +/** + * megaraid_isr - isr for memory based mailbox based controllers + * @irq - irq + * @devp - pointer to our soft state + * @regs - unused + * + * Interrupt service routine for memory-mapped mailbox controllers. + */ +static irqreturn_t +megaraid_isr(int irq, void *devp, struct pt_regs *regs) +{ + adapter_t *adapter = devp; + int handled; + + handled = megaraid_ack_sequence(adapter); + + /* Loop through any pending requests */ + if (!adapter->quiescent) { + megaraid_mbox_runpendq(adapter, NULL); + } + + return IRQ_RETVAL(handled); +} + + +/** + * megaraid_mbox_sync_scb - sync kernel buffers + * @adapter : controller's soft state + * @scb : pointer to the resource packet + * + * DMA sync if required. 
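+ * Editor's note: two mapping flavours are handled here --
+ * MRAID_DMA_WBUF, a single buffer mapped with pci_map_page() for
+ * non scatter-gather requests, and MRAID_DMA_WSG, a scatter-gather
+ * table mapped with pci_map_sg(); data coming from the device is
+ * additionally synced for the CPU before the unmap.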
+ */ +static inline void +megaraid_mbox_sync_scb(adapter_t *adapter, scb_t *scb) +{ + mbox_ccb_t *ccb; + + ccb = (mbox_ccb_t *)scb->ccb; + + switch (scb->dma_type) { + + case MRAID_DMA_WBUF: + if (scb->dma_direction == PCI_DMA_FROMDEVICE) { + pci_dma_sync_single_for_cpu(adapter->pdev, + ccb->buf_dma_h, + scb->scp->request_bufflen, + PCI_DMA_FROMDEVICE); + } + + pci_unmap_page(adapter->pdev, ccb->buf_dma_h, + scb->scp->request_bufflen, scb->dma_direction); + + break; + + case MRAID_DMA_WSG: + if (scb->dma_direction == PCI_DMA_FROMDEVICE) { + pci_dma_sync_sg_for_cpu(adapter->pdev, + scb->scp->request_buffer, + scb->scp->use_sg, PCI_DMA_FROMDEVICE); + } + + pci_unmap_sg(adapter->pdev, scb->scp->request_buffer, + scb->scp->use_sg, scb->dma_direction); + + break; + + default: + break; + } + + return; +} + + +/** + * megaraid_mbox_dpc - the tasklet to complete the commands from completed list + * @devp : pointer to HBA soft state + * + * Pick up the commands from the completed list and send back to the owners. + * This is a reentrant function and does not assume any locks are held while + * it is being called. + */ +static void +megaraid_mbox_dpc(unsigned long devp) +{ + adapter_t *adapter = (adapter_t *)devp; + mraid_device_t *raid_dev; + struct list_head clist; + struct scatterlist *sgl; + scb_t *scb; + scb_t *tmp; + struct scsi_cmnd *scp; + mraid_passthru_t *pthru; + mraid_epassthru_t *epthru; + mbox_ccb_t *ccb; + int islogical; + int pdev_index; + int pdev_state; + mbox_t *mbox; + unsigned long flags; + uint8_t c; + int status; + + + if (!adapter) return; + + raid_dev = ADAP2RAIDDEV(adapter); + + // move the SCBs from the completed list to our local list + INIT_LIST_HEAD(&clist); + + spin_lock_irqsave(COMPLETED_LIST_LOCK(adapter), flags); + + list_splice_init(&adapter->completed_list, &clist); + + spin_unlock_irqrestore(COMPLETED_LIST_LOCK(adapter), flags); + + + list_for_each_entry_safe(scb, tmp, &clist, list) { + + status = scb->status; + scp = scb->scp; + ccb = (mbox_ccb_t *)scb->ccb; + pthru = ccb->pthru; + epthru = ccb->epthru; + mbox = ccb->mbox; + + // Make sure f/w has completed a valid command + if (scb->state != SCB_ISSUED) { + con_log(CL_ANN, (KERN_CRIT + "megaraid critical err: invalid command %d:%d:%p\n", + scb->sno, scb->state, scp)); + BUG(); + continue; // Must never happen! + } + + // check for the management command and complete it right away + if (scb->sno >= MBOX_MAX_SCSI_CMDS) { + scb->state = SCB_FREE; + scb->status = status; + + // remove from local clist + list_del_init(&scb->list); + + megaraid_mbox_mm_done(adapter, scb); + + continue; + } + + // Was an abort issued for this command earlier + if (scb->state & SCB_ABORT) { + con_log(CL_ANN, (KERN_NOTICE + "megaraid: aborted cmd %lx[%x] completed\n", + scp->serial_number, scb->sno)); + } + + /* + * If the inquiry came of a disk drive which is not part of + * any RAID array, expose it to the kernel. For this to be + * enabled, user must set the "megaraid_expose_unconf_disks" + * flag to 1 by specifying it on module parameter list. + * This would enable data migration off drives from other + * configurations. 
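+		 * Editor's note: conversely, if the disk already belongs to
+		 * a configuration (online, failed, rebuilding or hot-spare),
+		 * or megaraid_expose_unconf_disks is left at 0, the status
+		 * is overwritten with 0xF0 below so the mid-layer never
+		 * sees the device.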
+ */ + islogical = MRAID_IS_LOGICAL(adapter, scp); + if (scp->cmnd[0] == INQUIRY && status == 0 && islogical == 0 + && IS_RAID_CH(raid_dev, scb->dev_channel)) { + + if (scp->use_sg) { + sgl = (struct scatterlist *) + scp->request_buffer; + + if (sgl->page) { + c = *(unsigned char *) + (page_address((&sgl[0])->page) + + (&sgl[0])->offset); + } + else { + con_log(CL_ANN, (KERN_WARNING + "megaraid mailbox: invalid sg:%d\n", + __LINE__)); + c = 0; + } + } + else { + c = *(uint8_t *)scp->request_buffer; + } + + if ((c & 0x1F ) == TYPE_DISK) { + pdev_index = (scb->dev_channel * 16) + + scb->dev_target; + pdev_state = + raid_dev->pdrv_state[pdev_index] & 0x0F; + + if (pdev_state == PDRV_ONLINE || + pdev_state == PDRV_FAILED || + pdev_state == PDRV_RBLD || + pdev_state == PDRV_HOTSPARE || + megaraid_expose_unconf_disks == 0) { + + status = 0xF0; + } + } + } + + // Convert MegaRAID status to Linux error code + switch (status) { + + case 0x00: + + scp->result = (DID_OK << 16); + break; + + case 0x02: + + /* set sense_buffer and result fields */ + if (mbox->cmd == MBOXCMD_PASSTHRU || + mbox->cmd == MBOXCMD_PASSTHRU64) { + + memcpy(scp->sense_buffer, pthru->reqsensearea, + 14); + + scp->result = DRIVER_SENSE << 24 | + DID_OK << 16 | CHECK_CONDITION << 1; + } + else { + if (mbox->cmd == MBOXCMD_EXTPTHRU) { + + memcpy(scp->sense_buffer, + epthru->reqsensearea, 14); + + scp->result = DRIVER_SENSE << 24 | + DID_OK << 16 | + CHECK_CONDITION << 1; + } else { + scp->sense_buffer[0] = 0x70; + scp->sense_buffer[2] = ABORTED_COMMAND; + scp->result = CHECK_CONDITION << 1; + } + } + break; + + case 0x08: + + scp->result = DID_BUS_BUSY << 16 | status; + break; + + default: + + /* + * If TEST_UNIT_READY fails, we know RESERVATION_STATUS + * failed + */ + if (scp->cmnd[0] == TEST_UNIT_READY) { + scp->result = DID_ERROR << 16 | + RESERVATION_CONFLICT << 1; + } + else + /* + * Error code returned is 1 if Reserve or Release + * failed or the input parameter is invalid + */ + if (status == 1 && (scp->cmnd[0] == RESERVE || + scp->cmnd[0] == RELEASE)) { + + scp->result = DID_ERROR << 16 | + RESERVATION_CONFLICT << 1; + } + else { + scp->result = DID_BAD_TARGET << 16 | status; + } + } + + // print a debug message for all failed commands + if (status) { + megaraid_mbox_display_scb(adapter, scb); + } + + // Free our internal resources and call the mid-layer callback + // routine + megaraid_mbox_sync_scb(adapter, scb); + + // remove from local clist + list_del_init(&scb->list); + + // put back in free list + megaraid_dealloc_scb(adapter, scb); + + // send the scsi packet back to kernel + spin_lock(adapter->host_lock); + scp->scsi_done(scp); + spin_unlock(adapter->host_lock); + } + + return; +} + + +/** + * megaraid_abort_handler - abort the scsi command + * @scp : command to be aborted + * + * Abort a previous SCSI request. Only commands on the pending list can be + * aborted. All the commands issued to the F/W must complete. 
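+ *
+ * Editor's note -- the handler searches for the command in three places,
+ * in order: the completed list (it raced with completion and is finished
+ * off here), the pending list (never issued, so it is aborted
+ * immediately), and finally the whole kscb_list (owned by the firmware,
+ * in which case FAILED is returned and recovery is left to the reset
+ * handler).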
+ **/ +static int +megaraid_abort_handler(struct scsi_cmnd *scp) +{ + adapter_t *adapter; + mraid_device_t *raid_dev; + scb_t *scb; + scb_t *tmp; + int found; + unsigned long flags; + int i; + + + adapter = SCP2ADAPTER(scp); + raid_dev = ADAP2RAIDDEV(adapter); + + assert_spin_locked(adapter->host_lock); + + con_log(CL_ANN, (KERN_WARNING + "megaraid: aborting-%ld cmd=%x <c=%d t=%d l=%d>\n", + scp->serial_number, scp->cmnd[0], SCP2CHANNEL(scp), + SCP2TARGET(scp), SCP2LUN(scp))); + + // If FW has stopped responding, simply return failure + if (raid_dev->hw_error) { + con_log(CL_ANN, (KERN_NOTICE + "megaraid: hw error, not aborting\n")); + return FAILED; + } + + // There might a race here, where the command was completed by the + // firmware and now it is on the completed list. Before we could + // complete the command to the kernel in dpc, the abort came. + // Find out if this is the case to avoid the race. + scb = NULL; + spin_lock_irqsave(COMPLETED_LIST_LOCK(adapter), flags); + list_for_each_entry_safe(scb, tmp, &adapter->completed_list, list) { + + if (scb->scp == scp) { // Found command + + list_del_init(&scb->list); // from completed list + + con_log(CL_ANN, (KERN_WARNING + "megaraid: %ld:%d[%d:%d], abort from completed list\n", + scp->serial_number, scb->sno, + scb->dev_channel, scb->dev_target)); + + scp->result = (DID_ABORT << 16); + scp->scsi_done(scp); + + megaraid_dealloc_scb(adapter, scb); + + spin_unlock_irqrestore(COMPLETED_LIST_LOCK(adapter), + flags); + + return SUCCESS; + } + } + spin_unlock_irqrestore(COMPLETED_LIST_LOCK(adapter), flags); + + + // Find out if this command is still on the pending list. If it is and + // was never issued, abort and return success. If the command is owned + // by the firmware, we must wait for it to complete by the FW. + spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags); + list_for_each_entry_safe(scb, tmp, &adapter->pend_list, list) { + + if (scb->scp == scp) { // Found command + + list_del_init(&scb->list); // from pending list + + ASSERT(!(scb->state & SCB_ISSUED)); + + con_log(CL_ANN, (KERN_WARNING + "megaraid abort: %ld[%d:%d], driver owner\n", + scp->serial_number, scb->dev_channel, + scb->dev_target)); + + scp->result = (DID_ABORT << 16); + scp->scsi_done(scp); + + megaraid_dealloc_scb(adapter, scb); + + spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), + flags); + + return SUCCESS; + } + } + spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags); + + + // Check do we even own this command, in which case this would be + // owned by the firmware. The only way to locate the FW scb is to + // traverse through the list of all SCB, since driver does not + // maintain these SCBs on any list + found = 0; + for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) { + scb = adapter->kscb_list + i; + + if (scb->scp == scp) { + + found = 1; + + if (!(scb->state & SCB_ISSUED)) { + con_log(CL_ANN, (KERN_WARNING + "megaraid abort: %ld%d[%d:%d], invalid state\n", + scp->serial_number, scb->sno, scb->dev_channel, + scb->dev_target)); + BUG(); + } + else { + con_log(CL_ANN, (KERN_WARNING + "megaraid abort: %ld:%d[%d:%d], fw owner\n", + scp->serial_number, scb->sno, scb->dev_channel, + scb->dev_target)); + } + } + } + + if (!found) { + con_log(CL_ANN, (KERN_WARNING + "megaraid abort: scsi cmd:%ld, do now own\n", + scp->serial_number)); + + // FIXME: Should there be a callback for this command? + return SUCCESS; + } + + // We cannot actually abort a command owned by firmware, return + // failure and wait for reset. 
In host reset handler, we will find out + // if the HBA is still live + return FAILED; +} + + +/** + * megaraid_reset_handler - device reset hadler for mailbox based driver + * @scp : reference command + * + * Reset handler for the mailbox based controller. First try to find out if + * the FW is still live, in which case the outstanding commands counter mut go + * down to 0. If that happens, also issue the reservation reset command to + * relinquish (possible) reservations on the logical drives connected to this + * host + **/ +static int +megaraid_reset_handler(struct scsi_cmnd *scp) +{ + adapter_t *adapter; + scb_t *scb; + scb_t *tmp; + mraid_device_t *raid_dev; + unsigned long flags; + uint8_t raw_mbox[sizeof(mbox_t)]; + int rval; + int recovery_window; + int recovering; + int i; + + adapter = SCP2ADAPTER(scp); + raid_dev = ADAP2RAIDDEV(adapter); + + assert_spin_locked(adapter->host_lock); + + con_log(CL_ANN, (KERN_WARNING "megaraid: reseting the host...\n")); + + // return failure if adapter is not responding + if (raid_dev->hw_error) { + con_log(CL_ANN, (KERN_NOTICE + "megaraid: hw error, cannot reset\n")); + return FAILED; + } + + + // Under exceptional conditions, FW can take up to 3 minutes to + // complete command processing. Wait for additional 2 minutes for the + // pending commands counter to go down to 0. If it doesn't, let the + // controller be marked offline + // Also, reset all the commands currently owned by the driver + spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags); + list_for_each_entry_safe(scb, tmp, &adapter->pend_list, list) { + + list_del_init(&scb->list); // from pending list + + con_log(CL_ANN, (KERN_WARNING + "megaraid: %ld:%d[%d:%d], reset from pending list\n", + scp->serial_number, scb->sno, + scb->dev_channel, scb->dev_target)); + + scp->result = (DID_RESET << 16); + scp->scsi_done(scp); + + megaraid_dealloc_scb(adapter, scb); + } + spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags); + + if (adapter->outstanding_cmds) { + con_log(CL_ANN, (KERN_NOTICE + "megaraid: %d outstanding commands. 
Max wait %d sec\n", + adapter->outstanding_cmds, MBOX_RESET_WAIT)); + } + + spin_unlock(adapter->host_lock); + + recovery_window = MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT; + + recovering = adapter->outstanding_cmds; + + for (i = 0; i < recovery_window && adapter->outstanding_cmds; i++) { + + megaraid_ack_sequence(adapter); + + // print a message once every 5 seconds only + if (!(i % 5)) { + con_log(CL_ANN, ( + "megaraid mbox: Wait for %d commands to complete:%d\n", + adapter->outstanding_cmds, + MBOX_RESET_WAIT - i)); + } + + // bailout if no recovery happended in reset time + if ((i == MBOX_RESET_WAIT) && + (recovering == adapter->outstanding_cmds)) { + break; + } + + msleep(1000); + } + + spin_lock(adapter->host_lock); + + // If still outstanding commands, bail out + if (adapter->outstanding_cmds) { + con_log(CL_ANN, (KERN_WARNING + "megaraid mbox: critical hardware error!\n")); + + raid_dev->hw_error = 1; + + return FAILED; + } + else { + con_log(CL_ANN, (KERN_NOTICE + "megaraid mbox: reset sequence completed sucessfully\n")); + } + + + // If the controller supports clustering, reset reservations + if (!adapter->ha) return SUCCESS; + + // clear reservations if any + raw_mbox[0] = CLUSTER_CMD; + raw_mbox[2] = RESET_RESERVATIONS; + + rval = SUCCESS; + if (mbox_post_sync_cmd_fast(adapter, raw_mbox) == 0) { + con_log(CL_ANN, + (KERN_INFO "megaraid: reservation reset\n")); + } + else { + rval = FAILED; + con_log(CL_ANN, (KERN_WARNING + "megaraid: reservation reset failed\n")); + } + + return rval; +} + + +/* + * START: internal commands library + * + * This section of the driver has the common routine used by the driver and + * also has all the FW routines + */ + +/** + * mbox_post_sync_cmd() - blocking command to the mailbox based controllers + * @adapter - controller's soft state + * @raw_mbox - the mailbox + * + * Issue a scb in synchronous and non-interrupt mode for mailbox based + * controllers + */ +static int +mbox_post_sync_cmd(adapter_t *adapter, uint8_t raw_mbox[]) +{ + mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); + mbox64_t *mbox64; + mbox_t *mbox; + uint8_t status; + int i; + + + mbox64 = raid_dev->mbox64; + mbox = raid_dev->mbox; + + /* + * Wait until mailbox is free + */ + if (megaraid_busywait_mbox(raid_dev) != 0) + goto blocked_mailbox; + + /* + * Copy mailbox data into host structure + */ + memcpy((caddr_t)mbox, (caddr_t)raw_mbox, 16); + mbox->cmdid = 0xFE; + mbox->busy = 1; + mbox->poll = 0; + mbox->ack = 0; + mbox->numstatus = 0xFF; + mbox->status = 0xFF; + + wmb(); + WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x1); + + // wait for maximum 1 second for status to post. 
If the status is not + // available within 1 second, assume FW is initializing and wait + // for an extended amount of time + if (mbox->numstatus == 0xFF) { // status not yet available + udelay(25);; + + for (i = 0; mbox->numstatus == 0xFF && i < 1000; i++) { + rmb(); + msleep(1); + } + + + if (i == 1000) { + con_log(CL_ANN, (KERN_NOTICE + "megaraid mailbox: wait for FW to boot ")); + + for (i = 0; (mbox->numstatus == 0xFF) && + (i < MBOX_RESET_WAIT); i++) { + rmb(); + con_log(CL_ANN, ("\b\b\b\b\b[%03d]", + MBOX_RESET_WAIT - i)); + msleep(1000); + } + + if (i == MBOX_RESET_WAIT) { + + con_log(CL_ANN, ( + "\nmegaraid mailbox: status not available\n")); + + return -1; + } + con_log(CL_ANN, ("\b\b\b\b\b[ok] \n")); + } + } + + // wait for maximum 1 second for poll semaphore + if (mbox->poll != 0x77) { + udelay(25); + + for (i = 0; (mbox->poll != 0x77) && (i < 1000); i++) { + rmb(); + msleep(1); + } + + if (i == 1000) { + con_log(CL_ANN, (KERN_WARNING + "megaraid mailbox: could not get poll semaphore\n")); + return -1; + } + } + + WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x2); + wmb(); + + // wait for maximum 1 second for acknowledgement + if (RDINDOOR(raid_dev) & 0x2) { + udelay(25); + + for (i = 0; (RDINDOOR(raid_dev) & 0x2) && (i < 1000); i++) { + rmb(); + msleep(1); + } + + if (i == 1000) { + con_log(CL_ANN, (KERN_WARNING + "megaraid mailbox: could not acknowledge\n")); + return -1; + } + } + mbox->poll = 0; + mbox->ack = 0x77; + + status = mbox->status; + + // invalidate the completed command id array. After command + // completion, firmware would write the valid id. + mbox->numstatus = 0xFF; + mbox->status = 0xFF; + for (i = 0; i < MBOX_MAX_FIRMWARE_STATUS; i++) { + mbox->completed[i] = 0xFF; + } + + return status; + +blocked_mailbox: + + con_log(CL_ANN, (KERN_WARNING "megaraid: blocked mailbox\n") ); + return -1; +} + + +/** + * mbox_post_sync_cmd_fast - blocking command to the mailbox based controllers + * @adapter - controller's soft state + * @raw_mbox - the mailbox + * + * Issue a scb in synchronous and non-interrupt mode for mailbox based + * controllers. This is a faster version of the synchronous command and + * therefore can be called in interrupt-context as well + */ +static int +mbox_post_sync_cmd_fast(adapter_t *adapter, uint8_t raw_mbox[]) +{ + mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); + mbox_t *mbox; + long i; + + + mbox = raid_dev->mbox; + + // return immediately if the mailbox is busy + if (mbox->busy) return -1; + + // Copy mailbox data into host structure + memcpy((caddr_t)mbox, (caddr_t)raw_mbox, 14); + mbox->cmdid = 0xFE; + mbox->busy = 1; + mbox->poll = 0; + mbox->ack = 0; + mbox->numstatus = 0xFF; + mbox->status = 0xFF; + + wmb(); + WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x1); + + for (i = 0; i < 0xFFFFF; i++) { + if (mbox->numstatus != 0xFF) break; + } + + if (i == 0xFFFFF) { + // We may need to re-calibrate the counter + con_log(CL_ANN, (KERN_CRIT + "megaraid: fast sync command timed out\n")); + } + + WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x2); + wmb(); + + return mbox->status; +} + + +/** + * megaraid_busywait_mbox() - Wait until the controller's mailbox is available + * @raid_dev - RAID device (HBA) soft state + * + * wait until the controller's mailbox is available to accept more commands. 
+ * wait for at most 1 second + */ +static int +megaraid_busywait_mbox(mraid_device_t *raid_dev) +{ + mbox_t *mbox = raid_dev->mbox; + int i = 0; + + if (mbox->busy) { + udelay(25); + for (i = 0; mbox->busy && i < 1000; i++) + msleep(1); + } + + if (i < 1000) return 0; + else return -1; +} + + +/** + * megaraid_mbox_product_info - some static information about the controller + * @adapter - our soft state + * + * issue commands to the controller to grab some parameters required by our + * caller. + */ +static int +megaraid_mbox_product_info(adapter_t *adapter) +{ + mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); + mbox_t *mbox; + uint8_t raw_mbox[sizeof(mbox_t)]; + mraid_pinfo_t *pinfo; + dma_addr_t pinfo_dma_h; + mraid_inquiry3_t *mraid_inq3; + int i; + + + memset((caddr_t)raw_mbox, 0, sizeof(raw_mbox)); + mbox = (mbox_t *)raw_mbox; + + /* + * Issue an ENQUIRY3 command to find out certain adapter parameters, + * e.g., max channels, max commands etc. + */ + pinfo = pci_alloc_consistent(adapter->pdev, sizeof(mraid_pinfo_t), + &pinfo_dma_h); + + if (pinfo == NULL) { + con_log(CL_ANN, (KERN_WARNING + "megaraid: out of memory, %s %d\n", __FUNCTION__, + __LINE__)); + + return -1; + } + memset(pinfo, 0, sizeof(mraid_pinfo_t)); + + mbox->xferaddr = (uint32_t)adapter->ibuf_dma_h; + memset((void *)adapter->ibuf, 0, MBOX_IBUF_SIZE); + + raw_mbox[0] = FC_NEW_CONFIG; + raw_mbox[2] = NC_SUBOP_ENQUIRY3; + raw_mbox[3] = ENQ3_GET_SOLICITED_FULL; + + // Issue the command + if (mbox_post_sync_cmd(adapter, raw_mbox) != 0) { + + con_log(CL_ANN, (KERN_WARNING "megaraid: Inquiry3 failed\n")); + + pci_free_consistent(adapter->pdev, sizeof(mraid_pinfo_t), + pinfo, pinfo_dma_h); + + return -1; + } + + /* + * Collect information about state of each physical drive + * attached to the controller. We will expose all the disks + * which are not part of RAID + */ + mraid_inq3 = (mraid_inquiry3_t *)adapter->ibuf; + for (i = 0; i < MBOX_MAX_PHYSICAL_DRIVES; i++) { + raid_dev->pdrv_state[i] = mraid_inq3->pdrv_state[i]; + } + + /* + * Get product info for information like number of channels, + * maximum commands supported. + */ + memset((caddr_t)raw_mbox, 0, sizeof(raw_mbox)); + mbox->xferaddr = (uint32_t)pinfo_dma_h; + + raw_mbox[0] = FC_NEW_CONFIG; + raw_mbox[2] = NC_SUBOP_PRODUCT_INFO; + + if (mbox_post_sync_cmd(adapter, raw_mbox) != 0) { + + con_log(CL_ANN, (KERN_WARNING + "megaraid: product info failed\n")); + + pci_free_consistent(adapter->pdev, sizeof(mraid_pinfo_t), + pinfo, pinfo_dma_h); + + return -1; + } + + /* + * Setup some parameters for host, as required by our caller + */ + adapter->max_channel = pinfo->nchannels; + + /* + * we will export all the logical drives on a single channel. 
+ * Add 1 since inquiries do not come for the initiator ID
+ */
+ adapter->max_target = MAX_LOGICAL_DRIVES_40LD + 1;
+ adapter->max_lun = 8; // up to 8 LUNs for non-disk devices
+
+ /*
+ * These are the maximum outstanding commands for the scsi-layer
+ */
+ adapter->max_cmds = MBOX_MAX_SCSI_CMDS;
+
+ memset(adapter->fw_version, 0, VERSION_SIZE);
+ memset(adapter->bios_version, 0, VERSION_SIZE);
+
+ memcpy(adapter->fw_version, pinfo->fw_version, 4);
+ adapter->fw_version[4] = 0;
+
+ memcpy(adapter->bios_version, pinfo->bios_version, 4);
+ adapter->bios_version[4] = 0;
+
+ con_log(CL_ANN, (KERN_NOTICE
+ "megaraid: fw version:[%s] bios version:[%s]\n",
+ adapter->fw_version, adapter->bios_version));
+
+ pci_free_consistent(adapter->pdev, sizeof(mraid_pinfo_t), pinfo,
+ pinfo_dma_h);
+
+ return 0;
+}
+
+
+
+/**
+ * megaraid_mbox_extended_cdb - check for support for extended CDBs
+ * @adapter - soft state for the controller
+ *
+ * This routine checks whether the controller in question supports extended
+ * ( > 10 bytes ) CDBs
+ */
+static int
+megaraid_mbox_extended_cdb(adapter_t *adapter)
+{
+ mbox_t *mbox;
+ uint8_t raw_mbox[sizeof(mbox_t)];
+ int rval;
+
+ mbox = (mbox_t *)raw_mbox;
+
+ memset((caddr_t)raw_mbox, 0, sizeof(raw_mbox));
+ mbox->xferaddr = (uint32_t)adapter->ibuf_dma_h;
+
+ memset((void *)adapter->ibuf, 0, MBOX_IBUF_SIZE);
+
+ raw_mbox[0] = MAIN_MISC_OPCODE;
+ raw_mbox[2] = SUPPORT_EXT_CDB;
+
+ /*
+ * Issue the command
+ */
+ rval = 0;
+ if (mbox_post_sync_cmd(adapter, raw_mbox) != 0) {
+ rval = -1;
+ }
+
+ return rval;
+}
+
+
+/**
+ * megaraid_mbox_support_ha - Do we support clustering
+ * @adapter - soft state for the controller
+ * @init_id - ID of the initiator
+ *
+ * Determine if the firmware supports clustering and the ID of the initiator. 
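+ * If the firmware call fails, *init_id is left at the default initiator ID of 7 and -1 is returned.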
+ */ +static int +megaraid_mbox_support_ha(adapter_t *adapter, uint16_t *init_id) +{ + mbox_t *mbox; + uint8_t raw_mbox[sizeof(mbox_t)]; + int rval; + + + mbox = (mbox_t *)raw_mbox; + + memset((caddr_t)raw_mbox, 0, sizeof(raw_mbox)); + + mbox->xferaddr = (uint32_t)adapter->ibuf_dma_h; + + memset((void *)adapter->ibuf, 0, MBOX_IBUF_SIZE); + + raw_mbox[0] = GET_TARGET_ID; + + // Issue the command + *init_id = 7; + rval = -1; + if (mbox_post_sync_cmd(adapter, raw_mbox) == 0) { + + *init_id = *(uint8_t *)adapter->ibuf; + + con_log(CL_ANN, (KERN_INFO + "megaraid: cluster firmware, initiator ID: %d\n", + *init_id)); + + rval = 0; + } + + return rval; +} + + +/** + * megaraid_mbox_support_random_del - Do we support random deletion + * @adapter - soft state for the controller + * + * Determine if the firmware supports random deletion + * Return: 1 is operation supported, 0 otherwise + */ +static int +megaraid_mbox_support_random_del(adapter_t *adapter) +{ + mbox_t *mbox; + uint8_t raw_mbox[sizeof(mbox_t)]; + int rval; + + + mbox = (mbox_t *)raw_mbox; + + memset((caddr_t)raw_mbox, 0, sizeof(mbox_t)); + + raw_mbox[0] = FC_DEL_LOGDRV; + raw_mbox[2] = OP_SUP_DEL_LOGDRV; + + // Issue the command + rval = 0; + if (mbox_post_sync_cmd(adapter, raw_mbox) == 0) { + + con_log(CL_DLEVEL1, ("megaraid: supports random deletion\n")); + + rval = 1; + } + + return rval; +} + + +/** + * megaraid_mbox_get_max_sg - maximum sg elements supported by the firmware + * @adapter - soft state for the controller + * + * Find out the maximum number of scatter-gather elements supported by the + * firmware + */ +static int +megaraid_mbox_get_max_sg(adapter_t *adapter) +{ + mbox_t *mbox; + uint8_t raw_mbox[sizeof(mbox_t)]; + int nsg; + + + mbox = (mbox_t *)raw_mbox; + + memset((caddr_t)raw_mbox, 0, sizeof(mbox_t)); + + mbox->xferaddr = (uint32_t)adapter->ibuf_dma_h; + + memset((void *)adapter->ibuf, 0, MBOX_IBUF_SIZE); + + raw_mbox[0] = MAIN_MISC_OPCODE; + raw_mbox[2] = GET_MAX_SG_SUPPORT; + + // Issue the command + if (mbox_post_sync_cmd(adapter, raw_mbox) == 0) { + nsg = *(uint8_t *)adapter->ibuf; + } + else { + nsg = MBOX_DEFAULT_SG_SIZE; + } + + if (nsg > MBOX_MAX_SG_SIZE) nsg = MBOX_MAX_SG_SIZE; + + return nsg; +} + + +/** + * megaraid_mbox_enum_raid_scsi - enumerate the RAID and SCSI channels + * @adapter - soft state for the controller + * + * Enumerate the RAID and SCSI channels for ROMB platoforms so that channels + * can be exported as regular SCSI channels + */ +static void +megaraid_mbox_enum_raid_scsi(adapter_t *adapter) +{ + mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); + mbox_t *mbox; + uint8_t raw_mbox[sizeof(mbox_t)]; + + + mbox = (mbox_t *)raw_mbox; + + memset((caddr_t)raw_mbox, 0, sizeof(mbox_t)); + + mbox->xferaddr = (uint32_t)adapter->ibuf_dma_h; + + memset((void *)adapter->ibuf, 0, MBOX_IBUF_SIZE); + + raw_mbox[0] = CHNL_CLASS; + raw_mbox[2] = GET_CHNL_CLASS; + + // Issue the command. 
If the command fails, all channels are RAID + // channels + raid_dev->channel_class = 0xFF; + if (mbox_post_sync_cmd(adapter, raw_mbox) == 0) { + raid_dev->channel_class = *(uint8_t *)adapter->ibuf; + } + + return; +} + + +/** + * megaraid_mbox_flush_cache - flush adapter and disks cache + * @param adapter : soft state for the controller + * + * Flush adapter cache followed by disks cache + */ +static void +megaraid_mbox_flush_cache(adapter_t *adapter) +{ + mbox_t *mbox; + uint8_t raw_mbox[sizeof(mbox_t)]; + + + mbox = (mbox_t *)raw_mbox; + + memset((caddr_t)raw_mbox, 0, sizeof(mbox_t)); + + raw_mbox[0] = FLUSH_ADAPTER; + + if (mbox_post_sync_cmd(adapter, raw_mbox) != 0) { + con_log(CL_ANN, ("megaraid: flush adapter failed\n")); + } + + raw_mbox[0] = FLUSH_SYSTEM; + + if (mbox_post_sync_cmd(adapter, raw_mbox) != 0) { + con_log(CL_ANN, ("megaraid: flush disks cache failed\n")); + } + + return; +} + + +/** + * megaraid_mbox_display_scb - display SCB information, mostly debug purposes + * @param adapter : controllers' soft state + * @param scb : SCB to be displayed + * @param level : debug level for console print + * + * Diplay information about the given SCB iff the current debug level is + * verbose + */ +static void +megaraid_mbox_display_scb(adapter_t *adapter, scb_t *scb) +{ + mbox_ccb_t *ccb; + struct scsi_cmnd *scp; + mbox_t *mbox; + int level; + int i; + + + ccb = (mbox_ccb_t *)scb->ccb; + scp = scb->scp; + mbox = ccb->mbox; + + level = CL_DLEVEL3; + + con_log(level, (KERN_NOTICE + "megaraid mailbox: status:%#x cmd:%#x id:%#x ", scb->status, + mbox->cmd, scb->sno)); + + con_log(level, ("sec:%#x lba:%#x addr:%#x ld:%d sg:%d\n", + mbox->numsectors, mbox->lba, mbox->xferaddr, mbox->logdrv, + mbox->numsge)); + + if (!scp) return; + + con_log(level, (KERN_NOTICE "scsi cmnd: ")); + + for (i = 0; i < scp->cmd_len; i++) { + con_log(level, ("%#2.02x ", scp->cmnd[i])); + } + + con_log(level, ("\n")); + + return; +} + + +/** + * megaraid_mbox_setup_device_map - manage device ids + * @adapter : Driver's soft state + * + * Manange the device ids to have an appropraite mapping between the kernel + * scsi addresses and megaraid scsi and logical drive addresses. We export + * scsi devices on their actual addresses, whereas the logical drives are + * exported on a virtual scsi channel. + **/ +static void +megaraid_mbox_setup_device_map(adapter_t *adapter) +{ + uint8_t c; + uint8_t t; + + /* + * First fill the values on the logical drive channel + */ + for (t = 0; t < LSI_MAX_LOGICAL_DRIVES_64LD; t++) + adapter->device_ids[adapter->max_channel][t] = + (t < adapter->init_id) ? t : t - 1; + + adapter->device_ids[adapter->max_channel][adapter->init_id] = 0xFF; + + /* + * Fill the values on the physical devices channels + */ + for (c = 0; c < adapter->max_channel; c++) + for (t = 0; t < LSI_MAX_LOGICAL_DRIVES_64LD; t++) + adapter->device_ids[c][t] = (c << 8) | t; +} + + +/* + * END: internal commands library + */ + +/* + * START: Interface for the common management module + * + * This is the module, which interfaces with the common mangement module to + * provide support for ioctl and sysfs + */ + +/** + * megaraid_cmm_register - register with the mangement module + * @param adapter : HBA soft state + * + * Register with the management module, which allows applications to issue + * ioctl calls to the drivers. This interface is used by the management module + * to setup sysfs support as well. 
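+ * Returns 0 on success, -1 if the user SCB pool cannot be allocated, and the CMM registration status otherwise.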
+ */ +static int +megaraid_cmm_register(adapter_t *adapter) +{ + mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); + mraid_mmadp_t adp; + scb_t *scb; + mbox_ccb_t *ccb; + int rval; + int i; + + // Allocate memory for the base list of scb for management module. + adapter->uscb_list = kmalloc(sizeof(scb_t) * MBOX_MAX_USER_CMDS, + GFP_KERNEL); + + if (adapter->uscb_list == NULL) { + con_log(CL_ANN, (KERN_WARNING + "megaraid: out of memory, %s %d\n", __FUNCTION__, + __LINE__)); + return -1; + } + memset(adapter->uscb_list, 0, sizeof(scb_t) * MBOX_MAX_USER_CMDS); + + + // Initialize the synchronization parameters for resources for + // commands for management module + INIT_LIST_HEAD(&adapter->uscb_pool); + + spin_lock_init(USER_FREE_LIST_LOCK(adapter)); + + + + // link all the packets. Note, CCB for commands, coming from the + // commom management module, mailbox physical address are already + // setup by it. We just need placeholder for that in our local command + // control blocks + for (i = 0; i < MBOX_MAX_USER_CMDS; i++) { + + scb = adapter->uscb_list + i; + ccb = raid_dev->uccb_list + i; + + scb->ccb = (caddr_t)ccb; + ccb->mbox64 = raid_dev->umbox64 + i; + ccb->mbox = &ccb->mbox64->mbox32; + ccb->raw_mbox = (uint8_t *)ccb->mbox; + + scb->gp = 0; + + // COMMAND ID 0 - (MBOX_MAX_SCSI_CMDS-1) ARE RESERVED FOR + // COMMANDS COMING FROM IO SUBSYSTEM (MID-LAYER) + scb->sno = i + MBOX_MAX_SCSI_CMDS; + + scb->scp = NULL; + scb->state = SCB_FREE; + scb->dma_direction = PCI_DMA_NONE; + scb->dma_type = MRAID_DMA_NONE; + scb->dev_channel = -1; + scb->dev_target = -1; + + // put scb in the free pool + list_add_tail(&scb->list, &adapter->uscb_pool); + } + + adp.unique_id = adapter->unique_id; + adp.drvr_type = DRVRTYPE_MBOX; + adp.drvr_data = (unsigned long)adapter; + adp.pdev = adapter->pdev; + adp.issue_uioc = megaraid_mbox_mm_handler; + adp.timeout = 300; + adp.max_kioc = MBOX_MAX_USER_CMDS; + + if ((rval = mraid_mm_register_adp(&adp)) != 0) { + + con_log(CL_ANN, (KERN_WARNING + "megaraid mbox: did not register with CMM\n")); + + kfree(adapter->uscb_list); + } + + return rval; +} + + +/** + * megaraid_cmm_unregister - un-register with the mangement module + * @param adapter : HBA soft state + * + * Un-register with the management module. + * FIXME: mgmt module must return failure for unregister if it has pending + * commands in LLD + */ +static int +megaraid_cmm_unregister(adapter_t *adapter) +{ + kfree(adapter->uscb_list); + mraid_mm_unregister_adp(adapter->unique_id); + return 0; +} + + +/** + * megaraid_mbox_mm_handler - interface for CMM to issue commands to LLD + * @param drvr_data : LLD specific data + * @param kioc : CMM interface packet + * @param action : command action + * + * This routine is invoked whenever the Common Mangement Module (CMM) has a + * command for us. The 'action' parameter specifies if this is a new command + * or otherwise. + */ +static int +megaraid_mbox_mm_handler(unsigned long drvr_data, uioc_t *kioc, uint32_t action) +{ + adapter_t *adapter; + + if (action != IOCTL_ISSUE) { + con_log(CL_ANN, (KERN_WARNING + "megaraid: unsupported management action:%#2x\n", + action)); + return (-ENOTSUPP); + } + + adapter = (adapter_t *)drvr_data; + + // make sure this adapter is not being detached right now. 
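+ // (management requests are rejected with -ENODEV while a detach is in progress)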
+ if (atomic_read(&adapter->being_detached)) { + con_log(CL_ANN, (KERN_WARNING + "megaraid: reject management request, detaching\n")); + return (-ENODEV); + } + + switch (kioc->opcode) { + + case GET_ADAP_INFO: + + kioc->status = gather_hbainfo(adapter, (mraid_hba_info_t *) + (unsigned long)kioc->buf_vaddr); + + kioc->done(kioc); + + return kioc->status; + + case MBOX_CMD: + + return megaraid_mbox_mm_command(adapter, kioc); + + default: + kioc->status = (-EINVAL); + kioc->done(kioc); + return (-EINVAL); + } + + return 0; // not reached +} + +/** + * megaraid_mbox_mm_command - issues commands routed through CMM + * @param adapter : HBA soft state + * @param kioc : management command packet + * + * Issues commands, which are routed through the management module. + */ +static int +megaraid_mbox_mm_command(adapter_t *adapter, uioc_t *kioc) +{ + struct list_head *head = &adapter->uscb_pool; + mbox64_t *mbox64; + uint8_t *raw_mbox; + scb_t *scb; + mbox_ccb_t *ccb; + unsigned long flags; + + // detach one scb from free pool + spin_lock_irqsave(USER_FREE_LIST_LOCK(adapter), flags); + + if (list_empty(head)) { // should never happen because of CMM + + con_log(CL_ANN, (KERN_WARNING + "megaraid mbox: bug in cmm handler, lost resources\n")); + + spin_unlock_irqrestore(USER_FREE_LIST_LOCK(adapter), flags); + + return (-EINVAL); + } + + scb = list_entry(head->next, scb_t, list); + list_del_init(&scb->list); + + spin_unlock_irqrestore(USER_FREE_LIST_LOCK(adapter), flags); + + scb->state = SCB_ACTIVE; + scb->dma_type = MRAID_DMA_NONE; + scb->dma_direction = PCI_DMA_NONE; + + ccb = (mbox_ccb_t *)scb->ccb; + mbox64 = (mbox64_t *)(unsigned long)kioc->cmdbuf; + raw_mbox = (uint8_t *)&mbox64->mbox32; + + memcpy(ccb->mbox64, mbox64, sizeof(mbox64_t)); + + scb->gp = (unsigned long)kioc; + + /* + * If it is a logdrv random delete operation, we have to wait till + * there are no outstanding cmds at the fw and then issue it directly + */ + if (raw_mbox[0] == FC_DEL_LOGDRV && raw_mbox[2] == OP_DEL_LOGDRV) { + + if (wait_till_fw_empty(adapter)) { + con_log(CL_ANN, (KERN_NOTICE + "megaraid mbox: LD delete, timed out\n")); + + kioc->status = -ETIME; + + scb->status = -1; + + megaraid_mbox_mm_done(adapter, scb); + + return (-ETIME); + } + + INIT_LIST_HEAD(&scb->list); + + scb->state = SCB_ISSUED; + if (mbox_post_cmd(adapter, scb) != 0) { + + con_log(CL_ANN, (KERN_NOTICE + "megaraid mbox: LD delete, mailbox busy\n")); + + kioc->status = -EBUSY; + + scb->status = -1; + + megaraid_mbox_mm_done(adapter, scb); + + return (-EBUSY); + } + + return 0; + } + + // put the command on the pending list and execute + megaraid_mbox_runpendq(adapter, scb); + + return 0; +} + + +static int +wait_till_fw_empty(adapter_t *adapter) +{ + unsigned long flags = 0; + int i; + + + /* + * Set the quiescent flag to stop issuing cmds to FW. + */ + spin_lock_irqsave(adapter->host_lock, flags); + adapter->quiescent++; + spin_unlock_irqrestore(adapter->host_lock, flags); + + /* + * Wait till there are no more cmds outstanding at FW. Try for at most + * 60 seconds + */ + for (i = 0; i < 60 && adapter->outstanding_cmds; i++) { + con_log(CL_DLEVEL1, (KERN_INFO + "megaraid: FW has %d pending commands\n", + adapter->outstanding_cmds)); + + msleep(1000); + } + + return adapter->outstanding_cmds; +} + + +/** + * megaraid_mbox_mm_done - callback for CMM commands + * @adapter : HBA soft state + * @scb : completed command + * + * Callback routine for internal commands originated from the management + * module. 
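+ * The SCB is returned to the user free pool; if the completed command was a logical drive delete, the quiescent count is dropped and the pending queue is restarted.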
+ */ +static void +megaraid_mbox_mm_done(adapter_t *adapter, scb_t *scb) +{ + uioc_t *kioc; + mbox64_t *mbox64; + uint8_t *raw_mbox; + unsigned long flags; + + kioc = (uioc_t *)scb->gp; + kioc->status = 0; + mbox64 = (mbox64_t *)(unsigned long)kioc->cmdbuf; + mbox64->mbox32.status = scb->status; + raw_mbox = (uint8_t *)&mbox64->mbox32; + + + // put scb in the free pool + scb->state = SCB_FREE; + scb->scp = NULL; + + spin_lock_irqsave(USER_FREE_LIST_LOCK(adapter), flags); + + list_add(&scb->list, &adapter->uscb_pool); + + spin_unlock_irqrestore(USER_FREE_LIST_LOCK(adapter), flags); + + // if a delete logical drive operation succeeded, restart the + // controller + if (raw_mbox[0] == FC_DEL_LOGDRV && raw_mbox[2] == OP_DEL_LOGDRV) { + + adapter->quiescent--; + + megaraid_mbox_runpendq(adapter, NULL); + } + + kioc->done(kioc); + + return; +} + + +/** + * gather_hbainfo - HBA characteristics for the applications + * @param adapter : HBA soft state + * @param hinfo : pointer to the caller's host info strucuture + */ +static int +gather_hbainfo(adapter_t *adapter, mraid_hba_info_t *hinfo) +{ + uint8_t dmajor; + + dmajor = megaraid_mbox_version[0]; + + hinfo->pci_vendor_id = adapter->pdev->vendor; + hinfo->pci_device_id = adapter->pdev->device; + hinfo->subsys_vendor_id = adapter->pdev->subsystem_vendor; + hinfo->subsys_device_id = adapter->pdev->subsystem_device; + + hinfo->pci_bus = adapter->pdev->bus->number; + hinfo->pci_dev_fn = adapter->pdev->devfn; + hinfo->pci_slot = PCI_SLOT(adapter->pdev->devfn); + hinfo->irq = adapter->host->irq; + hinfo->baseport = ADAP2RAIDDEV(adapter)->baseport; + + hinfo->unique_id = (hinfo->pci_bus << 8) | adapter->pdev->devfn; + hinfo->host_no = adapter->host->host_no; + + return 0; +} + +/* + * END: Interface for the common management module + */ + + + +/** + * megaraid_sysfs_alloc_resources - allocate sysfs related resources + * + * Allocate packets required to issue FW calls whenever the sysfs attributes + * are read. These attributes would require up-to-date information from the + * FW. Also set up resources for mutual exclusion to share these resources and + * the wait queue. 
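+ * If any of the allocations fail, whatever was allocated is freed again and -ENOMEM is returned.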
+ * + * @param adapter : controller's soft state + * + * @return 0 on success + * @return -ERROR_CODE on failure + */ +static int +megaraid_sysfs_alloc_resources(adapter_t *adapter) +{ + mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); + int rval = 0; + + raid_dev->sysfs_uioc = kmalloc(sizeof(uioc_t), GFP_KERNEL); + + raid_dev->sysfs_mbox64 = kmalloc(sizeof(mbox64_t), GFP_KERNEL); + + raid_dev->sysfs_buffer = pci_alloc_consistent(adapter->pdev, + PAGE_SIZE, &raid_dev->sysfs_buffer_dma); + + if (!raid_dev->sysfs_uioc || !raid_dev->sysfs_mbox64 || + !raid_dev->sysfs_buffer) { + + con_log(CL_ANN, (KERN_WARNING + "megaraid: out of memory, %s %d\n", __FUNCTION__, + __LINE__)); + + rval = -ENOMEM; + + megaraid_sysfs_free_resources(adapter); + } + + sema_init(&raid_dev->sysfs_sem, 1); + + init_waitqueue_head(&raid_dev->sysfs_wait_q); + + return rval; +} + + +/** + * megaraid_sysfs_free_resources - free sysfs related resources + * + * Free packets allocated for sysfs FW commands + * + * @param adapter : controller's soft state + */ +static void +megaraid_sysfs_free_resources(adapter_t *adapter) +{ + mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); + + if (raid_dev->sysfs_uioc) kfree(raid_dev->sysfs_uioc); + + if (raid_dev->sysfs_mbox64) kfree(raid_dev->sysfs_mbox64); + + if (raid_dev->sysfs_buffer) { + pci_free_consistent(adapter->pdev, PAGE_SIZE, + raid_dev->sysfs_buffer, raid_dev->sysfs_buffer_dma); + } +} + + +/** + * megaraid_sysfs_get_ldmap_done - callback for get ldmap + * + * Callback routine called in the ISR/tasklet context for get ldmap call + * + * @param uioc : completed packet + */ +static void +megaraid_sysfs_get_ldmap_done(uioc_t *uioc) +{ + adapter_t *adapter = (adapter_t *)uioc->buf_vaddr; + mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); + + uioc->status = 0; + + wake_up(&raid_dev->sysfs_wait_q); +} + + +/** + * megaraid_sysfs_get_ldmap_timeout - timeout handling for get ldmap + * + * Timeout routine to recover and return to application, in case the adapter + * has stopped responding. A timeout of 60 seconds for this command seem like + * a good value + * + * @param uioc : timed out packet + */ +static void +megaraid_sysfs_get_ldmap_timeout(unsigned long data) +{ + uioc_t *uioc = (uioc_t *)data; + adapter_t *adapter = (adapter_t *)uioc->buf_vaddr; + mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); + + uioc->status = -ETIME; + + wake_up(&raid_dev->sysfs_wait_q); +} + + +/** + * megaraid_sysfs_get_ldmap - get update logical drive map + * + * This routine will be called whenever user reads the logical drive + * attributes, go get the current logical drive mapping table from the + * firmware. We use the managment API's to issue commands to the controller. + * + * NOTE: The commands issuance functionality is not generalized and + * implemented in context of "get ld map" command only. If required, the + * command issuance logical can be trivially pulled out and implemented as a + * standalone libary. For now, this should suffice since there is no other + * user of this interface. 
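+ * Access is serialized on sysfs_sem, and the caller sleeps on sysfs_wait_q until the FW completes the command or the 60 second timer fires.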
+ * + * @param adapter : controller's soft state + * + * @return 0 on success + * @return -1 on failure + */ +static int +megaraid_sysfs_get_ldmap(adapter_t *adapter) +{ + mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); + uioc_t *uioc; + mbox64_t *mbox64; + mbox_t *mbox; + char *raw_mbox; + struct timer_list sysfs_timer; + struct timer_list *timerp; + caddr_t ldmap; + int rval = 0; + + /* + * Allow only one read at a time to go through the sysfs attributes + */ + down(&raid_dev->sysfs_sem); + + uioc = raid_dev->sysfs_uioc; + mbox64 = raid_dev->sysfs_mbox64; + ldmap = raid_dev->sysfs_buffer; + + memset(uioc, 0, sizeof(uioc_t)); + memset(mbox64, 0, sizeof(mbox64_t)); + memset(ldmap, 0, sizeof(raid_dev->curr_ldmap)); + + mbox = &mbox64->mbox32; + raw_mbox = (char *)mbox; + uioc->cmdbuf = (uint64_t)(unsigned long)mbox64; + uioc->buf_vaddr = (caddr_t)adapter; + uioc->status = -ENODATA; + uioc->done = megaraid_sysfs_get_ldmap_done; + + /* + * Prepare the mailbox packet to get the current logical drive mapping + * table + */ + mbox->xferaddr = (uint32_t)raid_dev->sysfs_buffer_dma; + + raw_mbox[0] = FC_DEL_LOGDRV; + raw_mbox[2] = OP_GET_LDID_MAP; + + /* + * Setup a timer to recover from a non-responding controller + */ + timerp = &sysfs_timer; + init_timer(timerp); + + timerp->function = megaraid_sysfs_get_ldmap_timeout; + timerp->data = (unsigned long)uioc; + timerp->expires = jiffies + 60 * HZ; + + add_timer(timerp); + + /* + * Send the command to the firmware + */ + rval = megaraid_mbox_mm_command(adapter, uioc); + + if (rval == 0) { // command successfully issued + wait_event(raid_dev->sysfs_wait_q, (uioc->status != -ENODATA)); + + /* + * Check if the command timed out + */ + if (uioc->status == -ETIME) { + con_log(CL_ANN, (KERN_NOTICE + "megaraid: sysfs get ld map timed out\n")); + + rval = -ETIME; + } + else { + rval = mbox->status; + } + + if (rval == 0) { + memcpy(raid_dev->curr_ldmap, ldmap, + sizeof(raid_dev->curr_ldmap)); + } + else { + con_log(CL_ANN, (KERN_NOTICE + "megaraid: get ld map failed with %x\n", rval)); + } + } + else { + con_log(CL_ANN, (KERN_NOTICE + "megaraid: could not issue ldmap command:%x\n", rval)); + } + + + del_timer_sync(timerp); + + up(&raid_dev->sysfs_sem); + + return rval; +} + + +/** + * megaraid_sysfs_show_app_hndl - display application handle for this adapter + * + * Display the handle used by the applications while executing management + * tasks on the adapter. We invoke a management module API to get the adapter + * handle, since we do not interface with applications directly. + * + * @param cdev : class device object representation for the host + * @param buf : buffer to send data to + */ +static ssize_t +megaraid_sysfs_show_app_hndl(struct class_device *cdev, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + adapter_t *adapter = (adapter_t *)SCSIHOST2ADAP(shost); + uint32_t app_hndl; + + app_hndl = mraid_mm_adapter_app_handle(adapter->unique_id); + + return snprintf(buf, 8, "%u\n", app_hndl); +} + + +/** + * megaraid_sysfs_show_ldnum - display the logical drive number for this device + * + * Display the logical drive number for the device in question, if it a valid + * logical drive. 
For physical devices, "-1" is returned + * The logical drive number is displayed in following format + * + * <SCSI ID> <LD NUM> <LD STICKY ID> <APP ADAPTER HANDLE> + * <int> <int> <int> <int> + * + * @param dev : device object representation for the scsi device + * @param buf : buffer to send data to + */ +static ssize_t +megaraid_sysfs_show_ldnum(struct device *dev, char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + adapter_t *adapter = (adapter_t *)SCSIHOST2ADAP(sdev->host); + mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); + int scsi_id = -1; + int logical_drv = -1; + int ldid_map = -1; + uint32_t app_hndl = 0; + int mapped_sdev_id; + int rval; + int i; + + if (raid_dev->random_del_supported && + MRAID_IS_LOGICAL_SDEV(adapter, sdev)) { + + rval = megaraid_sysfs_get_ldmap(adapter); + if (rval == 0) { + + for (i = 0; i < MAX_LOGICAL_DRIVES_40LD; i++) { + + mapped_sdev_id = sdev->id; + + if (sdev->id > adapter->init_id) { + mapped_sdev_id -= 1; + } + + if (raid_dev->curr_ldmap[i] == mapped_sdev_id) { + + scsi_id = sdev->id; + + logical_drv = i; + + ldid_map = raid_dev->curr_ldmap[i]; + + app_hndl = mraid_mm_adapter_app_handle( + adapter->unique_id); + + break; + } + } + } + else { + con_log(CL_ANN, (KERN_NOTICE + "megaraid: sysfs get ld map failed: %x\n", + rval)); + } + } + + return snprintf(buf, 36, "%d %d %d %d\n", scsi_id, logical_drv, + ldid_map, app_hndl); +} + + +/* + * END: Mailbox Low Level Driver + */ +module_init(megaraid_init); +module_exit(megaraid_exit); + +/* vim: set ts=8 sw=8 tw=78 ai si: */ diff --git a/drivers/scsi/megaraid/megaraid_mbox.h b/drivers/scsi/megaraid/megaraid_mbox.h new file mode 100644 index 000000000000..07510009d110 --- /dev/null +++ b/drivers/scsi/megaraid/megaraid_mbox.h @@ -0,0 +1,288 @@ +/* + * + * Linux MegaRAID device driver + * + * Copyright (c) 2003-2004 LSI Logic Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ * + * FILE : megaraid_mbox.h + */ + +#ifndef _MEGARAID_H_ +#define _MEGARAID_H_ + + +#include "mega_common.h" +#include "mbox_defs.h" +#include "megaraid_ioctl.h" + + +#define MEGARAID_VERSION "2.20.4.5" +#define MEGARAID_EXT_VERSION "(Release Date: Thu Feb 03 12:27:22 EST 2005)" + + +/* + * Define some PCI values here until they are put in the kernel + */ +#define PCI_DEVICE_ID_PERC4_DI_DISCOVERY 0x000E +#define PCI_SUBSYS_ID_PERC4_DI_DISCOVERY 0x0123 + +#define PCI_DEVICE_ID_PERC4_SC 0x1960 +#define PCI_SUBSYS_ID_PERC4_SC 0x0520 + +#define PCI_DEVICE_ID_PERC4_DC 0x1960 +#define PCI_SUBSYS_ID_PERC4_DC 0x0518 + +#define PCI_DEVICE_ID_PERC4_QC 0x0407 +#define PCI_SUBSYS_ID_PERC4_QC 0x0531 + +#define PCI_DEVICE_ID_PERC4_DI_EVERGLADES 0x000F +#define PCI_SUBSYS_ID_PERC4_DI_EVERGLADES 0x014A + +#define PCI_DEVICE_ID_PERC4E_SI_BIGBEND 0x0013 +#define PCI_SUBSYS_ID_PERC4E_SI_BIGBEND 0x016c + +#define PCI_DEVICE_ID_PERC4E_DI_KOBUK 0x0013 +#define PCI_SUBSYS_ID_PERC4E_DI_KOBUK 0x016d + +#define PCI_DEVICE_ID_PERC4E_DI_CORVETTE 0x0013 +#define PCI_SUBSYS_ID_PERC4E_DI_CORVETTE 0x016e + +#define PCI_DEVICE_ID_PERC4E_DI_EXPEDITION 0x0013 +#define PCI_SUBSYS_ID_PERC4E_DI_EXPEDITION 0x016f + +#define PCI_DEVICE_ID_PERC4E_DI_GUADALUPE 0x0013 +#define PCI_SUBSYS_ID_PERC4E_DI_GUADALUPE 0x0170 + +#define PCI_DEVICE_ID_PERC4E_DC_320_2E 0x0408 +#define PCI_SUBSYS_ID_PERC4E_DC_320_2E 0x0002 + +#define PCI_DEVICE_ID_PERC4E_SC_320_1E 0x0408 +#define PCI_SUBSYS_ID_PERC4E_SC_320_1E 0x0001 + +#define PCI_DEVICE_ID_MEGARAID_SCSI_320_0 0x1960 +#define PCI_SUBSYS_ID_MEGARAID_SCSI_320_0 0xA520 + +#define PCI_DEVICE_ID_MEGARAID_SCSI_320_1 0x1960 +#define PCI_SUBSYS_ID_MEGARAID_SCSI_320_1 0x0520 + +#define PCI_DEVICE_ID_MEGARAID_SCSI_320_2 0x1960 +#define PCI_SUBSYS_ID_MEGARAID_SCSI_320_2 0x0518 + +#define PCI_DEVICE_ID_MEGARAID_SCSI_320_0x 0x0407 +#define PCI_SUBSYS_ID_MEGARAID_SCSI_320_0x 0x0530 + +#define PCI_DEVICE_ID_MEGARAID_SCSI_320_2x 0x0407 +#define PCI_SUBSYS_ID_MEGARAID_SCSI_320_2x 0x0532 + +#define PCI_DEVICE_ID_MEGARAID_SCSI_320_4x 0x0407 +#define PCI_SUBSYS_ID_MEGARAID_SCSI_320_4x 0x0531 + +#define PCI_DEVICE_ID_MEGARAID_SCSI_320_1E 0x0408 +#define PCI_SUBSYS_ID_MEGARAID_SCSI_320_1E 0x0001 + +#define PCI_DEVICE_ID_MEGARAID_SCSI_320_2E 0x0408 +#define PCI_SUBSYS_ID_MEGARAID_SCSI_320_2E 0x0002 + +#define PCI_DEVICE_ID_MEGARAID_I4_133_RAID 0x1960 +#define PCI_SUBSYS_ID_MEGARAID_I4_133_RAID 0x0522 + +#define PCI_DEVICE_ID_MEGARAID_SATA_150_4 0x1960 +#define PCI_SUBSYS_ID_MEGARAID_SATA_150_4 0x4523 + +#define PCI_DEVICE_ID_MEGARAID_SATA_150_6 0x1960 +#define PCI_SUBSYS_ID_MEGARAID_SATA_150_6 0x0523 + +#define PCI_DEVICE_ID_MEGARAID_SATA_300_4x 0x0409 +#define PCI_SUBSYS_ID_MEGARAID_SATA_300_4x 0x3004 + +#define PCI_DEVICE_ID_MEGARAID_SATA_300_8x 0x0409 +#define PCI_SUBSYS_ID_MEGARAID_SATA_300_8x 0x3008 + +#define PCI_DEVICE_ID_INTEL_RAID_SRCU42X 0x0407 +#define PCI_SUBSYS_ID_INTEL_RAID_SRCU42X 0x0532 + +#define PCI_DEVICE_ID_INTEL_RAID_SRCS16 0x1960 +#define PCI_SUBSYS_ID_INTEL_RAID_SRCS16 0x0523 + +#define PCI_DEVICE_ID_INTEL_RAID_SRCU42E 0x0408 +#define PCI_SUBSYS_ID_INTEL_RAID_SRCU42E 0x0002 + +#define PCI_DEVICE_ID_INTEL_RAID_SRCZCRX 0x0407 +#define PCI_SUBSYS_ID_INTEL_RAID_SRCZCRX 0x0530 + +#define PCI_DEVICE_ID_INTEL_RAID_SRCS28X 0x0409 +#define PCI_SUBSYS_ID_INTEL_RAID_SRCS28X 0x3008 + +#define PCI_DEVICE_ID_INTEL_RAID_SROMBU42E_ALIEF 0x0408 +#define PCI_SUBSYS_ID_INTEL_RAID_SROMBU42E_ALIEF 0x3431 + +#define PCI_DEVICE_ID_INTEL_RAID_SROMBU42E_HARWICH 0x0408 +#define 
PCI_SUBSYS_ID_INTEL_RAID_SROMBU42E_HARWICH 0x3499 + +#define PCI_DEVICE_ID_INTEL_RAID_SRCU41L_LAKE_SHETEK 0x1960 +#define PCI_SUBSYS_ID_INTEL_RAID_SRCU41L_LAKE_SHETEK 0x0520 + +#define PCI_DEVICE_ID_FSC_MEGARAID_PCI_EXPRESS_ROMB 0x0408 +#define PCI_SUBSYS_ID_FSC_MEGARAID_PCI_EXPRESS_ROMB 0x1065 + +#define PCI_DEVICE_ID_MEGARAID_ACER_ROMB_2E 0x0408 +#define PCI_SUBSYS_ID_MEGARAID_ACER_ROMB_2E 0x004D + +#define PCI_SUBSYS_ID_PERC3_QC 0x0471 +#define PCI_SUBSYS_ID_PERC3_DC 0x0493 +#define PCI_SUBSYS_ID_PERC3_SC 0x0475 + +#define PCI_DEVICE_ID_MEGARAID_NEC_ROMB_2E 0x0408 +#define PCI_SUBSYS_ID_MEGARAID_NEC_ROMB_2E 0x8287 + +#ifndef PCI_SUBSYS_ID_FSC +#define PCI_SUBSYS_ID_FSC 0x1734 +#endif + +#define MBOX_MAX_SCSI_CMDS 128 // number of cmds reserved for kernel +#define MBOX_MAX_USER_CMDS 32 // number of cmds for applications +#define MBOX_DEF_CMD_PER_LUN 64 // default commands per lun +#define MBOX_DEFAULT_SG_SIZE 26 // default sg size supported by all fw +#define MBOX_MAX_SG_SIZE 32 // maximum scatter-gather list size +#define MBOX_MAX_SECTORS 128 // maximum sectors per IO +#define MBOX_TIMEOUT 30 // timeout value for internal cmds +#define MBOX_BUSY_WAIT 10 // max usec to wait for busy mailbox +#define MBOX_RESET_WAIT 180 // wait these many seconds in reset +#define MBOX_RESET_EXT_WAIT 120 // extended wait reset + +/* + * maximum transfer that can happen through the firmware commands issued + * internnaly from the driver. + */ +#define MBOX_IBUF_SIZE 4096 + + +/** + * mbox_ccb_t - command control block specific to mailbox based controllers + * @raw_mbox : raw mailbox pointer + * @mbox : mailbox + * @mbox64 : extended mailbox + * @mbox_dma_h : maibox dma address + * @sgl64 : 64-bit scatter-gather list + * @sgl32 : 32-bit scatter-gather list + * @sgl_dma_h : dma handle for the scatter-gather list + * @pthru : passthru structure + * @pthru_dma_h : dma handle for the passthru structure + * @epthru : extended passthru structure + * @epthru_dma_h : dma handle for extended passthru structure + * @buf_dma_h : dma handle for buffers w/o sg list + * + * command control block specific to the mailbox based controllers + */ +typedef struct { + uint8_t *raw_mbox; + mbox_t *mbox; + mbox64_t *mbox64; + dma_addr_t mbox_dma_h; + mbox_sgl64 *sgl64; + mbox_sgl32 *sgl32; + dma_addr_t sgl_dma_h; + mraid_passthru_t *pthru; + dma_addr_t pthru_dma_h; + mraid_epassthru_t *epthru; + dma_addr_t epthru_dma_h; + dma_addr_t buf_dma_h; +} mbox_ccb_t; + + +/** + * mraid_device_t - adapter soft state structure for mailbox controllers + * @param una_mbox64 : 64-bit mbox - unaligned + * @param una_mbox64_dma : mbox dma addr - unaligned + * @param mbox : 32-bit mbox - aligned + * @param mbox64 : 64-bit mbox - aligned + * @param mbox_dma : mbox dma addr - aligned + * @param mailbox_lock : exclusion lock for the mailbox + * @param baseport : base port of hba memory + * @param baseaddr : mapped addr of hba memory + * @param mbox_pool : pool of mailboxes + * @param mbox_pool_handle : handle for the mailbox pool memory + * @param epthru_pool : a pool for extended passthru commands + * @param epthru_pool_handle : handle to the pool above + * @param sg_pool : pool of scatter-gather lists for this driver + * @param sg_pool_handle : handle to the pool above + * @param ccb_list : list of our command control blocks + * @param uccb_list : list of cmd control blocks for mgmt module + * @param umbox64 : array of mailbox for user commands (cmm) + * @param pdrv_state : array for state of each physical drive. 
+ * @param last_disp : flag used to show device scanning + * @param hw_error : set if FW not responding + * @param fast_load : If set, skip physical device scanning + * @channel_class : channel class, RAID or SCSI + * @sysfs_sem : semaphore to serialize access to sysfs res. + * @sysfs_uioc : management packet to issue FW calls from sysfs + * @sysfs_mbox64 : mailbox packet to issue FW calls from sysfs + * @sysfs_buffer : data buffer for FW commands issued from sysfs + * @sysfs_buffer_dma : DMA buffer for FW commands issued from sysfs + * @sysfs_wait_q : wait queue for sysfs operations + * @random_del_supported : set if the random deletion is supported + * @curr_ldmap : current LDID map + * + * Initialization structure for mailbox controllers: memory based and IO based + * All the fields in this structure are LLD specific and may be discovered at + * init() or start() time. + * + * NOTE: The fields of this structures are placed to minimize cache misses + */ +#define MAX_LD_EXTENDED64 64 +typedef struct { + mbox64_t *una_mbox64; + dma_addr_t una_mbox64_dma; + mbox_t *mbox; + mbox64_t *mbox64; + dma_addr_t mbox_dma; + spinlock_t mailbox_lock; + unsigned long baseport; + void __iomem * baseaddr; + struct mraid_pci_blk mbox_pool[MBOX_MAX_SCSI_CMDS]; + struct dma_pool *mbox_pool_handle; + struct mraid_pci_blk epthru_pool[MBOX_MAX_SCSI_CMDS]; + struct dma_pool *epthru_pool_handle; + struct mraid_pci_blk sg_pool[MBOX_MAX_SCSI_CMDS]; + struct dma_pool *sg_pool_handle; + mbox_ccb_t ccb_list[MBOX_MAX_SCSI_CMDS]; + mbox_ccb_t uccb_list[MBOX_MAX_USER_CMDS]; + mbox64_t umbox64[MBOX_MAX_USER_CMDS]; + + uint8_t pdrv_state[MBOX_MAX_PHYSICAL_DRIVES]; + uint32_t last_disp; + int hw_error; + int fast_load; + uint8_t channel_class; + struct semaphore sysfs_sem; + uioc_t *sysfs_uioc; + mbox64_t *sysfs_mbox64; + caddr_t sysfs_buffer; + dma_addr_t sysfs_buffer_dma; + wait_queue_head_t sysfs_wait_q; + int random_del_supported; + uint16_t curr_ldmap[MAX_LD_EXTENDED64]; +} mraid_device_t; + +// route to raid device from adapter +#define ADAP2RAIDDEV(adp) ((mraid_device_t *)((adp)->raid_device)) + +#define MAILBOX_LOCK(rdev) (&(rdev)->mailbox_lock) + +// Find out if this channel is a RAID or SCSI +#define IS_RAID_CH(rdev, ch) (((rdev)->channel_class >> (ch)) & 0x01) + + +#define RDINDOOR(rdev) readl((rdev)->baseaddr + 0x20) +#define RDOUTDOOR(rdev) readl((rdev)->baseaddr + 0x2C) +#define WRINDOOR(rdev, value) writel(value, (rdev)->baseaddr + 0x20) +#define WROUTDOOR(rdev, value) writel(value, (rdev)->baseaddr + 0x2C) + +#endif // _MEGARAID_H_ + +// vim: set ts=8 sw=8 tw=78: diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c new file mode 100644 index 000000000000..9f1b550713ec --- /dev/null +++ b/drivers/scsi/megaraid/megaraid_mm.c @@ -0,0 +1,1255 @@ +/* + * + * Linux MegaRAID device driver + * + * Copyright (c) 2003-2004 LSI Logic Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ * + * FILE : megaraid_mm.c + * Version : v2.20.2.5 (Jan 21 2005) + * + * Common management module + */ + +#include "megaraid_mm.h" +#include <linux/smp_lock.h> + + +// Entry points for char node driver +static int mraid_mm_open(struct inode *, struct file *); +static int mraid_mm_ioctl(struct inode *, struct file *, uint, unsigned long); + + +// routines to convert to and from the old the format +static int mimd_to_kioc(mimd_t __user *, mraid_mmadp_t *, uioc_t *); +static int kioc_to_mimd(uioc_t *, mimd_t __user *); + + +// Helper functions +static int handle_drvrcmd(void __user *, uint8_t, int *); +static int lld_ioctl(mraid_mmadp_t *, uioc_t *); +static void ioctl_done(uioc_t *); +static void lld_timedout(unsigned long); +static void hinfo_to_cinfo(mraid_hba_info_t *, mcontroller_t *); +static mraid_mmadp_t *mraid_mm_get_adapter(mimd_t __user *, int *); +static uioc_t *mraid_mm_alloc_kioc(mraid_mmadp_t *); +static void mraid_mm_dealloc_kioc(mraid_mmadp_t *, uioc_t *); +static int mraid_mm_attach_buf(mraid_mmadp_t *, uioc_t *, int); +static int mraid_mm_setup_dma_pools(mraid_mmadp_t *); +static void mraid_mm_free_adp_resources(mraid_mmadp_t *); +static void mraid_mm_teardown_dma_pools(mraid_mmadp_t *); + +#ifdef CONFIG_COMPAT +static long mraid_mm_compat_ioctl(struct file *, unsigned int, unsigned long); +#endif + +MODULE_AUTHOR("LSI Logic Corporation"); +MODULE_DESCRIPTION("LSI Logic Management Module"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(LSI_COMMON_MOD_VERSION); + +static int dbglevel = CL_ANN; +module_param_named(dlevel, dbglevel, int, 0); +MODULE_PARM_DESC(dlevel, "Debug level (default=0)"); + +EXPORT_SYMBOL(mraid_mm_register_adp); +EXPORT_SYMBOL(mraid_mm_unregister_adp); +EXPORT_SYMBOL(mraid_mm_adapter_app_handle); + +static int majorno; +static uint32_t drvr_ver = 0x02200201; + +static int adapters_count_g; +static struct list_head adapters_list_g; + +static wait_queue_head_t wait_q; + +static struct file_operations lsi_fops = { + .open = mraid_mm_open, + .ioctl = mraid_mm_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = mraid_mm_compat_ioctl, +#endif + .owner = THIS_MODULE, +}; + +/** + * mraid_mm_open - open routine for char node interface + * @inod : unused + * @filep : unused + * + * allow ioctl operations by apps only if they superuser privilege + */ +static int +mraid_mm_open(struct inode *inode, struct file *filep) +{ + /* + * Only allow superuser to access private ioctl interface + */ + if (!capable(CAP_SYS_ADMIN)) return (-EACCES); + + return 0; +} + +/** + * mraid_mm_ioctl - module entry-point for ioctls + * @inode : inode (ignored) + * @filep : file operations pointer (ignored) + * @cmd : ioctl command + * @arg : user ioctl packet + */ +static int +mraid_mm_ioctl(struct inode *inode, struct file *filep, unsigned int cmd, + unsigned long arg) +{ + uioc_t *kioc; + char signature[EXT_IOCTL_SIGN_SZ] = {0}; + int rval; + mraid_mmadp_t *adp; + uint8_t old_ioctl; + int drvrcmd_rval; + void __user *argp = (void __user *)arg; + + /* + * Make sure only USCSICMD are issued through this interface. + * MIMD application would still fire different command. + */ + + if ((_IOC_TYPE(cmd) != MEGAIOC_MAGIC) && (cmd != USCSICMD)) { + return (-EINVAL); + } + + /* + * Look for signature to see if this is the new or old ioctl format. 
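+ * Packets carrying the EXT_IOCTL_SIGN signature are in the new format, which is not supported yet; everything else is treated as an old mimd_t packet.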
+ */ + if (copy_from_user(signature, argp, EXT_IOCTL_SIGN_SZ)) { + con_log(CL_ANN, (KERN_WARNING + "megaraid cmm: copy from usr addr failed\n")); + return (-EFAULT); + } + + if (memcmp(signature, EXT_IOCTL_SIGN, EXT_IOCTL_SIGN_SZ) == 0) + old_ioctl = 0; + else + old_ioctl = 1; + + /* + * At present, we don't support the new ioctl packet + */ + if (!old_ioctl ) + return (-EINVAL); + + /* + * If it is a driver ioctl (as opposed to fw ioctls), then we can + * handle the command locally. rval > 0 means it is not a drvr cmd + */ + rval = handle_drvrcmd(argp, old_ioctl, &drvrcmd_rval); + + if (rval < 0) + return rval; + else if (rval == 0) + return drvrcmd_rval; + + rval = 0; + if ((adp = mraid_mm_get_adapter(argp, &rval)) == NULL) { + return rval; + } + + /* + * Check if adapter can accept ioctl. We may have marked it offline + * if any previous kioc had timedout on this controller. + */ + if (!adp->quiescent) { + con_log(CL_ANN, (KERN_WARNING + "megaraid cmm: controller cannot accept cmds due to " + "earlier errors\n" )); + return -EFAULT; + } + + /* + * The following call will block till a kioc is available + */ + kioc = mraid_mm_alloc_kioc(adp); + + /* + * User sent the old mimd_t ioctl packet. Convert it to uioc_t. + */ + if ((rval = mimd_to_kioc(argp, adp, kioc))) { + mraid_mm_dealloc_kioc(adp, kioc); + return rval; + } + + kioc->done = ioctl_done; + + /* + * Issue the IOCTL to the low level driver. After the IOCTL completes + * release the kioc if and only if it was _not_ timedout. If it was + * timedout, that means that resources are still with low level driver. + */ + if ((rval = lld_ioctl(adp, kioc))) { + + if (!kioc->timedout) + mraid_mm_dealloc_kioc(adp, kioc); + + return rval; + } + + /* + * Convert the kioc back to user space + */ + rval = kioc_to_mimd(kioc, argp); + + /* + * Return the kioc to free pool + */ + mraid_mm_dealloc_kioc(adp, kioc); + + return rval; +} + + +/** + * mraid_mm_get_adapter - Returns corresponding adapters for the mimd packet + * @umimd : User space mimd_t ioctl packet + * @adapter : pointer to the adapter (OUT) + */ +static mraid_mmadp_t * +mraid_mm_get_adapter(mimd_t __user *umimd, int *rval) +{ + mraid_mmadp_t *adapter; + mimd_t mimd; + uint32_t adapno; + int iterator; + + + if (copy_from_user(&mimd, umimd, sizeof(mimd_t))) { + *rval = -EFAULT; + return NULL; + } + + adapno = GETADAP(mimd.ui.fcs.adapno); + + if (adapno >= adapters_count_g) { + *rval = -ENODEV; + return NULL; + } + + adapter = NULL; + iterator = 0; + + list_for_each_entry(adapter, &adapters_list_g, list) { + if (iterator++ == adapno) break; + } + + if (!adapter) { + *rval = -ENODEV; + return NULL; + } + + return adapter; +} + +/* + * handle_drvrcmd - This routine checks if the opcode is a driver + * cmd and if it is, handles it. + * @arg : packet sent by the user app + * @old_ioctl : mimd if 1; uioc otherwise + */ +static int +handle_drvrcmd(void __user *arg, uint8_t old_ioctl, int *rval) +{ + mimd_t __user *umimd; + mimd_t kmimd; + uint8_t opcode; + uint8_t subopcode; + + if (old_ioctl) + goto old_packet; + else + goto new_packet; + +new_packet: + return (-ENOTSUPP); + +old_packet: + *rval = 0; + umimd = arg; + + if (copy_from_user(&kmimd, umimd, sizeof(mimd_t))) + return (-EFAULT); + + opcode = kmimd.ui.fcs.opcode; + subopcode = kmimd.ui.fcs.subopcode; + + /* + * If the opcode is 0x82 and the subopcode is either GET_DRVRVER or + * GET_NUMADP, then we can handle. Otherwise we should return 1 to + * indicate that we cannot handle this. 
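+ * A return value of 0 means the request was handled here and *rval holds the result for the caller; negative values are error codes.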
+ */ + if (opcode != 0x82) + return 1; + + switch (subopcode) { + + case MEGAIOC_QDRVRVER: + + if (copy_to_user(kmimd.data, &drvr_ver, sizeof(uint32_t))) + return (-EFAULT); + + return 0; + + case MEGAIOC_QNADAP: + + *rval = adapters_count_g; + + if (copy_to_user(kmimd.data, &adapters_count_g, + sizeof(uint32_t))) + return (-EFAULT); + + return 0; + + default: + /* cannot handle */ + return 1; + } + + return 0; +} + + +/** + * mimd_to_kioc - Converter from old to new ioctl format + * + * @umimd : user space old MIMD IOCTL + * @kioc : kernel space new format IOCTL + * + * Routine to convert MIMD interface IOCTL to new interface IOCTL packet. The + * new packet is in kernel space so that driver can perform operations on it + * freely. + */ + +static int +mimd_to_kioc(mimd_t __user *umimd, mraid_mmadp_t *adp, uioc_t *kioc) +{ + mbox64_t *mbox64; + mbox_t *mbox; + mraid_passthru_t *pthru32; + uint32_t adapno; + uint8_t opcode; + uint8_t subopcode; + mimd_t mimd; + + if (copy_from_user(&mimd, umimd, sizeof(mimd_t))) + return (-EFAULT); + + /* + * Applications are not allowed to send extd pthru + */ + if ((mimd.mbox[0] == MBOXCMD_PASSTHRU64) || + (mimd.mbox[0] == MBOXCMD_EXTPTHRU)) + return (-EINVAL); + + opcode = mimd.ui.fcs.opcode; + subopcode = mimd.ui.fcs.subopcode; + adapno = GETADAP(mimd.ui.fcs.adapno); + + if (adapno >= adapters_count_g) + return (-ENODEV); + + kioc->adapno = adapno; + kioc->mb_type = MBOX_LEGACY; + kioc->app_type = APPTYPE_MIMD; + + switch (opcode) { + + case 0x82: + + if (subopcode == MEGAIOC_QADAPINFO) { + + kioc->opcode = GET_ADAP_INFO; + kioc->data_dir = UIOC_RD; + kioc->xferlen = sizeof(mraid_hba_info_t); + + if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen)) + return (-ENOMEM); + } + else { + con_log(CL_ANN, (KERN_WARNING + "megaraid cmm: Invalid subop\n")); + return (-EINVAL); + } + + break; + + case 0x81: + + kioc->opcode = MBOX_CMD; + kioc->xferlen = mimd.ui.fcs.length; + kioc->user_data_len = kioc->xferlen; + kioc->user_data = mimd.ui.fcs.buffer; + + if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen)) + return (-ENOMEM); + + if (mimd.outlen) kioc->data_dir = UIOC_RD; + if (mimd.inlen) kioc->data_dir |= UIOC_WR; + + break; + + case 0x80: + + kioc->opcode = MBOX_CMD; + kioc->xferlen = (mimd.outlen > mimd.inlen) ? + mimd.outlen : mimd.inlen; + kioc->user_data_len = kioc->xferlen; + kioc->user_data = mimd.data; + + if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen)) + return (-ENOMEM); + + if (mimd.outlen) kioc->data_dir = UIOC_RD; + if (mimd.inlen) kioc->data_dir |= UIOC_WR; + + break; + + default: + return (-EINVAL); + } + + /* + * If driver command, nothing else to do + */ + if (opcode == 0x82) + return 0; + + /* + * This is a mailbox cmd; copy the mailbox from mimd + */ + mbox64 = (mbox64_t *)((unsigned long)kioc->cmdbuf); + mbox = &mbox64->mbox32; + memcpy(mbox, mimd.mbox, 14); + + if (mbox->cmd != MBOXCMD_PASSTHRU) { // regular DCMD + + mbox->xferaddr = (uint32_t)kioc->buf_paddr; + + if (kioc->data_dir & UIOC_WR) { + if (copy_from_user(kioc->buf_vaddr, kioc->user_data, + kioc->xferlen)) { + return (-EFAULT); + } + } + + return 0; + } + + /* + * This is a regular 32-bit pthru cmd; mbox points to pthru struct. + * Just like in above case, the beginning for memblk is treated as + * a mailbox. The passthru will begin at next 1K boundary. And the + * data will start 1K after that. 
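+ * The passthru structure is copied in from user space and its dataxferaddr is re-pointed at the kioc data buffer before the command is issued.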
+ */ + pthru32 = kioc->pthru32; + kioc->user_pthru = &umimd->pthru; + mbox->xferaddr = (uint32_t)kioc->pthru32_h; + + if (copy_from_user(pthru32, kioc->user_pthru, + sizeof(mraid_passthru_t))) { + return (-EFAULT); + } + + pthru32->dataxferaddr = kioc->buf_paddr; + if (kioc->data_dir & UIOC_WR) { + if (copy_from_user(kioc->buf_vaddr, kioc->user_data, + pthru32->dataxferlen)) { + return (-EFAULT); + } + } + + return 0; +} + +/** + * mraid_mm_attch_buf - Attach a free dma buffer for required size + * + * @adp : Adapter softstate + * @kioc : kioc that the buffer needs to be attached to + * @xferlen : required length for buffer + * + * First we search for a pool with smallest buffer that is >= @xferlen. If + * that pool has no free buffer, we will try for the next bigger size. If none + * is available, we will try to allocate the smallest buffer that is >= + * @xferlen and attach it the pool. + */ +static int +mraid_mm_attach_buf(mraid_mmadp_t *adp, uioc_t *kioc, int xferlen) +{ + mm_dmapool_t *pool; + int right_pool = -1; + unsigned long flags; + int i; + + kioc->pool_index = -1; + kioc->buf_vaddr = NULL; + kioc->buf_paddr = 0; + kioc->free_buf = 0; + + /* + * We need xferlen amount of memory. See if we can get it from our + * dma pools. If we don't get exact size, we will try bigger buffer + */ + + for (i = 0; i < MAX_DMA_POOLS; i++) { + + pool = &adp->dma_pool_list[i]; + + if (xferlen > pool->buf_size) + continue; + + if (right_pool == -1) + right_pool = i; + + spin_lock_irqsave(&pool->lock, flags); + + if (!pool->in_use) { + + pool->in_use = 1; + kioc->pool_index = i; + kioc->buf_vaddr = pool->vaddr; + kioc->buf_paddr = pool->paddr; + + spin_unlock_irqrestore(&pool->lock, flags); + return 0; + } + else { + spin_unlock_irqrestore(&pool->lock, flags); + continue; + } + } + + /* + * If xferlen doesn't match any of our pools, return error + */ + if (right_pool == -1) + return -EINVAL; + + /* + * We did not get any buffer from the preallocated pool. Let us try + * to allocate one new buffer. NOTE: This is a blocking call. + */ + pool = &adp->dma_pool_list[right_pool]; + + spin_lock_irqsave(&pool->lock, flags); + + kioc->pool_index = right_pool; + kioc->free_buf = 1; + kioc->buf_vaddr = pci_pool_alloc(pool->handle, GFP_KERNEL, + &kioc->buf_paddr); + spin_unlock_irqrestore(&pool->lock, flags); + + if (!kioc->buf_vaddr) + return -ENOMEM; + + return 0; +} + +/** + * mraid_mm_alloc_kioc - Returns a uioc_t from free list + * @adp : Adapter softstate for this module + * + * The kioc_semaphore is initialized with number of kioc nodes in the + * free kioc pool. If the kioc pool is empty, this function blocks till + * a kioc becomes free. 
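+ * Once the semaphore has been acquired the free list should never be empty; an empty list is treated as an anomaly, logged, and NULL is returned.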
+ */ +static uioc_t * +mraid_mm_alloc_kioc(mraid_mmadp_t *adp) +{ + uioc_t *kioc; + struct list_head* head; + unsigned long flags; + + down(&adp->kioc_semaphore); + + spin_lock_irqsave(&adp->kioc_pool_lock, flags); + + head = &adp->kioc_pool; + + if (list_empty(head)) { + up(&adp->kioc_semaphore); + spin_unlock_irqrestore(&adp->kioc_pool_lock, flags); + + con_log(CL_ANN, ("megaraid cmm: kioc list empty!\n")); + return NULL; + } + + kioc = list_entry(head->next, uioc_t, list); + list_del_init(&kioc->list); + + spin_unlock_irqrestore(&adp->kioc_pool_lock, flags); + + memset((caddr_t)(unsigned long)kioc->cmdbuf, 0, sizeof(mbox64_t)); + memset((caddr_t) kioc->pthru32, 0, sizeof(mraid_passthru_t)); + + kioc->buf_vaddr = NULL; + kioc->buf_paddr = 0; + kioc->pool_index =-1; + kioc->free_buf = 0; + kioc->user_data = NULL; + kioc->user_data_len = 0; + kioc->user_pthru = NULL; + kioc->timedout = 0; + + return kioc; +} + +/** + * mraid_mm_dealloc_kioc - Return kioc to free pool + * + * @adp : Adapter softstate + * @kioc : uioc_t node to be returned to free pool + */ +static void +mraid_mm_dealloc_kioc(mraid_mmadp_t *adp, uioc_t *kioc) +{ + mm_dmapool_t *pool; + unsigned long flags; + + if (kioc->pool_index != -1) { + pool = &adp->dma_pool_list[kioc->pool_index]; + + /* This routine may be called in non-isr context also */ + spin_lock_irqsave(&pool->lock, flags); + + /* + * While attaching the dma buffer, if we didn't get the + * required buffer from the pool, we would have allocated + * it at the run time and set the free_buf flag. We must + * free that buffer. Otherwise, just mark that the buffer is + * not in use + */ + if (kioc->free_buf == 1) + pci_pool_free(pool->handle, kioc->buf_vaddr, + kioc->buf_paddr); + else + pool->in_use = 0; + + spin_unlock_irqrestore(&pool->lock, flags); + } + + /* Return the kioc to the free pool */ + spin_lock_irqsave(&adp->kioc_pool_lock, flags); + list_add(&kioc->list, &adp->kioc_pool); + spin_unlock_irqrestore(&adp->kioc_pool_lock, flags); + + /* increment the free kioc count */ + up(&adp->kioc_semaphore); + + return; +} + +/** + * lld_ioctl - Routine to issue ioctl to low level drvr + * + * @adp : The adapter handle + * @kioc : The ioctl packet with kernel addresses + */ +static int +lld_ioctl(mraid_mmadp_t *adp, uioc_t *kioc) +{ + int rval; + struct timer_list timer; + struct timer_list *tp = NULL; + + kioc->status = -ENODATA; + rval = adp->issue_uioc(adp->drvr_data, kioc, IOCTL_ISSUE); + + if (rval) return rval; + + /* + * Start the timer + */ + if (adp->timeout > 0) { + tp = &timer; + init_timer(tp); + + tp->function = lld_timedout; + tp->data = (unsigned long)kioc; + tp->expires = jiffies + adp->timeout * HZ; + + add_timer(tp); + } + + /* + * Wait till the low level driver completes the ioctl. After this + * call, the ioctl either completed successfully or timedout. + */ + wait_event(wait_q, (kioc->status != -ENODATA)); + if (tp) { + del_timer_sync(tp); + } + + /* + * If the command had timedout, we mark the controller offline + * before returning + */ + if (kioc->timedout) { + adp->quiescent = 0; + } + + return kioc->status; +} + + +/** + * ioctl_done - callback from the low level driver + * + * @kioc : completed ioctl packet + */ +static void +ioctl_done(uioc_t *kioc) +{ + uint32_t adapno; + int iterator; + mraid_mmadp_t* adapter; + + /* + * When the kioc returns from driver, make sure it still doesn't + * have ENODATA in status. 
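Editor's note: mraid_mm_alloc_kioc() pairs a counting semaphore (initialized to the number of kioc nodes) with a spinlock-protected free list, so a caller blocks only when every kioc is in flight, and the list is guaranteed non-empty once down() returns. A rough user-space analogue of that pattern, using POSIX primitives purely for illustration (the driver itself uses down()/up() and spin_lock_irqsave()):

        /* Sketch only: counting semaphore guarding a mutex-protected free list. */
        #include <pthread.h>
        #include <semaphore.h>

        #define MAX_NODES 4                     /* illustrative; the driver uses max_kioc */

        struct node {
                struct node *next;
        };

        static struct node nodes[MAX_NODES];
        static struct node *free_list;
        static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
        static sem_t free_count;                /* always equals the list length */

        static void pool_init(void)
        {
                int i;

                for (i = 0; i < MAX_NODES; i++) {
                        nodes[i].next = free_list;
                        free_list = &nodes[i];
                }
                sem_init(&free_count, 0, MAX_NODES);
        }

        static struct node *node_get(void)
        {
                struct node *n;

                sem_wait(&free_count);          /* block until a node is free */

                pthread_mutex_lock(&pool_lock);
                n = free_list;
                free_list = n->next;
                pthread_mutex_unlock(&pool_lock);

                return n;
        }

        static void node_put(struct node *n)
        {
                pthread_mutex_lock(&pool_lock);
                n->next = free_list;
                free_list = n;
                pthread_mutex_unlock(&pool_lock);

                sem_post(&free_count);          /* wake one blocked getter */
        }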
Otherwise, driver will hang on wait_event + * forever + */ + if (kioc->status == -ENODATA) { + con_log(CL_ANN, (KERN_WARNING + "megaraid cmm: lld didn't change status!\n")); + + kioc->status = -EINVAL; + } + + /* + * Check if this kioc was timedout before. If so, nobody is waiting + * on this kioc. We don't have to wake up anybody. Instead, we just + * have to free the kioc + */ + if (kioc->timedout) { + iterator = 0; + adapter = NULL; + adapno = kioc->adapno; + + con_log(CL_ANN, ( KERN_WARNING "megaraid cmm: completed " + "ioctl that was timedout before\n")); + + list_for_each_entry(adapter, &adapters_list_g, list) { + if (iterator++ == adapno) break; + } + + kioc->timedout = 0; + + if (adapter) { + mraid_mm_dealloc_kioc( adapter, kioc ); + } + } + else { + wake_up(&wait_q); + } +} + + +/* + * lld_timedout : callback from the expired timer + * + * @ptr : ioctl packet that timed out + */ +static void +lld_timedout(unsigned long ptr) +{ + uioc_t *kioc = (uioc_t *)ptr; + + kioc->status = -ETIME; + kioc->timedout = 1; + + con_log(CL_ANN, (KERN_WARNING "megaraid cmm: ioctl timed out\n")); + + wake_up(&wait_q); +} + + +/** + * kioc_to_mimd : Converter from new back to old format + * + * @kioc : Kernel space IOCTL packet (successfully issued) + * @mimd : User space MIMD packet + */ +static int +kioc_to_mimd(uioc_t *kioc, mimd_t __user *mimd) +{ + mimd_t kmimd; + uint8_t opcode; + uint8_t subopcode; + + mbox64_t *mbox64; + mraid_passthru_t __user *upthru32; + mraid_passthru_t *kpthru32; + mcontroller_t cinfo; + mraid_hba_info_t *hinfo; + + + if (copy_from_user(&kmimd, mimd, sizeof(mimd_t))) + return (-EFAULT); + + opcode = kmimd.ui.fcs.opcode; + subopcode = kmimd.ui.fcs.subopcode; + + if (opcode == 0x82) { + switch (subopcode) { + + case MEGAIOC_QADAPINFO: + + hinfo = (mraid_hba_info_t *)(unsigned long) + kioc->buf_vaddr; + + hinfo_to_cinfo(hinfo, &cinfo); + + if (copy_to_user(kmimd.data, &cinfo, sizeof(cinfo))) + return (-EFAULT); + + return 0; + + default: + return (-EINVAL); + } + + return 0; + } + + mbox64 = (mbox64_t *)(unsigned long)kioc->cmdbuf; + + if (kioc->user_pthru) { + + upthru32 = kioc->user_pthru; + kpthru32 = kioc->pthru32; + + if (copy_to_user(&upthru32->scsistatus, + &kpthru32->scsistatus, + sizeof(uint8_t))) { + return (-EFAULT); + } + } + + if (kioc->user_data) { + if (copy_to_user(kioc->user_data, kioc->buf_vaddr, + kioc->user_data_len)) { + return (-EFAULT); + } + } + + if (copy_to_user(&mimd->mbox[17], + &mbox64->mbox32.status, sizeof(uint8_t))) { + return (-EFAULT); + } + + return 0; +} + + +/** + * hinfo_to_cinfo - Convert new format hba info into old format + * + * @hinfo : New format, more comprehensive adapter info + * @cinfo : Old format adapter info to support mimd_t apps + */ +static void +hinfo_to_cinfo(mraid_hba_info_t *hinfo, mcontroller_t *cinfo) +{ + if (!hinfo || !cinfo) + return; + + cinfo->base = hinfo->baseport; + cinfo->irq = hinfo->irq; + cinfo->numldrv = hinfo->num_ldrv; + cinfo->pcibus = hinfo->pci_bus; + cinfo->pcidev = hinfo->pci_slot; + cinfo->pcifun = PCI_FUNC(hinfo->pci_dev_fn); + cinfo->pciid = hinfo->pci_device_id; + cinfo->pcivendor = hinfo->pci_vendor_id; + cinfo->pcislot = hinfo->pci_slot; + cinfo->uid = hinfo->unique_id; +} + + +/* + * mraid_mm_register_adp - Registration routine for low level drvrs + * + * @adp : Adapter objejct + */ +int +mraid_mm_register_adp(mraid_mmadp_t *lld_adp) +{ + mraid_mmadp_t *adapter; + mbox64_t *mbox_list; + uioc_t *kioc; + uint32_t rval; + int i; + + + if (lld_adp->drvr_type != DRVRTYPE_MBOX) + return 
(-EINVAL); + + adapter = kmalloc(sizeof(mraid_mmadp_t), GFP_KERNEL); + + if (!adapter) { + rval = -ENOMEM; + goto memalloc_error; + } + + memset(adapter, 0, sizeof(mraid_mmadp_t)); + + adapter->unique_id = lld_adp->unique_id; + adapter->drvr_type = lld_adp->drvr_type; + adapter->drvr_data = lld_adp->drvr_data; + adapter->pdev = lld_adp->pdev; + adapter->issue_uioc = lld_adp->issue_uioc; + adapter->timeout = lld_adp->timeout; + adapter->max_kioc = lld_adp->max_kioc; + adapter->quiescent = 1; + + /* + * Allocate single blocks of memory for all required kiocs, + * mailboxes and passthru structures. + */ + adapter->kioc_list = kmalloc(sizeof(uioc_t) * lld_adp->max_kioc, + GFP_KERNEL); + adapter->mbox_list = kmalloc(sizeof(mbox64_t) * lld_adp->max_kioc, + GFP_KERNEL); + adapter->pthru_dma_pool = pci_pool_create("megaraid mm pthru pool", + adapter->pdev, + sizeof(mraid_passthru_t), + 16, 0); + + if (!adapter->kioc_list || !adapter->mbox_list || + !adapter->pthru_dma_pool) { + + con_log(CL_ANN, (KERN_WARNING + "megaraid cmm: out of memory, %s %d\n", __FUNCTION__, + __LINE__)); + + rval = (-ENOMEM); + + goto memalloc_error; + } + + /* + * Slice kioc_list and make a kioc_pool with the individiual kiocs + */ + INIT_LIST_HEAD(&adapter->kioc_pool); + spin_lock_init(&adapter->kioc_pool_lock); + sema_init(&adapter->kioc_semaphore, lld_adp->max_kioc); + + mbox_list = (mbox64_t *)adapter->mbox_list; + + for (i = 0; i < lld_adp->max_kioc; i++) { + + kioc = adapter->kioc_list + i; + kioc->cmdbuf = (uint64_t)(unsigned long)(mbox_list + i); + kioc->pthru32 = pci_pool_alloc(adapter->pthru_dma_pool, + GFP_KERNEL, &kioc->pthru32_h); + + if (!kioc->pthru32) { + + con_log(CL_ANN, (KERN_WARNING + "megaraid cmm: out of memory, %s %d\n", + __FUNCTION__, __LINE__)); + + rval = (-ENOMEM); + + goto pthru_dma_pool_error; + } + + list_add_tail(&kioc->list, &adapter->kioc_pool); + } + + // Setup the dma pools for data buffers + if ((rval = mraid_mm_setup_dma_pools(adapter)) != 0) { + goto dma_pool_error; + } + + list_add_tail(&adapter->list, &adapters_list_g); + + adapters_count_g++; + + return 0; + +dma_pool_error: + /* Do nothing */ + +pthru_dma_pool_error: + + for (i = 0; i < lld_adp->max_kioc; i++) { + kioc = adapter->kioc_list + i; + if (kioc->pthru32) { + pci_pool_free(adapter->pthru_dma_pool, kioc->pthru32, + kioc->pthru32_h); + } + } + +memalloc_error: + + if (adapter->kioc_list) + kfree(adapter->kioc_list); + + if (adapter->mbox_list) + kfree(adapter->mbox_list); + + if (adapter->pthru_dma_pool) + pci_pool_destroy(adapter->pthru_dma_pool); + + if (adapter) + kfree(adapter); + + return rval; +} + + +/** + * mraid_mm_adapter_app_handle - return the application handle for this adapter + * + * For the given driver data, locate the adadpter in our global list and + * return the corresponding handle, which is also used by applications to + * uniquely identify an adapter. + * + * @param unique_id : adapter unique identifier + * + * @return adapter handle if found in the list + * @return 0 if adapter could not be located, should never happen though + */ +uint32_t +mraid_mm_adapter_app_handle(uint32_t unique_id) +{ + mraid_mmadp_t *adapter; + mraid_mmadp_t *tmp; + int index = 0; + + list_for_each_entry_safe(adapter, tmp, &adapters_list_g, list) { + + if (adapter->unique_id == unique_id) { + + return MKADAP(index); + } + + index++; + } + + return 0; +} + + +/** + * mraid_mm_setup_dma_pools - Set up dma buffer pools per adapter + * + * @adp : Adapter softstate + * + * We maintain a pool of dma buffers per each adapter. 
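Editor's note: the registration path above follows the usual staged-cleanup shape: allocate everything up front, and on failure unwind through labels that free only what was actually obtained. A reduced, self-contained sketch of that shape (resource names are placeholders, not the driver's):

        /* Sketch only: allocate-then-unwind error handling with goto labels. */
        #include <stdlib.h>

        static int register_example(size_t nbytes)
        {
                void *kioc_like = NULL;
                void *mbox_like = NULL;
                int rval = 0;

                kioc_like = malloc(nbytes);
                mbox_like = malloc(nbytes);

                if (!kioc_like || !mbox_like) {
                        rval = -1;
                        goto memalloc_error;
                }

                /* ... deeper setup would add deeper labels, unwound in reverse ... */

                return 0;

        memalloc_error:
                free(mbox_like);        /* free(NULL) is a no-op, like the NULL-checked kfree calls above */
                free(kioc_like);
                return rval;
        }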
Each pool has one + * buffer. E.g, we may have 5 dma pools - one each for 4k, 8k ... 64k buffers. + * We have just one 4k buffer in 4k pool, one 8k buffer in 8k pool etc. We + * dont' want to waste too much memory by allocating more buffers per each + * pool. + */ +static int +mraid_mm_setup_dma_pools(mraid_mmadp_t *adp) +{ + mm_dmapool_t *pool; + int bufsize; + int i; + + /* + * Create MAX_DMA_POOLS number of pools + */ + bufsize = MRAID_MM_INIT_BUFF_SIZE; + + for (i = 0; i < MAX_DMA_POOLS; i++){ + + pool = &adp->dma_pool_list[i]; + + pool->buf_size = bufsize; + spin_lock_init(&pool->lock); + + pool->handle = pci_pool_create("megaraid mm data buffer", + adp->pdev, bufsize, 16, 0); + + if (!pool->handle) { + goto dma_pool_setup_error; + } + + pool->vaddr = pci_pool_alloc(pool->handle, GFP_KERNEL, + &pool->paddr); + + if (!pool->vaddr) + goto dma_pool_setup_error; + + bufsize = bufsize * 2; + } + + return 0; + +dma_pool_setup_error: + + mraid_mm_teardown_dma_pools(adp); + return (-ENOMEM); +} + + +/* + * mraid_mm_unregister_adp - Unregister routine for low level drivers + * Assume no outstanding ioctls to llds. + * + * @unique_id : UID of the adpater + */ +int +mraid_mm_unregister_adp(uint32_t unique_id) +{ + mraid_mmadp_t *adapter; + mraid_mmadp_t *tmp; + + list_for_each_entry_safe(adapter, tmp, &adapters_list_g, list) { + + + if (adapter->unique_id == unique_id) { + + adapters_count_g--; + + list_del_init(&adapter->list); + + mraid_mm_free_adp_resources(adapter); + + kfree(adapter); + + con_log(CL_ANN, ( + "megaraid cmm: Unregistered one adapter:%#x\n", + unique_id)); + + return 0; + } + } + + return (-ENODEV); +} + +/** + * mraid_mm_free_adp_resources - Free adapter softstate + * + * @adp : Adapter softstate + */ +static void +mraid_mm_free_adp_resources(mraid_mmadp_t *adp) +{ + uioc_t *kioc; + int i; + + mraid_mm_teardown_dma_pools(adp); + + for (i = 0; i < adp->max_kioc; i++) { + + kioc = adp->kioc_list + i; + + pci_pool_free(adp->pthru_dma_pool, kioc->pthru32, + kioc->pthru32_h); + } + + kfree(adp->kioc_list); + + kfree(adp->mbox_list); + + pci_pool_destroy(adp->pthru_dma_pool); + + + return; +} + + +/** + * mraid_mm_teardown_dma_pools - Free all per adapter dma buffers + * + * @adp : Adapter softstate + */ +static void +mraid_mm_teardown_dma_pools(mraid_mmadp_t *adp) +{ + int i; + mm_dmapool_t *pool; + + for (i = 0; i < MAX_DMA_POOLS; i++) { + + pool = &adp->dma_pool_list[i]; + + if (pool->handle) { + + if (pool->vaddr) + pci_pool_free(pool->handle, pool->vaddr, + pool->paddr); + + pci_pool_destroy(pool->handle); + pool->handle = NULL; + } + } + + return; +} + +/** + * mraid_mm_init : Module entry point + */ +static int __init +mraid_mm_init(void) +{ + // Announce the driver version + con_log(CL_ANN, (KERN_INFO "megaraid cmm: %s %s\n", + LSI_COMMON_MOD_VERSION, LSI_COMMON_MOD_EXT_VERSION)); + + majorno = register_chrdev(0, "megadev", &lsi_fops); + + if (majorno < 0) { + con_log(CL_ANN, ("megaraid cmm: cannot get major\n")); + return majorno; + } + + init_waitqueue_head(&wait_q); + + INIT_LIST_HEAD(&adapters_list_g); + + return 0; +} + + +/** + * mraid_mm_compat_ioctl : 32bit to 64bit ioctl conversion routine + */ +#ifdef CONFIG_COMPAT +static long +mraid_mm_compat_ioctl(struct file *filep, unsigned int cmd, + unsigned long arg) +{ + int err; + lock_kernel(); + err = mraid_mm_ioctl(NULL, filep, cmd, arg); + unlock_kernel(); + return err; +} +#endif + +/** + * mraid_mm_exit : Module exit point + */ +static void __exit +mraid_mm_exit(void) +{ + con_log(CL_DLEVEL1 , ("exiting common 
mod\n")); + + unregister_chrdev(majorno, "megadev"); +} + +module_init(mraid_mm_init); +module_exit(mraid_mm_exit); + +/* vi: set ts=8 sw=8 tw=78: */ diff --git a/drivers/scsi/megaraid/megaraid_mm.h b/drivers/scsi/megaraid/megaraid_mm.h new file mode 100644 index 000000000000..948a0012ab8c --- /dev/null +++ b/drivers/scsi/megaraid/megaraid_mm.h @@ -0,0 +1,102 @@ +/* + * + * Linux MegaRAID device driver + * + * Copyright (c) 2003-2004 LSI Logic Corporation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * FILE : megaraid_mm.h + */ + +#ifndef MEGARAID_MM_H +#define MEGARAID_MM_H + +#include <linux/spinlock.h> +#include <linux/fs.h> +#include <asm/uaccess.h> +#include <linux/version.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/pci.h> +#include <linux/list.h> +#include <linux/ioctl32.h> + +#include "mbox_defs.h" +#include "megaraid_ioctl.h" + + +#define LSI_COMMON_MOD_VERSION "2.20.2.5" +#define LSI_COMMON_MOD_EXT_VERSION \ + "(Release Date: Fri Jan 21 00:01:03 EST 2005)" + + +#define LSI_DBGLVL dbglevel + +// The smallest dma pool +#define MRAID_MM_INIT_BUFF_SIZE 4096 + +/** + * mimd_t : Old style ioctl packet structure (deprecated) + * + * @inlen : + * @outlen : + * @fca : + * @opcode : + * @subopcode : + * @adapno : + * @buffer : + * @pad : + * @length : + * @mbox : + * @pthru : + * @data : + * @pad : + * + * Note : This structure is DEPRECATED. New applications must use + * : uioc_t structure instead. All new hba drivers use the new + * : format. If we get this mimd packet, we will convert it into + * : new uioc_t format and send it to the hba drivers. + */ + +typedef struct mimd { + + uint32_t inlen; + uint32_t outlen; + + union { + uint8_t fca[16]; + struct { + uint8_t opcode; + uint8_t subopcode; + uint16_t adapno; +#if BITS_PER_LONG == 32 + uint8_t __user *buffer; + uint8_t pad[4]; +#endif +#if BITS_PER_LONG == 64 + uint8_t __user *buffer; +#endif + uint32_t length; + } __attribute__ ((packed)) fcs; + } __attribute__ ((packed)) ui; + + uint8_t mbox[18]; /* 16 bytes + 2 status bytes */ + mraid_passthru_t pthru; + +#if BITS_PER_LONG == 32 + char __user *data; /* buffer <= 4096 for 0x80 commands */ + char pad[4]; +#endif +#if BITS_PER_LONG == 64 + char __user *data; +#endif + +} __attribute__ ((packed))mimd_t; + +#endif // MEGARAID_MM_H + +// vi: set ts=8 sw=8 tw=78: |
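Editor's note on the mimd_t layout above: the BITS_PER_LONG conditionals exist so the deprecated packet has the same size and field offsets whether user space is built 32-bit or 64-bit; each pointer slot always occupies 8 bytes, with explicit padding added where pointers are only 4 bytes wide. A minimal stand-alone illustration of the idiom, using GCC's __SIZEOF_POINTER__ in place of the kernel's BITS_PER_LONG (field names are placeholders):

        /* Sketch only: a packed struct whose pointer slot is 8 bytes on both
         * ILP32 and LP64 builds, keeping the overall layout identical.
         */
        #include <stdint.h>
        #include <stdio.h>

        struct fixed_layout {
        #if __SIZEOF_POINTER__ == 4
                char *buffer;
                char  pad[4];
        #else
                char *buffer;
        #endif
                uint32_t length;
        } __attribute__((packed));

        int main(void)
        {
                /* 12 bytes on both 32-bit and 64-bit builds. */
                printf("sizeof(struct fixed_layout) = %zu\n",
                       sizeof(struct fixed_layout));
                return 0;
        }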