Diffstat (limited to 'drivers')
365 files changed, 35246 insertions(+), 7600 deletions(-)
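Two API changes recur throughout the AMBA-related hunks below: probe routines now take a "const struct amba_id *" (bus.c, nomadik-rng.c, amba-pl08x.c, pl330.c), and the bus-level legacy .suspend/.resume callbacks give way to a const struct dev_pm_ops table reached through drv->pm. As a minimal sketch of what the new shape looks like from a client driver's side (the foo_* names and the periphid value are hypothetical, not taken from this diff):

#include <linux/amba/bus.h>
#include <linux/module.h>
#include <linux/pm.h>

/* Probe now takes a const id pointer, matching the bus.c change below. */
static int foo_probe(struct amba_device *adev, const struct amba_id *id)
{
	return 0;
}

static int foo_remove(struct amba_device *adev)
{
	return 0;
}

/* dev_pm_ops sleep hooks take only a struct device; no pm_message_t. */
static int foo_suspend(struct device *dev)
{
	return 0;
}

static int foo_resume(struct device *dev)
{
	return 0;
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static const struct amba_id foo_ids[] = {
	{ .id = 0x00041000, .mask = 0x000fffff },	/* hypothetical periphid */
	{ },
};

static struct amba_driver foo_driver = {
	.drv = {
		.name	= "foo",
		.pm	= &foo_pm_ops,	/* replaces the removed legacy hooks */
	},
	.probe		= foo_probe,
	.remove		= foo_remove,
	.id_table	= foo_ids,
};

SIMPLE_DEV_PM_OPS() maps all six system-sleep slots onto the given pair, which is usually enough for a leaf driver; the bus code below fills in the full set by hand, including the _noirq phases and runtime PM via the pm_generic_runtime_* helpers.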
diff --git a/drivers/Kconfig b/drivers/Kconfig index 9bfb71ff3a6a..177c7d156933 100644 --- a/drivers/Kconfig +++ b/drivers/Kconfig @@ -117,4 +117,6 @@ source "drivers/staging/Kconfig" source "drivers/platform/Kconfig" source "drivers/clk/Kconfig" + +source "drivers/hwspinlock/Kconfig" endmenu diff --git a/drivers/Makefile b/drivers/Makefile index b423bb16c3a8..3f135b6fb014 100644 --- a/drivers/Makefile +++ b/drivers/Makefile @@ -117,3 +117,5 @@ obj-y += platform/ obj-y += ieee802154/ #common clk code obj-y += clk/ + +obj-$(CONFIG_HWSPINLOCK) += hwspinlock/ diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c index e7df019d29d4..6d2bb2524b6e 100644 --- a/drivers/amba/bus.c +++ b/drivers/amba/bus.c @@ -13,16 +13,17 @@ #include <linux/string.h> #include <linux/slab.h> #include <linux/io.h> +#include <linux/pm.h> +#include <linux/pm_runtime.h> #include <linux/amba/bus.h> #include <asm/irq.h> #include <asm/sizes.h> -#define to_amba_device(d) container_of(d, struct amba_device, dev) #define to_amba_driver(d) container_of(d, struct amba_driver, drv) -static struct amba_id * -amba_lookup(struct amba_id *table, struct amba_device *dev) +static const struct amba_id * +amba_lookup(const struct amba_id *table, struct amba_device *dev) { int ret = 0; @@ -57,26 +58,6 @@ static int amba_uevent(struct device *dev, struct kobj_uevent_env *env) #define amba_uevent NULL #endif -static int amba_suspend(struct device *dev, pm_message_t state) -{ - struct amba_driver *drv = to_amba_driver(dev->driver); - int ret = 0; - - if (dev->driver && drv->suspend) - ret = drv->suspend(to_amba_device(dev), state); - return ret; -} - -static int amba_resume(struct device *dev) -{ - struct amba_driver *drv = to_amba_driver(dev->driver); - int ret = 0; - - if (dev->driver && drv->resume) - ret = drv->resume(to_amba_device(dev)); - return ret; -} - #define amba_attr_func(name,fmt,arg...) 
\ static ssize_t name##_show(struct device *_dev, \ struct device_attribute *attr, char *buf) \ @@ -102,17 +83,330 @@ static struct device_attribute amba_dev_attrs[] = { __ATTR_NULL, }; +#ifdef CONFIG_PM_SLEEP + +static int amba_legacy_suspend(struct device *dev, pm_message_t mesg) +{ + struct amba_driver *adrv = to_amba_driver(dev->driver); + struct amba_device *adev = to_amba_device(dev); + int ret = 0; + + if (dev->driver && adrv->suspend) + ret = adrv->suspend(adev, mesg); + + return ret; +} + +static int amba_legacy_resume(struct device *dev) +{ + struct amba_driver *adrv = to_amba_driver(dev->driver); + struct amba_device *adev = to_amba_device(dev); + int ret = 0; + + if (dev->driver && adrv->resume) + ret = adrv->resume(adev); + + return ret; +} + +static int amba_pm_prepare(struct device *dev) +{ + struct device_driver *drv = dev->driver; + int ret = 0; + + if (drv && drv->pm && drv->pm->prepare) + ret = drv->pm->prepare(dev); + + return ret; +} + +static void amba_pm_complete(struct device *dev) +{ + struct device_driver *drv = dev->driver; + + if (drv && drv->pm && drv->pm->complete) + drv->pm->complete(dev); +} + +#else /* !CONFIG_PM_SLEEP */ + +#define amba_pm_prepare NULL +#define amba_pm_complete NULL + +#endif /* !CONFIG_PM_SLEEP */ + +#ifdef CONFIG_SUSPEND + +static int amba_pm_suspend(struct device *dev) +{ + struct device_driver *drv = dev->driver; + int ret = 0; + + if (!drv) + return 0; + + if (drv->pm) { + if (drv->pm->suspend) + ret = drv->pm->suspend(dev); + } else { + ret = amba_legacy_suspend(dev, PMSG_SUSPEND); + } + + return ret; +} + +static int amba_pm_suspend_noirq(struct device *dev) +{ + struct device_driver *drv = dev->driver; + int ret = 0; + + if (!drv) + return 0; + + if (drv->pm) { + if (drv->pm->suspend_noirq) + ret = drv->pm->suspend_noirq(dev); + } + + return ret; +} + +static int amba_pm_resume(struct device *dev) +{ + struct device_driver *drv = dev->driver; + int ret = 0; + + if (!drv) + return 0; + + if (drv->pm) { + if (drv->pm->resume) + ret = drv->pm->resume(dev); + } else { + ret = amba_legacy_resume(dev); + } + + return ret; +} + +static int amba_pm_resume_noirq(struct device *dev) +{ + struct device_driver *drv = dev->driver; + int ret = 0; + + if (!drv) + return 0; + + if (drv->pm) { + if (drv->pm->resume_noirq) + ret = drv->pm->resume_noirq(dev); + } + + return ret; +} + +#else /* !CONFIG_SUSPEND */ + +#define amba_pm_suspend NULL +#define amba_pm_resume NULL +#define amba_pm_suspend_noirq NULL +#define amba_pm_resume_noirq NULL + +#endif /* !CONFIG_SUSPEND */ + +#ifdef CONFIG_HIBERNATION + +static int amba_pm_freeze(struct device *dev) +{ + struct device_driver *drv = dev->driver; + int ret = 0; + + if (!drv) + return 0; + + if (drv->pm) { + if (drv->pm->freeze) + ret = drv->pm->freeze(dev); + } else { + ret = amba_legacy_suspend(dev, PMSG_FREEZE); + } + + return ret; +} + +static int amba_pm_freeze_noirq(struct device *dev) +{ + struct device_driver *drv = dev->driver; + int ret = 0; + + if (!drv) + return 0; + + if (drv->pm) { + if (drv->pm->freeze_noirq) + ret = drv->pm->freeze_noirq(dev); + } + + return ret; +} + +static int amba_pm_thaw(struct device *dev) +{ + struct device_driver *drv = dev->driver; + int ret = 0; + + if (!drv) + return 0; + + if (drv->pm) { + if (drv->pm->thaw) + ret = drv->pm->thaw(dev); + } else { + ret = amba_legacy_resume(dev); + } + + return ret; +} + +static int amba_pm_thaw_noirq(struct device *dev) +{ + struct device_driver *drv = dev->driver; + int ret = 0; + + if (!drv) + return 0; + + if (drv->pm) { + 
if (drv->pm->thaw_noirq) + ret = drv->pm->thaw_noirq(dev); + } + + return ret; +} + +static int amba_pm_poweroff(struct device *dev) +{ + struct device_driver *drv = dev->driver; + int ret = 0; + + if (!drv) + return 0; + + if (drv->pm) { + if (drv->pm->poweroff) + ret = drv->pm->poweroff(dev); + } else { + ret = amba_legacy_suspend(dev, PMSG_HIBERNATE); + } + + return ret; +} + +static int amba_pm_poweroff_noirq(struct device *dev) +{ + struct device_driver *drv = dev->driver; + int ret = 0; + + if (!drv) + return 0; + + if (drv->pm) { + if (drv->pm->poweroff_noirq) + ret = drv->pm->poweroff_noirq(dev); + } + + return ret; +} + +static int amba_pm_restore(struct device *dev) +{ + struct device_driver *drv = dev->driver; + int ret = 0; + + if (!drv) + return 0; + + if (drv->pm) { + if (drv->pm->restore) + ret = drv->pm->restore(dev); + } else { + ret = amba_legacy_resume(dev); + } + + return ret; +} + +static int amba_pm_restore_noirq(struct device *dev) +{ + struct device_driver *drv = dev->driver; + int ret = 0; + + if (!drv) + return 0; + + if (drv->pm) { + if (drv->pm->restore_noirq) + ret = drv->pm->restore_noirq(dev); + } + + return ret; +} + +#else /* !CONFIG_HIBERNATION */ + +#define amba_pm_freeze NULL +#define amba_pm_thaw NULL +#define amba_pm_poweroff NULL +#define amba_pm_restore NULL +#define amba_pm_freeze_noirq NULL +#define amba_pm_thaw_noirq NULL +#define amba_pm_poweroff_noirq NULL +#define amba_pm_restore_noirq NULL + +#endif /* !CONFIG_HIBERNATION */ + +#ifdef CONFIG_PM + +static const struct dev_pm_ops amba_pm = { + .prepare = amba_pm_prepare, + .complete = amba_pm_complete, + .suspend = amba_pm_suspend, + .resume = amba_pm_resume, + .freeze = amba_pm_freeze, + .thaw = amba_pm_thaw, + .poweroff = amba_pm_poweroff, + .restore = amba_pm_restore, + .suspend_noirq = amba_pm_suspend_noirq, + .resume_noirq = amba_pm_resume_noirq, + .freeze_noirq = amba_pm_freeze_noirq, + .thaw_noirq = amba_pm_thaw_noirq, + .poweroff_noirq = amba_pm_poweroff_noirq, + .restore_noirq = amba_pm_restore_noirq, + SET_RUNTIME_PM_OPS( + pm_generic_runtime_suspend, + pm_generic_runtime_resume, + pm_generic_runtime_idle + ) +}; + +#define AMBA_PM (&amba_pm) + +#else /* !CONFIG_PM */ + +#define AMBA_PM NULL + +#endif /* !CONFIG_PM */ + /* * Primecells are part of the Advanced Microcontroller Bus Architecture, * so we call the bus "amba". */ -static struct bus_type amba_bustype = { +struct bus_type amba_bustype = { .name = "amba", .dev_attrs = amba_dev_attrs, .match = amba_match, .uevent = amba_uevent, - .suspend = amba_suspend, - .resume = amba_resume, + .pm = AMBA_PM, }; static int __init amba_init(void) @@ -188,7 +482,7 @@ static int amba_probe(struct device *dev) { struct amba_device *pcdev = to_amba_device(dev); struct amba_driver *pcdrv = to_amba_driver(dev->driver); - struct amba_id *id = amba_lookup(pcdrv->id_table, pcdev); + const struct amba_id *id = amba_lookup(pcdrv->id_table, pcdev); int ret; do { diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c index ef3ce26bb1f0..0f91e583892e 100644 --- a/drivers/ata/sata_fsl.c +++ b/drivers/ata/sata_fsl.c @@ -6,7 +6,7 @@ * Author: Ashish Kalra <ashish.kalra@freescale.com> * Li Yang <leoli@freescale.com> * - * Copyright (c) 2006-2007 Freescale Semiconductor, Inc. + * Copyright (c) 2006-2007, 2011 Freescale Semiconductor, Inc. 
* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -157,7 +157,8 @@ enum { IE_ON_SINGL_DEVICE_ERR | IE_ON_CMD_COMPLETE, EXT_INDIRECT_SEG_PRD_FLAG = (1 << 31), - DATA_SNOOP_ENABLE = (1 << 22), + DATA_SNOOP_ENABLE_V1 = (1 << 22), + DATA_SNOOP_ENABLE_V2 = (1 << 28), }; /* @@ -260,6 +261,7 @@ struct sata_fsl_host_priv { void __iomem *ssr_base; void __iomem *csr_base; int irq; + int data_snoop; }; static inline unsigned int sata_fsl_tag(unsigned int tag, @@ -312,7 +314,8 @@ static void sata_fsl_setup_cmd_hdr_entry(struct sata_fsl_port_priv *pp, } static unsigned int sata_fsl_fill_sg(struct ata_queued_cmd *qc, void *cmd_desc, - u32 *ttl, dma_addr_t cmd_desc_paddr) + u32 *ttl, dma_addr_t cmd_desc_paddr, + int data_snoop) { struct scatterlist *sg; unsigned int num_prde = 0; @@ -362,8 +365,7 @@ static unsigned int sata_fsl_fill_sg(struct ata_queued_cmd *qc, void *cmd_desc, ttl_dwords += sg_len; prd->dba = cpu_to_le32(sg_addr); - prd->ddc_and_ext = - cpu_to_le32(DATA_SNOOP_ENABLE | (sg_len & ~0x03)); + prd->ddc_and_ext = cpu_to_le32(data_snoop | (sg_len & ~0x03)); VPRINTK("sg_fill, ttl=%d, dba=0x%x, ddc=0x%x\n", ttl_dwords, prd->dba, prd->ddc_and_ext); @@ -378,7 +380,7 @@ static unsigned int sata_fsl_fill_sg(struct ata_queued_cmd *qc, void *cmd_desc, /* set indirect extension flag along with indirect ext. size */ prd_ptr_to_indirect_ext->ddc_and_ext = cpu_to_le32((EXT_INDIRECT_SEG_PRD_FLAG | - DATA_SNOOP_ENABLE | + data_snoop | (indirect_ext_segment_sz & ~0x03))); } @@ -421,7 +423,8 @@ static void sata_fsl_qc_prep(struct ata_queued_cmd *qc) if (qc->flags & ATA_QCFLAG_DMAMAP) num_prde = sata_fsl_fill_sg(qc, (void *)cd, - &ttl_dwords, cd_paddr); + &ttl_dwords, cd_paddr, + host_priv->data_snoop); if (qc->tf.protocol == ATA_PROT_NCQ) desc_info |= FPDMA_QUEUED_CMD; @@ -1349,6 +1352,11 @@ static int sata_fsl_probe(struct platform_device *ofdev) } host_priv->irq = irq; + if (of_device_is_compatible(ofdev->dev.of_node, "fsl,pq-sata-v2")) + host_priv->data_snoop = DATA_SNOOP_ENABLE_V2; + else + host_priv->data_snoop = DATA_SNOOP_ENABLE_V1; + /* allocate host structure */ host = ata_host_alloc_pinfo(&ofdev->dev, ppi, SATA_FSL_MAX_PORTS); @@ -1431,6 +1439,9 @@ static struct of_device_id fsl_sata_match[] = { { .compatible = "fsl,pq-sata", }, + { + .compatible = "fsl,pq-sata-v2", + }, {}, }; diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c index 7d912baf01d4..049650d42c88 100644 --- a/drivers/atm/firestream.c +++ b/drivers/atm/firestream.c @@ -1031,7 +1031,7 @@ static int fs_open(struct atm_vcc *atm_vcc) /* We now use the "submit_command" function to submit commands to the firestream. There is a define up near the definition of that routine that switches this routine between immediate write - to the immediate comamnd registers and queuing the commands in + to the immediate command registers and queuing the commands in the HPTXQ for execution. 
This last technique might be more efficient if we know we're going to submit a whole lot of commands in one go, but this driver is not setup to be able to diff --git a/drivers/block/smart1,2.h b/drivers/block/smart1,2.h index a0b403a6b4ed..e5565fbaeb30 100644 --- a/drivers/block/smart1,2.h +++ b/drivers/block/smart1,2.h @@ -95,7 +95,7 @@ static unsigned long smart4_completed(ctlr_info_t *h) /* * This hardware returns interrupt pending at a different place and * it does not tell us if the fifo is empty, we will have check - * that by getting a 0 back from the comamnd_completed call. + * that by getting a 0 back from the command_completed call. */ static unsigned long smart4_intr_pending(ctlr_info_t *h) { diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 7e0ebd4a1a74..866811428e20 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -433,7 +433,7 @@ static void btusb_isoc_complete(struct urb *urb) } } -static void inline __fill_isoc_descriptor(struct urb *urb, int len, int mtu) +static inline void __fill_isoc_descriptor(struct urb *urb, int len, int mtu) { int i, offset = 0; @@ -780,7 +780,7 @@ static void btusb_notify(struct hci_dev *hdev, unsigned int evt) } } -static int inline __set_isoc_interface(struct hci_dev *hdev, int altsetting) +static inline int __set_isoc_interface(struct hci_dev *hdev, int altsetting) { struct btusb_data *data = hdev->driver_data; struct usb_interface *intf = data->isoc; diff --git a/drivers/char/hw_random/nomadik-rng.c b/drivers/char/hw_random/nomadik-rng.c index a348c7e9aa0b..dd1d143eb8ea 100644 --- a/drivers/char/hw_random/nomadik-rng.c +++ b/drivers/char/hw_random/nomadik-rng.c @@ -39,7 +39,7 @@ static struct hwrng nmk_rng = { .read = nmk_rng_read, }; -static int nmk_rng_probe(struct amba_device *dev, struct amba_id *id) +static int nmk_rng_probe(struct amba_device *dev, const struct amba_id *id) { void __iomem *base; int ret; diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 5cb4d09919d6..0f17ad8585d7 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -1371,7 +1371,7 @@ static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg) goto out; if (cpufreq_driver->suspend) { - ret = cpufreq_driver->suspend(cpu_policy, pmsg); + ret = cpufreq_driver->suspend(cpu_policy); if (ret) printk(KERN_ERR "cpufreq: suspend failed in ->suspend " "step on CPU %u\n", cpu_policy->cpu); diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index 94284c8473b1..33b56e5c5c14 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c @@ -76,8 +76,7 @@ static DEFINE_PER_CPU(struct cpu_dbs_info_s, cs_cpu_dbs_info); static unsigned int dbs_enable; /* number of CPUs using this policy */ /* - * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on - * different CPUs. It protects dbs_enable in governor start/stop. + * dbs_mutex protects dbs_enable in governor start/stop. 
*/ static DEFINE_MUTEX(dbs_mutex); @@ -116,7 +115,7 @@ static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu, if (wall) *wall = (cputime64_t)jiffies_to_usecs(cur_wall_time); - return (cputime64_t)jiffies_to_usecs(idle_time);; + return (cputime64_t)jiffies_to_usecs(idle_time); } static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall) @@ -162,21 +161,12 @@ static struct notifier_block dbs_cpufreq_notifier_block = { }; /************************** sysfs interface ************************/ -static ssize_t show_sampling_rate_max(struct kobject *kobj, - struct attribute *attr, char *buf) -{ - printk_once(KERN_INFO "CPUFREQ: conservative sampling_rate_max " - "sysfs file is deprecated - used by: %s\n", current->comm); - return sprintf(buf, "%u\n", -1U); -} - static ssize_t show_sampling_rate_min(struct kobject *kobj, struct attribute *attr, char *buf) { return sprintf(buf, "%u\n", min_sampling_rate); } -define_one_global_ro(sampling_rate_max); define_one_global_ro(sampling_rate_min); /* cpufreq_conservative Governor Tunables */ @@ -193,33 +183,6 @@ show_one(down_threshold, down_threshold); show_one(ignore_nice_load, ignore_nice); show_one(freq_step, freq_step); -/*** delete after deprecation time ***/ -#define DEPRECATION_MSG(file_name) \ - printk_once(KERN_INFO "CPUFREQ: Per core conservative sysfs " \ - "interface is deprecated - " #file_name "\n"); - -#define show_one_old(file_name) \ -static ssize_t show_##file_name##_old \ -(struct cpufreq_policy *unused, char *buf) \ -{ \ - printk_once(KERN_INFO "CPUFREQ: Per core conservative sysfs " \ - "interface is deprecated - " #file_name "\n"); \ - return show_##file_name(NULL, NULL, buf); \ -} -show_one_old(sampling_rate); -show_one_old(sampling_down_factor); -show_one_old(up_threshold); -show_one_old(down_threshold); -show_one_old(ignore_nice_load); -show_one_old(freq_step); -show_one_old(sampling_rate_min); -show_one_old(sampling_rate_max); - -cpufreq_freq_attr_ro_old(sampling_rate_min); -cpufreq_freq_attr_ro_old(sampling_rate_max); - -/*** delete after deprecation time ***/ - static ssize_t store_sampling_down_factor(struct kobject *a, struct attribute *b, const char *buf, size_t count) @@ -231,10 +194,7 @@ static ssize_t store_sampling_down_factor(struct kobject *a, if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1) return -EINVAL; - mutex_lock(&dbs_mutex); dbs_tuners_ins.sampling_down_factor = input; - mutex_unlock(&dbs_mutex); - return count; } @@ -248,10 +208,7 @@ static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b, if (ret != 1) return -EINVAL; - mutex_lock(&dbs_mutex); dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate); - mutex_unlock(&dbs_mutex); - return count; } @@ -262,16 +219,11 @@ static ssize_t store_up_threshold(struct kobject *a, struct attribute *b, int ret; ret = sscanf(buf, "%u", &input); - mutex_lock(&dbs_mutex); if (ret != 1 || input > 100 || - input <= dbs_tuners_ins.down_threshold) { - mutex_unlock(&dbs_mutex); + input <= dbs_tuners_ins.down_threshold) return -EINVAL; - } dbs_tuners_ins.up_threshold = input; - mutex_unlock(&dbs_mutex); - return count; } @@ -282,17 +234,12 @@ static ssize_t store_down_threshold(struct kobject *a, struct attribute *b, int ret; ret = sscanf(buf, "%u", &input); - mutex_lock(&dbs_mutex); /* cannot be lower than 11 otherwise freq will not fall */ if (ret != 1 || input < 11 || input > 100 || - input >= dbs_tuners_ins.up_threshold) { - mutex_unlock(&dbs_mutex); + input >= dbs_tuners_ins.up_threshold) return -EINVAL; - } 
dbs_tuners_ins.down_threshold = input; - mutex_unlock(&dbs_mutex); - return count; } @@ -311,11 +258,9 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, if (input > 1) input = 1; - mutex_lock(&dbs_mutex); - if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */ - mutex_unlock(&dbs_mutex); + if (input == dbs_tuners_ins.ignore_nice) /* nothing to do */ return count; - } + dbs_tuners_ins.ignore_nice = input; /* we need to re-evaluate prev_cpu_idle */ @@ -327,8 +272,6 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, if (dbs_tuners_ins.ignore_nice) dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice; } - mutex_unlock(&dbs_mutex); - return count; } @@ -347,10 +290,7 @@ static ssize_t store_freq_step(struct kobject *a, struct attribute *b, /* no need to test here if freq_step is zero as the user might actually * want this, they would be crazy though :) */ - mutex_lock(&dbs_mutex); dbs_tuners_ins.freq_step = input; - mutex_unlock(&dbs_mutex); - return count; } @@ -362,7 +302,6 @@ define_one_global_rw(ignore_nice_load); define_one_global_rw(freq_step); static struct attribute *dbs_attributes[] = { - &sampling_rate_max.attr, &sampling_rate_min.attr, &sampling_rate.attr, &sampling_down_factor.attr, @@ -378,49 +317,6 @@ static struct attribute_group dbs_attr_group = { .name = "conservative", }; -/*** delete after deprecation time ***/ - -#define write_one_old(file_name) \ -static ssize_t store_##file_name##_old \ -(struct cpufreq_policy *unused, const char *buf, size_t count) \ -{ \ - printk_once(KERN_INFO "CPUFREQ: Per core conservative sysfs " \ - "interface is deprecated - " #file_name "\n"); \ - return store_##file_name(NULL, NULL, buf, count); \ -} -write_one_old(sampling_rate); -write_one_old(sampling_down_factor); -write_one_old(up_threshold); -write_one_old(down_threshold); -write_one_old(ignore_nice_load); -write_one_old(freq_step); - -cpufreq_freq_attr_rw_old(sampling_rate); -cpufreq_freq_attr_rw_old(sampling_down_factor); -cpufreq_freq_attr_rw_old(up_threshold); -cpufreq_freq_attr_rw_old(down_threshold); -cpufreq_freq_attr_rw_old(ignore_nice_load); -cpufreq_freq_attr_rw_old(freq_step); - -static struct attribute *dbs_attributes_old[] = { - &sampling_rate_max_old.attr, - &sampling_rate_min_old.attr, - &sampling_rate_old.attr, - &sampling_down_factor_old.attr, - &up_threshold_old.attr, - &down_threshold_old.attr, - &ignore_nice_load_old.attr, - &freq_step_old.attr, - NULL -}; - -static struct attribute_group dbs_attr_group_old = { - .attrs = dbs_attributes_old, - .name = "conservative", -}; - -/*** delete after deprecation time ***/ - /************************** sysfs end ************************/ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) @@ -596,12 +492,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, mutex_lock(&dbs_mutex); - rc = sysfs_create_group(&policy->kobj, &dbs_attr_group_old); - if (rc) { - mutex_unlock(&dbs_mutex); - return rc; - } - for_each_cpu(j, policy->cpus) { struct cpu_dbs_info_s *j_dbs_info; j_dbs_info = &per_cpu(cs_cpu_dbs_info, j); @@ -664,7 +554,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, dbs_timer_exit(this_dbs_info); mutex_lock(&dbs_mutex); - sysfs_remove_group(&policy->kobj, &dbs_attr_group_old); dbs_enable--; mutex_destroy(&this_dbs_info->timer_mutex); diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index 58aa85ea5ec6..891360edecdd 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ 
b/drivers/cpufreq/cpufreq_ondemand.c @@ -99,8 +99,7 @@ static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info); static unsigned int dbs_enable; /* number of CPUs using this policy */ /* - * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on - * different CPUs. It protects dbs_enable in governor start/stop. + * dbs_mutex protects dbs_enable in governor start/stop. */ static DEFINE_MUTEX(dbs_mutex); @@ -235,21 +234,12 @@ static void ondemand_powersave_bias_init(void) /************************** sysfs interface ************************/ -static ssize_t show_sampling_rate_max(struct kobject *kobj, - struct attribute *attr, char *buf) -{ - printk_once(KERN_INFO "CPUFREQ: ondemand sampling_rate_max " - "sysfs file is deprecated - used by: %s\n", current->comm); - return sprintf(buf, "%u\n", -1U); -} - static ssize_t show_sampling_rate_min(struct kobject *kobj, struct attribute *attr, char *buf) { return sprintf(buf, "%u\n", min_sampling_rate); } -define_one_global_ro(sampling_rate_max); define_one_global_ro(sampling_rate_min); /* cpufreq_ondemand Governor Tunables */ @@ -266,32 +256,6 @@ show_one(sampling_down_factor, sampling_down_factor); show_one(ignore_nice_load, ignore_nice); show_one(powersave_bias, powersave_bias); -/*** delete after deprecation time ***/ - -#define DEPRECATION_MSG(file_name) \ - printk_once(KERN_INFO "CPUFREQ: Per core ondemand sysfs " \ - "interface is deprecated - " #file_name "\n"); - -#define show_one_old(file_name) \ -static ssize_t show_##file_name##_old \ -(struct cpufreq_policy *unused, char *buf) \ -{ \ - printk_once(KERN_INFO "CPUFREQ: Per core ondemand sysfs " \ - "interface is deprecated - " #file_name "\n"); \ - return show_##file_name(NULL, NULL, buf); \ -} -show_one_old(sampling_rate); -show_one_old(up_threshold); -show_one_old(ignore_nice_load); -show_one_old(powersave_bias); -show_one_old(sampling_rate_min); -show_one_old(sampling_rate_max); - -cpufreq_freq_attr_ro_old(sampling_rate_min); -cpufreq_freq_attr_ro_old(sampling_rate_max); - -/*** delete after deprecation time ***/ - static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b, const char *buf, size_t count) { @@ -300,11 +264,7 @@ static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b, ret = sscanf(buf, "%u", &input); if (ret != 1) return -EINVAL; - - mutex_lock(&dbs_mutex); dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate); - mutex_unlock(&dbs_mutex); - return count; } @@ -317,11 +277,7 @@ static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b, ret = sscanf(buf, "%u", &input); if (ret != 1) return -EINVAL; - - mutex_lock(&dbs_mutex); dbs_tuners_ins.io_is_busy = !!input; - mutex_unlock(&dbs_mutex); - return count; } @@ -336,11 +292,7 @@ static ssize_t store_up_threshold(struct kobject *a, struct attribute *b, input < MIN_FREQUENCY_UP_THRESHOLD) { return -EINVAL; } - - mutex_lock(&dbs_mutex); dbs_tuners_ins.up_threshold = input; - mutex_unlock(&dbs_mutex); - return count; } @@ -353,7 +305,6 @@ static ssize_t store_sampling_down_factor(struct kobject *a, if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1) return -EINVAL; - mutex_lock(&dbs_mutex); dbs_tuners_ins.sampling_down_factor = input; /* Reset down sampling multiplier in case it was active */ @@ -362,8 +313,6 @@ static ssize_t store_sampling_down_factor(struct kobject *a, dbs_info = &per_cpu(od_cpu_dbs_info, j); dbs_info->rate_mult = 1; } - mutex_unlock(&dbs_mutex); - return count; } @@ -382,9 +331,7 @@ static ssize_t store_ignore_nice_load(struct 
kobject *a, struct attribute *b, if (input > 1) input = 1; - mutex_lock(&dbs_mutex); if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */ - mutex_unlock(&dbs_mutex); return count; } dbs_tuners_ins.ignore_nice = input; @@ -399,8 +346,6 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice; } - mutex_unlock(&dbs_mutex); - return count; } @@ -417,11 +362,8 @@ static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b, if (input > 1000) input = 1000; - mutex_lock(&dbs_mutex); dbs_tuners_ins.powersave_bias = input; ondemand_powersave_bias_init(); - mutex_unlock(&dbs_mutex); - return count; } @@ -433,7 +375,6 @@ define_one_global_rw(ignore_nice_load); define_one_global_rw(powersave_bias); static struct attribute *dbs_attributes[] = { - &sampling_rate_max.attr, &sampling_rate_min.attr, &sampling_rate.attr, &up_threshold.attr, @@ -449,43 +390,6 @@ static struct attribute_group dbs_attr_group = { .name = "ondemand", }; -/*** delete after deprecation time ***/ - -#define write_one_old(file_name) \ -static ssize_t store_##file_name##_old \ -(struct cpufreq_policy *unused, const char *buf, size_t count) \ -{ \ - printk_once(KERN_INFO "CPUFREQ: Per core ondemand sysfs " \ - "interface is deprecated - " #file_name "\n"); \ - return store_##file_name(NULL, NULL, buf, count); \ -} -write_one_old(sampling_rate); -write_one_old(up_threshold); -write_one_old(ignore_nice_load); -write_one_old(powersave_bias); - -cpufreq_freq_attr_rw_old(sampling_rate); -cpufreq_freq_attr_rw_old(up_threshold); -cpufreq_freq_attr_rw_old(ignore_nice_load); -cpufreq_freq_attr_rw_old(powersave_bias); - -static struct attribute *dbs_attributes_old[] = { - &sampling_rate_max_old.attr, - &sampling_rate_min_old.attr, - &sampling_rate_old.attr, - &up_threshold_old.attr, - &ignore_nice_load_old.attr, - &powersave_bias_old.attr, - NULL -}; - -static struct attribute_group dbs_attr_group_old = { - .attrs = dbs_attributes_old, - .name = "ondemand", -}; - -/*** delete after deprecation time ***/ - /************************** sysfs end ************************/ static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq) @@ -642,12 +546,7 @@ static void do_dbs_timer(struct work_struct *work) unsigned int cpu = dbs_info->cpu; int sample_type = dbs_info->sample_type; - /* We want all CPUs to do sampling nearly on same jiffy */ - int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate - * dbs_info->rate_mult); - - if (num_online_cpus() > 1) - delay -= jiffies % delay; + int delay; mutex_lock(&dbs_info->timer_mutex); @@ -660,10 +559,20 @@ static void do_dbs_timer(struct work_struct *work) /* Setup timer for SUB_SAMPLE */ dbs_info->sample_type = DBS_SUB_SAMPLE; delay = dbs_info->freq_hi_jiffies; + } else { + /* We want all CPUs to do sampling nearly on + * same jiffy + */ + delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate + * dbs_info->rate_mult); + + if (num_online_cpus() > 1) + delay -= jiffies % delay; } } else { __cpufreq_driver_target(dbs_info->cur_policy, dbs_info->freq_lo, CPUFREQ_RELATION_H); + delay = dbs_info->freq_lo_jiffies; } schedule_delayed_work_on(cpu, &dbs_info->work, delay); mutex_unlock(&dbs_info->timer_mutex); @@ -727,12 +636,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, mutex_lock(&dbs_mutex); - rc = sysfs_create_group(&policy->kobj, &dbs_attr_group_old); - if (rc) { - mutex_unlock(&dbs_mutex); - return rc; - } - dbs_enable++; for_each_cpu(j, policy->cpus) { struct cpu_dbs_info_s 
*j_dbs_info; @@ -785,7 +688,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy, dbs_timer_exit(this_dbs_info); mutex_lock(&dbs_mutex); - sysfs_remove_group(&policy->kobj, &dbs_attr_group_old); mutex_destroy(&this_dbs_info->timer_mutex); dbs_enable--; mutex_unlock(&dbs_mutex); diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c index 0310ffaec9df..be7917ec40c9 100644 --- a/drivers/cpuidle/sysfs.c +++ b/drivers/cpuidle/sysfs.c @@ -300,7 +300,7 @@ static struct kobj_type ktype_state_cpuidle = { .release = cpuidle_state_sysfs_release, }; -static void inline cpuidle_free_state_kobj(struct cpuidle_device *device, int i) +static inline void cpuidle_free_state_kobj(struct cpuidle_device *device, int i) { kobject_put(&device->kobjs[i]->kobj); wait_for_completion(&device->kobjs[i]->kobj_unregister); diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index 07bca4970e50..e6d7228b1479 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c @@ -1845,7 +1845,7 @@ static inline void init_pl08x_debugfs(struct pl08x_driver_data *pl08x) } #endif -static int pl08x_probe(struct amba_device *adev, struct amba_id *id) +static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) { struct pl08x_driver_data *pl08x; const struct vendor_data *vd = id->data; diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c index a6656834f0ff..00deabd9a04b 100644 --- a/drivers/dma/coh901318.c +++ b/drivers/dma/coh901318.c @@ -849,7 +849,7 @@ static irqreturn_t dma_irq_handler(int irq, void *dev_id) /* Must clear TC interrupt before calling * dma_tc_handle - * in case tc_handle initate a new dma job + * in case tc_handle initiate a new dma job */ __set_bit(i, virtbase + COH901318_TC_INT_CLEAR1); @@ -894,7 +894,7 @@ static irqreturn_t dma_irq_handler(int irq, void *dev_id) } /* Must clear TC interrupt before calling * dma_tc_handle - * in case tc_handle initate a new dma job + * in case tc_handle initiate a new dma job */ __set_bit(i, virtbase + COH901318_TC_INT_CLEAR2); diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 7c50f6dfd3f4..6abe1ec1f2ce 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -657,7 +657,7 @@ static irqreturn_t pl330_irq_handler(int irq, void *data) } static int __devinit -pl330_probe(struct amba_device *adev, struct amba_id *id) +pl330_probe(struct amba_device *adev, const struct amba_id *id) { struct dma_pl330_platdata *pdat; struct dma_pl330_dmac *pdmac; diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c index 28720d3103c4..6451b581a70b 100644 --- a/drivers/dma/shdma.c +++ b/drivers/dma/shdma.c @@ -750,7 +750,7 @@ static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan) return; } - /* Find the first not transferred desciptor */ + /* Find the first not transferred descriptor */ list_for_each_entry(desc, &sh_chan->ld_queue, node) if (desc->mark == DESC_SUBMITTED) { dev_dbg(sh_chan->dev, "Queue #%d to %d: %u@%x -> %x\n", diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c index 3b88a4e7c98a..f69f90a61873 100644 --- a/drivers/dma/timb_dma.c +++ b/drivers/dma/timb_dma.c @@ -629,7 +629,7 @@ static int td_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, desc_node) list_move(&td_desc->desc_node, &td_chan->free_list); - /* now tear down the runnning */ + /* now tear down the running */ __td_finish(td_chan); spin_unlock_bh(&td_chan->lock); diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig index fe70a341bd8b..fac1a2002e67 100644 --- a/drivers/edac/Kconfig +++ b/drivers/edac/Kconfig @@ -7,7 
+7,7 @@ menuconfig EDAC bool "EDAC (Error Detection And Correction) reporting" depends on HAS_IOMEM - depends on X86 || PPC + depends on X86 || PPC || TILE help EDAC is designed to report errors in the core system. These are low-level errors that are reported in the CPU or @@ -282,4 +282,12 @@ config EDAC_CPC925 a companion chip to the PowerPC 970 family of processors. +config EDAC_TILE + tristate "Tilera Memory Controller" + depends on EDAC_MM_EDAC && TILE + default y + help + Support for error detection and correction on the + Tilera memory controller. + endif # EDAC diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile index ba2898b3639b..3e239133e29e 100644 --- a/drivers/edac/Makefile +++ b/drivers/edac/Makefile @@ -54,3 +54,4 @@ obj-$(CONFIG_EDAC_PPC4XX) += ppc4xx_edac.o obj-$(CONFIG_EDAC_AMD8111) += amd8111_edac.o obj-$(CONFIG_EDAC_AMD8131) += amd8131_edac.o +obj-$(CONFIG_EDAC_TILE) += tile_edac.o diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index 23e03554f0d3..0be30e978c85 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -25,59 +25,12 @@ static struct mem_ctl_info **mcis; static struct ecc_settings **ecc_stngs; /* - * Address to DRAM bank mapping: see F2x80 for K8 and F2x[1,0]80 for Fam10 and - * later. - */ -static int ddr2_dbam_revCG[] = { - [0] = 32, - [1] = 64, - [2] = 128, - [3] = 256, - [4] = 512, - [5] = 1024, - [6] = 2048, -}; - -static int ddr2_dbam_revD[] = { - [0] = 32, - [1] = 64, - [2 ... 3] = 128, - [4] = 256, - [5] = 512, - [6] = 256, - [7] = 512, - [8 ... 9] = 1024, - [10] = 2048, -}; - -static int ddr2_dbam[] = { [0] = 128, - [1] = 256, - [2 ... 4] = 512, - [5 ... 6] = 1024, - [7 ... 8] = 2048, - [9 ... 10] = 4096, - [11] = 8192, -}; - -static int ddr3_dbam[] = { [0] = -1, - [1] = 256, - [2] = 512, - [3 ... 4] = -1, - [5 ... 6] = 1024, - [7 ... 8] = 2048, - [9 ... 10] = 4096, - [11] = 8192, -}; - -/* * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing * bandwidth to a valid bit pattern. The 'set' operation finds the 'matching- * or higher value'. * *FIXME: Produce a better mapping/linearisation. */ - - struct scrubrate { u32 scrubval; /* bit pattern for scrub rate */ u32 bandwidth; /* bandwidth consumed (bytes/sec) */ @@ -107,6 +60,79 @@ struct scrubrate { { 0x00, 0UL}, /* scrubbing off */ }; +static int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset, + u32 *val, const char *func) +{ + int err = 0; + + err = pci_read_config_dword(pdev, offset, val); + if (err) + amd64_warn("%s: error reading F%dx%03x.\n", + func, PCI_FUNC(pdev->devfn), offset); + + return err; +} + +int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset, + u32 val, const char *func) +{ + int err = 0; + + err = pci_write_config_dword(pdev, offset, val); + if (err) + amd64_warn("%s: error writing to F%dx%03x.\n", + func, PCI_FUNC(pdev->devfn), offset); + + return err; +} + +/* + * + * Depending on the family, F2 DCT reads need special handling: + * + * K8: has a single DCT only + * + * F10h: each DCT has its own set of regs + * DCT0 -> F2x040.. + * DCT1 -> F2x140.. 
+ * + * F15h: we select which DCT we access using F1x10C[DctCfgSel] + * + */ +static int k8_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val, + const char *func) +{ + if (addr >= 0x100) + return -EINVAL; + + return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func); +} + +static int f10_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val, + const char *func) +{ + return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func); +} + +static int f15_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val, + const char *func) +{ + u32 reg = 0; + u8 dct = 0; + + if (addr >= 0x140 && addr <= 0x1a0) { + dct = 1; + addr -= 0x100; + } + + amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, ®); + reg &= 0xfffffffe; + reg |= dct; + amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg); + + return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func); +} + /* * Memory scrubber control interface. For K8, memory scrubbing is handled by * hardware and can involve L2 cache, dcache as well as the main memory. With @@ -156,7 +182,7 @@ static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate) scrubval = scrubrates[i].scrubval; - pci_write_bits32(ctl, K8_SCRCTRL, scrubval, 0x001F); + pci_write_bits32(ctl, SCRCTRL, scrubval, 0x001F); if (scrubval) return scrubrates[i].bandwidth; @@ -167,8 +193,12 @@ static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate) static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 bw) { struct amd64_pvt *pvt = mci->pvt_info; + u32 min_scrubrate = 0x5; + + if (boot_cpu_data.x86 == 0xf) + min_scrubrate = 0x0; - return __amd64_set_scrub_rate(pvt->F3, bw, pvt->min_scrubrate); + return __amd64_set_scrub_rate(pvt->F3, bw, min_scrubrate); } static int amd64_get_scrub_rate(struct mem_ctl_info *mci) @@ -177,7 +207,7 @@ static int amd64_get_scrub_rate(struct mem_ctl_info *mci) u32 scrubval = 0; int i, retval = -EINVAL; - amd64_read_pci_cfg(pvt->F3, K8_SCRCTRL, &scrubval); + amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval); scrubval = scrubval & 0x001F; @@ -192,63 +222,14 @@ static int amd64_get_scrub_rate(struct mem_ctl_info *mci) return retval; } -/* Map from a CSROW entry to the mask entry that operates on it */ -static inline u32 amd64_map_to_dcs_mask(struct amd64_pvt *pvt, int csrow) -{ - if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) - return csrow; - else - return csrow >> 1; -} - -/* return the 'base' address the i'th CS entry of the 'dct' DRAM controller */ -static u32 amd64_get_dct_base(struct amd64_pvt *pvt, int dct, int csrow) -{ - if (dct == 0) - return pvt->dcsb0[csrow]; - else - return pvt->dcsb1[csrow]; -} - -/* - * Return the 'mask' address the i'th CS entry. This function is needed because - * there number of DCSM registers on Rev E and prior vs Rev F and later is - * different. - */ -static u32 amd64_get_dct_mask(struct amd64_pvt *pvt, int dct, int csrow) -{ - if (dct == 0) - return pvt->dcsm0[amd64_map_to_dcs_mask(pvt, csrow)]; - else - return pvt->dcsm1[amd64_map_to_dcs_mask(pvt, csrow)]; -} - - /* - * In *base and *limit, pass back the full 40-bit base and limit physical - * addresses for the node given by node_id. This information is obtained from - * DRAM Base (section 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers. The - * base and limit addresses are of type SysAddr, as defined at the start of - * section 3.4.4 (p. 70). They are the lowest and highest physical addresses - * in the address range they represent. 
+ * returns true if the SysAddr given by sys_addr matches the + * DRAM base/limit associated with node_id */ -static void amd64_get_base_and_limit(struct amd64_pvt *pvt, int node_id, - u64 *base, u64 *limit) +static bool amd64_base_limit_match(struct amd64_pvt *pvt, u64 sys_addr, + unsigned nid) { - *base = pvt->dram_base[node_id]; - *limit = pvt->dram_limit[node_id]; -} - -/* - * Return 1 if the SysAddr given by sys_addr matches the base/limit associated - * with node_id - */ -static int amd64_base_limit_match(struct amd64_pvt *pvt, - u64 sys_addr, int node_id) -{ - u64 base, limit, addr; - - amd64_get_base_and_limit(pvt, node_id, &base, &limit); + u64 addr; /* The K8 treats this as a 40-bit value. However, bits 63-40 will be * all ones if the most significant implemented address bit is 1. @@ -258,7 +239,8 @@ static int amd64_base_limit_match(struct amd64_pvt *pvt, */ addr = sys_addr & 0x000000ffffffffffull; - return (addr >= base) && (addr <= limit); + return ((addr >= get_dram_base(pvt, nid)) && + (addr <= get_dram_limit(pvt, nid))); } /* @@ -271,7 +253,7 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci, u64 sys_addr) { struct amd64_pvt *pvt; - int node_id; + unsigned node_id; u32 intlv_en, bits; /* @@ -285,10 +267,10 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci, * registers. Therefore we arbitrarily choose to read it from the * register for node 0. */ - intlv_en = pvt->dram_IntlvEn[0]; + intlv_en = dram_intlv_en(pvt, 0); if (intlv_en == 0) { - for (node_id = 0; node_id < DRAM_REG_COUNT; node_id++) { + for (node_id = 0; node_id < DRAM_RANGES; node_id++) { if (amd64_base_limit_match(pvt, sys_addr, node_id)) goto found; } @@ -305,10 +287,10 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci, bits = (((u32) sys_addr) >> 12) & intlv_en; for (node_id = 0; ; ) { - if ((pvt->dram_IntlvSel[node_id] & intlv_en) == bits) + if ((dram_intlv_sel(pvt, node_id) & intlv_en) == bits) break; /* intlv_sel field matches */ - if (++node_id >= DRAM_REG_COUNT) + if (++node_id >= DRAM_RANGES) goto err_no_match; } @@ -321,7 +303,7 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci, } found: - return edac_mc_find(node_id); + return edac_mc_find((int)node_id); err_no_match: debugf2("sys_addr 0x%lx doesn't match any node\n", @@ -331,37 +313,50 @@ err_no_match: } /* - * Extract the DRAM CS base address from selected csrow register. + * compute the CS base address of the @csrow on the DRAM controller @dct. + * For details see F2x[5C:40] in the processor's BKDG */ -static u64 base_from_dct_base(struct amd64_pvt *pvt, int csrow) +static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct, + u64 *base, u64 *mask) { - return ((u64) (amd64_get_dct_base(pvt, 0, csrow) & pvt->dcsb_base)) << - pvt->dcs_shift; -} + u64 csbase, csmask, base_bits, mask_bits; + u8 addr_shift; -/* - * Extract the mask from the dcsb0[csrow] entry in a CPU revision-specific way. - */ -static u64 mask_from_dct_mask(struct amd64_pvt *pvt, int csrow) -{ - u64 dcsm_bits, other_bits; - u64 mask; - - /* Extract bits from DRAM CS Mask. 
*/ - dcsm_bits = amd64_get_dct_mask(pvt, 0, csrow) & pvt->dcsm_mask; + if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) { + csbase = pvt->csels[dct].csbases[csrow]; + csmask = pvt->csels[dct].csmasks[csrow]; + base_bits = GENMASK(21, 31) | GENMASK(9, 15); + mask_bits = GENMASK(21, 29) | GENMASK(9, 15); + addr_shift = 4; + } else { + csbase = pvt->csels[dct].csbases[csrow]; + csmask = pvt->csels[dct].csmasks[csrow >> 1]; + addr_shift = 8; - other_bits = pvt->dcsm_mask; - other_bits = ~(other_bits << pvt->dcs_shift); + if (boot_cpu_data.x86 == 0x15) + base_bits = mask_bits = GENMASK(19,30) | GENMASK(5,13); + else + base_bits = mask_bits = GENMASK(19,28) | GENMASK(5,13); + } - /* - * The extracted bits from DCSM belong in the spaces represented by - * the cleared bits in other_bits. - */ - mask = (dcsm_bits << pvt->dcs_shift) | other_bits; + *base = (csbase & base_bits) << addr_shift; - return mask; + *mask = ~0ULL; + /* poke holes for the csmask */ + *mask &= ~(mask_bits << addr_shift); + /* OR them in */ + *mask |= (csmask & mask_bits) << addr_shift; } +#define for_each_chip_select(i, dct, pvt) \ + for (i = 0; i < pvt->csels[dct].b_cnt; i++) + +#define chip_select_base(i, dct, pvt) \ + pvt->csels[dct].csbases[i] + +#define for_each_chip_select_mask(i, dct, pvt) \ + for (i = 0; i < pvt->csels[dct].m_cnt; i++) + /* * @input_addr is an InputAddr associated with the node given by mci. Return the * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr). @@ -374,19 +369,13 @@ static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr) pvt = mci->pvt_info; - /* - * Here we use the DRAM CS Base and DRAM CS Mask registers. For each CS - * base/mask register pair, test the condition shown near the start of - * section 3.5.4 (p. 84, BKDG #26094, K8, revA-E). - */ - for (csrow = 0; csrow < pvt->cs_count; csrow++) { - - /* This DRAM chip select is disabled on this node */ - if ((pvt->dcsb0[csrow] & K8_DCSB_CS_ENABLE) == 0) + for_each_chip_select(csrow, 0, pvt) { + if (!csrow_enabled(csrow, 0, pvt)) continue; - base = base_from_dct_base(pvt, csrow); - mask = ~mask_from_dct_mask(pvt, csrow); + get_cs_base_and_mask(pvt, csrow, 0, &base, &mask); + + mask = ~mask; if ((input_addr & mask) == (base & mask)) { debugf2("InputAddr 0x%lx matches csrow %d (node %d)\n", @@ -396,7 +385,6 @@ static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr) return csrow; } } - debugf2("no matching csrow for InputAddr 0x%lx (MC node %d)\n", (unsigned long)input_addr, pvt->mc_node_id); @@ -404,19 +392,6 @@ static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr) } /* - * Return the base value defined by the DRAM Base register for the node - * represented by mci. This function returns the full 40-bit value despite the - * fact that the register only stores bits 39-24 of the value. See section - * 3.4.4.1 (BKDG #26094, K8, revA-E) - */ -static inline u64 get_dram_base(struct mem_ctl_info *mci) -{ - struct amd64_pvt *pvt = mci->pvt_info; - - return pvt->dram_base[pvt->mc_node_id]; -} - -/* * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094) * for the node represented by mci. Info is passed back in *hole_base, * *hole_offset, and *hole_size. 
Function returns 0 if info is valid or 1 if @@ -445,14 +420,13 @@ int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base, return 1; } - /* only valid for Fam10h */ - if (boot_cpu_data.x86 == 0x10 && - (pvt->dhar & F10_DRAM_MEM_HOIST_VALID) == 0) { + /* valid for Fam10h and above */ + if (boot_cpu_data.x86 >= 0x10 && !dhar_mem_hoist_valid(pvt)) { debugf1(" Dram Memory Hoisting is DISABLED on this system\n"); return 1; } - if ((pvt->dhar & DHAR_VALID) == 0) { + if (!dhar_valid(pvt)) { debugf1(" Dram Memory Hoisting is DISABLED on this node %d\n", pvt->mc_node_id); return 1; @@ -476,15 +450,15 @@ int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base, * addresses in the hole so that they start at 0x100000000. */ - base = dhar_base(pvt->dhar); + base = dhar_base(pvt); *hole_base = base; *hole_size = (0x1ull << 32) - base; if (boot_cpu_data.x86 > 0xf) - *hole_offset = f10_dhar_offset(pvt->dhar); + *hole_offset = f10_dhar_offset(pvt); else - *hole_offset = k8_dhar_offset(pvt->dhar); + *hole_offset = k8_dhar_offset(pvt); debugf1(" DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n", pvt->mc_node_id, (unsigned long)*hole_base, @@ -525,10 +499,11 @@ EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info); */ static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr) { + struct amd64_pvt *pvt = mci->pvt_info; u64 dram_base, hole_base, hole_offset, hole_size, dram_addr; int ret = 0; - dram_base = get_dram_base(mci); + dram_base = get_dram_base(pvt, pvt->mc_node_id); ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset, &hole_size); @@ -556,7 +531,7 @@ static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr) * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture * Programmer's Manual Volume 1 Application Programming. */ - dram_addr = (sys_addr & 0xffffffffffull) - dram_base; + dram_addr = (sys_addr & GENMASK(0, 39)) - dram_base; debugf2("using DRAM Base register to translate SysAddr 0x%lx to " "DramAddr 0x%lx\n", (unsigned long)sys_addr, @@ -592,9 +567,9 @@ static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr) * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E) * concerning translating a DramAddr to an InputAddr. 
*/ - intlv_shift = num_node_interleave_bits(pvt->dram_IntlvEn[0]); - input_addr = ((dram_addr >> intlv_shift) & 0xffffff000ull) + - (dram_addr & 0xfff); + intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0)); + input_addr = ((dram_addr >> intlv_shift) & GENMASK(12, 35)) + + (dram_addr & 0xfff); debugf2(" Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n", intlv_shift, (unsigned long)dram_addr, @@ -628,7 +603,7 @@ static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr) static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr) { struct amd64_pvt *pvt; - int node_id, intlv_shift; + unsigned node_id, intlv_shift; u64 bits, dram_addr; u32 intlv_sel; @@ -642,10 +617,10 @@ static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr) */ pvt = mci->pvt_info; node_id = pvt->mc_node_id; - BUG_ON((node_id < 0) || (node_id > 7)); - intlv_shift = num_node_interleave_bits(pvt->dram_IntlvEn[0]); + BUG_ON(node_id > 7); + intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0)); if (intlv_shift == 0) { debugf1(" InputAddr 0x%lx translates to DramAddr of " "same value\n", (unsigned long)input_addr); @@ -653,10 +628,10 @@ static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr) return input_addr; } - bits = ((input_addr & 0xffffff000ull) << intlv_shift) + - (input_addr & 0xfff); + bits = ((input_addr & GENMASK(12, 35)) << intlv_shift) + + (input_addr & 0xfff); - intlv_sel = pvt->dram_IntlvSel[node_id] & ((1 << intlv_shift) - 1); + intlv_sel = dram_intlv_sel(pvt, node_id) & ((1 << intlv_shift) - 1); dram_addr = bits + (intlv_sel << 12); debugf1("InputAddr 0x%lx translates to DramAddr 0x%lx " @@ -673,7 +648,7 @@ static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr) static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr) { struct amd64_pvt *pvt = mci->pvt_info; - u64 hole_base, hole_offset, hole_size, base, limit, sys_addr; + u64 hole_base, hole_offset, hole_size, base, sys_addr; int ret = 0; ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset, @@ -691,7 +666,7 @@ static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr) } } - amd64_get_base_and_limit(pvt, pvt->mc_node_id, &base, &limit); + base = get_dram_base(pvt, pvt->mc_node_id); sys_addr = dram_addr + base; /* @@ -736,13 +711,12 @@ static void find_csrow_limits(struct mem_ctl_info *mci, int csrow, u64 base, mask; pvt = mci->pvt_info; - BUG_ON((csrow < 0) || (csrow >= pvt->cs_count)); + BUG_ON((csrow < 0) || (csrow >= pvt->csels[0].b_cnt)); - base = base_from_dct_base(pvt, csrow); - mask = mask_from_dct_mask(pvt, csrow); + get_cs_base_and_mask(pvt, csrow, 0, &base, &mask); *input_addr_min = base & ~mask; - *input_addr_max = base | mask | pvt->dcs_mask_notused; + *input_addr_max = base | mask; } /* Map the Error address to a PAGE and PAGE OFFSET. */ @@ -775,18 +749,13 @@ static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr) static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16); -static u16 extract_syndrome(struct err_regs *err) -{ - return ((err->nbsh >> 15) & 0xff) | ((err->nbsl >> 16) & 0xff00); -} - /* * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs * are ECC capable. 
*/ static enum edac_type amd64_determine_edac_cap(struct amd64_pvt *pvt) { - int bit; + u8 bit; enum dev_type edac_cap = EDAC_FLAG_NONE; bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= K8_REV_F) @@ -799,8 +768,7 @@ static enum edac_type amd64_determine_edac_cap(struct amd64_pvt *pvt) return edac_cap; } - -static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt); +static void amd64_debug_display_dimm_sizes(struct amd64_pvt *, u8); static void amd64_dump_dramcfg_low(u32 dclr, int chan) { @@ -813,8 +781,9 @@ static void amd64_dump_dramcfg_low(u32 dclr, int chan) debugf1(" PAR/ERR parity: %s\n", (dclr & BIT(8)) ? "enabled" : "disabled"); - debugf1(" DCT 128bit mode width: %s\n", - (dclr & BIT(11)) ? "128b" : "64b"); + if (boot_cpu_data.x86 == 0x10) + debugf1(" DCT 128bit mode width: %s\n", + (dclr & BIT(11)) ? "128b" : "64b"); debugf1(" x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n", (dclr & BIT(12)) ? "yes" : "no", @@ -824,16 +793,16 @@ static void amd64_dump_dramcfg_low(u32 dclr, int chan) } /* Display and decode various NB registers for debug purposes. */ -static void amd64_dump_misc_regs(struct amd64_pvt *pvt) +static void dump_misc_regs(struct amd64_pvt *pvt) { debugf1("F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap); debugf1(" NB two channel DRAM capable: %s\n", - (pvt->nbcap & K8_NBCAP_DCT_DUAL) ? "yes" : "no"); + (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no"); debugf1(" ECC capable: %s, ChipKill ECC capable: %s\n", - (pvt->nbcap & K8_NBCAP_SECDED) ? "yes" : "no", - (pvt->nbcap & K8_NBCAP_CHIPKILL) ? "yes" : "no"); + (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no", + (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no"); amd64_dump_dramcfg_low(pvt->dclr0, 0); @@ -841,130 +810,84 @@ static void amd64_dump_misc_regs(struct amd64_pvt *pvt) debugf1("F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, " "offset: 0x%08x\n", - pvt->dhar, - dhar_base(pvt->dhar), - (boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt->dhar) - : f10_dhar_offset(pvt->dhar)); + pvt->dhar, dhar_base(pvt), + (boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt) + : f10_dhar_offset(pvt)); - debugf1(" DramHoleValid: %s\n", - (pvt->dhar & DHAR_VALID) ? "yes" : "no"); + debugf1(" DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no"); - amd64_debug_display_dimm_sizes(0, pvt); + amd64_debug_display_dimm_sizes(pvt, 0); /* everything below this point is Fam10h and above */ if (boot_cpu_data.x86 == 0xf) return; - amd64_debug_display_dimm_sizes(1, pvt); + amd64_debug_display_dimm_sizes(pvt, 1); - amd64_info("using %s syndromes.\n", ((pvt->syn_type == 8) ? "x8" : "x4")); + amd64_info("using %s syndromes.\n", ((pvt->ecc_sym_sz == 8) ? "x8" : "x4")); /* Only if NOT ganged does dclr1 have valid info */ if (!dct_ganging_enabled(pvt)) amd64_dump_dramcfg_low(pvt->dclr1, 1); } -/* Read in both of DBAM registers */ -static void amd64_read_dbam_reg(struct amd64_pvt *pvt) -{ - amd64_read_pci_cfg(pvt->F2, DBAM0, &pvt->dbam0); - - if (boot_cpu_data.x86 >= 0x10) - amd64_read_pci_cfg(pvt->F2, DBAM1, &pvt->dbam1); -} - /* - * NOTE: CPU Revision Dependent code: Rev E and Rev F - * - * Set the DCSB and DCSM mask values depending on the CPU revision value. Also - * set the shift factor for the DCSB and DCSM values. - * - * ->dcs_mask_notused, RevE: - * - * To find the max InputAddr for the csrow, start with the base address and set - * all bits that are "don't care" bits in the test at the start of section - * 3.5.4 (p. 84). 
- * - * The "don't care" bits are all set bits in the mask and all bits in the gaps - * between bit ranges [35:25] and [19:13]. The value REV_E_DCS_NOTUSED_BITS - * represents bits [24:20] and [12:0], which are all bits in the above-mentioned - * gaps. - * - * ->dcs_mask_notused, RevF and later: - * - * To find the max InputAddr for the csrow, start with the base address and set - * all bits that are "don't care" bits in the test at the start of NPT section - * 4.5.4 (p. 87). - * - * The "don't care" bits are all set bits in the mask and all bits in the gaps - * between bit ranges [36:27] and [21:13]. - * - * The value REV_F_F1Xh_DCS_NOTUSED_BITS represents bits [26:22] and [12:0], - * which are all bits in the above-mentioned gaps. + * see BKDG, F2x[1,0][5C:40], F2[1,0][6C:60] */ -static void amd64_set_dct_base_and_mask(struct amd64_pvt *pvt) +static void prep_chip_selects(struct amd64_pvt *pvt) { - if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) { - pvt->dcsb_base = REV_E_DCSB_BASE_BITS; - pvt->dcsm_mask = REV_E_DCSM_MASK_BITS; - pvt->dcs_mask_notused = REV_E_DCS_NOTUSED_BITS; - pvt->dcs_shift = REV_E_DCS_SHIFT; - pvt->cs_count = 8; - pvt->num_dcsm = 8; + pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8; + pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8; } else { - pvt->dcsb_base = REV_F_F1Xh_DCSB_BASE_BITS; - pvt->dcsm_mask = REV_F_F1Xh_DCSM_MASK_BITS; - pvt->dcs_mask_notused = REV_F_F1Xh_DCS_NOTUSED_BITS; - pvt->dcs_shift = REV_F_F1Xh_DCS_SHIFT; - pvt->cs_count = 8; - pvt->num_dcsm = 4; + pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8; + pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4; } } /* - * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask hw registers + * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers */ -static void amd64_read_dct_base_mask(struct amd64_pvt *pvt) +static void read_dct_base_mask(struct amd64_pvt *pvt) { - int cs, reg; + int cs; - amd64_set_dct_base_and_mask(pvt); + prep_chip_selects(pvt); - for (cs = 0; cs < pvt->cs_count; cs++) { - reg = K8_DCSB0 + (cs * 4); - if (!amd64_read_pci_cfg(pvt->F2, reg, &pvt->dcsb0[cs])) + for_each_chip_select(cs, 0, pvt) { + int reg0 = DCSB0 + (cs * 4); + int reg1 = DCSB1 + (cs * 4); + u32 *base0 = &pvt->csels[0].csbases[cs]; + u32 *base1 = &pvt->csels[1].csbases[cs]; + + if (!amd64_read_dct_pci_cfg(pvt, reg0, base0)) debugf0(" DCSB0[%d]=0x%08x reg: F2x%x\n", - cs, pvt->dcsb0[cs], reg); - - /* If DCT are NOT ganged, then read in DCT1's base */ - if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) { - reg = F10_DCSB1 + (cs * 4); - if (!amd64_read_pci_cfg(pvt->F2, reg, - &pvt->dcsb1[cs])) - debugf0(" DCSB1[%d]=0x%08x reg: F2x%x\n", - cs, pvt->dcsb1[cs], reg); - } else { - pvt->dcsb1[cs] = 0; - } + cs, *base0, reg0); + + if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt)) + continue; + + if (!amd64_read_dct_pci_cfg(pvt, reg1, base1)) + debugf0(" DCSB1[%d]=0x%08x reg: F2x%x\n", + cs, *base1, reg1); } - for (cs = 0; cs < pvt->num_dcsm; cs++) { - reg = K8_DCSM0 + (cs * 4); - if (!amd64_read_pci_cfg(pvt->F2, reg, &pvt->dcsm0[cs])) + for_each_chip_select_mask(cs, 0, pvt) { + int reg0 = DCSM0 + (cs * 4); + int reg1 = DCSM1 + (cs * 4); + u32 *mask0 = &pvt->csels[0].csmasks[cs]; + u32 *mask1 = &pvt->csels[1].csmasks[cs]; + + if (!amd64_read_dct_pci_cfg(pvt, reg0, mask0)) debugf0(" DCSM0[%d]=0x%08x reg: F2x%x\n", - cs, pvt->dcsm0[cs], reg); - - /* If DCT are NOT ganged, then read in DCT1's mask */ - if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) { - reg = F10_DCSM1 + (cs * 4); 
- if (!amd64_read_pci_cfg(pvt->F2, reg, - &pvt->dcsm1[cs])) - debugf0(" DCSM1[%d]=0x%08x reg: F2x%x\n", - cs, pvt->dcsm1[cs], reg); - } else { - pvt->dcsm1[cs] = 0; - } + cs, *mask0, reg0); + + if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt)) + continue; + + if (!amd64_read_dct_pci_cfg(pvt, reg1, mask1)) + debugf0(" DCSM1[%d]=0x%08x reg: F2x%x\n", + cs, *mask1, reg1); } } @@ -972,7 +895,10 @@ static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt, int cs) { enum mem_type type; - if (boot_cpu_data.x86 >= 0x10 || pvt->ext_model >= K8_REV_F) { + /* F15h supports only DDR3 */ + if (boot_cpu_data.x86 >= 0x15) + type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3; + else if (boot_cpu_data.x86 == 0x10 || pvt->ext_model >= K8_REV_F) { if (pvt->dchr0 & DDR3_MODE) type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3; else @@ -986,26 +912,14 @@ static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt, int cs) return type; } -/* - * Read the DRAM Configuration Low register. It differs between CG, D & E revs - * and the later RevF memory controllers (DDR vs DDR2) - * - * Return: - * number of memory channels in operation - * Pass back: - * contents of the DCL0_LOW register - */ +/* Get the number of DCT channels the memory controller is using. */ static int k8_early_channel_count(struct amd64_pvt *pvt) { - int flag, err = 0; - - err = amd64_read_pci_cfg(pvt->F2, F10_DCLR_0, &pvt->dclr0); - if (err) - return err; + int flag; if (pvt->ext_model >= K8_REV_F) /* RevF (NPT) and later */ - flag = pvt->dclr0 & F10_WIDTH_128; + flag = pvt->dclr0 & WIDTH_128; else /* RevE and earlier */ flag = pvt->dclr0 & REVE_WIDTH_128; @@ -1016,55 +930,47 @@ static int k8_early_channel_count(struct amd64_pvt *pvt) return (flag) ? 2 : 1; } -/* extract the ERROR ADDRESS for the K8 CPUs */ -static u64 k8_get_error_address(struct mem_ctl_info *mci, - struct err_regs *info) +/* On F10h and later ErrAddr is MC4_ADDR[47:1] */ +static u64 get_error_address(struct mce *m) { - return (((u64) (info->nbeah & 0xff)) << 32) + - (info->nbeal & ~0x03); + u8 start_bit = 1; + u8 end_bit = 47; + + if (boot_cpu_data.x86 == 0xf) { + start_bit = 3; + end_bit = 39; + } + + return m->addr & GENMASK(start_bit, end_bit); } -/* - * Read the Base and Limit registers for K8 based Memory controllers; extract - * fields from the 'raw' reg into separate data fields - * - * Isolates: BASE, LIMIT, IntlvEn, IntlvSel, RW_EN - */ -static void k8_read_dram_base_limit(struct amd64_pvt *pvt, int dram) +static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range) { - u32 low; - u32 off = dram << 3; /* 8 bytes between DRAM entries */ + int off = range << 3; - amd64_read_pci_cfg(pvt->F1, K8_DRAM_BASE_LOW + off, &low); + amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off, &pvt->ranges[range].base.lo); + amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo); - /* Extract parts into separate data entries */ - pvt->dram_base[dram] = ((u64) low & 0xFFFF0000) << 8; - pvt->dram_IntlvEn[dram] = (low >> 8) & 0x7; - pvt->dram_rw_en[dram] = (low & 0x3); + if (boot_cpu_data.x86 == 0xf) + return; - amd64_read_pci_cfg(pvt->F1, K8_DRAM_LIMIT_LOW + off, &low); + if (!dram_rw(pvt, range)) + return; - /* - * Extract parts into separate data entries. 
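/*
 * For reference: with the GENMASK(lo, hi) helper introduced in amd64_edac.h
 * below, get_error_address() masks MC4_ADDR down to bits [47:1] on F10h and
 * later (GENMASK(1, 47) == 0x0000fffffffffffe) and to bits [39:3] on K8
 * (GENMASK(3, 39) == 0x000000fffffffff8). The F10h mask matches what
 * f10_get_error_address() used to assemble from NBEAH/NBEAL; the K8 case now
 * also clears bit 2, where the old helper cleared only bits [1:0].
 */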
Limit is the HIGHEST memory - * location of the region, so lower 24 bits need to be all ones - */ - pvt->dram_limit[dram] = (((u64) low & 0xFFFF0000) << 8) | 0x00FFFFFF; - pvt->dram_IntlvSel[dram] = (low >> 8) & 0x7; - pvt->dram_DstNode[dram] = (low & 0x7); + amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off, &pvt->ranges[range].base.hi); + amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi); } -static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, - struct err_regs *err_info, u64 sys_addr) +static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr, + u16 syndrome) { struct mem_ctl_info *src_mci; + struct amd64_pvt *pvt = mci->pvt_info; int channel, csrow; u32 page, offset; - u16 syndrome; - - syndrome = extract_syndrome(err_info); /* CHIPKILL enabled */ - if (err_info->nbcfg & K8_NBCFG_CHIPKILL) { + if (pvt->nbcfg & NBCFG_CHIPKILL) { channel = get_channel_from_ecc_syndrome(mci, syndrome); if (channel < 0) { /* @@ -1113,18 +1019,41 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, } } -static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode) +static int ddr2_cs_size(unsigned i, bool dct_width) { - int *dbam_map; + unsigned shift = 0; - if (pvt->ext_model >= K8_REV_F) - dbam_map = ddr2_dbam; - else if (pvt->ext_model >= K8_REV_D) - dbam_map = ddr2_dbam_revD; + if (i <= 2) + shift = i; + else if (!(i & 0x1)) + shift = i >> 1; else - dbam_map = ddr2_dbam_revCG; + shift = (i + 1) >> 1; - return dbam_map[cs_mode]; + return 128 << (shift + !!dct_width); +} + +static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct, + unsigned cs_mode) +{ + u32 dclr = dct ? pvt->dclr1 : pvt->dclr0; + + if (pvt->ext_model >= K8_REV_F) { + WARN_ON(cs_mode > 11); + return ddr2_cs_size(cs_mode, dclr & WIDTH_128); + } + else if (pvt->ext_model >= K8_REV_D) { + WARN_ON(cs_mode > 10); + + if (cs_mode == 3 || cs_mode == 8) + return 32 << (cs_mode - 1); + else + return 32 << cs_mode; + } + else { + WARN_ON(cs_mode > 6); + return 32 << cs_mode; + } } /* @@ -1135,17 +1064,13 @@ static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode) * Pass back: * contents of the DCL0_LOW register */ -static int f10_early_channel_count(struct amd64_pvt *pvt) +static int f1x_early_channel_count(struct amd64_pvt *pvt) { - int dbams[] = { DBAM0, DBAM1 }; int i, j, channels = 0; - u32 dbam; - /* If we are in 128 bit mode, then we are using 2 channels */ - if (pvt->dclr0 & F10_WIDTH_128) { - channels = 2; - return channels; - } + /* On F10h, if we are in 128 bit mode, then we are using 2 channels */ + if (boot_cpu_data.x86 == 0x10 && (pvt->dclr0 & WIDTH_128)) + return 2; /* * Need to check if in unganged mode: In such, there are 2 channels, @@ -1162,9 +1087,8 @@ static int f10_early_channel_count(struct amd64_pvt *pvt) * is more than just one DIMM present in unganged mode. Need to check * both controllers since DIMMs can be placed in either one. */ - for (i = 0; i < ARRAY_SIZE(dbams); i++) { - if (amd64_read_pci_cfg(pvt->F2, dbams[i], &dbam)) - goto err_reg; + for (i = 0; i < 2; i++) { + u32 dbam = (i ? 
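/*
 * Working ddr2_cs_size() through its inputs, replacing the old ddr2_dbam[]
 * table lookups, gives (sizes in MB for a 64-bit DCT; the +!!dct_width term
 * doubles them in 128-bit mode):
 *
 *   cs_mode:  0    1    2    3    4    5     6     7     8     9    10    11
 *   size:   128  256  512  512  512  1024  1024  2048  2048  4096  4096  8192
 */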
pvt->dbam1 : pvt->dbam0); for (j = 0; j < 4; j++) { if (DBAM_DIMM(j, dbam) > 0) { @@ -1180,216 +1104,191 @@ static int f10_early_channel_count(struct amd64_pvt *pvt) amd64_info("MCT channel count: %d\n", channels); return channels; - -err_reg: - return -1; - } -static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode) +static int ddr3_cs_size(unsigned i, bool dct_width) { - int *dbam_map; + unsigned shift = 0; + int cs_size = 0; - if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE) - dbam_map = ddr3_dbam; + if (i == 0 || i == 3 || i == 4) + cs_size = -1; + else if (i <= 2) + shift = i; + else if (i == 12) + shift = 7; + else if (!(i & 0x1)) + shift = i >> 1; else - dbam_map = ddr2_dbam; + shift = (i + 1) >> 1; + + if (cs_size != -1) + cs_size = (128 * (1 << !!dct_width)) << shift; - return dbam_map[cs_mode]; + return cs_size; } -static u64 f10_get_error_address(struct mem_ctl_info *mci, - struct err_regs *info) +static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct, + unsigned cs_mode) { - return (((u64) (info->nbeah & 0xffff)) << 32) + - (info->nbeal & ~0x01); + u32 dclr = dct ? pvt->dclr1 : pvt->dclr0; + + WARN_ON(cs_mode > 11); + + if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE) + return ddr3_cs_size(cs_mode, dclr & WIDTH_128); + else + return ddr2_cs_size(cs_mode, dclr & WIDTH_128); } /* - * Read the Base and Limit registers for F10 based Memory controllers. Extract - * fields from the 'raw' reg into separate data fields. - * - * Isolates: BASE, LIMIT, IntlvEn, IntlvSel, RW_EN. + * F15h supports only 64bit DCT interfaces */ -static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram) +static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct, + unsigned cs_mode) { - u32 high_offset, low_offset, high_base, low_base, high_limit, low_limit; - - low_offset = K8_DRAM_BASE_LOW + (dram << 3); - high_offset = F10_DRAM_BASE_HIGH + (dram << 3); - - /* read the 'raw' DRAM BASE Address register */ - amd64_read_pci_cfg(pvt->F1, low_offset, &low_base); - amd64_read_pci_cfg(pvt->F1, high_offset, &high_base); - - /* Extract parts into separate data entries */ - pvt->dram_rw_en[dram] = (low_base & 0x3); - - if (pvt->dram_rw_en[dram] == 0) - return; - - pvt->dram_IntlvEn[dram] = (low_base >> 8) & 0x7; - - pvt->dram_base[dram] = (((u64)high_base & 0x000000FF) << 40) | - (((u64)low_base & 0xFFFF0000) << 8); - - low_offset = K8_DRAM_LIMIT_LOW + (dram << 3); - high_offset = F10_DRAM_LIMIT_HIGH + (dram << 3); - - /* read the 'raw' LIMIT registers */ - amd64_read_pci_cfg(pvt->F1, low_offset, &low_limit); - amd64_read_pci_cfg(pvt->F1, high_offset, &high_limit); - - pvt->dram_DstNode[dram] = (low_limit & 0x7); - pvt->dram_IntlvSel[dram] = (low_limit >> 8) & 0x7; + WARN_ON(cs_mode > 12); - /* - * Extract address values and form a LIMIT address. Limit is the HIGHEST - * memory location of the region, so low 24 bits need to be all ones. - */ - pvt->dram_limit[dram] = (((u64)high_limit & 0x000000FF) << 40) | - (((u64) low_limit & 0xFFFF0000) << 8) | - 0x00FFFFFF; + return ddr3_cs_size(cs_mode, false); } -static void f10_read_dram_ctl_register(struct amd64_pvt *pvt) +static void read_dram_ctl_register(struct amd64_pvt *pvt) { - if (!amd64_read_pci_cfg(pvt->F2, F10_DCTL_SEL_LOW, - &pvt->dram_ctl_select_low)) { - debugf0("F2x110 (DCTL Sel. 
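/*
 * Likewise for ddr3_cs_size() (sizes in MB, 64-bit DCT; -1 marks reserved
 * encodings). f15_dbam_to_chip_select() always passes dct_width == false
 * because, per the comment above, F15h supports only 64-bit DCT interfaces:
 *
 *   cs_mode:  0   1    2   3   4   5     6     7     8     9    10    11    12
 *   size:    -1  256  512  -1  -1  1024  1024  2048  2048  4096  4096  8192  16384
 */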
Low): 0x%08x, " - "High range addresses at: 0x%x\n", - pvt->dram_ctl_select_low, - dct_sel_baseaddr(pvt)); + if (boot_cpu_data.x86 == 0xf) + return; + + if (!amd64_read_dct_pci_cfg(pvt, DCT_SEL_LO, &pvt->dct_sel_lo)) { + debugf0("F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n", + pvt->dct_sel_lo, dct_sel_baseaddr(pvt)); - debugf0(" DCT mode: %s, All DCTs on: %s\n", - (dct_ganging_enabled(pvt) ? "ganged" : "unganged"), - (dct_dram_enabled(pvt) ? "yes" : "no")); + debugf0(" DCTs operate in %s mode.\n", + (dct_ganging_enabled(pvt) ? "ganged" : "unganged")); if (!dct_ganging_enabled(pvt)) debugf0(" Address range split per DCT: %s\n", (dct_high_range_enabled(pvt) ? "yes" : "no")); - debugf0(" DCT data interleave for ECC: %s, " + debugf0(" data interleave for ECC: %s, " "DRAM cleared since last warm reset: %s\n", (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"), (dct_memory_cleared(pvt) ? "yes" : "no")); - debugf0(" DCT channel interleave: %s, " - "DCT interleave bits selector: 0x%x\n", + debugf0(" channel interleave: %s, " + "interleave bits selector: 0x%x\n", (dct_interleave_enabled(pvt) ? "enabled" : "disabled"), dct_sel_interleave_addr(pvt)); } - amd64_read_pci_cfg(pvt->F2, F10_DCTL_SEL_HIGH, - &pvt->dram_ctl_select_high); + amd64_read_dct_pci_cfg(pvt, DCT_SEL_HI, &pvt->dct_sel_hi); } /* - * determine channel based on the interleaving mode: F10h BKDG, 2.8.9 Memory + * Determine channel (DCT) based on the interleaving mode: F10h BKDG, 2.8.9 Memory * Interleaving Modes. */ -static u32 f10_determine_channel(struct amd64_pvt *pvt, u64 sys_addr, - int hi_range_sel, u32 intlv_en) +static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr, + bool hi_range_sel, u8 intlv_en) { - u32 cs, temp, dct_sel_high = (pvt->dram_ctl_select_low >> 1) & 1; + u8 dct_sel_high = (pvt->dct_sel_lo >> 1) & 1; if (dct_ganging_enabled(pvt)) - cs = 0; - else if (hi_range_sel) - cs = dct_sel_high; - else if (dct_interleave_enabled(pvt)) { - /* - * see F2x110[DctSelIntLvAddr] - channel interleave mode - */ - if (dct_sel_interleave_addr(pvt) == 0) - cs = sys_addr >> 6 & 1; - else if ((dct_sel_interleave_addr(pvt) >> 1) & 1) { - temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) % 2; + return 0; - if (dct_sel_interleave_addr(pvt) & 1) - cs = (sys_addr >> 9 & 1) ^ temp; - else - cs = (sys_addr >> 6 & 1) ^ temp; - } else if (intlv_en & 4) - cs = sys_addr >> 15 & 1; - else if (intlv_en & 2) - cs = sys_addr >> 14 & 1; - else if (intlv_en & 1) - cs = sys_addr >> 13 & 1; - else - cs = sys_addr >> 12 & 1; - } else if (dct_high_range_enabled(pvt) && !dct_ganging_enabled(pvt)) - cs = ~dct_sel_high & 1; - else - cs = 0; + if (hi_range_sel) + return dct_sel_high; - return cs; -} + /* + * see F2x110[DctSelIntLvAddr] - channel interleave mode + */ + if (dct_interleave_enabled(pvt)) { + u8 intlv_addr = dct_sel_interleave_addr(pvt); -static inline u32 f10_map_intlv_en_to_shift(u32 intlv_en) -{ - if (intlv_en == 1) - return 1; - else if (intlv_en == 3) - return 2; - else if (intlv_en == 7) - return 3; + /* return DCT select function: 0=DCT0, 1=DCT1 */ + if (!intlv_addr) + return sys_addr >> 6 & 1; + + if (intlv_addr & 0x2) { + u8 shift = intlv_addr & 0x1 ? 
9 : 6; + u32 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) % 2; + + return ((sys_addr >> shift) & 1) ^ temp; + } + + return (sys_addr >> (12 + hweight8(intlv_en))) & 1; + } + + if (dct_high_range_enabled(pvt)) + return ~dct_sel_high & 1; return 0; } -/* See F10h BKDG, 2.8.10.2 DctSelBaseOffset Programming */ -static inline u64 f10_get_base_addr_offset(u64 sys_addr, int hi_range_sel, - u32 dct_sel_base_addr, - u64 dct_sel_base_off, - u32 hole_valid, u32 hole_off, - u64 dram_base) +/* Convert the sys_addr to the normalized DCT address */ +static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, unsigned range, + u64 sys_addr, bool hi_rng, + u32 dct_sel_base_addr) { u64 chan_off; + u64 dram_base = get_dram_base(pvt, range); + u64 hole_off = f10_dhar_offset(pvt); + u64 dct_sel_base_off = (pvt->dct_sel_hi & 0xFFFFFC00) << 16; - if (hi_range_sel) { - if (!(dct_sel_base_addr & 0xFFFF0000) && - hole_valid && (sys_addr >= 0x100000000ULL)) - chan_off = hole_off << 16; + if (hi_rng) { + /* + * if + * base address of high range is below 4Gb + * (bits [47:27] at [31:11]) + * DRAM address space on this DCT is hoisted above 4Gb && + * sys_addr > 4Gb + * + * remove hole offset from sys_addr + * else + * remove high range offset from sys_addr + */ + if ((!(dct_sel_base_addr >> 16) || + dct_sel_base_addr < dhar_base(pvt)) && + dhar_valid(pvt) && + (sys_addr >= BIT_64(32))) + chan_off = hole_off; else chan_off = dct_sel_base_off; } else { - if (hole_valid && (sys_addr >= 0x100000000ULL)) - chan_off = hole_off << 16; + /* + * if + * we have a valid hole && + * sys_addr > 4Gb + * + * remove hole + * else + * remove dram base to normalize to DCT address + */ + if (dhar_valid(pvt) && (sys_addr >= BIT_64(32))) + chan_off = hole_off; else - chan_off = dram_base & 0xFFFFF8000000ULL; + chan_off = dram_base; } - return (sys_addr & 0x0000FFFFFFFFFFC0ULL) - - (chan_off & 0x0000FFFFFF800000ULL); + return (sys_addr & GENMASK(6,47)) - (chan_off & GENMASK(23,47)); } -/* Hack for the time being - Can we get this from BIOS?? 
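/*
 * Sanity check: the GENMASK() uses in f1x_get_norm_dct_addr() reproduce the
 * magic constants they replace. A minimal userspace check, assuming nothing
 * beyond the macro definition from amd64_edac.h below:
 */
#include <stdio.h>

#define GENMASK(lo, hi) (((1ULL << ((hi) - (lo) + 1)) - 1) << (lo))

int main(void)
{
	printf("%016llx\n", GENMASK(6, 47));  /* 0000ffffffffffc0 == 0x0000FFFFFFFFFFC0ULL */
	printf("%016llx\n", GENMASK(23, 47)); /* 0000ffffff800000 == 0x0000FFFFFF800000ULL */
	return 0;
}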
*/ -#define CH0SPARE_RANK 0 -#define CH1SPARE_RANK 1 - /* * checks if the csrow passed in is marked as SPARED, if so returns the new * spare row */ -static inline int f10_process_possible_spare(int csrow, - u32 cs, struct amd64_pvt *pvt) -{ - u32 swap_done; - u32 bad_dram_cs; - - /* Depending on channel, isolate respective SPARING info */ - if (cs) { - swap_done = F10_ONLINE_SPARE_SWAPDONE1(pvt->online_spare); - bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS1(pvt->online_spare); - if (swap_done && (csrow == bad_dram_cs)) - csrow = CH1SPARE_RANK; - } else { - swap_done = F10_ONLINE_SPARE_SWAPDONE0(pvt->online_spare); - bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS0(pvt->online_spare); - if (swap_done && (csrow == bad_dram_cs)) - csrow = CH0SPARE_RANK; +static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow) +{ + int tmp_cs; + + if (online_spare_swap_done(pvt, dct) && + csrow == online_spare_bad_dramcs(pvt, dct)) { + + for_each_chip_select(tmp_cs, dct, pvt) { + if (chip_select_base(tmp_cs, dct, pvt) & 0x2) { + csrow = tmp_cs; + break; + } + } } return csrow; } @@ -1402,11 +1301,11 @@ static inline int f10_process_possible_spare(int csrow, * -EINVAL: NOT FOUND * 0..csrow = Chip-Select Row */ -static int f10_lookup_addr_in_dct(u32 in_addr, u32 nid, u32 cs) +static int f1x_lookup_addr_in_dct(u64 in_addr, u32 nid, u8 dct) { struct mem_ctl_info *mci; struct amd64_pvt *pvt; - u32 cs_base, cs_mask; + u64 cs_base, cs_mask; int cs_found = -EINVAL; int csrow; @@ -1416,39 +1315,25 @@ static int f10_lookup_addr_in_dct(u32 in_addr, u32 nid, u32 cs) pvt = mci->pvt_info; - debugf1("InputAddr=0x%x channelselect=%d\n", in_addr, cs); - - for (csrow = 0; csrow < pvt->cs_count; csrow++) { + debugf1("input addr: 0x%llx, DCT: %d\n", in_addr, dct); - cs_base = amd64_get_dct_base(pvt, cs, csrow); - if (!(cs_base & K8_DCSB_CS_ENABLE)) + for_each_chip_select(csrow, dct, pvt) { + if (!csrow_enabled(csrow, dct, pvt)) continue; - /* - * We have an ENABLED CSROW, Isolate just the MASK bits of the - * target: [28:19] and [13:5], which map to [36:27] and [21:13] - * of the actual address. - */ - cs_base &= REV_F_F1Xh_DCSB_BASE_BITS; - - /* - * Get the DCT Mask, and ENABLE the reserved bits: [18:16] and - * [4:0] to become ON. Then mask off bits [28:0] ([36:8]) - */ - cs_mask = amd64_get_dct_mask(pvt, cs, csrow); + get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask); - debugf1(" CSROW=%d CSBase=0x%x RAW CSMask=0x%x\n", - csrow, cs_base, cs_mask); + debugf1(" CSROW=%d CSBase=0x%llx CSMask=0x%llx\n", + csrow, cs_base, cs_mask); - cs_mask = (cs_mask | 0x0007C01F) & 0x1FFFFFFF; + cs_mask = ~cs_mask; - debugf1(" Final CSMask=0x%x\n", cs_mask); - debugf1(" (InputAddr & ~CSMask)=0x%x " - "(CSBase & ~CSMask)=0x%x\n", - (in_addr & ~cs_mask), (cs_base & ~cs_mask)); + debugf1(" (InputAddr & ~CSMask)=0x%llx " + "(CSBase & ~CSMask)=0x%llx\n", + (in_addr & cs_mask), (cs_base & cs_mask)); - if ((in_addr & ~cs_mask) == (cs_base & ~cs_mask)) { - cs_found = f10_process_possible_spare(csrow, cs, pvt); + if ((in_addr & cs_mask) == (cs_base & cs_mask)) { + cs_found = f10_process_possible_spare(pvt, dct, csrow); debugf1(" MATCH csrow=%d\n", cs_found); break; @@ -1457,38 +1342,75 @@ static int f10_lookup_addr_in_dct(u32 in_addr, u32 nid, u32 cs) return cs_found; } -/* For a given @dram_range, check if @sys_addr falls within it. */ -static int f10_match_to_this_node(struct amd64_pvt *pvt, int dram_range, - u64 sys_addr, int *nid, int *chan_sel) +/* + * See F2x10C. 
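/*
 * Note on f10_process_possible_spare() above: instead of the hard-coded
 * CH0SPARE_RANK/CH1SPARE_RANK being removed here, the spare rank is now
 * located by scanning for a chip select whose DCSB has bit 1 set -- the
 * spare-rank bit (K8_DCSB_NPT_SPARE in the old header). chip_select_base()
 * is introduced elsewhere in this patch; presumably along the lines of:
 *
 *   #define chip_select_base(i, dct, pvt) ((pvt)->csels[(dct)].csbases[(i)])
 */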
Non-interleaved graphics framebuffer memory under the 16G is + * swapped with a region located at the bottom of memory so that the GPU can use + * the interleaved region and thus two channels. + */ +static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr) { - int node_id, cs_found = -EINVAL, high_range = 0; - u32 intlv_en, intlv_sel, intlv_shift, hole_off; - u32 hole_valid, tmp, dct_sel_base, channel; - u64 dram_base, chan_addr, dct_sel_base_off; + u32 swap_reg, swap_base, swap_limit, rgn_size, tmp_addr; - dram_base = pvt->dram_base[dram_range]; - intlv_en = pvt->dram_IntlvEn[dram_range]; + if (boot_cpu_data.x86 == 0x10) { + /* only revC3 and revE have that feature */ + if (boot_cpu_data.x86_model < 4 || + (boot_cpu_data.x86_model < 0xa && + boot_cpu_data.x86_mask < 3)) + return sys_addr; + } - node_id = pvt->dram_DstNode[dram_range]; - intlv_sel = pvt->dram_IntlvSel[dram_range]; + amd64_read_dct_pci_cfg(pvt, SWAP_INTLV_REG, &swap_reg); - debugf1("(dram=%d) Base=0x%llx SystemAddr= 0x%llx Limit=0x%llx\n", - dram_range, dram_base, sys_addr, pvt->dram_limit[dram_range]); + if (!(swap_reg & 0x1)) + return sys_addr; - /* - * This assumes that one node's DHAR is the same as all the other - * nodes' DHAR. - */ - hole_off = (pvt->dhar & 0x0000FF80); - hole_valid = (pvt->dhar & 0x1); - dct_sel_base_off = (pvt->dram_ctl_select_high & 0xFFFFFC00) << 16; + swap_base = (swap_reg >> 3) & 0x7f; + swap_limit = (swap_reg >> 11) & 0x7f; + rgn_size = (swap_reg >> 20) & 0x7f; + tmp_addr = sys_addr >> 27; - debugf1(" HoleOffset=0x%x HoleValid=0x%x IntlvSel=0x%x\n", - hole_off, hole_valid, intlv_sel); + if (!(sys_addr >> 34) && + (((tmp_addr >= swap_base) && + (tmp_addr <= swap_limit)) || + (tmp_addr < rgn_size))) + return sys_addr ^ (u64)swap_base << 27; + + return sys_addr; +} + +/* For a given @dram_range, check if @sys_addr falls within it. */ +static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range, + u64 sys_addr, int *nid, int *chan_sel) +{ + int cs_found = -EINVAL; + u64 chan_addr; + u32 dct_sel_base; + u8 channel; + bool high_range = false; + + u8 node_id = dram_dst_node(pvt, range); + u8 intlv_en = dram_intlv_en(pvt, range); + u32 intlv_sel = dram_intlv_sel(pvt, range); + + debugf1("(range %d) SystemAddr= 0x%llx Limit=0x%llx\n", + range, sys_addr, get_dram_limit(pvt, range)); + + if (dhar_valid(pvt) && + dhar_base(pvt) <= sys_addr && + sys_addr < BIT_64(32)) { + amd64_warn("Huh? 
Address is in the MMIO hole: 0x%016llx\n", + sys_addr); + return -EINVAL; + } if (intlv_en && - (intlv_sel != ((sys_addr >> 12) & intlv_en))) + (intlv_sel != ((sys_addr >> 12) & intlv_en))) { + amd64_warn("Botched intlv bits, en: 0x%x, sel: 0x%x\n", + intlv_en, intlv_sel); return -EINVAL; + } + + sys_addr = f1x_swap_interleaved_region(pvt, sys_addr); dct_sel_base = dct_sel_baseaddr(pvt); @@ -1499,38 +1421,41 @@ static int f10_match_to_this_node(struct amd64_pvt *pvt, int dram_range, if (dct_high_range_enabled(pvt) && !dct_ganging_enabled(pvt) && ((sys_addr >> 27) >= (dct_sel_base >> 11))) - high_range = 1; - - channel = f10_determine_channel(pvt, sys_addr, high_range, intlv_en); + high_range = true; - chan_addr = f10_get_base_addr_offset(sys_addr, high_range, dct_sel_base, - dct_sel_base_off, hole_valid, - hole_off, dram_base); + channel = f1x_determine_channel(pvt, sys_addr, high_range, intlv_en); - intlv_shift = f10_map_intlv_en_to_shift(intlv_en); + chan_addr = f1x_get_norm_dct_addr(pvt, range, sys_addr, + high_range, dct_sel_base); - /* remove Node ID (in case of memory interleaving) */ - tmp = chan_addr & 0xFC0; + /* Remove node interleaving, see F1x120 */ + if (intlv_en) + chan_addr = ((chan_addr >> (12 + hweight8(intlv_en))) << 12) | + (chan_addr & 0xfff); - chan_addr = ((chan_addr >> intlv_shift) & 0xFFFFFFFFF000ULL) | tmp; - - /* remove channel interleave and hash */ + /* remove channel interleave */ if (dct_interleave_enabled(pvt) && !dct_high_range_enabled(pvt) && !dct_ganging_enabled(pvt)) { - if (dct_sel_interleave_addr(pvt) != 1) - chan_addr = (chan_addr >> 1) & 0xFFFFFFFFFFFFFFC0ULL; - else { - tmp = chan_addr & 0xFC0; - chan_addr = ((chan_addr & 0xFFFFFFFFFFFFC000ULL) >> 1) - | tmp; - } + + if (dct_sel_interleave_addr(pvt) != 1) { + if (dct_sel_interleave_addr(pvt) == 0x3) + /* hash 9 */ + chan_addr = ((chan_addr >> 10) << 9) | + (chan_addr & 0x1ff); + else + /* A[6] or hash 6 */ + chan_addr = ((chan_addr >> 7) << 6) | + (chan_addr & 0x3f); + } else + /* A[12] */ + chan_addr = ((chan_addr >> 13) << 12) | + (chan_addr & 0xfff); } - debugf1(" (ChannelAddrLong=0x%llx) >> 8 becomes InputAddr=0x%x\n", - chan_addr, (u32)(chan_addr >> 8)); + debugf1(" Normalized DCT addr: 0x%llx\n", chan_addr); - cs_found = f10_lookup_addr_in_dct(chan_addr >> 8, node_id, channel); + cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel); if (cs_found >= 0) { *nid = node_id; @@ -1539,23 +1464,21 @@ static int f10_match_to_this_node(struct amd64_pvt *pvt, int dram_range, return cs_found; } -static int f10_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr, +static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr, int *node, int *chan_sel) { - int dram_range, cs_found = -EINVAL; - u64 dram_base, dram_limit; + int cs_found = -EINVAL; + unsigned range; - for (dram_range = 0; dram_range < DRAM_REG_COUNT; dram_range++) { + for (range = 0; range < DRAM_RANGES; range++) { - if (!pvt->dram_rw_en[dram_range]) + if (!dram_rw(pvt, range)) continue; - dram_base = pvt->dram_base[dram_range]; - dram_limit = pvt->dram_limit[dram_range]; + if ((get_dram_base(pvt, range) <= sys_addr) && + (get_dram_limit(pvt, range) >= sys_addr)) { - if ((dram_base <= sys_addr) && (sys_addr <= dram_limit)) { - - cs_found = f10_match_to_this_node(pvt, dram_range, + cs_found = f1x_match_to_this_node(pvt, range, sys_addr, node, chan_sel); if (cs_found >= 0) @@ -1572,16 +1495,14 @@ static int f10_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr, * The @sys_addr is usually an error address 
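/*
 * Example of the node-interleave removal in f1x_match_to_this_node() above:
 * with intlv_en == 0x7 (eight-node interleave), hweight8(intlv_en) == 3, so
 * bits [14:12] of the address -- the bits that selected the node -- are
 * squeezed out while the 4K page offset is kept:
 *
 *   chan_addr = 0x12345678
 *   ((0x12345678 >> 15) << 12) | 0x678  ==  0x02468678
 */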
received from the hardware * (MCX_ADDR). */ -static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci, - struct err_regs *err_info, - u64 sys_addr) +static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr, + u16 syndrome) { struct amd64_pvt *pvt = mci->pvt_info; u32 page, offset; int nid, csrow, chan = 0; - u16 syndrome; - csrow = f10_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan); + csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan); if (csrow < 0) { edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR); @@ -1590,14 +1511,12 @@ static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci, error_address_to_page_and_offset(sys_addr, &page, &offset); - syndrome = extract_syndrome(err_info); - /* * We need the syndromes for channel detection only when we're * ganged. Otherwise @chan should already contain the channel at * this point. */ - if (dct_ganging_enabled(pvt) && (pvt->nbcfg & K8_NBCFG_CHIPKILL)) + if (dct_ganging_enabled(pvt)) chan = get_channel_from_ecc_syndrome(mci, syndrome); if (chan >= 0) @@ -1614,16 +1533,16 @@ static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci, /* * debug routine to display the memory sizes of all logical DIMMs and its - * CSROWs as well + * CSROWs */ -static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt) +static void amd64_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl) { int dimm, size0, size1, factor = 0; - u32 dbam; - u32 *dcsb; + u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases; + u32 dbam = ctrl ? pvt->dbam1 : pvt->dbam0; if (boot_cpu_data.x86 == 0xf) { - if (pvt->dclr0 & F10_WIDTH_128) + if (pvt->dclr0 & WIDTH_128) factor = 1; /* K8 families < revF not supported yet */ @@ -1634,7 +1553,8 @@ static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt) } dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1 : pvt->dbam0; - dcsb = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dcsb1 : pvt->dcsb0; + dcsb = (ctrl && !dct_ganging_enabled(pvt)) ? 
pvt->csels[1].csbases + : pvt->csels[0].csbases; debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n", ctrl, dbam); @@ -1644,12 +1564,14 @@ static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt) for (dimm = 0; dimm < 4; dimm++) { size0 = 0; - if (dcsb[dimm*2] & K8_DCSB_CS_ENABLE) - size0 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam)); + if (dcsb[dimm*2] & DCSB_CS_ENABLE) + size0 = pvt->ops->dbam_to_cs(pvt, ctrl, + DBAM_DIMM(dimm, dbam)); size1 = 0; - if (dcsb[dimm*2 + 1] & K8_DCSB_CS_ENABLE) - size1 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam)); + if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE) + size1 = pvt->ops->dbam_to_cs(pvt, ctrl, + DBAM_DIMM(dimm, dbam)); amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n", dimm * 2, size0 << factor, @@ -1664,10 +1586,9 @@ static struct amd64_family_type amd64_family_types[] = { .f3_id = PCI_DEVICE_ID_AMD_K8_NB_MISC, .ops = { .early_channel_count = k8_early_channel_count, - .get_error_address = k8_get_error_address, - .read_dram_base_limit = k8_read_dram_base_limit, .map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow, .dbam_to_cs = k8_dbam_to_chip_select, + .read_dct_pci_cfg = k8_read_dct_pci_cfg, } }, [F10_CPUS] = { @@ -1675,12 +1596,21 @@ static struct amd64_family_type amd64_family_types[] = { .f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP, .f3_id = PCI_DEVICE_ID_AMD_10H_NB_MISC, .ops = { - .early_channel_count = f10_early_channel_count, - .get_error_address = f10_get_error_address, - .read_dram_base_limit = f10_read_dram_base_limit, - .read_dram_ctl_register = f10_read_dram_ctl_register, - .map_sysaddr_to_csrow = f10_map_sysaddr_to_csrow, + .early_channel_count = f1x_early_channel_count, + .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow, .dbam_to_cs = f10_dbam_to_chip_select, + .read_dct_pci_cfg = f10_read_dct_pci_cfg, + } + }, + [F15_CPUS] = { + .ctl_name = "F15h", + .f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1, + .f3_id = PCI_DEVICE_ID_AMD_15H_NB_F3, + .ops = { + .early_channel_count = f1x_early_channel_count, + .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow, + .dbam_to_cs = f15_dbam_to_chip_select, + .read_dct_pci_cfg = f15_read_dct_pci_cfg, } }, }; @@ -1770,15 +1700,15 @@ static u16 x8_vectors[] = { 0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000, }; -static int decode_syndrome(u16 syndrome, u16 *vectors, int num_vecs, - int v_dim) +static int decode_syndrome(u16 syndrome, u16 *vectors, unsigned num_vecs, + unsigned v_dim) { unsigned int i, err_sym; for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) { u16 s = syndrome; - int v_idx = err_sym * v_dim; - int v_end = (err_sym + 1) * v_dim; + unsigned v_idx = err_sym * v_dim; + unsigned v_end = (err_sym + 1) * v_dim; /* walk over all 16 bits of the syndrome */ for (i = 1; i < (1U << 16); i <<= 1) { @@ -1850,51 +1780,50 @@ static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome) struct amd64_pvt *pvt = mci->pvt_info; int err_sym = -1; - if (pvt->syn_type == 8) + if (pvt->ecc_sym_sz == 8) err_sym = decode_syndrome(syndrome, x8_vectors, ARRAY_SIZE(x8_vectors), - pvt->syn_type); - else if (pvt->syn_type == 4) + pvt->ecc_sym_sz); + else if (pvt->ecc_sym_sz == 4) err_sym = decode_syndrome(syndrome, x4_vectors, ARRAY_SIZE(x4_vectors), - pvt->syn_type); + pvt->ecc_sym_sz); else { - amd64_warn("Illegal syndrome type: %u\n", pvt->syn_type); + amd64_warn("Illegal syndrome type: %u\n", pvt->ecc_sym_sz); return err_sym; } - return map_err_sym_to_channel(err_sym, pvt->syn_type); + return map_err_sym_to_channel(err_sym, pvt->ecc_sym_sz); } /* * Handle any Correctable 
Errors (CEs) that have occurred. Check for valid ERROR * ADDRESS and process. */ -static void amd64_handle_ce(struct mem_ctl_info *mci, - struct err_regs *info) +static void amd64_handle_ce(struct mem_ctl_info *mci, struct mce *m) { struct amd64_pvt *pvt = mci->pvt_info; u64 sys_addr; + u16 syndrome; /* Ensure that the Error Address is VALID */ - if (!(info->nbsh & K8_NBSH_VALID_ERROR_ADDR)) { + if (!(m->status & MCI_STATUS_ADDRV)) { amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n"); edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR); return; } - sys_addr = pvt->ops->get_error_address(mci, info); + sys_addr = get_error_address(m); + syndrome = extract_syndrome(m->status); amd64_mc_err(mci, "CE ERROR_ADDRESS= 0x%llx\n", sys_addr); - pvt->ops->map_sysaddr_to_csrow(mci, info, sys_addr); + pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, syndrome); } /* Handle any Un-correctable Errors (UEs) */ -static void amd64_handle_ue(struct mem_ctl_info *mci, - struct err_regs *info) +static void amd64_handle_ue(struct mem_ctl_info *mci, struct mce *m) { - struct amd64_pvt *pvt = mci->pvt_info; struct mem_ctl_info *log_mci, *src_mci = NULL; int csrow; u64 sys_addr; @@ -1902,13 +1831,13 @@ static void amd64_handle_ue(struct mem_ctl_info *mci, log_mci = mci; - if (!(info->nbsh & K8_NBSH_VALID_ERROR_ADDR)) { + if (!(m->status & MCI_STATUS_ADDRV)) { amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n"); edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR); return; } - sys_addr = pvt->ops->get_error_address(mci, info); + sys_addr = get_error_address(m); /* * Find out which node the error address belongs to. This may be @@ -1936,14 +1865,14 @@ static void amd64_handle_ue(struct mem_ctl_info *mci, } static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci, - struct err_regs *info) + struct mce *m) { - u16 ec = EC(info->nbsl); - u8 xec = XEC(info->nbsl, 0x1f); - int ecc_type = (info->nbsh >> 13) & 0x3; + u16 ec = EC(m->status); + u8 xec = XEC(m->status, 0x1f); + u8 ecc_type = (m->status >> 45) & 0x3; /* Bail early out if this was an 'observed' error */ - if (PP(ec) == K8_NBSL_PP_OBS) + if (PP(ec) == NBSL_PP_OBS) return; /* Do only ECC errors */ @@ -1951,34 +1880,16 @@ static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci, return; if (ecc_type == 2) - amd64_handle_ce(mci, info); + amd64_handle_ce(mci, m); else if (ecc_type == 1) - amd64_handle_ue(mci, info); + amd64_handle_ue(mci, m); } void amd64_decode_bus_error(int node_id, struct mce *m, u32 nbcfg) { struct mem_ctl_info *mci = mcis[node_id]; - struct err_regs regs; - - regs.nbsl = (u32) m->status; - regs.nbsh = (u32)(m->status >> 32); - regs.nbeal = (u32) m->addr; - regs.nbeah = (u32)(m->addr >> 32); - regs.nbcfg = nbcfg; - - __amd64_decode_bus_error(mci, ®s); - - /* - * Check the UE bit of the NB status high register, if set generate some - * logs. If NOT a GART error, then process the event as a NO-INFO event. - * If it was a GART error, skip that process. - * - * FIXME: this should go somewhere else, if at all. 
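/*
 * MCA status bit usage in the error path above, for reference: ecc_type is
 * MCi_STATUS[46:45] (2 == correctable, 1 == uncorrectable), MCI_STATUS_ADDRV
 * is the address-valid bit, and extract_syndrome() (see amd64_edac.h below)
 * gathers the 16-bit syndrome from the two fields it occupies in MC4_STATUS:
 *
 *   syndrome[7:0]  = status[54:47]
 *   syndrome[15:8] = status[31:24]
 */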
- */ - if (regs.nbsh & K8_NBSH_UC_ERR && !report_gart_errors) - edac_mc_handle_ue_no_info(mci, "UE bit is set"); + __amd64_decode_bus_error(mci, m); } /* @@ -2027,9 +1938,10 @@ static void free_mc_sibling_devs(struct amd64_pvt *pvt) */ static void read_mc_regs(struct amd64_pvt *pvt) { + struct cpuinfo_x86 *c = &boot_cpu_data; u64 msr_val; u32 tmp; - int dram; + unsigned range; /* * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since @@ -2046,75 +1958,66 @@ static void read_mc_regs(struct amd64_pvt *pvt) } else debugf0(" TOP_MEM2 disabled.\n"); - amd64_read_pci_cfg(pvt->F3, K8_NBCAP, &pvt->nbcap); + amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap); - if (pvt->ops->read_dram_ctl_register) - pvt->ops->read_dram_ctl_register(pvt); + read_dram_ctl_register(pvt); - for (dram = 0; dram < DRAM_REG_COUNT; dram++) { - /* - * Call CPU specific READ function to get the DRAM Base and - * Limit values from the DCT. - */ - pvt->ops->read_dram_base_limit(pvt, dram); + for (range = 0; range < DRAM_RANGES; range++) { + u8 rw; - /* - * Only print out debug info on rows with both R and W Enabled. - * Normal processing, compiler should optimize this whole 'if' - * debug output block away. - */ - if (pvt->dram_rw_en[dram] != 0) { - debugf1(" DRAM-BASE[%d]: 0x%016llx " - "DRAM-LIMIT: 0x%016llx\n", - dram, - pvt->dram_base[dram], - pvt->dram_limit[dram]); - - debugf1(" IntlvEn=%s %s %s " - "IntlvSel=%d DstNode=%d\n", - pvt->dram_IntlvEn[dram] ? - "Enabled" : "Disabled", - (pvt->dram_rw_en[dram] & 0x2) ? "W" : "!W", - (pvt->dram_rw_en[dram] & 0x1) ? "R" : "!R", - pvt->dram_IntlvSel[dram], - pvt->dram_DstNode[dram]); - } + /* read settings for this DRAM range */ + read_dram_base_limit_regs(pvt, range); + + rw = dram_rw(pvt, range); + if (!rw) + continue; + + debugf1(" DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n", + range, + get_dram_base(pvt, range), + get_dram_limit(pvt, range)); + + debugf1(" IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n", + dram_intlv_en(pvt, range) ? "Enabled" : "Disabled", + (rw & 0x1) ? "R" : "-", + (rw & 0x2) ? 
"W" : "-", + dram_intlv_sel(pvt, range), + dram_dst_node(pvt, range)); } - amd64_read_dct_base_mask(pvt); + read_dct_base_mask(pvt); - amd64_read_pci_cfg(pvt->F1, K8_DHAR, &pvt->dhar); - amd64_read_dbam_reg(pvt); + amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar); + amd64_read_dct_pci_cfg(pvt, DBAM0, &pvt->dbam0); amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare); - amd64_read_pci_cfg(pvt->F2, F10_DCLR_0, &pvt->dclr0); - amd64_read_pci_cfg(pvt->F2, F10_DCHR_0, &pvt->dchr0); + amd64_read_dct_pci_cfg(pvt, DCLR0, &pvt->dclr0); + amd64_read_dct_pci_cfg(pvt, DCHR0, &pvt->dchr0); - if (boot_cpu_data.x86 >= 0x10) { - if (!dct_ganging_enabled(pvt)) { - amd64_read_pci_cfg(pvt->F2, F10_DCLR_1, &pvt->dclr1); - amd64_read_pci_cfg(pvt->F2, F10_DCHR_1, &pvt->dchr1); - } - amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp); + if (!dct_ganging_enabled(pvt)) { + amd64_read_dct_pci_cfg(pvt, DCLR1, &pvt->dclr1); + amd64_read_dct_pci_cfg(pvt, DCHR1, &pvt->dchr1); } - if (boot_cpu_data.x86 == 0x10 && - boot_cpu_data.x86_model > 7 && - /* F3x180[EccSymbolSize]=1 => x8 symbols */ - tmp & BIT(25)) - pvt->syn_type = 8; - else - pvt->syn_type = 4; + pvt->ecc_sym_sz = 4; - amd64_dump_misc_regs(pvt); + if (c->x86 >= 0x10) { + amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp); + amd64_read_dct_pci_cfg(pvt, DBAM1, &pvt->dbam1); + + /* F10h, revD and later can do x8 ECC too */ + if ((c->x86 > 0x10 || c->x86_model > 7) && tmp & BIT(25)) + pvt->ecc_sym_sz = 8; + } + dump_misc_regs(pvt); } /* * NOTE: CPU Revision Dependent code * * Input: - * @csrow_nr ChipSelect Row Number (0..pvt->cs_count-1) + * @csrow_nr ChipSelect Row Number (0..NUM_CHIPSELECTS-1) * k8 private pointer to --> * DRAM Bank Address mapping register * node_id @@ -2144,7 +2047,7 @@ static void read_mc_regs(struct amd64_pvt *pvt) * encompasses * */ -static u32 amd64_csrow_nr_pages(int csrow_nr, struct amd64_pvt *pvt) +static u32 amd64_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr) { u32 cs_mode, nr_pages; @@ -2157,7 +2060,7 @@ static u32 amd64_csrow_nr_pages(int csrow_nr, struct amd64_pvt *pvt) */ cs_mode = (pvt->dbam0 >> ((csrow_nr / 2) * 4)) & 0xF; - nr_pages = pvt->ops->dbam_to_cs(pvt, cs_mode) << (20 - PAGE_SHIFT); + nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode) << (20 - PAGE_SHIFT); /* * If dual channel then double the memory size of single channel. 
@@ -2180,23 +2083,22 @@ static int init_csrows(struct mem_ctl_info *mci) { struct csrow_info *csrow; struct amd64_pvt *pvt = mci->pvt_info; - u64 input_addr_min, input_addr_max, sys_addr; + u64 input_addr_min, input_addr_max, sys_addr, base, mask; u32 val; int i, empty = 1; - amd64_read_pci_cfg(pvt->F3, K8_NBCFG, &val); + amd64_read_pci_cfg(pvt->F3, NBCFG, &val); pvt->nbcfg = val; - pvt->ctl_error_info.nbcfg = val; debugf0("node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n", pvt->mc_node_id, val, - !!(val & K8_NBCFG_CHIPKILL), !!(val & K8_NBCFG_ECC_ENABLE)); + !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE)); - for (i = 0; i < pvt->cs_count; i++) { + for_each_chip_select(i, 0, pvt) { csrow = &mci->csrows[i]; - if ((pvt->dcsb0[i] & K8_DCSB_CS_ENABLE) == 0) { + if (!csrow_enabled(i, 0, pvt)) { debugf1("----CSROW %d EMPTY for node %d\n", i, pvt->mc_node_id); continue; @@ -2206,13 +2108,15 @@ static int init_csrows(struct mem_ctl_info *mci) i, pvt->mc_node_id); empty = 0; - csrow->nr_pages = amd64_csrow_nr_pages(i, pvt); + csrow->nr_pages = amd64_csrow_nr_pages(pvt, 0, i); find_csrow_limits(mci, i, &input_addr_min, &input_addr_max); sys_addr = input_addr_to_sys_addr(mci, input_addr_min); csrow->first_page = (u32) (sys_addr >> PAGE_SHIFT); sys_addr = input_addr_to_sys_addr(mci, input_addr_max); csrow->last_page = (u32) (sys_addr >> PAGE_SHIFT); - csrow->page_mask = ~mask_from_dct_mask(pvt, i); + + get_cs_base_and_mask(pvt, i, 0, &base, &mask); + csrow->page_mask = ~mask; /* 8 bytes of resolution */ csrow->mtype = amd64_determine_memory_type(pvt, i); @@ -2231,9 +2135,9 @@ static int init_csrows(struct mem_ctl_info *mci) /* * determine whether CHIPKILL or JUST ECC or NO ECC is operating */ - if (pvt->nbcfg & K8_NBCFG_ECC_ENABLE) + if (pvt->nbcfg & NBCFG_ECC_ENABLE) csrow->edac_mode = - (pvt->nbcfg & K8_NBCFG_CHIPKILL) ? + (pvt->nbcfg & NBCFG_CHIPKILL) ? 
EDAC_S4ECD4ED : EDAC_SECDED; else csrow->edac_mode = EDAC_NONE; @@ -2243,7 +2147,7 @@ static int init_csrows(struct mem_ctl_info *mci) } /* get all cores on this DCT */ -static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, int nid) +static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, unsigned nid) { int cpu; @@ -2253,7 +2157,7 @@ static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, int nid) } /* check MCG_CTL on all the cpus on this node */ -static bool amd64_nb_mce_bank_enabled_on_node(int nid) +static bool amd64_nb_mce_bank_enabled_on_node(unsigned nid) { cpumask_var_t mask; int cpu, nbe; @@ -2270,7 +2174,7 @@ static bool amd64_nb_mce_bank_enabled_on_node(int nid) for_each_cpu(cpu, mask) { struct msr *reg = per_cpu_ptr(msrs, cpu); - nbe = reg->l & K8_MSR_MCGCTL_NBE; + nbe = reg->l & MSR_MCGCTL_NBE; debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n", cpu, reg->q, @@ -2305,16 +2209,16 @@ static int toggle_ecc_err_reporting(struct ecc_settings *s, u8 nid, bool on) struct msr *reg = per_cpu_ptr(msrs, cpu); if (on) { - if (reg->l & K8_MSR_MCGCTL_NBE) + if (reg->l & MSR_MCGCTL_NBE) s->flags.nb_mce_enable = 1; - reg->l |= K8_MSR_MCGCTL_NBE; + reg->l |= MSR_MCGCTL_NBE; } else { /* * Turn off NB MCE reporting only when it was off before */ if (!s->flags.nb_mce_enable) - reg->l &= ~K8_MSR_MCGCTL_NBE; + reg->l &= ~MSR_MCGCTL_NBE; } } wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs); @@ -2328,40 +2232,38 @@ static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid, struct pci_dev *F3) { bool ret = true; - u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn; + u32 value, mask = 0x3; /* UECC/CECC enable */ if (toggle_ecc_err_reporting(s, nid, ON)) { amd64_warn("Error enabling ECC reporting over MCGCTL!\n"); return false; } - amd64_read_pci_cfg(F3, K8_NBCTL, &value); + amd64_read_pci_cfg(F3, NBCTL, &value); - /* turn on UECCEn and CECCEn bits */ s->old_nbctl = value & mask; s->nbctl_valid = true; value |= mask; - pci_write_config_dword(F3, K8_NBCTL, value); + amd64_write_pci_cfg(F3, NBCTL, value); - amd64_read_pci_cfg(F3, K8_NBCFG, &value); + amd64_read_pci_cfg(F3, NBCFG, &value); - debugf0("1: node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n", - nid, value, - !!(value & K8_NBCFG_CHIPKILL), !!(value & K8_NBCFG_ECC_ENABLE)); + debugf0("1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n", + nid, value, !!(value & NBCFG_ECC_ENABLE)); - if (!(value & K8_NBCFG_ECC_ENABLE)) { + if (!(value & NBCFG_ECC_ENABLE)) { amd64_warn("DRAM ECC disabled on this node, enabling...\n"); s->flags.nb_ecc_prev = 0; /* Attempt to turn on DRAM ECC Enable */ - value |= K8_NBCFG_ECC_ENABLE; - pci_write_config_dword(F3, K8_NBCFG, value); + value |= NBCFG_ECC_ENABLE; + amd64_write_pci_cfg(F3, NBCFG, value); - amd64_read_pci_cfg(F3, K8_NBCFG, &value); + amd64_read_pci_cfg(F3, NBCFG, &value); - if (!(value & K8_NBCFG_ECC_ENABLE)) { + if (!(value & NBCFG_ECC_ENABLE)) { amd64_warn("Hardware rejected DRAM ECC enable," "check memory DIMM configuration.\n"); ret = false; @@ -2372,9 +2274,8 @@ static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid, s->flags.nb_ecc_prev = 1; } - debugf0("2: node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n", - nid, value, - !!(value & K8_NBCFG_CHIPKILL), !!(value & K8_NBCFG_ECC_ENABLE)); + debugf0("2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n", + nid, value, !!(value & NBCFG_ECC_ENABLE)); return ret; } @@ -2382,22 +2283,23 @@ static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid, static void restore_ecc_error_reporting(struct 
ecc_settings *s, u8 nid, struct pci_dev *F3) { - u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn; + u32 value, mask = 0x3; /* UECC/CECC enable */ + if (!s->nbctl_valid) return; - amd64_read_pci_cfg(F3, K8_NBCTL, &value); + amd64_read_pci_cfg(F3, NBCTL, &value); value &= ~mask; value |= s->old_nbctl; - pci_write_config_dword(F3, K8_NBCTL, value); + amd64_write_pci_cfg(F3, NBCTL, value); /* restore previous BIOS DRAM ECC "off" setting we force-enabled */ if (!s->flags.nb_ecc_prev) { - amd64_read_pci_cfg(F3, K8_NBCFG, &value); - value &= ~K8_NBCFG_ECC_ENABLE; - pci_write_config_dword(F3, K8_NBCFG, value); + amd64_read_pci_cfg(F3, NBCFG, &value); + value &= ~NBCFG_ECC_ENABLE; + amd64_write_pci_cfg(F3, NBCFG, value); } /* restore the NB Enable MCGCTL bit */ @@ -2423,9 +2325,9 @@ static bool ecc_enabled(struct pci_dev *F3, u8 nid) u8 ecc_en = 0; bool nb_mce_en = false; - amd64_read_pci_cfg(F3, K8_NBCFG, &value); + amd64_read_pci_cfg(F3, NBCFG, &value); - ecc_en = !!(value & K8_NBCFG_ECC_ENABLE); + ecc_en = !!(value & NBCFG_ECC_ENABLE); amd64_info("DRAM ECC %s.\n", (ecc_en ? "enabled" : "disabled")); nb_mce_en = amd64_nb_mce_bank_enabled_on_node(nid); @@ -2463,23 +2365,24 @@ static void set_mc_sysfs_attrs(struct mem_ctl_info *mci) mci->mc_driver_sysfs_attributes = sysfs_attrs; } -static void setup_mci_misc_attrs(struct mem_ctl_info *mci) +static void setup_mci_misc_attrs(struct mem_ctl_info *mci, + struct amd64_family_type *fam) { struct amd64_pvt *pvt = mci->pvt_info; mci->mtype_cap = MEM_FLAG_DDR2 | MEM_FLAG_RDDR2; mci->edac_ctl_cap = EDAC_FLAG_NONE; - if (pvt->nbcap & K8_NBCAP_SECDED) + if (pvt->nbcap & NBCAP_SECDED) mci->edac_ctl_cap |= EDAC_FLAG_SECDED; - if (pvt->nbcap & K8_NBCAP_CHIPKILL) + if (pvt->nbcap & NBCAP_CHIPKILL) mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED; mci->edac_cap = amd64_determine_edac_cap(pvt); mci->mod_name = EDAC_MOD_STR; mci->mod_ver = EDAC_AMD64_VERSION; - mci->ctl_name = pvt->ctl_name; + mci->ctl_name = fam->ctl_name; mci->dev_name = pci_name(pvt->F2); mci->ctl_page_to_phys = NULL; @@ -2500,14 +2403,16 @@ static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt) case 0xf: fam_type = &amd64_family_types[K8_CPUS]; pvt->ops = &amd64_family_types[K8_CPUS].ops; - pvt->ctl_name = fam_type->ctl_name; - pvt->min_scrubrate = K8_MIN_SCRUB_RATE_BITS; break; + case 0x10: fam_type = &amd64_family_types[F10_CPUS]; pvt->ops = &amd64_family_types[F10_CPUS].ops; - pvt->ctl_name = fam_type->ctl_name; - pvt->min_scrubrate = F10_MIN_SCRUB_RATE_BITS; + break; + + case 0x15: + fam_type = &amd64_family_types[F15_CPUS]; + pvt->ops = &amd64_family_types[F15_CPUS].ops; break; default: @@ -2517,7 +2422,7 @@ static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt) pvt->ext_model = boot_cpu_data.x86_model >> 4; - amd64_info("%s %sdetected (node %d).\n", pvt->ctl_name, + amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name, (fam == 0xf ? (pvt->ext_model >= K8_REV_F ? 
"revF or later " : "revE or earlier ") @@ -2564,14 +2469,14 @@ static int amd64_init_one_instance(struct pci_dev *F2) goto err_siblings; ret = -ENOMEM; - mci = edac_mc_alloc(0, pvt->cs_count, pvt->channel_count, nid); + mci = edac_mc_alloc(0, pvt->csels[0].b_cnt, pvt->channel_count, nid); if (!mci) goto err_siblings; mci->pvt_info = pvt; mci->dev = &pvt->F2->dev; - setup_mci_misc_attrs(mci); + setup_mci_misc_attrs(mci, fam_type); if (init_csrows(mci)) mci->edac_cap = EDAC_FLAG_NONE; @@ -2714,6 +2619,15 @@ static const struct pci_device_id amd64_pci_table[] __devinitdata = { .class = 0, .class_mask = 0, }, + { + .vendor = PCI_VENDOR_ID_AMD, + .device = PCI_DEVICE_ID_AMD_15H_NB_F2, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .class = 0, + .class_mask = 0, + }, + {0, } }; MODULE_DEVICE_TABLE(pci, amd64_pci_table); @@ -2754,7 +2668,7 @@ static int __init amd64_edac_init(void) { int err = -ENODEV; - edac_printk(KERN_INFO, EDAC_MOD_STR, EDAC_AMD64_VERSION "\n"); + printk(KERN_INFO "AMD64 EDAC driver v%s\n", EDAC_AMD64_VERSION); opstate_init(); diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h index 613ec72b0f65..11be36a311eb 100644 --- a/drivers/edac/amd64_edac.h +++ b/drivers/edac/amd64_edac.h @@ -144,7 +144,7 @@ * sections 3.5.4 and 3.5.5 for more information. */ -#define EDAC_AMD64_VERSION "v3.3.0" +#define EDAC_AMD64_VERSION "3.4.0" #define EDAC_MOD_STR "amd64_edac" /* Extended Model from CPUID, for CPU Revision numbers */ @@ -153,85 +153,64 @@ #define K8_REV_F 4 /* Hardware limit on ChipSelect rows per MC and processors per system */ -#define MAX_CS_COUNT 8 -#define DRAM_REG_COUNT 8 +#define NUM_CHIPSELECTS 8 +#define DRAM_RANGES 8 #define ON true #define OFF false /* + * Create a contiguous bitmask starting at bit position @lo and ending at + * position @hi. For example + * + * GENMASK(21, 39) gives us the 64bit vector 0x000000ffffe00000. 
+ */ +#define GENMASK(lo, hi) (((1ULL << ((hi) - (lo) + 1)) - 1) << (lo)) + +/* * PCI-defined configuration space registers */ +#define PCI_DEVICE_ID_AMD_15H_NB_F1 0x1601 +#define PCI_DEVICE_ID_AMD_15H_NB_F2 0x1602 /* * Function 1 - Address Map */ -#define K8_DRAM_BASE_LOW 0x40 -#define K8_DRAM_LIMIT_LOW 0x44 -#define K8_DHAR 0xf0 - -#define DHAR_VALID BIT(0) -#define F10_DRAM_MEM_HOIST_VALID BIT(1) +#define DRAM_BASE_LO 0x40 +#define DRAM_LIMIT_LO 0x44 -#define DHAR_BASE_MASK 0xff000000 -#define dhar_base(dhar) (dhar & DHAR_BASE_MASK) +#define dram_intlv_en(pvt, i) ((u8)((pvt->ranges[i].base.lo >> 8) & 0x7)) +#define dram_rw(pvt, i) ((u8)(pvt->ranges[i].base.lo & 0x3)) +#define dram_intlv_sel(pvt, i) ((u8)((pvt->ranges[i].lim.lo >> 8) & 0x7)) +#define dram_dst_node(pvt, i) ((u8)(pvt->ranges[i].lim.lo & 0x7)) -#define K8_DHAR_OFFSET_MASK 0x0000ff00 -#define k8_dhar_offset(dhar) ((dhar & K8_DHAR_OFFSET_MASK) << 16) +#define DHAR 0xf0 +#define dhar_valid(pvt) ((pvt)->dhar & BIT(0)) +#define dhar_mem_hoist_valid(pvt) ((pvt)->dhar & BIT(1)) +#define dhar_base(pvt) ((pvt)->dhar & 0xff000000) +#define k8_dhar_offset(pvt) (((pvt)->dhar & 0x0000ff00) << 16) -#define F10_DHAR_OFFSET_MASK 0x0000ff80 /* NOTE: Extra mask bit vs K8 */ -#define f10_dhar_offset(dhar) ((dhar & F10_DHAR_OFFSET_MASK) << 16) +#define f10_dhar_offset(pvt) (((pvt)->dhar & 0x0000ff80) << 16) +#define DCT_CFG_SEL 0x10C -/* F10 High BASE/LIMIT registers */ -#define F10_DRAM_BASE_HIGH 0x140 -#define F10_DRAM_LIMIT_HIGH 0x144 +#define DRAM_BASE_HI 0x140 +#define DRAM_LIMIT_HI 0x144 /* * Function 2 - DRAM controller */ -#define K8_DCSB0 0x40 -#define F10_DCSB1 0x140 +#define DCSB0 0x40 +#define DCSB1 0x140 +#define DCSB_CS_ENABLE BIT(0) -#define K8_DCSB_CS_ENABLE BIT(0) -#define K8_DCSB_NPT_SPARE BIT(1) -#define K8_DCSB_NPT_TESTFAIL BIT(2) +#define DCSM0 0x60 +#define DCSM1 0x160 -/* - * REV E: select [31:21] and [15:9] from DCSB and the shift amount to form - * the address - */ -#define REV_E_DCSB_BASE_BITS (0xFFE0FE00ULL) -#define REV_E_DCS_SHIFT 4 - -#define REV_F_F1Xh_DCSB_BASE_BITS (0x1FF83FE0ULL) -#define REV_F_F1Xh_DCS_SHIFT 8 - -/* - * REV F and later: selects [28:19] and [13:5] from DCSB and the shift amount - * to form the address - */ -#define REV_F_DCSB_BASE_BITS (0x1FF83FE0ULL) -#define REV_F_DCS_SHIFT 8 - -/* DRAM CS Mask Registers */ -#define K8_DCSM0 0x60 -#define F10_DCSM1 0x160 - -/* REV E: select [29:21] and [15:9] from DCSM */ -#define REV_E_DCSM_MASK_BITS 0x3FE0FE00 - -/* unused bits [24:20] and [12:0] */ -#define REV_E_DCS_NOTUSED_BITS 0x01F01FFF - -/* REV F and later: select [28:19] and [13:5] from DCSM */ -#define REV_F_F1Xh_DCSM_MASK_BITS 0x1FF83FE0 - -/* unused bits [26:22] and [12:0] */ -#define REV_F_F1Xh_DCS_NOTUSED_BITS 0x07C01FFF +#define csrow_enabled(i, dct, pvt) ((pvt)->csels[(dct)].csbases[(i)] & DCSB_CS_ENABLE) #define DBAM0 0x80 #define DBAM1 0x180 @@ -241,148 +220,84 @@ #define DBAM_MAX_VALUE 11 - -#define F10_DCLR_0 0x90 -#define F10_DCLR_1 0x190 +#define DCLR0 0x90 +#define DCLR1 0x190 #define REVE_WIDTH_128 BIT(16) -#define F10_WIDTH_128 BIT(11) +#define WIDTH_128 BIT(11) +#define DCHR0 0x94 +#define DCHR1 0x194 +#define DDR3_MODE BIT(8) -#define F10_DCHR_0 0x94 -#define F10_DCHR_1 0x194 +#define DCT_SEL_LO 0x110 +#define dct_sel_baseaddr(pvt) ((pvt)->dct_sel_lo & 0xFFFFF800) +#define dct_sel_interleave_addr(pvt) (((pvt)->dct_sel_lo >> 6) & 0x3) +#define dct_high_range_enabled(pvt) ((pvt)->dct_sel_lo & BIT(0)) +#define dct_interleave_enabled(pvt) ((pvt)->dct_sel_lo & BIT(2)) -#define 
F10_DCHR_FOUR_RANK_DIMM BIT(18) -#define DDR3_MODE BIT(8) -#define F10_DCHR_MblMode BIT(6) +#define dct_ganging_enabled(pvt) ((boot_cpu_data.x86 == 0x10) && ((pvt)->dct_sel_lo & BIT(4))) +#define dct_data_intlv_enabled(pvt) ((pvt)->dct_sel_lo & BIT(5)) +#define dct_memory_cleared(pvt) ((pvt)->dct_sel_lo & BIT(10)) -#define F10_DCTL_SEL_LOW 0x110 -#define dct_sel_baseaddr(pvt) ((pvt->dram_ctl_select_low) & 0xFFFFF800) -#define dct_sel_interleave_addr(pvt) (((pvt->dram_ctl_select_low) >> 6) & 0x3) -#define dct_high_range_enabled(pvt) (pvt->dram_ctl_select_low & BIT(0)) -#define dct_interleave_enabled(pvt) (pvt->dram_ctl_select_low & BIT(2)) -#define dct_ganging_enabled(pvt) (pvt->dram_ctl_select_low & BIT(4)) -#define dct_data_intlv_enabled(pvt) (pvt->dram_ctl_select_low & BIT(5)) -#define dct_dram_enabled(pvt) (pvt->dram_ctl_select_low & BIT(8)) -#define dct_memory_cleared(pvt) (pvt->dram_ctl_select_low & BIT(10)) +#define SWAP_INTLV_REG 0x10c -#define F10_DCTL_SEL_HIGH 0x114 +#define DCT_SEL_HI 0x114 /* * Function 3 - Misc Control */ -#define K8_NBCTL 0x40 - -/* Correctable ECC error reporting enable */ -#define K8_NBCTL_CECCEn BIT(0) - -/* UnCorrectable ECC error reporting enable */ -#define K8_NBCTL_UECCEn BIT(1) +#define NBCTL 0x40 -#define K8_NBCFG 0x44 -#define K8_NBCFG_CHIPKILL BIT(23) -#define K8_NBCFG_ECC_ENABLE BIT(22) +#define NBCFG 0x44 +#define NBCFG_CHIPKILL BIT(23) +#define NBCFG_ECC_ENABLE BIT(22) -#define K8_NBSL 0x48 - - -/* Family F10h: Normalized Extended Error Codes */ -#define F10_NBSL_EXT_ERR_RES 0x0 +/* F3x48: NBSL */ #define F10_NBSL_EXT_ERR_ECC 0x8 +#define NBSL_PP_OBS 0x2 -/* Next two are overloaded values */ -#define F10_NBSL_EXT_ERR_LINK_PROTO 0xB -#define F10_NBSL_EXT_ERR_L3_PROTO 0xB - -#define F10_NBSL_EXT_ERR_NB_ARRAY 0xC -#define F10_NBSL_EXT_ERR_DRAM_PARITY 0xD -#define F10_NBSL_EXT_ERR_LINK_RETRY 0xE - -/* Next two are overloaded values */ -#define F10_NBSL_EXT_ERR_GART_WALK 0xF -#define F10_NBSL_EXT_ERR_DEV_WALK 0xF - -/* 0x10 to 0x1B: Reserved */ -#define F10_NBSL_EXT_ERR_L3_DATA 0x1C -#define F10_NBSL_EXT_ERR_L3_TAG 0x1D -#define F10_NBSL_EXT_ERR_L3_LRU 0x1E - -/* K8: Normalized Extended Error Codes */ -#define K8_NBSL_EXT_ERR_ECC 0x0 -#define K8_NBSL_EXT_ERR_CRC 0x1 -#define K8_NBSL_EXT_ERR_SYNC 0x2 -#define K8_NBSL_EXT_ERR_MST 0x3 -#define K8_NBSL_EXT_ERR_TGT 0x4 -#define K8_NBSL_EXT_ERR_GART 0x5 -#define K8_NBSL_EXT_ERR_RMW 0x6 -#define K8_NBSL_EXT_ERR_WDT 0x7 -#define K8_NBSL_EXT_ERR_CHIPKILL_ECC 0x8 -#define K8_NBSL_EXT_ERR_DRAM_PARITY 0xD - -/* - * The following are for BUS type errors AFTER values have been normalized by - * shifting right - */ -#define K8_NBSL_PP_SRC 0x0 -#define K8_NBSL_PP_RES 0x1 -#define K8_NBSL_PP_OBS 0x2 -#define K8_NBSL_PP_GENERIC 0x3 - -#define EXTRACT_ERR_CPU_MAP(x) ((x) & 0xF) - -#define K8_NBEAL 0x50 -#define K8_NBEAH 0x54 -#define K8_SCRCTRL 0x58 - -#define F10_NB_CFG_LOW 0x88 +#define SCRCTRL 0x58 #define F10_ONLINE_SPARE 0xB0 -#define F10_ONLINE_SPARE_SWAPDONE0(x) ((x) & BIT(1)) -#define F10_ONLINE_SPARE_SWAPDONE1(x) ((x) & BIT(3)) -#define F10_ONLINE_SPARE_BADDRAM_CS0(x) (((x) >> 4) & 0x00000007) -#define F10_ONLINE_SPARE_BADDRAM_CS1(x) (((x) >> 8) & 0x00000007) +#define online_spare_swap_done(pvt, c) (((pvt)->online_spare >> (1 + 2 * (c))) & 0x1) +#define online_spare_bad_dramcs(pvt, c) (((pvt)->online_spare >> (4 + 4 * (c))) & 0x7) #define F10_NB_ARRAY_ADDR 0xB8 - -#define F10_NB_ARRAY_DRAM_ECC 0x80000000 +#define F10_NB_ARRAY_DRAM_ECC BIT(31) /* Bits [2:1] are used to select 16-byte section within a 64-byte 
cacheline */ #define SET_NB_ARRAY_ADDRESS(section) (((section) & 0x3) << 1) #define F10_NB_ARRAY_DATA 0xBC - #define SET_NB_DRAM_INJECTION_WRITE(word, bits) \ (BIT(((word) & 0xF) + 20) | \ BIT(17) | bits) - #define SET_NB_DRAM_INJECTION_READ(word, bits) \ (BIT(((word) & 0xF) + 20) | \ BIT(16) | bits) -#define K8_NBCAP 0xE8 -#define K8_NBCAP_CORES (BIT(12)|BIT(13)) -#define K8_NBCAP_CHIPKILL BIT(4) -#define K8_NBCAP_SECDED BIT(3) -#define K8_NBCAP_DCT_DUAL BIT(0) +#define NBCAP 0xE8 +#define NBCAP_CHIPKILL BIT(4) +#define NBCAP_SECDED BIT(3) +#define NBCAP_DCT_DUAL BIT(0) #define EXT_NB_MCA_CFG 0x180 /* MSRs */ -#define K8_MSR_MCGCTL_NBE BIT(4) - -#define K8_MSR_MC4CTL 0x0410 -#define K8_MSR_MC4STAT 0x0411 -#define K8_MSR_MC4ADDR 0x0412 +#define MSR_MCGCTL_NBE BIT(4) /* AMD sets the first MC device at device ID 0x18. */ -static inline int get_node_id(struct pci_dev *pdev) +static inline u8 get_node_id(struct pci_dev *pdev) { return PCI_SLOT(pdev->devfn) - 0x18; } -enum amd64_chipset_families { +enum amd_families { K8_CPUS = 0, F10_CPUS, + F15_CPUS, + NUM_FAMILIES, }; /* Error injection control structure */ @@ -392,13 +307,35 @@ struct error_injection { u32 bit_map; }; +/* low and high part of PCI config space regs */ +struct reg_pair { + u32 lo, hi; +}; + +/* + * See F1x[1, 0][7C:40] DRAM Base/Limit Registers + */ +struct dram_range { + struct reg_pair base; + struct reg_pair lim; +}; + +/* A DCT chip selects collection */ +struct chip_select { + u32 csbases[NUM_CHIPSELECTS]; + u8 b_cnt; + + u32 csmasks[NUM_CHIPSELECTS]; + u8 m_cnt; +}; + struct amd64_pvt { struct low_ops *ops; /* pci_device handles which we utilize */ struct pci_dev *F1, *F2, *F3; - int mc_node_id; /* MC index of this MC node */ + unsigned mc_node_id; /* MC index of this MC node */ int ext_model; /* extended model value of this node */ int channel_count; @@ -414,60 +351,50 @@ struct amd64_pvt { u32 dbam0; /* DRAM Base Address Mapping reg for DCT0 */ u32 dbam1; /* DRAM Base Address Mapping reg for DCT1 */ - /* DRAM CS Base Address Registers F2x[1,0][5C:40] */ - u32 dcsb0[MAX_CS_COUNT]; - u32 dcsb1[MAX_CS_COUNT]; - - /* DRAM CS Mask Registers F2x[1,0][6C:60] */ - u32 dcsm0[MAX_CS_COUNT]; - u32 dcsm1[MAX_CS_COUNT]; - - /* - * Decoded parts of DRAM BASE and LIMIT Registers - * F1x[78,70,68,60,58,50,48,40] - */ - u64 dram_base[DRAM_REG_COUNT]; - u64 dram_limit[DRAM_REG_COUNT]; - u8 dram_IntlvSel[DRAM_REG_COUNT]; - u8 dram_IntlvEn[DRAM_REG_COUNT]; - u8 dram_DstNode[DRAM_REG_COUNT]; - u8 dram_rw_en[DRAM_REG_COUNT]; - - /* - * The following fields are set at (load) run time, after CPU revision - * has been determined, since the dct_base and dct_mask registers vary - * based on revision - */ - u32 dcsb_base; /* DCSB base bits */ - u32 dcsm_mask; /* DCSM mask bits */ - u32 cs_count; /* num chip selects (== num DCSB registers) */ - u32 num_dcsm; /* Number of DCSM registers */ - u32 dcs_mask_notused; /* DCSM notused mask bits */ - u32 dcs_shift; /* DCSB and DCSM shift value */ + /* one for each DCT */ + struct chip_select csels[2]; + + /* DRAM base and limit pairs F1x[78,70,68,60,58,50,48,40] */ + struct dram_range ranges[DRAM_RANGES]; u64 top_mem; /* top of memory below 4GB */ u64 top_mem2; /* top of memory above 4GB */ - u32 dram_ctl_select_low; /* DRAM Controller Select Low Reg */ - u32 dram_ctl_select_high; /* DRAM Controller Select High Reg */ - u32 online_spare; /* On-Line spare Reg */ + u32 dct_sel_lo; /* DRAM Controller Select Low */ + u32 dct_sel_hi; /* DRAM Controller Select High */ + u32 online_spare; /* On-Line spare Reg 
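/*
 * Example of the injection encoding above:
 *
 *   SET_NB_DRAM_INJECTION_WRITE(2, 0x5)
 *     == BIT((2 & 0xF) + 20) | BIT(17) | 0x5
 *     == 0x00400000 | 0x00020000 | 0x5
 *     == 0x00420005
 *
 * i.e. select 16-bit word 2, set the write-injection trigger (BIT(17);
 * reads use BIT(16) instead), with error bit map 0x5.
 */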
*/ /* x4 or x8 syndromes in use */ - u8 syn_type; - - /* temp storage for when input is received from sysfs */ - struct err_regs ctl_error_info; + u8 ecc_sym_sz; /* place to store error injection parameters prior to issue */ struct error_injection injection; +}; - /* DCT per-family scrubrate setting */ - u32 min_scrubrate; +static inline u64 get_dram_base(struct amd64_pvt *pvt, unsigned i) +{ + u64 addr = ((u64)pvt->ranges[i].base.lo & 0xffff0000) << 8; - /* family name this instance is running on */ - const char *ctl_name; + if (boot_cpu_data.x86 == 0xf) + return addr; -}; + return (((u64)pvt->ranges[i].base.hi & 0x000000ff) << 40) | addr; +} + +static inline u64 get_dram_limit(struct amd64_pvt *pvt, unsigned i) +{ + u64 lim = (((u64)pvt->ranges[i].lim.lo & 0xffff0000) << 8) | 0x00ffffff; + + if (boot_cpu_data.x86 == 0xf) + return lim; + + return (((u64)pvt->ranges[i].lim.hi & 0x000000ff) << 40) | lim; +} + +static inline u16 extract_syndrome(u64 status) +{ + return ((status >> 47) & 0xff) | ((status >> 16) & 0xff00); +} /* * per-node ECC settings descriptor @@ -482,14 +409,6 @@ struct ecc_settings { } flags; }; -extern const char *tt_msgs[4]; -extern const char *ll_msgs[4]; -extern const char *rrrr_msgs[16]; -extern const char *to_msgs[2]; -extern const char *pp_msgs[4]; -extern const char *ii_msgs[4]; -extern const char *htlink_msgs[8]; - #ifdef CONFIG_EDAC_DEBUG #define NUM_DBG_ATTRS 5 #else @@ -511,14 +430,11 @@ extern struct mcidev_sysfs_attribute amd64_dbg_attrs[NUM_DBG_ATTRS], */ struct low_ops { int (*early_channel_count) (struct amd64_pvt *pvt); - - u64 (*get_error_address) (struct mem_ctl_info *mci, - struct err_regs *info); - void (*read_dram_base_limit) (struct amd64_pvt *pvt, int dram); - void (*read_dram_ctl_register) (struct amd64_pvt *pvt); - void (*map_sysaddr_to_csrow) (struct mem_ctl_info *mci, - struct err_regs *info, u64 SystemAddr); - int (*dbam_to_cs) (struct amd64_pvt *pvt, int cs_mode); + void (*map_sysaddr_to_csrow) (struct mem_ctl_info *mci, u64 sys_addr, + u16 syndrome); + int (*dbam_to_cs) (struct amd64_pvt *pvt, u8 dct, unsigned cs_mode); + int (*read_dct_pci_cfg) (struct amd64_pvt *pvt, int offset, + u32 *val, const char *func); }; struct amd64_family_type { @@ -527,28 +443,17 @@ struct amd64_family_type { struct low_ops ops; }; -static inline int amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset, - u32 *val, const char *func) -{ - int err = 0; - - err = pci_read_config_dword(pdev, offset, val); - if (err) - amd64_warn("%s: error reading F%dx%x.\n", - func, PCI_FUNC(pdev->devfn), offset); - - return err; -} +int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset, + u32 val, const char *func); #define amd64_read_pci_cfg(pdev, offset, val) \ - amd64_read_pci_cfg_dword(pdev, offset, val, __func__) + __amd64_read_pci_cfg_dword(pdev, offset, val, __func__) -/* - * For future CPU versions, verify the following as new 'slow' rates appear and - * modify the necessary skip values for the supported CPU. 
- */ -#define K8_MIN_SCRUB_RATE_BITS 0x0 -#define F10_MIN_SCRUB_RATE_BITS 0x5 +#define amd64_write_pci_cfg(pdev, offset, val) \ + __amd64_write_pci_cfg_dword(pdev, offset, val, __func__) + +#define amd64_read_dct_pci_cfg(pvt, offset, val) \ + pvt->ops->read_dct_pci_cfg(pvt, offset, val, __func__) int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base, u64 *hole_offset, u64 *hole_size); diff --git a/drivers/edac/amd64_edac_inj.c b/drivers/edac/amd64_edac_inj.c index 688478de1cbd..303f10e03dda 100644 --- a/drivers/edac/amd64_edac_inj.c +++ b/drivers/edac/amd64_edac_inj.c @@ -117,13 +117,13 @@ static ssize_t amd64_inject_read_store(struct mem_ctl_info *mci, /* Form value to choose 16-byte section of cacheline */ section = F10_NB_ARRAY_DRAM_ECC | SET_NB_ARRAY_ADDRESS(pvt->injection.section); - pci_write_config_dword(pvt->F3, F10_NB_ARRAY_ADDR, section); + amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_ADDR, section); word_bits = SET_NB_DRAM_INJECTION_READ(pvt->injection.word, pvt->injection.bit_map); /* Issue 'word' and 'bit' along with the READ request */ - pci_write_config_dword(pvt->F3, F10_NB_ARRAY_DATA, word_bits); + amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits); debugf0("section=0x%x word_bits=0x%x\n", section, word_bits); @@ -150,13 +150,13 @@ static ssize_t amd64_inject_write_store(struct mem_ctl_info *mci, /* Form value to choose 16-byte section of cacheline */ section = F10_NB_ARRAY_DRAM_ECC | SET_NB_ARRAY_ADDRESS(pvt->injection.section); - pci_write_config_dword(pvt->F3, F10_NB_ARRAY_ADDR, section); + amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_ADDR, section); word_bits = SET_NB_DRAM_INJECTION_WRITE(pvt->injection.word, pvt->injection.bit_map); /* Issue 'word' and 'bit' along with the READ request */ - pci_write_config_dword(pvt->F3, F10_NB_ARRAY_DATA, word_bits); + amd64_write_pci_cfg(pvt->F3, F10_NB_ARRAY_DATA, word_bits); debugf0("section=0x%x word_bits=0x%x\n", section, word_bits); diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c index 39d97cfdf58c..73196f7b7229 100644 --- a/drivers/edac/edac_mc_sysfs.c +++ b/drivers/edac/edac_mc_sysfs.c @@ -785,10 +785,10 @@ static int edac_create_mci_instance_attributes(struct mem_ctl_info *mci, { int err; - debugf1("%s()\n", __func__); + debugf4("%s()\n", __func__); while (sysfs_attrib) { - debugf1("%s() sysfs_attrib = %p\n",__func__, sysfs_attrib); + debugf4("%s() sysfs_attrib = %p\n",__func__, sysfs_attrib); if (sysfs_attrib->grp) { struct mcidev_sysfs_group_kobj *grp_kobj; @@ -818,7 +818,7 @@ static int edac_create_mci_instance_attributes(struct mem_ctl_info *mci, if (err < 0) return err; } else if (sysfs_attrib->attr.name) { - debugf0("%s() file %s\n", __func__, + debugf4("%s() file %s\n", __func__, sysfs_attrib->attr.name); err = sysfs_create_file(kobj, &sysfs_attrib->attr); @@ -853,26 +853,26 @@ static void edac_remove_mci_instance_attributes(struct mem_ctl_info *mci, * Remove first all the atributes */ while (sysfs_attrib) { - debugf1("%s() sysfs_attrib = %p\n",__func__, sysfs_attrib); + debugf4("%s() sysfs_attrib = %p\n",__func__, sysfs_attrib); if (sysfs_attrib->grp) { - debugf1("%s() seeking for group %s\n", + debugf4("%s() seeking for group %s\n", __func__, sysfs_attrib->grp->name); list_for_each_entry(grp_kobj, &mci->grp_kobj_list, list) { - debugf1("%s() grp_kobj->grp = %p\n",__func__, grp_kobj->grp); + debugf4("%s() grp_kobj->grp = %p\n",__func__, grp_kobj->grp); if (grp_kobj->grp == sysfs_attrib->grp) { edac_remove_mci_instance_attributes(mci, grp_kobj->grp->mcidev_attr, 
&grp_kobj->kobj, count + 1); - debugf0("%s() group %s\n", __func__, + debugf4("%s() group %s\n", __func__, sysfs_attrib->grp->name); kobject_put(&grp_kobj->kobj); } } - debugf1("%s() end of seeking for group %s\n", + debugf4("%s() end of seeking for group %s\n", __func__, sysfs_attrib->grp->name); } else if (sysfs_attrib->attr.name) { - debugf0("%s() file %s\n", __func__, + debugf4("%s() file %s\n", __func__, sysfs_attrib->attr.name); sysfs_remove_file(kobj, &sysfs_attrib->attr); } else @@ -979,7 +979,7 @@ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci) debugf0("%s()\n", __func__); /* remove all csrow kobjects */ - debugf0("%s() unregister this mci kobj\n", __func__); + debugf4("%s() unregister this mci kobj\n", __func__); for (i = 0; i < mci->nr_csrows; i++) { if (mci->csrows[i].nr_pages > 0) { debugf0("%s() unreg csrow-%d\n", __func__, i); @@ -989,18 +989,18 @@ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci) /* remove this mci instance's attribtes */ if (mci->mc_driver_sysfs_attributes) { - debugf0("%s() unregister mci private attributes\n", __func__); + debugf4("%s() unregister mci private attributes\n", __func__); edac_remove_mci_instance_attributes(mci, mci->mc_driver_sysfs_attributes, &mci->edac_mci_kobj, 0); } /* remove the symlink */ - debugf0("%s() remove_link\n", __func__); + debugf4("%s() remove_link\n", __func__); sysfs_remove_link(&mci->edac_mci_kobj, EDAC_DEVICE_SYMLINK); /* unregister this instance's kobject */ - debugf0("%s() remove_mci_instance\n", __func__); + debugf4("%s() remove_mci_instance\n", __func__); kobject_put(&mci->edac_mci_kobj); } diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c index 05523b504271..76d1f576cdc8 100644 --- a/drivers/edac/i7300_edac.c +++ b/drivers/edac/i7300_edac.c @@ -162,7 +162,7 @@ static struct edac_pci_ctl_info *i7300_pci; #define AMBPRESENT_0 0x64 #define AMBPRESENT_1 0x66 -const static u16 mtr_regs[MAX_SLOTS] = { +static const u16 mtr_regs[MAX_SLOTS] = { 0x80, 0x84, 0x88, 0x8c, 0x82, 0x86, 0x8a, 0x8e }; diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c index 3218819b7286..92e65e7038e9 100644 --- a/drivers/edac/i82975x_edac.c +++ b/drivers/edac/i82975x_edac.c @@ -160,8 +160,8 @@ NOTE: Only ONE of the three must be enabled * 3:2 Rank 1 architecture * 1:0 Rank 0 architecture * - * 00 => x16 devices; i.e 4 banks - * 01 => x8 devices; i.e 8 banks + * 00 => 4 banks + * 01 => 8 banks */ #define I82975X_C0BNKARC 0x10e #define I82975X_C1BNKARC 0x18e @@ -278,6 +278,7 @@ static int i82975x_process_error_info(struct mem_ctl_info *mci, struct i82975x_error_info *info, int handle_errors) { int row, multi_chan, chan; + unsigned long offst, page; multi_chan = mci->csrows[0].nr_channels - 1; @@ -292,17 +293,19 @@ static int i82975x_process_error_info(struct mem_ctl_info *mci, info->errsts = info->errsts2; } - chan = info->eap & 1; - info->eap >>= 1; - if (info->xeap ) - info->eap |= 0x80000000; - info->eap >>= PAGE_SHIFT; - row = edac_mc_find_csrow_by_page(mci, info->eap); + page = (unsigned long) info->eap; + if (info->xeap & 1) + page |= 0x100000000ul; + chan = page & 1; + page >>= 1; + offst = page & ((1 << PAGE_SHIFT) - 1); + page >>= PAGE_SHIFT; + row = edac_mc_find_csrow_by_page(mci, page); if (info->errsts & 0x0002) - edac_mc_handle_ue(mci, info->eap, 0, row, "i82975x UE"); + edac_mc_handle_ue(mci, page, offst , row, "i82975x UE"); else - edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row, + edac_mc_handle_ce(mci, page, offst, info->derrsyn, row, multi_chan ? 
chan : 0, "i82975x CE"); @@ -344,11 +347,7 @@ static int dual_channel_active(void __iomem *mch_window) static enum dev_type i82975x_dram_type(void __iomem *mch_window, int rank) { /* - * ASUS P5W DH either does not program this register or programs - * it wrong! - * ECC is possible on i92975x ONLY with DEV_X8 which should mean 'val' - * for each rank should be 01b - the LSB of the word should be 0x55; - * but it reads 0! + * ECC is possible on i82975x ONLY with DEV_X8 */ return DEV_X8; } @@ -356,11 +355,15 @@ static enum dev_type i82975x_dram_type(void __iomem *mch_window, int rank) static void i82975x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev, void __iomem *mch_window) { + static const char *labels[4] = { + "DIMM A1", "DIMM A2", + "DIMM B1", "DIMM B2" + }; struct csrow_info *csrow; unsigned long last_cumul_size; u8 value; u32 cumul_size; - int index; + int index, chan; last_cumul_size = 0; @@ -369,11 +372,7 @@ static void i82975x_init_csrows(struct mem_ctl_info *mci, * The dram row boundary (DRB) reg values are boundary address * for each DRAM row with a granularity of 32 or 64MB (single/dual * channel operation). DRB regs are cumulative; therefore DRB7 will - * contain the total memory contained in all eight rows. - * - * FIXME: - * EDAC currently works for Dual-channel Interleaved configuration. - * Other configurations, which the chip supports, need fixing/testing. + * contain the total memory contained in all rows. + * */ @@ -384,8 +383,26 @@ static void i82975x_init_csrows(struct mem_ctl_info *mci, ((index >= 4) ? 0x80 : 0)); cumul_size = value; cumul_size <<= (I82975X_DRB_SHIFT - PAGE_SHIFT); + /* + * Adjust cumul_size w.r.t. the number of channels + * + */ + if (csrow->nr_channels > 1) + cumul_size <<= 1; debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, cumul_size); + + /* + * Initialise dram labels + * index values: + * [0-7] for single-channel; i.e. csrow->nr_channels = 1 + * [0-3] for dual-channel; i.e.
csrow->nr_channels = 2 + */ + for (chan = 0; chan < csrow->nr_channels; chan++) + strncpy(csrow->channels[chan].label, + labels[(index >> 1) + (chan * 2)], + EDAC_MC_LABEL_LEN); + if (cumul_size == last_cumul_size) continue; /* not populated */ @@ -393,8 +410,8 @@ static void i82975x_init_csrows(struct mem_ctl_info *mci, csrow->last_page = cumul_size - 1; csrow->nr_pages = cumul_size - last_cumul_size; last_cumul_size = cumul_size; - csrow->grain = 1 << 7; /* I82975X_EAP has 128B resolution */ - csrow->mtype = MEM_DDR; /* i82975x supports only DDR2 */ + csrow->grain = 1 << 6; /* I82975X_EAP has 64B resolution */ + csrow->mtype = MEM_DDR2; /* I82975x supports only DDR2 */ csrow->dtype = i82975x_dram_type(mch_window, index); csrow->edac_mode = EDAC_SECDED; /* only supported */ } @@ -515,18 +532,20 @@ static int i82975x_probe1(struct pci_dev *pdev, int dev_idx) debugf3("%s(): init mci\n", __func__); mci->dev = &pdev->dev; - mci->mtype_cap = MEM_FLAG_DDR; + mci->mtype_cap = MEM_FLAG_DDR2; mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; mci->edac_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; mci->mod_name = EDAC_MOD_STR; mci->mod_ver = I82975X_REVISION; mci->ctl_name = i82975x_devs[dev_idx].ctl_name; + mci->dev_name = pci_name(pdev); mci->edac_check = i82975x_check; mci->ctl_page_to_phys = NULL; debugf3("%s(): init pvt\n", __func__); pvt = (struct i82975x_pvt *) mci->pvt_info; pvt->mch_window = mch_window; i82975x_init_csrows(mci, pdev, mch_window); + mci->scrub_mode = SCRUB_HW_SRC; i82975x_get_error_info(mci, &discard); /* clear counters */ /* finalize this instance of memory controller with edac core */ @@ -664,7 +683,7 @@ module_init(i82975x_init); module_exit(i82975x_exit); MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Arvind R. <arvind@acarlab.com>"); +MODULE_AUTHOR("Arvind R. <arvino55@gmail.com>"); MODULE_DESCRIPTION("MC support for Intel 82975 memory hub controllers"); module_param(edac_op_state, int, 0444); diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c index f6cf73d93359..795cfbc0bf50 100644 --- a/drivers/edac/mce_amd.c +++ b/drivers/edac/mce_amd.c @@ -594,6 +594,7 @@ static bool nb_noop_mce(u16 ec, u8 xec) void amd_decode_nb_mce(int node_id, struct mce *m, u32 nbcfg) { + struct cpuinfo_x86 *c = &boot_cpu_data; u16 ec = EC(m->status); u8 xec = XEC(m->status, 0x1f); u32 nbsh = (u32)(m->status >> 32); @@ -602,9 +603,8 @@ void amd_decode_nb_mce(int node_id, struct mce *m, u32 nbcfg) pr_emerg(HW_ERR "Northbridge Error (node %d", node_id); /* F10h, revD can disable ErrCpu[3:0] through ErrCpuVal */ - if ((boot_cpu_data.x86 == 0x10) && - (boot_cpu_data.x86_model > 7)) { - if (nbsh & K8_NBSH_ERR_CPU_VAL) + if (c->x86 == 0x10 && c->x86_model > 7) { + if (nbsh & NBSH_ERR_CPU_VAL) core = nbsh & nb_err_cpumask; } else { u8 assoc_cpus = nbsh & nb_err_cpumask; @@ -646,7 +646,7 @@ void amd_decode_nb_mce(int node_id, struct mce *m, u32 nbcfg) if (!fam_ops->nb_mce(ec, xec)) goto wrong_nb_mce; - if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10) + if (c->x86 == 0xf || c->x86 == 0x10 || c->x86 == 0x15) if ((xec == 0x8 || xec == 0x0) && nb_bus_decoder) nb_bus_decoder(node_id, m, nbcfg); diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h index 45dda47173f2..795a3206acf5 100644 --- a/drivers/edac/mce_amd.h +++ b/drivers/edac/mce_amd.h @@ -31,19 +31,10 @@ #define R4(x) (((x) >> 4) & 0xf) #define R4_MSG(x) ((R4(x) < 9) ? 
rrrr_msgs[R4(x)] : "Wrong R4!") -#define K8_NBSH 0x4C - -#define K8_NBSH_VALID_BIT BIT(31) -#define K8_NBSH_OVERFLOW BIT(30) -#define K8_NBSH_UC_ERR BIT(29) -#define K8_NBSH_ERR_EN BIT(28) -#define K8_NBSH_MISCV BIT(27) -#define K8_NBSH_VALID_ERROR_ADDR BIT(26) -#define K8_NBSH_PCC BIT(25) -#define K8_NBSH_ERR_CPU_VAL BIT(24) -#define K8_NBSH_CECC BIT(14) -#define K8_NBSH_UECC BIT(13) -#define K8_NBSH_ERR_SCRUBER BIT(8) +/* + * F3x4C bits (MCi_STATUS' high half) + */ +#define NBSH_ERR_CPU_VAL BIT(24) enum tt_ids { TT_INSTR = 0, @@ -86,17 +77,6 @@ extern const char *to_msgs[]; extern const char *ii_msgs[]; /* - * relevant NB regs - */ -struct err_regs { - u32 nbcfg; - u32 nbsh; - u32 nbsl; - u32 nbeah; - u32 nbeal; -}; - -/* * per-family decoder ops */ struct amd_decoder_ops { diff --git a/drivers/edac/tile_edac.c b/drivers/edac/tile_edac.c new file mode 100644 index 000000000000..1d5cf06f6c6b --- /dev/null +++ b/drivers/edac/tile_edac.c @@ -0,0 +1,254 @@ +/* + * Copyright 2011 Tilera Corporation. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation, version 2. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for + * more details. + * Tilera-specific EDAC driver. + * + * This source code is derived from the following driver: + * + * Cell MIC driver for ECC counting + * + * Copyright 2007 Benjamin Herrenschmidt, IBM Corp. + * <benh@kernel.crashing.org> + * + */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/platform_device.h> +#include <linux/io.h> +#include <linux/uaccess.h> +#include <linux/edac.h> +#include <hv/hypervisor.h> +#include <hv/drv_mshim_intf.h> + +#include "edac_core.h" + +#define DRV_NAME "tile-edac" + +/* Number of cs_rows needed per memory controller on TILEPro. */ +#define TILE_EDAC_NR_CSROWS 1 + +/* Number of channels per memory controller on TILEPro. */ +#define TILE_EDAC_NR_CHANS 1 + +/* Granularity of reported error in bytes on TILEPro. */ +#define TILE_EDAC_ERROR_GRAIN 8 + +/* TILE processor has multiple independent memory controllers. */ +struct platform_device *mshim_pdev[TILE_MAX_MSHIMS]; + +struct tile_edac_priv { + int hv_devhdl; /* Hypervisor device handle. */ + int node; /* Memory controller instance #. */ + unsigned int ce_count; /* + * Correctable-error counter + * kept by the driver. + */ +}; + +static void tile_edac_check(struct mem_ctl_info *mci) +{ + struct tile_edac_priv *priv = mci->pvt_info; + struct mshim_mem_error mem_error; + + if (hv_dev_pread(priv->hv_devhdl, 0, (HV_VirtAddr)&mem_error, + sizeof(struct mshim_mem_error), MSHIM_MEM_ERROR_OFF) != + sizeof(struct mshim_mem_error)) { + pr_err(DRV_NAME ": MSHIM_MEM_ERROR_OFF pread failure.\n"); + return; + } + + /* Check if the current error count is different from the saved one. */ + if (mem_error.sbe_count != priv->ce_count) { + dev_dbg(mci->dev, "ECC CE err on node %d\n", priv->node); + priv->ce_count = mem_error.sbe_count; + edac_mc_handle_ce(mci, 0, 0, 0, 0, 0, mci->ctl_name); + } +} + +/* + * Initialize the 'csrows' table within the mci control structure with the + * addressing of memory. 
+ */ +static int __devinit tile_edac_init_csrows(struct mem_ctl_info *mci) +{ + struct csrow_info *csrow = &mci->csrows[0]; + struct tile_edac_priv *priv = mci->pvt_info; + struct mshim_mem_info mem_info; + + if (hv_dev_pread(priv->hv_devhdl, 0, (HV_VirtAddr)&mem_info, + sizeof(struct mshim_mem_info), MSHIM_MEM_INFO_OFF) != + sizeof(struct mshim_mem_info)) { + pr_err(DRV_NAME ": MSHIM_MEM_INFO_OFF pread failure.\n"); + return -1; + } + + if (mem_info.mem_ecc) + csrow->edac_mode = EDAC_SECDED; + else + csrow->edac_mode = EDAC_NONE; + switch (mem_info.mem_type) { + case DDR2: + csrow->mtype = MEM_DDR2; + break; + + case DDR3: + csrow->mtype = MEM_DDR3; + break; + + default: + return -1; + } + + csrow->first_page = 0; + csrow->nr_pages = mem_info.mem_size >> PAGE_SHIFT; + csrow->last_page = csrow->first_page + csrow->nr_pages - 1; + csrow->grain = TILE_EDAC_ERROR_GRAIN; + csrow->dtype = DEV_UNKNOWN; + + return 0; +} + +static int __devinit tile_edac_mc_probe(struct platform_device *pdev) +{ + char hv_file[32]; + int hv_devhdl; + struct mem_ctl_info *mci; + struct tile_edac_priv *priv; + int rc; + + sprintf(hv_file, "mshim/%d", pdev->id); + hv_devhdl = hv_dev_open((HV_VirtAddr)hv_file, 0); + if (hv_devhdl < 0) + return -EINVAL; + + /* A TILE MC has a single channel and one chip-select row. */ + mci = edac_mc_alloc(sizeof(struct tile_edac_priv), + TILE_EDAC_NR_CSROWS, TILE_EDAC_NR_CHANS, pdev->id); + if (mci == NULL) + return -ENOMEM; + priv = mci->pvt_info; + priv->node = pdev->id; + priv->hv_devhdl = hv_devhdl; + + mci->dev = &pdev->dev; + mci->mtype_cap = MEM_FLAG_DDR2; + mci->edac_ctl_cap = EDAC_FLAG_SECDED; + + mci->mod_name = DRV_NAME; + mci->ctl_name = "TILEPro_Memory_Controller"; + mci->dev_name = dev_name(&pdev->dev); + mci->edac_check = tile_edac_check; + + /* + * Initialize the MC control structure 'csrows' table + * with the mapping and control information. + */ + if (tile_edac_init_csrows(mci)) { + /* No csrows found. */ + mci->edac_cap = EDAC_FLAG_NONE; + } else { + mci->edac_cap = EDAC_FLAG_SECDED; + } + + platform_set_drvdata(pdev, mci); + + /* Register with EDAC core */ + rc = edac_mc_add_mc(mci); + if (rc) { + dev_err(&pdev->dev, "failed to register with EDAC core\n"); + edac_mc_free(mci); + return rc; + } + + return 0; +} + +static int __devexit tile_edac_mc_remove(struct platform_device *pdev) +{ + struct mem_ctl_info *mci = platform_get_drvdata(pdev); + + edac_mc_del_mc(&pdev->dev); + if (mci) + edac_mc_free(mci); + return 0; +} + +static struct platform_driver tile_edac_mc_driver = { + .driver = { + .name = DRV_NAME, + .owner = THIS_MODULE, + }, + .probe = tile_edac_mc_probe, + .remove = __devexit_p(tile_edac_mc_remove), +}; + +/* + * Driver init routine. + */ +static int __init tile_edac_init(void) +{ + char hv_file[32]; + struct platform_device *pdev; + int i, err, num = 0; + + /* Only support POLL mode. */ + edac_op_state = EDAC_OPSTATE_POLL; + + err = platform_driver_register(&tile_edac_mc_driver); + if (err) + return err; + + for (i = 0; i < TILE_MAX_MSHIMS; i++) { + /* + * Not all memory controllers are configured such as in the + * case of a simulator. So we register only those mshims + * that are configured by the hypervisor. 
+ */ + sprintf(hv_file, "mshim/%d", i); + if (hv_dev_open((HV_VirtAddr)hv_file, 0) < 0) + continue; + + pdev = platform_device_register_simple(DRV_NAME, i, NULL, 0); + if (IS_ERR(pdev)) + continue; + mshim_pdev[i] = pdev; + num++; + } + + if (num == 0) { + platform_driver_unregister(&tile_edac_mc_driver); + return -ENODEV; + } + return 0; +} + +/* + * Driver cleanup routine. + */ +static void __exit tile_edac_exit(void) +{ + int i; + + for (i = 0; i < TILE_MAX_MSHIMS; i++) { + struct platform_device *pdev = mshim_pdev[i]; + if (!pdev) + continue; + + platform_set_drvdata(pdev, NULL); + platform_device_unregister(pdev); + } + platform_driver_unregister(&tile_edac_mc_driver); +} + +module_init(tile_edac_init); +module_exit(tile_edac_exit); diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c index c003fa4e2db1..c8658888e67b 100644 --- a/drivers/firewire/core-iso.c +++ b/drivers/firewire/core-iso.c @@ -362,3 +362,4 @@ void fw_iso_resource_manage(struct fw_card *card, int generation, *channel = ret; } } +EXPORT_SYMBOL(fw_iso_resource_manage); diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h index f8dfcf1c6cbe..25e729cde2f7 100644 --- a/drivers/firewire/core.h +++ b/drivers/firewire/core.h @@ -147,9 +147,6 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event); /* -iso */ int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma); -void fw_iso_resource_manage(struct fw_card *card, int generation, - u64 channels_mask, int *channel, int *bandwidth, - bool allocate, __be32 buffer[2]); /* -topology */ diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c index 69ad529d92fb..ea5ac2dc1233 100644 --- a/drivers/firmware/dcdbas.c +++ b/drivers/firmware/dcdbas.c @@ -268,8 +268,10 @@ int dcdbas_smi_request(struct smi_cmd *smi_cmd) } /* generate SMI */ + /* inb to force posted write through and make SMI happen now */ asm volatile ( - "outb %b0,%w1" + "outb %b0,%w1\n" + "inb %w1" : /* no output args */ : "a" (smi_cmd->command_code), "d" (smi_cmd->command_address), diff --git a/drivers/gpio/pl061.c b/drivers/gpio/pl061.c index 2975d22daffe..838ddbdf90cc 100644 --- a/drivers/gpio/pl061.c +++ b/drivers/gpio/pl061.c @@ -232,7 +232,7 @@ static void pl061_irq_handler(unsigned irq, struct irq_desc *desc) desc->irq_data.chip->irq_unmask(&desc->irq_data); } -static int pl061_probe(struct amba_device *dev, struct amba_id *id) +static int pl061_probe(struct amba_device *dev, const struct amba_id *id) { struct pl061_platform_data *pdata; struct pl061_gpio *chip; diff --git a/drivers/gpu/drm/drm_sman.c b/drivers/gpu/drm/drm_sman.c index 463aed9403db..34664587a74e 100644 --- a/drivers/gpu/drm/drm_sman.c +++ b/drivers/gpu/drm/drm_sman.c @@ -59,9 +59,7 @@ drm_sman_init(struct drm_sman * sman, unsigned int num_managers, { int ret = 0; - sman->mm = (struct drm_sman_mm *) kcalloc(num_managers, - sizeof(*sman->mm), - GFP_KERNEL); + sman->mm = kcalloc(num_managers, sizeof(*sman->mm), GFP_KERNEL); if (!sman->mm) { ret = -ENOMEM; goto out; diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index b9427e689cf3..941080a77940 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -2987,7 +2987,7 @@ int evergreen_resume(struct radeon_device *rdev) r = r600_ib_test(rdev); if (r) { - DRM_ERROR("radeon: failled testing IB (%d).\n", r); + DRM_ERROR("radeon: failed testing IB (%d).\n", r); return r; } diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 
fcc23e4e0b3c..f2204cb1ccdf 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -3617,7 +3617,7 @@ int r100_ib_test(struct radeon_device *rdev) if (i < rdev->usec_timeout) { DRM_INFO("ib test succeeded in %u usecs\n", i); } else { - DRM_ERROR("radeon: ib test failed (sracth(0x%04X)=0x%08X)\n", + DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n", scratch, tmp); r = -EINVAL; } @@ -3637,13 +3637,13 @@ int r100_ib_init(struct radeon_device *rdev) r = radeon_ib_pool_init(rdev); if (r) { - dev_err(rdev->dev, "failled initializing IB pool (%d).\n", r); + dev_err(rdev->dev, "failed initializing IB pool (%d).\n", r); r100_ib_fini(rdev); return r; } r = r100_ib_test(rdev); if (r) { - dev_err(rdev->dev, "failled testing IB (%d).\n", r); + dev_err(rdev->dev, "failed testing IB (%d).\n", r); r100_ib_fini(rdev); return r; } @@ -3799,12 +3799,12 @@ static int r100_startup(struct radeon_device *rdev) /* 1M ring buffer */ r = r100_cp_init(rdev, 1024 * 1024); if (r) { - dev_err(rdev->dev, "failled initializing CP (%d).\n", r); + dev_err(rdev->dev, "failed initializing CP (%d).\n", r); return r; } r = r100_ib_init(rdev); if (r) { - dev_err(rdev->dev, "failled initializing IB (%d).\n", r); + dev_err(rdev->dev, "failed initializing IB (%d).\n", r); return r; } return 0; diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index 069efa8c8ecf..8713731fa014 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c @@ -1401,12 +1401,12 @@ static int r300_startup(struct radeon_device *rdev) /* 1M ring buffer */ r = r100_cp_init(rdev, 1024 * 1024); if (r) { - dev_err(rdev->dev, "failled initializing CP (%d).\n", r); + dev_err(rdev->dev, "failed initializing CP (%d).\n", r); return r; } r = r100_ib_init(rdev); if (r) { - dev_err(rdev->dev, "failled initializing IB (%d).\n", r); + dev_err(rdev->dev, "failed initializing IB (%d).\n", r); return r; } return 0; diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c index 0b59ed7c7d2c..417fab81812f 100644 --- a/drivers/gpu/drm/radeon/r420.c +++ b/drivers/gpu/drm/radeon/r420.c @@ -260,13 +260,13 @@ static int r420_startup(struct radeon_device *rdev) /* 1M ring buffer */ r = r100_cp_init(rdev, 1024 * 1024); if (r) { - dev_err(rdev->dev, "failled initializing CP (%d).\n", r); + dev_err(rdev->dev, "failed initializing CP (%d).\n", r); return r; } r420_cp_errata_init(rdev); r = r100_ib_init(rdev); if (r) { - dev_err(rdev->dev, "failled initializing IB (%d).\n", r); + dev_err(rdev->dev, "failed initializing IB (%d).\n", r); return r; } return 0; diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c index 2ce80d976568..3081d07f8de5 100644 --- a/drivers/gpu/drm/radeon/r520.c +++ b/drivers/gpu/drm/radeon/r520.c @@ -193,12 +193,12 @@ static int r520_startup(struct radeon_device *rdev) /* 1M ring buffer */ r = r100_cp_init(rdev, 1024 * 1024); if (r) { - dev_err(rdev->dev, "failled initializing CP (%d).\n", r); + dev_err(rdev->dev, "failed initializing CP (%d).\n", r); return r; } r = r100_ib_init(rdev); if (r) { - dev_err(rdev->dev, "failled initializing IB (%d).\n", r); + dev_err(rdev->dev, "failed initializing IB (%d).\n", r); return r; } return 0; diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 12fdebf9aed8..be271c42de4d 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -2464,7 +2464,7 @@ int r600_resume(struct radeon_device *rdev) r = r600_ib_test(rdev); if (r) { - DRM_ERROR("radeon: failled 
testing IB (%d).\n", r); + DRM_ERROR("radeon: failed testing IB (%d).\n", r); return r; } diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index 992d99d13fc5..bbc9cd823334 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c @@ -151,7 +151,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib) /* 64 dwords should be enough for fence too */ r = radeon_ring_lock(rdev, 64); if (r) { - DRM_ERROR("radeon: scheduling IB failled (%d).\n", r); + DRM_ERROR("radeon: scheduling IB failed (%d).\n", r); return r; } radeon_ring_ib_execute(rdev, ib); diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index c76283d9eb3d..aa6a66eeb4ec 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ b/drivers/gpu/drm/radeon/rs400.c @@ -412,12 +412,12 @@ static int rs400_startup(struct radeon_device *rdev) /* 1M ring buffer */ r = r100_cp_init(rdev, 1024 * 1024); if (r) { - dev_err(rdev->dev, "failled initializing CP (%d).\n", r); + dev_err(rdev->dev, "failed initializing CP (%d).\n", r); return r; } r = r100_ib_init(rdev); if (r) { - dev_err(rdev->dev, "failled initializing IB (%d).\n", r); + dev_err(rdev->dev, "failed initializing IB (%d).\n", r); return r; } return 0; diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index 8af4679db23e..19763f5df5e1 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c @@ -865,12 +865,12 @@ static int rs600_startup(struct radeon_device *rdev) /* 1M ring buffer */ r = r100_cp_init(rdev, 1024 * 1024); if (r) { - dev_err(rdev->dev, "failled initializing CP (%d).\n", r); + dev_err(rdev->dev, "failed initializing CP (%d).\n", r); return r; } r = r100_ib_init(rdev); if (r) { - dev_err(rdev->dev, "failled initializing IB (%d).\n", r); + dev_err(rdev->dev, "failed initializing IB (%d).\n", r); return r; } diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index 66c949b7c18c..a9049ed1a519 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c @@ -627,12 +627,12 @@ static int rs690_startup(struct radeon_device *rdev) /* 1M ring buffer */ r = r100_cp_init(rdev, 1024 * 1024); if (r) { - dev_err(rdev->dev, "failled initializing CP (%d).\n", r); + dev_err(rdev->dev, "failed initializing CP (%d).\n", r); return r; } r = r100_ib_init(rdev); if (r) { - dev_err(rdev->dev, "failled initializing IB (%d).\n", r); + dev_err(rdev->dev, "failed initializing IB (%d).\n", r); return r; } diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index 64b57af93714..6613ee9ecca3 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c @@ -398,12 +398,12 @@ static int rv515_startup(struct radeon_device *rdev) /* 1M ring buffer */ r = r100_cp_init(rdev, 1024 * 1024); if (r) { - dev_err(rdev->dev, "failled initializing CP (%d).\n", r); + dev_err(rdev->dev, "failed initializing CP (%d).\n", r); return r; } r = r100_ib_init(rdev); if (r) { - dev_err(rdev->dev, "failled initializing IB (%d).\n", r); + dev_err(rdev->dev, "failed initializing IB (%d).\n", r); return r; } return 0; diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 4cc7b717fedd..b974ac7df8df 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -1209,7 +1209,7 @@ int rv770_resume(struct radeon_device *rdev) r = r600_ib_test(rdev); if (r) { - DRM_ERROR("radeon: failled testing IB (%d).\n", r); + DRM_ERROR("radeon: failed 
testing IB (%d).\n", r); return r; } diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig index 2560f01c1a63..b7ec4057841d 100644 --- a/drivers/hid/Kconfig +++ b/drivers/hid/Kconfig @@ -68,9 +68,15 @@ config HID_A4TECH ---help--- Support for A4 tech X5 and WOP-35 / Trust 450L mice. -config HID_ACRUX_FF - tristate "ACRUX force feedback" +config HID_ACRUX + tristate "ACRUX game controller support" depends on USB_HID + ---help--- + Say Y here if you want to enable support for ACRUX game controllers. + +config HID_ACRUX_FF + tristate "ACRUX force feedback support" + depends on HID_ACRUX select INPUT_FF_MEMLESS ---help--- Say Y here if you want to enable force feedback support for ACRUX @@ -140,7 +146,12 @@ config HID_DRAGONRISE tristate "DragonRise Inc. game controller" depends on USB_HID ---help--- - Say Y here if you have DragonRise Inc.game controllers. + Say Y here if you have DragonRise Inc. game controllers. + These might be branded as: + - Tesun USB-703 + - Media-tech MT1504 "Rogue" + - DVTech JS19 "Gear" + - Defender Game Master config DRAGONRISE_FF bool "DragonRise Inc. force feedback" @@ -160,13 +171,6 @@ config HID_EMS_FF Currently the following devices are known to be supported: - Trio Linker Plus II -config HID_EGALAX - tristate "eGalax multi-touch panel" - depends on USB_HID - ---help--- - Support for the eGalax dual-touch panels, including the - Joojoo and Wetab tablets. - config HID_ELECOM tristate "ELECOM BM084 bluetooth mouse" depends on BT_HIDP @@ -180,6 +184,14 @@ config HID_EZKEY ---help--- Support for Ezkey BTC 8193 keyboard. +config HID_KEYTOUCH + tristate "Keytouch HID devices" + depends on USB_HID + ---help--- + Support for Keytouch HID devices not fully compliant with + the specification. Currently supported: + - Keytouch IEC 60945 + config HID_KYE tristate "Kye/Genius Ergo Mouse" if EXPERT depends on USB_HID @@ -218,6 +230,12 @@ config HID_KENSINGTON ---help--- Support for Kensington Slimblade Trackball. +config HID_LCPOWER + tristate "LC-Power" + depends on USB_HID + ---help--- + Support for LC-Power RC1000MCE RF remote control. + config HID_LOGITECH tristate "Logitech devices" if EXPERT depends on USB_HID @@ -304,8 +322,11 @@ config HID_MULTITOUCH Say Y here if you have one of the following devices: - Cypress TrueTouch panels - Hanvon dual touch panels + - IrTouch Infrared USB panels - Pixcir dual touch panels - 'Sensing Win7-TwoFinger' panel by GeneralTouch + - eGalax dual-touch panels, including the + Joojoo and Wetab tablets If unsure, say N. @@ -319,10 +340,10 @@ config HID_NTRIG Support for N-Trig touch screen. config HID_ORTEK - tristate "Ortek WKB-2000 wireless keyboard and mouse trackpad" + tristate "Ortek PKB-1700/WKB-2000 wireless keyboard and mouse trackpad" depends on USB_HID ---help--- - Support for Ortek WKB-2000 wireless keyboard + mouse trackpad. + Support for Ortek PKB-1700/WKB-2000 wireless keyboard + mouse trackpad. config HID_PANTHERLORD tristate "Pantherlord/GreenAsia game controller" @@ -417,10 +438,22 @@ config HID_ROCCAT Say Y here if you have a Roccat mouse or keyboard and want OSD or macro execution support. +config HID_ROCCAT_COMMON + tristate + +config HID_ROCCAT_ARVO + tristate "Roccat Arvo keyboard support" + depends on USB_HID + select HID_ROCCAT + select HID_ROCCAT_COMMON + ---help--- + Support for Roccat Arvo keyboard. + config HID_ROCCAT_KONE tristate "Roccat Kone Mouse support" depends on USB_HID select HID_ROCCAT + select HID_ROCCAT_COMMON ---help--- Support for Roccat Kone mouse.
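The Kconfig hunks above add HID_ROCCAT_COMMON, a hidden tristate that every Roccat device driver now selects, so the USB transfer code they share is built exactly once and only when at least one of those drivers is enabled. The body of hid-roccat-common.c is not part of this excerpt; the following is only a minimal sketch of the kind of GET_REPORT helper such a module would centralize, assuming a usb_control_msg()-based transfer and the hypothetical name roccat_common_receive():

/*
 * Illustrative sketch, not taken from this diff: the function name and
 * the exact control-transfer encoding are assumptions.
 */
#include <linux/hid.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/usb.h>

static int roccat_common_receive(struct usb_device *usb_dev,
				 unsigned int report_id,
				 void *data, unsigned int size)
{
	char *buf;
	int len;

	/* bounce buffer: usb_control_msg() needs kmalloc'ed memory */
	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	/*
	 * report_id goes into wValue; a full implementation would also
	 * encode the report type in the high byte.
	 */
	len = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
			HID_REQ_GET_REPORT,
			USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
			report_id, 0, buf, size, USB_CTRL_SET_TIMEOUT);

	memcpy(data, buf, size);
	kfree(buf);

	/* treat short transfers as I/O errors */
	return (len < 0) ? len : (len != size) ? -EIO : 0;
}

Because the symbol has no prompt string, Kconfig never offers it to the user directly; the select statements in the per-device entries that follow pull it in automatically.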
@@ -428,13 +461,23 @@ config HID_ROCCAT_KONEPLUS tristate "Roccat Kone[+] mouse support" depends on USB_HID select HID_ROCCAT + select HID_ROCCAT_COMMON ---help--- Support for Roccat Kone[+] mouse. +config HID_ROCCAT_KOVAPLUS + tristate "Roccat Kova[+] mouse support" + depends on USB_HID + select HID_ROCCAT + select HID_ROCCAT_COMMON + ---help--- + Support for Roccat Kova[+] mouse. + config HID_ROCCAT_PYRA tristate "Roccat Pyra mouse support" depends on USB_HID select HID_ROCCAT + select HID_ROCCAT_COMMON ---help--- Support for Roccat Pyra mouse. diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile index 6efc2a0370ad..06c68ae3abee 100644 --- a/drivers/hid/Makefile +++ b/drivers/hid/Makefile @@ -27,21 +27,22 @@ endif obj-$(CONFIG_HID_3M_PCT) += hid-3m-pct.o obj-$(CONFIG_HID_A4TECH) += hid-a4tech.o -obj-$(CONFIG_HID_ACRUX_FF) += hid-axff.o +obj-$(CONFIG_HID_ACRUX) += hid-axff.o obj-$(CONFIG_HID_APPLE) += hid-apple.o obj-$(CONFIG_HID_BELKIN) += hid-belkin.o obj-$(CONFIG_HID_CANDO) += hid-cando.o obj-$(CONFIG_HID_CHERRY) += hid-cherry.o obj-$(CONFIG_HID_CHICONY) += hid-chicony.o obj-$(CONFIG_HID_CYPRESS) += hid-cypress.o -obj-$(CONFIG_HID_DRAGONRISE) += hid-drff.o +obj-$(CONFIG_HID_DRAGONRISE) += hid-dr.o obj-$(CONFIG_HID_EMS_FF) += hid-emsff.o -obj-$(CONFIG_HID_EGALAX) += hid-egalax.o obj-$(CONFIG_HID_ELECOM) += hid-elecom.o obj-$(CONFIG_HID_EZKEY) += hid-ezkey.o obj-$(CONFIG_HID_GYRATION) += hid-gyration.o obj-$(CONFIG_HID_KENSINGTON) += hid-kensington.o +obj-$(CONFIG_HID_KEYTOUCH) += hid-keytouch.o obj-$(CONFIG_HID_KYE) += hid-kye.o +obj-$(CONFIG_HID_LCPOWER) += hid-lcpower.o obj-$(CONFIG_HID_LOGITECH) += hid-logitech.o obj-$(CONFIG_HID_MAGICMOUSE) += hid-magicmouse.o obj-$(CONFIG_HID_MICROSOFT) += hid-microsoft.o @@ -56,8 +57,11 @@ obj-$(CONFIG_HID_PANTHERLORD) += hid-pl.o obj-$(CONFIG_HID_PETALYNX) += hid-petalynx.o obj-$(CONFIG_HID_PICOLCD) += hid-picolcd.o obj-$(CONFIG_HID_ROCCAT) += hid-roccat.o +obj-$(CONFIG_HID_ROCCAT_COMMON) += hid-roccat-common.o +obj-$(CONFIG_HID_ROCCAT_ARVO) += hid-roccat-arvo.o obj-$(CONFIG_HID_ROCCAT_KONE) += hid-roccat-kone.o obj-$(CONFIG_HID_ROCCAT_KONEPLUS) += hid-roccat-koneplus.o +obj-$(CONFIG_HID_ROCCAT_KOVAPLUS) += hid-roccat-kovaplus.o obj-$(CONFIG_HID_ROCCAT_PYRA) += hid-roccat-pyra.o obj-$(CONFIG_HID_SAMSUNG) += hid-samsung.o obj-$(CONFIG_HID_SMARTJOYPLUS) += hid-sjoy.o diff --git a/drivers/hid/hid-axff.c b/drivers/hid/hid-axff.c index e5b961d6ff22..b4554288de00 100644 --- a/drivers/hid/hid-axff.c +++ b/drivers/hid/hid-axff.c @@ -33,6 +33,8 @@ #include <linux/hid.h> #include "hid-ids.h" + +#ifdef CONFIG_HID_ACRUX_FF #include "usbhid/usbhid.h" struct axff_device { @@ -109,6 +111,12 @@ err_free_mem: kfree(axff); return error; } +#else +static inline int axff_init(struct hid_device *hid) +{ + return 0; +} +#endif static int ax_probe(struct hid_device *hdev, const struct hid_device_id *id) { @@ -139,9 +147,25 @@ static int ax_probe(struct hid_device *hdev, const struct hid_device_id *id) error); } + /* + * We need to start polling the device right away, otherwise + * it will go into a coma.
+ */ + error = hid_hw_open(hdev); + if (error) { + dev_err(&hdev->dev, "hw open failed\n"); + return error; + } + return 0; } +static void ax_remove(struct hid_device *hdev) +{ + hid_hw_close(hdev); + hid_hw_stop(hdev); +} + static const struct hid_device_id ax_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0x0802), }, { } @@ -149,9 +173,10 @@ static const struct hid_device_id ax_devices[] = { MODULE_DEVICE_TABLE(hid, ax_devices); static struct hid_driver ax_driver = { - .name = "acrux", - .id_table = ax_devices, - .probe = ax_probe, + .name = "acrux", + .id_table = ax_devices, + .probe = ax_probe, + .remove = ax_remove, }; static int __init ax_init(void) diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index d678cf3d33d5..c3d66269ed7d 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1159,6 +1159,32 @@ static bool hid_hiddev(struct hid_device *hdev) return !!hid_match_id(hdev, hid_hiddev_list); } + +static ssize_t +read_report_descriptor(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, size_t count) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct hid_device *hdev = container_of(dev, struct hid_device, dev); + + if (off >= hdev->rsize) + return 0; + + if (off + count > hdev->rsize) + count = hdev->rsize - off; + + memcpy(buf, hdev->rdesc + off, count); + + return count; +} + +static struct bin_attribute dev_bin_attr_report_desc = { + .attr = { .name = "report_descriptor", .mode = 0444 }, + .read = read_report_descriptor, + .size = HID_MAX_DESCRIPTOR_SIZE, +}; + int hid_connect(struct hid_device *hdev, unsigned int connect_mask) { static const char *types[] = { "Device", "Pointer", "Mouse", "Device", @@ -1169,6 +1195,7 @@ int hid_connect(struct hid_device *hdev, unsigned int connect_mask) char buf[64]; unsigned int i; int len; + int ret; if (hdev->quirks & HID_QUIRK_HIDDEV_FORCE) connect_mask |= (HID_CONNECT_HIDDEV_FORCE | HID_CONNECT_HIDDEV); @@ -1230,6 +1257,11 @@ int hid_connect(struct hid_device *hdev, unsigned int connect_mask) bus = "<UNKNOWN>"; } + ret = device_create_bin_file(&hdev->dev, &dev_bin_attr_report_desc); + if (ret) + hid_warn(hdev, + "can't create sysfs report descriptor attribute err: %d\n", ret); + hid_info(hdev, "%s: %s HID v%x.%02x %s [%s] on %s\n", buf, bus, hdev->version >> 8, hdev->version & 0xff, type, hdev->name, hdev->phys); @@ -1240,6 +1272,7 @@ EXPORT_SYMBOL_GPL(hid_connect); void hid_disconnect(struct hid_device *hdev) { + device_remove_bin_file(&hdev->dev, &dev_bin_attr_report_desc); if (hdev->claimed & HID_CLAIMED_INPUT) hidinput_disconnect(hdev); if (hdev->claimed & HID_CLAIMED_HIDDEV) @@ -1256,9 +1289,7 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU) }, { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D) }, { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_RP_649) }, -#if defined(CONFIG_HID_ACRUX_FF) || defined(CONFIG_HID_ACRUX_FF_MODULE) { HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0x0802) }, -#endif { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ATV_IRCONTROL) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MIGHTYMOUSE) }, @@ -1328,6 +1359,7 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE) }, { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, 
USB_DEVICE_ID_CYPRESS_TRUETOUCH) }, { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) }, + { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0011) }, { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) }, { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1) }, { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH2) }, @@ -1345,9 +1377,12 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) }, { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) }, { HID_USB_DEVICE(USB_VENDOR_ID_HANVON, USB_DEVICE_ID_HANVON_MULTITOUCH) }, + { HID_USB_DEVICE(USB_VENDOR_ID_IRTOUCHSYSTEMS, USB_DEVICE_ID_IRTOUCH_INFRARED_USB) }, { HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) }, + { HID_USB_DEVICE(USB_VENDOR_ID_KEYTOUCH, USB_DEVICE_ID_KEYTOUCH_IEC) }, { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) }, { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) }, + { HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000 ) }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER) }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER) }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2) }, @@ -1368,6 +1403,7 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FLIGHT_SYSTEM_G940) }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL) }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2) }, + { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFP_WHEEL) }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G25_WHEEL) }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WII_WHEEL) }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2) }, @@ -1400,12 +1436,15 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_16) }, { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_17) }, { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_18) }, + { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_PKB1700) }, { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) }, { HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) }, { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH) }, { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN) }, { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONE) }, + { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ARVO) }, { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPLUS) }, + { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KOVAPLUS) }, { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_PYRA_WIRED) }, { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) }, { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE) }, diff --git a/drivers/hid/hid-drff.c b/drivers/hid/hid-dr.c index afcf3d67eb02..61eece47204d 100644 --- a/drivers/hid/hid-drff.c +++ b/drivers/hid/hid-dr.c @@ -145,6 +145,110 @@ static inline int drff_init(struct hid_device *hid) } #endif +/* + * The original descriptor of 
joystick with PID 0x0011, represented by DVTech PC + * JS19. It seems both copied from another device and a result of confusion + * either about the specification or about the program used to create the + * descriptor. In any case, it's a wonder it works on Windows. + * + * Usage Page (Desktop), ; Generic desktop controls (01h) + * Usage (Joystik), ; Joystik (04h, application collection) + * Collection (Application), + * Collection (Logical), + * Report Size (8), + * Report Count (5), + * Logical Minimum (0), + * Logical Maximum (255), + * Physical Minimum (0), + * Physical Maximum (255), + * Usage (X), ; X (30h, dynamic value) + * Usage (X), ; X (30h, dynamic value) + * Usage (X), ; X (30h, dynamic value) + * Usage (X), ; X (30h, dynamic value) + * Usage (Y), ; Y (31h, dynamic value) + * Input (Variable), + * Report Size (4), + * Report Count (1), + * Logical Maximum (7), + * Physical Maximum (315), + * Unit (Degrees), + * Usage (00h), + * Input (Variable, Null State), + * Unit, + * Report Size (1), + * Report Count (10), + * Logical Maximum (1), + * Physical Maximum (1), + * Usage Page (Button), ; Button (09h) + * Usage Minimum (01h), + * Usage Maximum (0Ah), + * Input (Variable), + * Usage Page (FF00h), ; FF00h, vendor-defined + * Report Size (1), + * Report Count (10), + * Logical Maximum (1), + * Physical Maximum (1), + * Usage (01h), + * Input (Variable), + * End Collection, + * Collection (Logical), + * Report Size (8), + * Report Count (4), + * Physical Maximum (255), + * Logical Maximum (255), + * Usage (02h), + * Output (Variable), + * End Collection, + * End Collection + */ + +/* Size of the original descriptor of the PID 0x0011 joystick */ +#define PID0011_RDESC_ORIG_SIZE 101 + +/* Fixed report descriptor for PID 0x011 joystick */ +static __u8 pid0011_rdesc_fixed[] = { + 0x05, 0x01, /* Usage Page (Desktop), */ + 0x09, 0x04, /* Usage (Joystik), */ + 0xA1, 0x01, /* Collection (Application), */ + 0xA1, 0x02, /* Collection (Logical), */ + 0x14, /* Logical Minimum (0), */ + 0x75, 0x08, /* Report Size (8), */ + 0x95, 0x03, /* Report Count (3), */ + 0x81, 0x01, /* Input (Constant), */ + 0x26, 0xFF, 0x00, /* Logical Maximum (255), */ + 0x95, 0x02, /* Report Count (2), */ + 0x09, 0x30, /* Usage (X), */ + 0x09, 0x31, /* Usage (Y), */ + 0x81, 0x02, /* Input (Variable), */ + 0x75, 0x01, /* Report Size (1), */ + 0x95, 0x04, /* Report Count (4), */ + 0x81, 0x01, /* Input (Constant), */ + 0x25, 0x01, /* Logical Maximum (1), */ + 0x95, 0x0A, /* Report Count (10), */ + 0x05, 0x09, /* Usage Page (Button), */ + 0x19, 0x01, /* Usage Minimum (01h), */ + 0x29, 0x0A, /* Usage Maximum (0Ah), */ + 0x81, 0x02, /* Input (Variable), */ + 0x95, 0x0A, /* Report Count (10), */ + 0x81, 0x01, /* Input (Constant), */ + 0xC0, /* End Collection, */ + 0xC0 /* End Collection */ +}; + +static __u8 *dr_report_fixup(struct hid_device *hdev, __u8 *rdesc, + unsigned int *rsize) +{ + switch (hdev->product) { + case 0x0011: + if (*rsize == PID0011_RDESC_ORIG_SIZE) { + rdesc = pid0011_rdesc_fixed; + *rsize = sizeof(pid0011_rdesc_fixed); + } + break; + } + return rdesc; +} + static int dr_probe(struct hid_device *hdev, const struct hid_device_id *id) { int ret; @@ -163,7 +267,16 @@ static int dr_probe(struct hid_device *hdev, const struct hid_device_id *id) goto err; } - drff_init(hdev); + switch (hdev->product) { + case 0x0006: + ret = drff_init(hdev); + if (ret) { + dev_err(&hdev->dev, "force feedback init failed\n"); + hid_hw_stop(hdev); + goto err; + } + break; + } return 0; err: @@ -172,6 +285,7 @@ err: static const 
struct hid_device_id dr_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006), }, + { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0011), }, { } }; MODULE_DEVICE_TABLE(hid, dr_devices); @@ -179,6 +293,7 @@ MODULE_DEVICE_TABLE(hid, dr_devices); static struct hid_driver dr_driver = { .name = "dragonrise", .id_table = dr_devices, + .report_fixup = dr_report_fixup, .probe = dr_probe, }; diff --git a/drivers/hid/hid-egalax.c b/drivers/hid/hid-egalax.c deleted file mode 100644 index 03bee1970d70..000000000000 --- a/drivers/hid/hid-egalax.c +++ /dev/null @@ -1,279 +0,0 @@ -/* - * HID driver for eGalax dual-touch panels - * - * Copyright (c) 2010 Stephane Chatty <chatty@enac.fr> - * Copyright (c) 2010 Henrik Rydberg <rydberg@euromail.se> - * Copyright (c) 2010 Canonical, Ltd. - * - */ - -/* - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. - */ - -#include <linux/device.h> -#include <linux/hid.h> -#include <linux/module.h> -#include <linux/usb.h> -#include <linux/input/mt.h> -#include <linux/slab.h> -#include "usbhid/usbhid.h" - -MODULE_AUTHOR("Stephane Chatty <chatty@enac.fr>"); -MODULE_DESCRIPTION("eGalax dual-touch panel"); -MODULE_LICENSE("GPL"); - -#include "hid-ids.h" - -#define MAX_SLOTS 2 - -/* estimated signal-to-noise ratios */ -#define SN_MOVE 4096 -#define SN_PRESSURE 32 - -struct egalax_data { - int valid; - int slot; - int touch; - int x, y, z; -}; - -static void set_abs(struct input_dev *input, unsigned int code, - struct hid_field *field, int snratio) -{ - int fmin = field->logical_minimum; - int fmax = field->logical_maximum; - int fuzz = snratio ? 
(fmax - fmin) / snratio : 0; - input_set_abs_params(input, code, fmin, fmax, fuzz, 0); -} - -static int egalax_input_mapping(struct hid_device *hdev, struct hid_input *hi, - struct hid_field *field, struct hid_usage *usage, - unsigned long **bit, int *max) -{ - struct input_dev *input = hi->input; - - switch (usage->hid & HID_USAGE_PAGE) { - - case HID_UP_GENDESK: - switch (usage->hid) { - case HID_GD_X: - field->logical_maximum = 32760; - hid_map_usage(hi, usage, bit, max, - EV_ABS, ABS_MT_POSITION_X); - set_abs(input, ABS_MT_POSITION_X, field, SN_MOVE); - /* touchscreen emulation */ - set_abs(input, ABS_X, field, SN_MOVE); - return 1; - case HID_GD_Y: - field->logical_maximum = 32760; - hid_map_usage(hi, usage, bit, max, - EV_ABS, ABS_MT_POSITION_Y); - set_abs(input, ABS_MT_POSITION_Y, field, SN_MOVE); - /* touchscreen emulation */ - set_abs(input, ABS_Y, field, SN_MOVE); - return 1; - } - return 0; - - case HID_UP_DIGITIZER: - switch (usage->hid) { - case HID_DG_TIPSWITCH: - /* touchscreen emulation */ - hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_TOUCH); - input_set_capability(input, EV_KEY, BTN_TOUCH); - return 1; - case HID_DG_INRANGE: - case HID_DG_CONFIDENCE: - case HID_DG_CONTACTCOUNT: - case HID_DG_CONTACTMAX: - return -1; - case HID_DG_CONTACTID: - input_mt_init_slots(input, MAX_SLOTS); - return 1; - case HID_DG_TIPPRESSURE: - field->logical_minimum = 0; - hid_map_usage(hi, usage, bit, max, - EV_ABS, ABS_MT_PRESSURE); - set_abs(input, ABS_MT_PRESSURE, field, SN_PRESSURE); - /* touchscreen emulation */ - set_abs(input, ABS_PRESSURE, field, SN_PRESSURE); - return 1; - } - return 0; - } - - /* ignore others (from other reports we won't get anyway) */ - return -1; -} - -static int egalax_input_mapped(struct hid_device *hdev, struct hid_input *hi, - struct hid_field *field, struct hid_usage *usage, - unsigned long **bit, int *max) -{ - /* tell hid-input to skip setup of these event types */ - if (usage->type == EV_KEY || usage->type == EV_ABS) - set_bit(usage->type, hi->input->evbit); - return -1; -} - -/* - * this function is called when a whole finger has been parsed, - * so that it can decide what to send to the input layer. - */ -static void egalax_filter_event(struct egalax_data *td, struct input_dev *input) -{ - input_mt_slot(input, td->slot); - input_mt_report_slot_state(input, MT_TOOL_FINGER, td->touch); - if (td->touch) { - input_event(input, EV_ABS, ABS_MT_POSITION_X, td->x); - input_event(input, EV_ABS, ABS_MT_POSITION_Y, td->y); - input_event(input, EV_ABS, ABS_MT_PRESSURE, td->z); - } - input_mt_report_pointer_emulation(input, true); -} - -static int egalax_event(struct hid_device *hid, struct hid_field *field, - struct hid_usage *usage, __s32 value) -{ - struct egalax_data *td = hid_get_drvdata(hid); - - /* Note, eGalax has two product lines: the first is resistive and - * uses a standard parallel multitouch protocol (product ID == - * 48xx). The second is capacitive and uses an unusual "serial" - * protocol with a different message for each multitouch finger - * (product ID == 72xx). 
- */ - if (hid->claimed & HID_CLAIMED_INPUT) { - struct input_dev *input = field->hidinput->input; - - switch (usage->hid) { - case HID_DG_INRANGE: - td->valid = value; - break; - case HID_DG_CONFIDENCE: - /* avoid interference from generic hidinput handling */ - break; - case HID_DG_TIPSWITCH: - td->touch = value; - break; - case HID_DG_TIPPRESSURE: - td->z = value; - break; - case HID_DG_CONTACTID: - td->slot = clamp_val(value, 0, MAX_SLOTS - 1); - break; - case HID_GD_X: - td->x = value; - break; - case HID_GD_Y: - td->y = value; - /* this is the last field in a finger */ - if (td->valid) - egalax_filter_event(td, input); - break; - case HID_DG_CONTACTCOUNT: - /* touch emulation: this is the last field in a frame */ - break; - - default: - /* fallback to the generic hidinput handling */ - return 0; - } - } - - /* we have handled the hidinput part, now remains hiddev */ - if (hid->claimed & HID_CLAIMED_HIDDEV && hid->hiddev_hid_event) - hid->hiddev_hid_event(hid, field, usage, value); - - return 1; -} - -static int egalax_probe(struct hid_device *hdev, const struct hid_device_id *id) -{ - int ret; - struct egalax_data *td; - struct hid_report *report; - - td = kzalloc(sizeof(struct egalax_data), GFP_KERNEL); - if (!td) { - hid_err(hdev, "cannot allocate eGalax data\n"); - return -ENOMEM; - } - hid_set_drvdata(hdev, td); - - ret = hid_parse(hdev); - if (ret) - goto end; - - ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); - if (ret) - goto end; - - report = hdev->report_enum[HID_FEATURE_REPORT].report_id_hash[5]; - if (report) { - report->field[0]->value[0] = 2; - usbhid_submit_report(hdev, report, USB_DIR_OUT); - } - -end: - if (ret) - kfree(td); - - return ret; -} - -static void egalax_remove(struct hid_device *hdev) -{ - hid_hw_stop(hdev); - kfree(hid_get_drvdata(hdev)); - hid_set_drvdata(hdev, NULL); -} - -static const struct hid_device_id egalax_devices[] = { - { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, - USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) }, - { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, - USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1) }, - { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, - USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH2) }, - { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, - USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH3) }, - { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, - USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH4) }, - { } -}; -MODULE_DEVICE_TABLE(hid, egalax_devices); - -static const struct hid_usage_id egalax_grabbed_usages[] = { - { HID_ANY_ID, HID_ANY_ID, HID_ANY_ID }, - { HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1} -}; - -static struct hid_driver egalax_driver = { - .name = "egalax-touch", - .id_table = egalax_devices, - .probe = egalax_probe, - .remove = egalax_remove, - .input_mapping = egalax_input_mapping, - .input_mapped = egalax_input_mapped, - .usage_table = egalax_grabbed_usages, - .event = egalax_event, -}; - -static int __init egalax_init(void) -{ - return hid_register_driver(&egalax_driver); -} - -static void __exit egalax_exit(void) -{ - hid_unregister_driver(&egalax_driver); -} - -module_init(egalax_init); -module_exit(egalax_exit); - diff --git a/drivers/hid/hid-gyration.c b/drivers/hid/hid-gyration.c index 3975e039c3dd..e88b951cd10d 100644 --- a/drivers/hid/hid-gyration.c +++ b/drivers/hid/hid-gyration.c @@ -43,6 +43,11 @@ static int gyration_input_mapping(struct hid_device *hdev, struct hid_input *hi, case 0x048: gy_map_key_clear(KEY_MEDIA); break; case 0x049: gy_map_key_clear(KEY_CAMERA); break; case 0x04a: gy_map_key_clear(KEY_VIDEO); break; + case 0x05a: gy_map_key_clear(KEY_TEXT); break; + case 0x05b: 
gy_map_key_clear(KEY_RED); break; + case 0x05c: gy_map_key_clear(KEY_GREEN); break; + case 0x05d: gy_map_key_clear(KEY_YELLOW); break; + case 0x05e: gy_map_key_clear(KEY_BLUE); break; default: return 0; diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 92a0d61a7379..d485894ff4db 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -333,6 +333,9 @@ #define USB_VENDOR_ID_IMATION 0x0718 #define USB_DEVICE_ID_DISC_STAKKA 0xd000 +#define USB_VENDOR_ID_IRTOUCHSYSTEMS 0x6615 +#define USB_DEVICE_ID_IRTOUCH_INFRARED_USB 0x0070 + #define USB_VENDOR_ID_JESS 0x0c45 #define USB_DEVICE_ID_JESS_YUREX 0x1010 @@ -345,6 +348,9 @@ #define USB_VENDOR_ID_KWORLD 0x1b80 #define USB_DEVICE_ID_KWORLD_RADIO_FM700 0xd700 +#define USB_VENDOR_ID_KEYTOUCH 0x0926 +#define USB_DEVICE_ID_KEYTOUCH_IEC 0x3333 + #define USB_VENDOR_ID_KYE 0x0458 #define USB_DEVICE_ID_KYE_ERGO_525V 0x0087 #define USB_DEVICE_ID_KYE_GPEN_560 0x5003 @@ -352,6 +358,9 @@ #define USB_VENDOR_ID_LABTEC 0x1020 #define USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD 0x0006 +#define USB_VENDOR_ID_LCPOWER 0x1241 +#define USB_DEVICE_ID_LCPOWER_LC1000 0xf767 + #define USB_VENDOR_ID_LD 0x0f11 #define USB_DEVICE_ID_LD_CASSY 0x1000 #define USB_DEVICE_ID_LD_POCKETCASSY 0x1010 @@ -383,6 +392,7 @@ #define USB_DEVICE_ID_LOGITECH_WHEEL 0xc294 #define USB_DEVICE_ID_LOGITECH_WINGMAN_FFG 0xc293 #define USB_DEVICE_ID_LOGITECH_MOMO_WHEEL 0xc295 +#define USB_DEVICE_ID_LOGITECH_DFP_WHEEL 0xc298 #define USB_DEVICE_ID_LOGITECH_G25_WHEEL 0xc299 #define USB_DEVICE_ID_LOGITECH_WII_WHEEL 0xc29c #define USB_DEVICE_ID_LOGITECH_ELITE_KBD 0xc30a @@ -466,6 +476,7 @@ #define USB_DEVICE_ID_ONTRAK_ADU100 0x0064 #define USB_VENDOR_ID_ORTEK 0x05a4 +#define USB_DEVICE_ID_ORTEK_PKB1700 0x1700 #define USB_DEVICE_ID_ORTEK_WKB2000 0x2000 #define USB_VENDOR_ID_PANJIT 0x134c @@ -496,8 +507,10 @@ #define USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN 0x3001 #define USB_VENDOR_ID_ROCCAT 0x1e7d +#define USB_DEVICE_ID_ROCCAT_ARVO 0x30d4 #define USB_DEVICE_ID_ROCCAT_KONE 0x2ced #define USB_DEVICE_ID_ROCCAT_KONEPLUS 0x2d51 +#define USB_DEVICE_ID_ROCCAT_KOVAPLUS 0x2d50 #define USB_DEVICE_ID_ROCCAT_PYRA_WIRED 0x2c24 #define USB_DEVICE_ID_ROCCAT_PYRA_WIRELESS 0x2cf6 diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index 7f552bfad32c..cd74203c8178 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -290,14 +290,6 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel goto ignore; } - if (field->report_type == HID_FEATURE_REPORT) { - if (device->driver->feature_mapping) { - device->driver->feature_mapping(device, hidinput, field, - usage); - } - goto ignore; - } - if (device->driver->input_mapping) { int ret = device->driver->input_mapping(device, hidinput, field, usage, &bit, &max); @@ -835,6 +827,24 @@ static void hidinput_close(struct input_dev *dev) hid_hw_close(hid); } +static void report_features(struct hid_device *hid) +{ + struct hid_driver *drv = hid->driver; + struct hid_report_enum *rep_enum; + struct hid_report *rep; + int i, j; + + if (!drv->feature_mapping) + return; + + rep_enum = &hid->report_enum[HID_FEATURE_REPORT]; + list_for_each_entry(rep, &rep_enum->report_list, list) + for (i = 0; i < rep->maxfield; i++) + for (j = 0; j < rep->field[i]->maxusage; j++) + drv->feature_mapping(hid, rep->field[i], + rep->field[i]->usage + j); +} + /* * Register the input device; print a message. 
* Configure the input layer interface @@ -863,7 +873,9 @@ int hidinput_connect(struct hid_device *hid, unsigned int force) return -1; } - for (k = HID_INPUT_REPORT; k <= HID_FEATURE_REPORT; k++) { + report_features(hid); + + for (k = HID_INPUT_REPORT; k <= HID_OUTPUT_REPORT; k++) { if (k == HID_OUTPUT_REPORT && hid->quirks & HID_QUIRK_SKIP_OUTPUT_REPORTS) continue; @@ -928,6 +940,7 @@ int hidinput_connect(struct hid_device *hid, unsigned int force) return 0; out_cleanup: + list_del(&hidinput->list); input_free_device(hidinput->input); kfree(hidinput); out_unwind: diff --git a/drivers/hid/hid-keytouch.c b/drivers/hid/hid-keytouch.c new file mode 100644 index 000000000000..07cd825f6f01 --- /dev/null +++ b/drivers/hid/hid-keytouch.c @@ -0,0 +1,66 @@ +/* + * HID driver for Keytouch devices not fully compliant with HID standard + * + * Copyright (c) 2011 Jiri Kosina + */ + +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + */ + +#include <linux/device.h> +#include <linux/hid.h> +#include <linux/module.h> + +#include "hid-ids.h" + +/* Replace the broken report descriptor of this device with rather + * a default one */ +static __u8 keytouch_fixed_rdesc[] = { +0x05, 0x01, 0x09, 0x06, 0xa1, 0x01, 0x05, 0x07, 0x19, 0xe0, 0x29, 0xe7, 0x15, +0x00, 0x25, 0x01, 0x75, 0x01, 0x95, 0x08, 0x81, 0x02, 0x95, 0x01, 0x75, 0x08, +0x81, 0x01, 0x95, 0x03, 0x75, 0x01, 0x05, 0x08, 0x19, 0x01, 0x29, 0x03, 0x91, +0x02, 0x95, 0x05, 0x75, 0x01, 0x91, 0x01, 0x95, 0x06, 0x75, 0x08, 0x15, 0x00, +0x26, 0xff, 0x00, 0x05, 0x07, 0x19, 0x00, 0x2a, 0xff, 0x00, 0x81, 0x00, 0xc0 +}; + +static __u8 *keytouch_report_fixup(struct hid_device *hdev, __u8 *rdesc, + unsigned int *rsize) +{ + hid_info(hdev, "fixing up Keytouch IEC report descriptor\n"); + + rdesc = keytouch_fixed_rdesc; + *rsize = sizeof(keytouch_fixed_rdesc); + + return rdesc; +} + +static const struct hid_device_id keytouch_devices[] = { + { HID_USB_DEVICE(USB_VENDOR_ID_KEYTOUCH, USB_DEVICE_ID_KEYTOUCH_IEC) }, + { } +}; +MODULE_DEVICE_TABLE(hid, keytouch_devices); + +static struct hid_driver keytouch_driver = { + .name = "keytouch", + .id_table = keytouch_devices, + .report_fixup = keytouch_report_fixup, +}; + +static int __init keytouch_init(void) +{ + return hid_register_driver(&keytouch_driver); +} + +static void __exit keytouch_exit(void) +{ + hid_unregister_driver(&keytouch_driver); +} + +module_init(keytouch_init); +module_exit(keytouch_exit); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Jiri Kosina"); diff --git a/drivers/hid/hid-lcpower.c b/drivers/hid/hid-lcpower.c new file mode 100644 index 000000000000..c4fe9bd095b7 --- /dev/null +++ b/drivers/hid/hid-lcpower.c @@ -0,0 +1,70 @@ +/* + * HID driver for LC Power Model RC1000MCE + * + * Copyright (c) 2011 Chris Schlund + * based on hid-topseed module + */ + +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
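In the ts_input_mapping() function below, usage->hid packs the 16-bit usage page in the high half of the word and the usage id in the low half; the vendor page being matched is 0xffbc, and the extra leading zero in the driver's 0x0ffbc0000 constant does not change the value. A small sketch of the arithmetic, using the mask values from <linux/hid.h> (HID_USAGE_PAGE == 0xffff0000, HID_USAGE == 0x0000ffff); the helper name is illustrative:

/* Sketch of the usage->hid layout tested below. The "Red" key of this
 * remote arrives as page 0xffbc, usage 0x04a: */
static inline int is_rc1000_red_key(__u32 hid)
{
	return (hid & 0xffff0000) == 0xffbc0000 &&	/* vendor usage page */
	       (hid & 0x0000ffff) == 0x04a;		/* usage id -> KEY_RED */
}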
+ */ + +#include <linux/device.h> +#include <linux/hid.h> +#include <linux/module.h> + +#include "hid-ids.h" + +#define ts_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, \ + EV_KEY, (c)) +static int ts_input_mapping(struct hid_device *hdev, struct hid_input *hi, + struct hid_field *field, struct hid_usage *usage, + unsigned long **bit, int *max) +{ + if ((usage->hid & HID_USAGE_PAGE) != 0x0ffbc0000) + return 0; + + switch (usage->hid & HID_USAGE) { + case 0x046: ts_map_key_clear(KEY_YELLOW); break; + case 0x047: ts_map_key_clear(KEY_GREEN); break; + case 0x049: ts_map_key_clear(KEY_BLUE); break; + case 0x04a: ts_map_key_clear(KEY_RED); break; + case 0x00d: ts_map_key_clear(KEY_HOME); break; + case 0x025: ts_map_key_clear(KEY_TV); break; + case 0x048: ts_map_key_clear(KEY_VCR); break; + case 0x024: ts_map_key_clear(KEY_MENU); break; + default: + return 0; + } + + return 1; +} + +static const struct hid_device_id ts_devices[] = { + { HID_USB_DEVICE( USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000) }, + { } +}; +MODULE_DEVICE_TABLE(hid, ts_devices); + +static struct hid_driver ts_driver = { + .name = "LC RC1000MCE", + .id_table = ts_devices, + .input_mapping = ts_input_mapping, +}; + +static int __init ts_init(void) +{ + return hid_register_driver(&ts_driver); +} + +static void __exit ts_exit(void) +{ + hid_unregister_driver(&ts_driver); +} + +module_init(ts_init); +module_exit(ts_exit); +MODULE_LICENSE("GPL"); diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c index aef4104da141..3da90402ee81 100644 --- a/drivers/hid/hid-lg.c +++ b/drivers/hid/hid-lg.c @@ -377,6 +377,8 @@ static const struct hid_device_id lg_devices[] = { .driver_data = LG_FF }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G25_WHEEL), .driver_data = LG_FF }, + { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFP_WHEEL), + .driver_data = LG_FF }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WII_WHEEL), .driver_data = LG_FF4 }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG ), diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c index 698e6459fd0b..318cc40df92d 100644 --- a/drivers/hid/hid-magicmouse.c +++ b/drivers/hid/hid-magicmouse.c @@ -258,7 +258,7 @@ static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tda input_report_abs(input, ABS_MT_TRACKING_ID, id); input_report_abs(input, ABS_MT_TOUCH_MAJOR, touch_major << 2); input_report_abs(input, ABS_MT_TOUCH_MINOR, touch_minor << 2); - input_report_abs(input, ABS_MT_ORIENTATION, orientation); + input_report_abs(input, ABS_MT_ORIENTATION, -orientation); input_report_abs(input, ABS_MT_POSITION_X, x); input_report_abs(input, ABS_MT_POSITION_Y, y); @@ -397,7 +397,7 @@ static void magicmouse_setup_input(struct input_dev *input, struct hid_device *h input_set_abs_params(input, ABS_MT_TRACKING_ID, 0, 15, 0, 0); input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, 255, 4, 0); input_set_abs_params(input, ABS_MT_TOUCH_MINOR, 0, 255, 4, 0); - input_set_abs_params(input, ABS_MT_ORIENTATION, -32, 31, 1, 0); + input_set_abs_params(input, ABS_MT_ORIENTATION, -31, 32, 1, 0); /* Note: Touch Y position from the device is inverted relative * to how pointer motion is reported (and relative to how USB diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index 07d3183fdde5..ee01e65e22d6 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c @@ -5,6 +5,12 @@ * Copyright (c) 2010-2011 Benjamin Tissoires 
<benjamin.tissoires@gmail.com> * Copyright (c) 2010-2011 Ecole Nationale de l'Aviation Civile, France * + * This code is partly based on hid-egalax.c: + * + * Copyright (c) 2010 Stephane Chatty <chatty@enac.fr> + * Copyright (c) 2010 Henrik Rydberg <rydberg@euromail.se> + * Copyright (c) 2010 Canonical, Ltd. + * */ /* @@ -24,6 +30,7 @@ MODULE_AUTHOR("Stephane Chatty <chatty@enac.fr>"); +MODULE_AUTHOR("Benjamin Tissoires <benjamin.tissoires@gmail.com>"); MODULE_DESCRIPTION("HID multitouch panels"); MODULE_LICENSE("GPL"); @@ -36,6 +43,7 @@ MODULE_LICENSE("GPL"); #define MT_QUIRK_SLOT_IS_CONTACTNUMBER (1 << 3) #define MT_QUIRK_VALID_IS_INRANGE (1 << 4) #define MT_QUIRK_VALID_IS_CONFIDENCE (1 << 5) +#define MT_QUIRK_EGALAX_XYZ_FIXUP (1 << 6) struct mt_slot { __s32 x, y, p, w, h; @@ -65,10 +73,11 @@ struct mt_class { }; /* classes of device behavior */ -#define MT_CLS_DEFAULT 1 -#define MT_CLS_DUAL1 2 -#define MT_CLS_DUAL2 3 -#define MT_CLS_CYPRESS 4 +#define MT_CLS_DEFAULT 1 +#define MT_CLS_DUAL_INRANGE_CONTACTID 2 +#define MT_CLS_DUAL_INRANGE_CONTACTNUMBER 3 +#define MT_CLS_CYPRESS 4 +#define MT_CLS_EGALAX 5 /* * these device-dependent functions determine what slot corresponds @@ -104,13 +113,13 @@ static int find_slot_from_contactid(struct mt_device *td) struct mt_class mt_classes[] = { { .name = MT_CLS_DEFAULT, - .quirks = MT_QUIRK_VALID_IS_INRANGE, + .quirks = MT_QUIRK_NOT_SEEN_MEANS_UP, .maxcontacts = 10 }, - { .name = MT_CLS_DUAL1, + { .name = MT_CLS_DUAL_INRANGE_CONTACTID, .quirks = MT_QUIRK_VALID_IS_INRANGE | MT_QUIRK_SLOT_IS_CONTACTID, .maxcontacts = 2 }, - { .name = MT_CLS_DUAL2, + { .name = MT_CLS_DUAL_INRANGE_CONTACTNUMBER, .quirks = MT_QUIRK_VALID_IS_INRANGE | MT_QUIRK_SLOT_IS_CONTACTNUMBER, .maxcontacts = 2 }, @@ -119,10 +128,18 @@ struct mt_class mt_classes[] = { MT_QUIRK_CYPRESS, .maxcontacts = 10 }, + { .name = MT_CLS_EGALAX, + .quirks = MT_QUIRK_SLOT_IS_CONTACTID | + MT_QUIRK_VALID_IS_INRANGE | + MT_QUIRK_EGALAX_XYZ_FIXUP, + .maxcontacts = 2, + .sn_move = 4096, + .sn_pressure = 32, + }, { } }; -static void mt_feature_mapping(struct hid_device *hdev, struct hid_input *hi, +static void mt_feature_mapping(struct hid_device *hdev, struct hid_field *field, struct hid_usage *usage) { if (usage->hid == HID_DG_INPUTMODE) { @@ -146,11 +163,15 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi, { struct mt_device *td = hid_get_drvdata(hdev); struct mt_class *cls = td->mtclass; + __s32 quirks = cls->quirks; + switch (usage->hid & HID_USAGE_PAGE) { case HID_UP_GENDESK: switch (usage->hid) { case HID_GD_X: + if (quirks & MT_QUIRK_EGALAX_XYZ_FIXUP) + field->logical_maximum = 32760; hid_map_usage(hi, usage, bit, max, EV_ABS, ABS_MT_POSITION_X); set_abs(hi->input, ABS_MT_POSITION_X, field, @@ -160,6 +181,8 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi, td->last_slot_field = usage->hid; return 1; case HID_GD_Y: + if (quirks & MT_QUIRK_EGALAX_XYZ_FIXUP) + field->logical_maximum = 32760; hid_map_usage(hi, usage, bit, max, EV_ABS, ABS_MT_POSITION_Y); set_abs(hi->input, ABS_MT_POSITION_Y, field, @@ -203,6 +226,8 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi, td->last_slot_field = usage->hid; return 1; case HID_DG_TIPPRESSURE: + if (quirks & MT_QUIRK_EGALAX_XYZ_FIXUP) + field->logical_minimum = 0; hid_map_usage(hi, usage, bit, max, EV_ABS, ABS_MT_PRESSURE); set_abs(hi->input, ABS_MT_PRESSURE, field, @@ -363,8 +388,11 @@ static int mt_event(struct hid_device *hid, struct hid_field *field, return 0; } - if 
(usage->hid == td->last_slot_field) + if (usage->hid == td->last_slot_field) { mt_complete_slot(td); + if (!td->last_field_index) + mt_emit_event(td, field->hidinput->input); + } if (field->index == td->last_field_index && td->num_received >= td->num_expected) @@ -466,18 +494,42 @@ static const struct hid_device_id mt_devices[] = { USB_DEVICE_ID_CYPRESS_TRUETOUCH) }, /* GeneralTouch panel */ - { .driver_data = MT_CLS_DUAL2, + { .driver_data = MT_CLS_DUAL_INRANGE_CONTACTNUMBER, HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, USB_DEVICE_ID_GENERAL_TOUCH_WIN7_TWOFINGERS) }, + /* IRTOUCH panels */ + { .driver_data = MT_CLS_DUAL_INRANGE_CONTACTID, + HID_USB_DEVICE(USB_VENDOR_ID_IRTOUCHSYSTEMS, + USB_DEVICE_ID_IRTOUCH_INFRARED_USB) }, + /* PixCir-based panels */ - { .driver_data = MT_CLS_DUAL1, + { .driver_data = MT_CLS_DUAL_INRANGE_CONTACTID, HID_USB_DEVICE(USB_VENDOR_ID_HANVON, USB_DEVICE_ID_HANVON_MULTITOUCH) }, - { .driver_data = MT_CLS_DUAL1, + { .driver_data = MT_CLS_DUAL_INRANGE_CONTACTID, HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_PIXCIR_MULTI_TOUCH) }, + /* Resistive eGalax devices */ + { .driver_data = MT_CLS_EGALAX, + HID_USB_DEVICE(USB_VENDOR_ID_DWAV, + USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) }, + { .driver_data = MT_CLS_EGALAX, + HID_USB_DEVICE(USB_VENDOR_ID_DWAV, + USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH3) }, + + /* Capacitive eGalax devices */ + { .driver_data = MT_CLS_EGALAX, + HID_USB_DEVICE(USB_VENDOR_ID_DWAV, + USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1) }, + { .driver_data = MT_CLS_EGALAX, + HID_USB_DEVICE(USB_VENDOR_ID_DWAV, + USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH2) }, + { .driver_data = MT_CLS_EGALAX, + HID_USB_DEVICE(USB_VENDOR_ID_DWAV, + USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH4) }, + { } }; MODULE_DEVICE_TABLE(hid, mt_devices); diff --git a/drivers/hid/hid-ntrig.c b/drivers/hid/hid-ntrig.c index beb403421e72..9fae2ebdd758 100644 --- a/drivers/hid/hid-ntrig.c +++ b/drivers/hid/hid-ntrig.c @@ -110,6 +110,36 @@ static int ntrig_version_string(unsigned char *raw, char *buf) return sprintf(buf, "%u.%u.%u.%u.%u", a, b, c, d, e); } +static inline int ntrig_get_mode(struct hid_device *hdev) +{ + struct hid_report *report = hdev->report_enum[HID_FEATURE_REPORT]. + report_id_hash[0x0d]; + + if (!report) + return -EINVAL; + + usbhid_submit_report(hdev, report, USB_DIR_IN); + usbhid_wait_io(hdev); + return (int)report->field[0]->value[0]; +} + +static inline void ntrig_set_mode(struct hid_device *hdev, const int mode) +{ + struct hid_report *report; + __u8 mode_commands[4] = { 0xe, 0xf, 0x1b, 0x10 }; + + if (mode < 0 || mode > 3) + return; + + report = hdev->report_enum[HID_FEATURE_REPORT]. 
+ report_id_hash[mode_commands[mode]]; + + if (!report) + return; + + usbhid_submit_report(hdev, report, USB_DIR_IN); +} + static void ntrig_report_version(struct hid_device *hdev) { int ret; @@ -539,277 +569,288 @@ static int ntrig_input_mapped(struct hid_device *hdev, struct hid_input *hi, static int ntrig_event (struct hid_device *hid, struct hid_field *field, struct hid_usage *usage, __s32 value) { - struct input_dev *input = field->hidinput->input; struct ntrig_data *nd = hid_get_drvdata(hid); + struct input_dev *input; + + /* Skip processing if not a claimed input */ + if (!(hid->claimed & HID_CLAIMED_INPUT)) + goto not_claimed_input; + + /* This function is being called before the structures are fully + * initialized */ + if(!(field->hidinput && field->hidinput->input)) + return -EINVAL; + + input = field->hidinput->input; /* No special handling needed for the pen */ if (field->application == HID_DG_PEN) return 0; - if (hid->claimed & HID_CLAIMED_INPUT) { - switch (usage->hid) { - case 0xff000001: - /* Tag indicating the start of a multitouch group */ - nd->reading_mt = 1; - nd->first_contact_touch = 0; - break; - case HID_DG_TIPSWITCH: - nd->tipswitch = value; - /* Prevent emission of touch until validated */ - return 1; - case HID_DG_CONFIDENCE: - nd->confidence = value; - break; - case HID_GD_X: - nd->x = value; - /* Clear the contact footer */ - nd->mt_foot_count = 0; - break; - case HID_GD_Y: - nd->y = value; - break; - case HID_DG_CONTACTID: - nd->id = value; - break; - case HID_DG_WIDTH: - nd->w = value; - break; - case HID_DG_HEIGHT: - nd->h = value; + switch (usage->hid) { + case 0xff000001: + /* Tag indicating the start of a multitouch group */ + nd->reading_mt = 1; + nd->first_contact_touch = 0; + break; + case HID_DG_TIPSWITCH: + nd->tipswitch = value; + /* Prevent emission of touch until validated */ + return 1; + case HID_DG_CONFIDENCE: + nd->confidence = value; + break; + case HID_GD_X: + nd->x = value; + /* Clear the contact footer */ + nd->mt_foot_count = 0; + break; + case HID_GD_Y: + nd->y = value; + break; + case HID_DG_CONTACTID: + nd->id = value; + break; + case HID_DG_WIDTH: + nd->w = value; + break; + case HID_DG_HEIGHT: + nd->h = value; + /* + * when in single touch mode, this is the last + * report received in a finger event. We want + * to emit a normal (X, Y) position + */ + if (!nd->reading_mt) { /* - * when in single touch mode, this is the last - * report received in a finger event. We want - * to emit a normal (X, Y) position + * TipSwitch indicates the presence of a + * finger in single touch mode. */ - if (!nd->reading_mt) { - /* - * TipSwitch indicates the presence of a - * finger in single touch mode. - */ - input_report_key(input, BTN_TOUCH, - nd->tipswitch); - input_report_key(input, BTN_TOOL_DOUBLETAP, - nd->tipswitch); - input_event(input, EV_ABS, ABS_X, nd->x); - input_event(input, EV_ABS, ABS_Y, nd->y); - } + input_report_key(input, BTN_TOUCH, + nd->tipswitch); + input_report_key(input, BTN_TOOL_DOUBLETAP, + nd->tipswitch); + input_event(input, EV_ABS, ABS_X, nd->x); + input_event(input, EV_ABS, ABS_Y, nd->y); + } + break; + case 0xff000002: + /* + * we receive this when the device is in multitouch + * mode. The first of the three values tagged with + * this usage tells if the contact point is real + * or a placeholder + */ + + /* Shouldn't get more than 4 footer packets, so skip */ + if (nd->mt_foot_count >= 4) break; - case 0xff000002: - /* - * we receive this when the device is in multitouch - * mode. 
The first of the three values tagged with - * this usage tells if the contact point is real - * or a placeholder - */ - /* Shouldn't get more than 4 footer packets, so skip */ - if (nd->mt_foot_count >= 4) - break; + nd->mt_footer[nd->mt_foot_count++] = value; - nd->mt_footer[nd->mt_foot_count++] = value; + /* if the footer isn't complete break */ + if (nd->mt_foot_count != 4) + break; - /* if the footer isn't complete break */ - if (nd->mt_foot_count != 4) - break; + /* Pen activity signal. */ + if (nd->mt_footer[2]) { + /* + * When the pen deactivates touch, we see a + * bogus frame with ContactCount > 0. + * We can + * save a bit of work by ensuring act_state < 0 + * even if deactivation slack is turned off. + */ + nd->act_state = deactivate_slack - 1; + nd->confidence = 0; + break; + } - /* Pen activity signal. */ - if (nd->mt_footer[2]) { - /* - * When the pen deactivates touch, we see a - * bogus frame with ContactCount > 0. - * We can - * save a bit of work by ensuring act_state < 0 - * even if deactivation slack is turned off. - */ - nd->act_state = deactivate_slack - 1; + /* + * The first footer value indicates the presence of a + * finger. + */ + if (nd->mt_footer[0]) { + /* + * We do not want to process contacts under + * the size threshold, but do not want to + * ignore them for activation state + */ + if (nd->w < nd->min_width || + nd->h < nd->min_height) nd->confidence = 0; - break; - } + } else + break; + if (nd->act_state > 0) { /* - * The first footer value indicates the presence of a - * finger. + * Contact meets the activation size threshold */ - if (nd->mt_footer[0]) { - /* - * We do not want to process contacts under - * the size threshold, but do not want to - * ignore them for activation state - */ - if (nd->w < nd->min_width || - nd->h < nd->min_height) - nd->confidence = 0; - } else - break; - - if (nd->act_state > 0) { - /* - * Contact meets the activation size threshold - */ - if (nd->w >= nd->activation_width && - nd->h >= nd->activation_height) { - if (nd->id) - /* - * first contact, activate now - */ - nd->act_state = 0; - else { - /* - * avoid corrupting this frame - * but ensure next frame will - * be active - */ - nd->act_state = 1; - break; - } - } else + if (nd->w >= nd->activation_width && + nd->h >= nd->activation_height) { + if (nd->id) /* - * Defer adjusting the activation state - * until the end of the frame. + * first contact, activate now */ + nd->act_state = 0; + else { + /* + * avoid corrupting this frame + * but ensure next frame will + * be active + */ + nd->act_state = 1; break; - } - - /* Discarding this contact */ - if (!nd->confidence) - break; - - /* emit a normal (X, Y) for the first point only */ - if (nd->id == 0) { + } + } else /* - * TipSwitch is superfluous in multitouch - * mode. The footer events tell us - * if there is a finger on the screen or - * not. + * Defer adjusting the activation state + * until the end of the frame. */ - nd->first_contact_touch = nd->confidence; - input_event(input, EV_ABS, ABS_X, nd->x); - input_event(input, EV_ABS, ABS_Y, nd->y); - } + break; + } - /* Emit MT events */ - input_event(input, EV_ABS, ABS_MT_POSITION_X, nd->x); - input_event(input, EV_ABS, ABS_MT_POSITION_Y, nd->y); + /* Discarding this contact */ + if (!nd->confidence) + break; + /* emit a normal (X, Y) for the first point only */ + if (nd->id == 0) { /* - * Translate from height and width to size - * and orientation. + * TipSwitch is superfluous in multitouch + * mode. 
The footer events tell us + * if there is a finger on the screen or + * not. */ - if (nd->w > nd->h) { - input_event(input, EV_ABS, - ABS_MT_ORIENTATION, 1); - input_event(input, EV_ABS, - ABS_MT_TOUCH_MAJOR, nd->w); - input_event(input, EV_ABS, - ABS_MT_TOUCH_MINOR, nd->h); - } else { - input_event(input, EV_ABS, - ABS_MT_ORIENTATION, 0); - input_event(input, EV_ABS, - ABS_MT_TOUCH_MAJOR, nd->h); - input_event(input, EV_ABS, - ABS_MT_TOUCH_MINOR, nd->w); - } - input_mt_sync(field->hidinput->input); - break; + nd->first_contact_touch = nd->confidence; + input_event(input, EV_ABS, ABS_X, nd->x); + input_event(input, EV_ABS, ABS_Y, nd->y); + } - case HID_DG_CONTACTCOUNT: /* End of a multitouch group */ - if (!nd->reading_mt) /* Just to be sure */ - break; + /* Emit MT events */ + input_event(input, EV_ABS, ABS_MT_POSITION_X, nd->x); + input_event(input, EV_ABS, ABS_MT_POSITION_Y, nd->y); + + /* + * Translate from height and width to size + * and orientation. + */ + if (nd->w > nd->h) { + input_event(input, EV_ABS, + ABS_MT_ORIENTATION, 1); + input_event(input, EV_ABS, + ABS_MT_TOUCH_MAJOR, nd->w); + input_event(input, EV_ABS, + ABS_MT_TOUCH_MINOR, nd->h); + } else { + input_event(input, EV_ABS, + ABS_MT_ORIENTATION, 0); + input_event(input, EV_ABS, + ABS_MT_TOUCH_MAJOR, nd->h); + input_event(input, EV_ABS, + ABS_MT_TOUCH_MINOR, nd->w); + } + input_mt_sync(field->hidinput->input); + break; - nd->reading_mt = 0; + case HID_DG_CONTACTCOUNT: /* End of a multitouch group */ + if (!nd->reading_mt) /* Just to be sure */ + break; + nd->reading_mt = 0; + + + /* + * Activation state machine logic: + * + * Fundamental states: + * state > 0: Inactive + * state <= 0: Active + * state < -deactivate_slack: + * Pen termination of touch + * + * Specific values of interest + * state == activate_slack + * no valid input since the last reset + * + * state == 0 + * general operational state + * + * state == -deactivate_slack + * read sufficient empty frames to accept + * the end of input and reset + */ + + if (nd->act_state > 0) { /* Currently inactive */ + if (value) + /* + * Consider each live contact as + * evidence of intentional activity. + */ + nd->act_state = (nd->act_state > value) + ? nd->act_state - value + : 0; + else + /* + * Empty frame before we hit the + * activity threshold, reset. + */ + nd->act_state = nd->activate_slack; /* - * Activation state machine logic: - * - * Fundamental states: - * state > 0: Inactive - * state <= 0: Active - * state < -deactivate_slack: - * Pen termination of touch - * - * Specific values of interest - * state == activate_slack - * no valid input since the last reset - * - * state == 0 - * general operational state - * - * state == -deactivate_slack - * read sufficient empty frames to accept - * the end of input and reset + * Entered this block inactive and no + * coordinates sent this frame, so hold off + * on button state. */ - - if (nd->act_state > 0) { /* Currently inactive */ - if (value) - /* - * Consider each live contact as - * evidence of intentional activity. - */ - nd->act_state = (nd->act_state > value) - ? nd->act_state - value - : 0; - else - /* - * Empty frame before we hit the - * activity threshold, reset. - */ - nd->act_state = nd->activate_slack; - + break; + } else { /* Currently active */ + if (value && nd->act_state >= + nd->deactivate_slack) /* - * Entered this block inactive and no - * coordinates sent this frame, so hold off - * on button state. + * Live point: clear accumulated + * deactivation count. 
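The activation logic documented in the surrounding comments condenses to a single transition on act_state per frame. A restatement for clarity — illustrative only, with the slack values passed in rather than read from struct ntrig_data (deactivate_slack is negative in this driver, activate_slack positive):

/* Condensed form of the act_state transitions described above. */
static int ntrig_next_act_state(int state, int contacts,
				int activate_slack, int deactivate_slack)
{
	if (state > 0) {				/* currently inactive */
		if (!contacts)
			return activate_slack;		/* empty frame: reset */
		return state > contacts ?
			state - contacts : 0;		/* count down to active */
	}
	/* currently active */
	if (contacts && state >= deactivate_slack)
		return 0;				/* live point clears count */
	if (state <= deactivate_slack)
		return activate_slack;			/* slack consumed: reset */
	return state - 1;				/* drift towards deactivation */
}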
*/ - break; - } else { /* Currently active */ - if (value && nd->act_state >= - nd->deactivate_slack) - /* - * Live point: clear accumulated - * deactivation count. - */ - nd->act_state = 0; - else if (nd->act_state <= nd->deactivate_slack) - /* - * We've consumed the deactivation - * slack, time to deactivate and reset. - */ - nd->act_state = - nd->activate_slack; - else { /* Move towards deactivation */ - nd->act_state--; - break; - } - } - - if (nd->first_contact_touch && nd->act_state <= 0) { + nd->act_state = 0; + else if (nd->act_state <= nd->deactivate_slack) /* - * Check to see if we're ready to start - * emitting touch events. - * - * Note: activation slack will decrease over - * the course of the frame, and it will be - * inconsistent from the start to the end of - * the frame. However if the frame starts - * with slack, first_contact_touch will still - * be 0 and we will not get to this point. + * We've consumed the deactivation + * slack, time to deactivate and reset. */ - input_report_key(input, BTN_TOOL_DOUBLETAP, 1); - input_report_key(input, BTN_TOUCH, 1); - } else { - input_report_key(input, BTN_TOOL_DOUBLETAP, 0); - input_report_key(input, BTN_TOUCH, 0); + nd->act_state = + nd->activate_slack; + else { /* Move towards deactivation */ + nd->act_state--; + break; } - break; + } - default: - /* fall-back to the generic hidinput handling */ - return 0; + if (nd->first_contact_touch && nd->act_state <= 0) { + /* + * Check to see if we're ready to start + * emitting touch events. + * + * Note: activation slack will decrease over + * the course of the frame, and it will be + * inconsistent from the start to the end of + * the frame. However if the frame starts + * with slack, first_contact_touch will still + * be 0 and we will not get to this point. + */ + input_report_key(input, BTN_TOOL_DOUBLETAP, 1); + input_report_key(input, BTN_TOUCH, 1); + } else { + input_report_key(input, BTN_TOOL_DOUBLETAP, 0); + input_report_key(input, BTN_TOUCH, 0); } + break; + + default: + /* fall-back to the generic hidinput handling */ + return 0; } +not_claimed_input: + /* we have handled the hidinput part, now remains hiddev */ if ((hid->claimed & HID_CLAIMED_HIDDEV) && hid->hiddev_hid_event) hid->hiddev_hid_event(hid, field, usage, value); @@ -826,7 +867,8 @@ static int ntrig_probe(struct hid_device *hdev, const struct hid_device_id *id) struct hid_report *report; if (id->driver_data) - hdev->quirks |= HID_QUIRK_MULTI_INPUT; + hdev->quirks |= HID_QUIRK_MULTI_INPUT + | HID_QUIRK_NO_INIT_REPORTS; nd = kmalloc(sizeof(struct ntrig_data), GFP_KERNEL); if (!nd) { @@ -893,8 +935,19 @@ static int ntrig_probe(struct hid_device *hdev, const struct hid_device_id *id) /* This is needed for devices with more recent firmware versions */ report = hdev->report_enum[HID_FEATURE_REPORT].report_id_hash[0x0a]; - if (report) - usbhid_submit_report(hdev, report, USB_DIR_OUT); + if (report) { + /* Let the device settle to ensure the wakeup message gets + * through */ + usbhid_wait_io(hdev); + usbhid_submit_report(hdev, report, USB_DIR_IN); + + /* + * Sanity check: if the current mode is invalid reset it to + * something reasonable. + */ + if (ntrig_get_mode(hdev) >= 4) + ntrig_set_mode(hdev, 3); + } ntrig_report_version(hdev); diff --git a/drivers/hid/hid-ortek.c b/drivers/hid/hid-ortek.c index e90edfc63051..f9b7dd4f607f 100644 --- a/drivers/hid/hid-ortek.c +++ b/drivers/hid/hid-ortek.c @@ -1,7 +1,6 @@ /* - * HID driver for Ortek WKB-2000 (wireless keyboard + mouse trackpad). 
- * Fixes LogicalMaximum error in USB report description, see - * http://bugzilla.kernel.org/show_bug.cgi?id=14787 + * HID driver for Ortek PKB-1700/WKB-2000 (wireless keyboard + mouse trackpad). + * Fixes LogicalMaximum error in HID report description. * * Copyright (c) 2010 Johnathon Harris <jmharris@gmail.com> */ @@ -30,6 +29,7 @@ static __u8 *ortek_report_fixup(struct hid_device *hdev, __u8 *rdesc, } static const struct hid_device_id ortek_devices[] = { + { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_PKB1700) }, { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) }, { } }; diff --git a/drivers/hid/hid-roccat-arvo.c b/drivers/hid/hid-roccat-arvo.c new file mode 100644 index 000000000000..2307471d96dc --- /dev/null +++ b/drivers/hid/hid-roccat-arvo.c @@ -0,0 +1,450 @@ +/* + * Roccat Arvo driver for Linux + * + * Copyright (c) 2011 Stefan Achatz <erazor_de@users.sourceforge.net> + */ + +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + */ + +/* + * Roccat Arvo is a gamer keyboard with 5 macro keys that can be configured in + * 5 profiles. + */ + +#include <linux/device.h> +#include <linux/input.h> +#include <linux/hid.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/hid-roccat.h> +#include "hid-ids.h" +#include "hid-roccat-common.h" +#include "hid-roccat-arvo.h" + +static struct class *arvo_class; + +static ssize_t arvo_sysfs_show_mode_key(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct arvo_device *arvo = + hid_get_drvdata(dev_get_drvdata(dev->parent->parent)); + struct usb_device *usb_dev = + interface_to_usbdev(to_usb_interface(dev->parent->parent)); + struct arvo_mode_key temp_buf; + int retval; + + mutex_lock(&arvo->arvo_lock); + retval = roccat_common_receive(usb_dev, ARVO_USB_COMMAND_MODE_KEY, + &temp_buf, sizeof(struct arvo_mode_key)); + mutex_unlock(&arvo->arvo_lock); + if (retval) + return retval; + + return snprintf(buf, PAGE_SIZE, "%d\n", temp_buf.state); +} + +static ssize_t arvo_sysfs_set_mode_key(struct device *dev, + struct device_attribute *attr, char const *buf, size_t size) +{ + struct arvo_device *arvo = + hid_get_drvdata(dev_get_drvdata(dev->parent->parent)); + struct usb_device *usb_dev = + interface_to_usbdev(to_usb_interface(dev->parent->parent)); + struct arvo_mode_key temp_buf; + unsigned long state; + int retval; + + retval = strict_strtoul(buf, 10, &state); + if (retval) + return retval; + + temp_buf.command = ARVO_COMMAND_MODE_KEY; + temp_buf.state = state; + + mutex_lock(&arvo->arvo_lock); + retval = roccat_common_send(usb_dev, ARVO_USB_COMMAND_MODE_KEY, + &temp_buf, sizeof(struct arvo_mode_key)); + mutex_unlock(&arvo->arvo_lock); + if (retval) + return retval; + + return size; +} + +static ssize_t arvo_sysfs_show_key_mask(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct arvo_device *arvo = + hid_get_drvdata(dev_get_drvdata(dev->parent->parent)); + struct usb_device *usb_dev = + interface_to_usbdev(to_usb_interface(dev->parent->parent)); + struct arvo_key_mask temp_buf; + int retval; + + mutex_lock(&arvo->arvo_lock); + retval = roccat_common_receive(usb_dev, ARVO_USB_COMMAND_KEY_MASK, + &temp_buf, sizeof(struct arvo_key_mask)); + mutex_unlock(&arvo->arvo_lock); + if (retval) + return retval; + + return snprintf(buf, PAGE_SIZE, "%d\n", 
temp_buf.key_mask); +} + +static ssize_t arvo_sysfs_set_key_mask(struct device *dev, + struct device_attribute *attr, char const *buf, size_t size) +{ + struct arvo_device *arvo = + hid_get_drvdata(dev_get_drvdata(dev->parent->parent)); + struct usb_device *usb_dev = + interface_to_usbdev(to_usb_interface(dev->parent->parent)); + struct arvo_key_mask temp_buf; + unsigned long key_mask; + int retval; + + retval = strict_strtoul(buf, 10, &key_mask); + if (retval) + return retval; + + temp_buf.command = ARVO_COMMAND_KEY_MASK; + temp_buf.key_mask = key_mask; + + mutex_lock(&arvo->arvo_lock); + retval = roccat_common_send(usb_dev, ARVO_USB_COMMAND_KEY_MASK, + &temp_buf, sizeof(struct arvo_key_mask)); + mutex_unlock(&arvo->arvo_lock); + if (retval) + return retval; + + return size; +} + +/* retval is 1-5 on success, < 0 on error */ +static int arvo_get_actual_profile(struct usb_device *usb_dev) +{ + struct arvo_actual_profile temp_buf; + int retval; + + retval = roccat_common_receive(usb_dev, ARVO_USB_COMMAND_ACTUAL_PROFILE, + &temp_buf, sizeof(struct arvo_actual_profile)); + + if (retval) + return retval; + + return temp_buf.actual_profile; +} + +static ssize_t arvo_sysfs_show_actual_profile(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct arvo_device *arvo = + hid_get_drvdata(dev_get_drvdata(dev->parent->parent)); + + return snprintf(buf, PAGE_SIZE, "%d\n", arvo->actual_profile); +} + +static ssize_t arvo_sysfs_set_actual_profile(struct device *dev, + struct device_attribute *attr, char const *buf, size_t size) +{ + struct arvo_device *arvo = + hid_get_drvdata(dev_get_drvdata(dev->parent->parent)); + struct usb_device *usb_dev = + interface_to_usbdev(to_usb_interface(dev->parent->parent)); + struct arvo_actual_profile temp_buf; + unsigned long profile; + int retval; + + retval = strict_strtoul(buf, 10, &profile); + if (retval) + return retval; + + temp_buf.command = ARVO_COMMAND_ACTUAL_PROFILE; + temp_buf.actual_profile = profile; + + mutex_lock(&arvo->arvo_lock); + retval = roccat_common_send(usb_dev, ARVO_USB_COMMAND_ACTUAL_PROFILE, + &temp_buf, sizeof(struct arvo_actual_profile)); + if (!retval) { + arvo->actual_profile = profile; + retval = size; + } + mutex_unlock(&arvo->arvo_lock); + return retval; +} + +static ssize_t arvo_sysfs_write(struct file *fp, + struct kobject *kobj, void const *buf, + loff_t off, size_t count, size_t real_size, uint command) +{ + struct device *dev = + container_of(kobj, struct device, kobj)->parent->parent; + struct arvo_device *arvo = hid_get_drvdata(dev_get_drvdata(dev)); + struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev)); + int retval; + + if (off != 0 || count != real_size) + return -EINVAL; + + mutex_lock(&arvo->arvo_lock); + retval = roccat_common_send(usb_dev, command, buf, real_size); + mutex_unlock(&arvo->arvo_lock); + + return (retval ? retval : real_size); +} + +static ssize_t arvo_sysfs_read(struct file *fp, + struct kobject *kobj, void *buf, loff_t off, + size_t count, size_t real_size, uint command) +{ + struct device *dev = + container_of(kobj, struct device, kobj)->parent->parent; + struct arvo_device *arvo = hid_get_drvdata(dev_get_drvdata(dev)); + struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev)); + int retval; + + if (off >= real_size) + return 0; + + if (off != 0 || count != real_size) + return -EINVAL; + + mutex_lock(&arvo->arvo_lock); + retval = roccat_common_receive(usb_dev, command, buf, real_size); + mutex_unlock(&arvo->arvo_lock); + + return (retval ? 
retval : real_size); +} + +static ssize_t arvo_sysfs_write_button(struct file *fp, + struct kobject *kobj, struct bin_attribute *attr, char *buf, + loff_t off, size_t count) +{ + return arvo_sysfs_write(fp, kobj, buf, off, count, + sizeof(struct arvo_button), ARVO_USB_COMMAND_BUTTON); +} + +static ssize_t arvo_sysfs_read_info(struct file *fp, + struct kobject *kobj, struct bin_attribute *attr, char *buf, + loff_t off, size_t count) +{ + return arvo_sysfs_read(fp, kobj, buf, off, count, + sizeof(struct arvo_info), ARVO_USB_COMMAND_INFO); +} + + +static struct device_attribute arvo_attributes[] = { + __ATTR(mode_key, 0660, + arvo_sysfs_show_mode_key, arvo_sysfs_set_mode_key), + __ATTR(key_mask, 0660, + arvo_sysfs_show_key_mask, arvo_sysfs_set_key_mask), + __ATTR(actual_profile, 0660, + arvo_sysfs_show_actual_profile, + arvo_sysfs_set_actual_profile), + __ATTR_NULL +}; + +static struct bin_attribute arvo_bin_attributes[] = { + { + .attr = { .name = "button", .mode = 0220 }, + .size = sizeof(struct arvo_button), + .write = arvo_sysfs_write_button + }, + { + .attr = { .name = "info", .mode = 0440 }, + .size = sizeof(struct arvo_info), + .read = arvo_sysfs_read_info + }, + __ATTR_NULL +}; + +static int arvo_init_arvo_device_struct(struct usb_device *usb_dev, + struct arvo_device *arvo) +{ + int retval; + + mutex_init(&arvo->arvo_lock); + + retval = arvo_get_actual_profile(usb_dev); + if (retval < 0) + return retval; + arvo->actual_profile = retval; + + return 0; +} + +static int arvo_init_specials(struct hid_device *hdev) +{ + struct usb_interface *intf = to_usb_interface(hdev->dev.parent); + struct usb_device *usb_dev = interface_to_usbdev(intf); + struct arvo_device *arvo; + int retval; + + if (intf->cur_altsetting->desc.bInterfaceProtocol + == USB_INTERFACE_PROTOCOL_KEYBOARD) { + hid_set_drvdata(hdev, NULL); + return 0; + } + + arvo = kzalloc(sizeof(*arvo), GFP_KERNEL); + if (!arvo) { + hid_err(hdev, "can't alloc device descriptor\n"); + return -ENOMEM; + } + hid_set_drvdata(hdev, arvo); + + retval = arvo_init_arvo_device_struct(usb_dev, arvo); + if (retval) { + hid_err(hdev, "couldn't init struct arvo_device\n"); + goto exit_free; + } + + retval = roccat_connect(arvo_class, hdev, + sizeof(struct arvo_roccat_report)); + if (retval < 0) { + hid_err(hdev, "couldn't init char dev\n"); + } else { + arvo->chrdev_minor = retval; + arvo->roccat_claimed = 1; + } + + return 0; +exit_free: + kfree(arvo); + return retval; +} + +static void arvo_remove_specials(struct hid_device *hdev) +{ + struct usb_interface *intf = to_usb_interface(hdev->dev.parent); + struct arvo_device *arvo; + + if (intf->cur_altsetting->desc.bInterfaceProtocol + == USB_INTERFACE_PROTOCOL_KEYBOARD) + return; + + arvo = hid_get_drvdata(hdev); + if (arvo->roccat_claimed) + roccat_disconnect(arvo->chrdev_minor); + kfree(arvo); +} + +static int arvo_probe(struct hid_device *hdev, + const struct hid_device_id *id) +{ + int retval; + + retval = hid_parse(hdev); + if (retval) { + hid_err(hdev, "parse failed\n"); + goto exit; + } + + retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT); + if (retval) { + hid_err(hdev, "hw start failed\n"); + goto exit; + } + + retval = arvo_init_specials(hdev); + if (retval) { + hid_err(hdev, "couldn't install keyboard\n"); + goto exit_stop; + } + + return 0; + +exit_stop: + hid_hw_stop(hdev); +exit: + return retval; +} + +static void arvo_remove(struct hid_device *hdev) +{ + arvo_remove_specials(hdev); + hid_hw_stop(hdev); +} + +static void arvo_report_to_chrdev(struct arvo_device const *arvo, + u8 const 
*data) +{ + struct arvo_special_report const *special_report; + struct arvo_roccat_report roccat_report; + + special_report = (struct arvo_special_report const *)data; + + roccat_report.profile = arvo->actual_profile; + roccat_report.button = special_report->event & + ARVO_SPECIAL_REPORT_EVENT_MASK_BUTTON; + if ((special_report->event & ARVO_SPECIAL_REPORT_EVENT_MASK_ACTION) == + ARVO_SPECIAL_REPORT_EVENT_ACTION_PRESS) + roccat_report.action = ARVO_ROCCAT_REPORT_ACTION_PRESS; + else + roccat_report.action = ARVO_ROCCAT_REPORT_ACTION_RELEASE; + + roccat_report_event(arvo->chrdev_minor, + (uint8_t const *)&roccat_report); +} + +static int arvo_raw_event(struct hid_device *hdev, + struct hid_report *report, u8 *data, int size) +{ + struct arvo_device *arvo = hid_get_drvdata(hdev); + + if (size != 3) + return 0; + + if (arvo->roccat_claimed) + arvo_report_to_chrdev(arvo, data); + + return 0; +} + +static const struct hid_device_id arvo_devices[] = { + { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ARVO) }, + { } +}; + +MODULE_DEVICE_TABLE(hid, arvo_devices); + +static struct hid_driver arvo_driver = { + .name = "arvo", + .id_table = arvo_devices, + .probe = arvo_probe, + .remove = arvo_remove, + .raw_event = arvo_raw_event +}; + +static int __init arvo_init(void) +{ + int retval; + + arvo_class = class_create(THIS_MODULE, "arvo"); + if (IS_ERR(arvo_class)) + return PTR_ERR(arvo_class); + arvo_class->dev_attrs = arvo_attributes; + arvo_class->dev_bin_attrs = arvo_bin_attributes; + + retval = hid_register_driver(&arvo_driver); + if (retval) + class_destroy(arvo_class); + return retval; +} + +static void __exit arvo_exit(void) +{ + hid_unregister_driver(&arvo_driver); + class_destroy(arvo_class); +} + +module_init(arvo_init); +module_exit(arvo_exit); + +MODULE_AUTHOR("Stefan Achatz"); +MODULE_DESCRIPTION("USB Roccat Arvo driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/hid/hid-roccat-arvo.h b/drivers/hid/hid-roccat-arvo.h new file mode 100644 index 000000000000..d284a781c99e --- /dev/null +++ b/drivers/hid/hid-roccat-arvo.h @@ -0,0 +1,98 @@ +#ifndef __HID_ROCCAT_ARVO_H +#define __HID_ROCCAT_ARVO_H + +/* + * Copyright (c) 2011 Stefan Achatz <erazor_de@users.sourceforge.net> + */ + +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
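The arvo_usb_commands values defined among the structures and enums that follow are standard HID feature-report selectors: the helpers in hid-roccat-common.c issue class control transfers where bRequest 0x01 (numerically equal to USB_REQ_CLEAR_FEATURE) is HID GET_REPORT, bRequest 0x09 (numerically equal to USB_REQ_SET_CONFIGURATION) is HID SET_REPORT, and wValue carries the report type in its high byte per HID 1.11 section 7.2. An illustrative macro, not part of the patch:

/* wValue encoding used by roccat_common_send/receive:
 * high byte = report type (3 == Feature), low byte = report id. */
#define ROCCAT_FEATURE_REPORT(id)	((0x03 << 8) | (id))
/* e.g. ROCCAT_FEATURE_REPORT(ARVO_COMMAND_MODE_KEY) == 0x303
 *	== ARVO_USB_COMMAND_MODE_KEY */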
+ */ + +#include <linux/types.h> + +struct arvo_mode_key { /* 2 bytes */ + uint8_t command; /* ARVO_COMMAND_MODE_KEY */ + uint8_t state; +} __packed; + +struct arvo_button { + uint8_t unknown[24]; +} __packed; + +struct arvo_info { + uint8_t unknown[8]; +} __packed; + +struct arvo_key_mask { /* 2 bytes */ + uint8_t command; /* ARVO_COMMAND_KEY_MASK */ + uint8_t key_mask; +} __packed; + +/* selected profile is persistent */ +struct arvo_actual_profile { /* 2 bytes */ + uint8_t command; /* ARVO_COMMAND_ACTUAL_PROFILE */ + uint8_t actual_profile; +} __packed; + +enum arvo_commands { + ARVO_COMMAND_MODE_KEY = 0x3, + ARVO_COMMAND_BUTTON = 0x4, + ARVO_COMMAND_INFO = 0x5, + ARVO_COMMAND_KEY_MASK = 0x6, + ARVO_COMMAND_ACTUAL_PROFILE = 0x7, +}; + +enum arvo_usb_commands { + ARVO_USB_COMMAND_MODE_KEY = 0x303, + /* + * read/write + * Read uses both index bytes as profile/key indexes + * Write has index 0, profile/key is determined by payload + */ + ARVO_USB_COMMAND_BUTTON = 0x304, + ARVO_USB_COMMAND_INFO = 0x305, + ARVO_USB_COMMAND_KEY_MASK = 0x306, + ARVO_USB_COMMAND_ACTUAL_PROFILE = 0x307, +}; + +struct arvo_special_report { + uint8_t unknown1; /* always 0x01 */ + uint8_t event; + uint8_t unknown2; /* always 0x70 */ +} __packed; + +enum arvo_special_report_events { + ARVO_SPECIAL_REPORT_EVENT_ACTION_PRESS = 0x10, + ARVO_SPECIAL_REPORT_EVENT_ACTION_RELEASE = 0x0, +}; + +enum arvo_special_report_event_masks { + ARVO_SPECIAL_REPORT_EVENT_MASK_ACTION = 0xf0, + ARVO_SPECIAL_REPORT_EVENT_MASK_BUTTON = 0x0f, +}; + +struct arvo_roccat_report { + uint8_t profile; + uint8_t button; + uint8_t action; +} __packed; + +enum arvo_roccat_report_action { + ARVO_ROCCAT_REPORT_ACTION_RELEASE = 0, + ARVO_ROCCAT_REPORT_ACTION_PRESS = 1, +}; + +struct arvo_device { + int roccat_claimed; + int chrdev_minor; + + struct mutex arvo_lock; + + int actual_profile; +}; + +#endif diff --git a/drivers/hid/hid-roccat-common.c b/drivers/hid/hid-roccat-common.c new file mode 100644 index 000000000000..13b1eb0c8c65 --- /dev/null +++ b/drivers/hid/hid-roccat-common.c @@ -0,0 +1,62 @@ +/* + * Roccat common functions for device specific drivers + * + * Copyright (c) 2011 Stefan Achatz <erazor_de@users.sourceforge.net> + */ + +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + */ + +#include <linux/slab.h> +#include "hid-roccat-common.h" + +int roccat_common_receive(struct usb_device *usb_dev, uint usb_command, + void *data, uint size) +{ + char *buf; + int len; + + buf = kmalloc(size, GFP_KERNEL); + if (buf == NULL) + return -ENOMEM; + + len = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0), + USB_REQ_CLEAR_FEATURE, + USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN, + usb_command, 0, buf, size, USB_CTRL_SET_TIMEOUT); + + memcpy(data, buf, size); + kfree(buf); + return ((len < 0) ? len : ((len != size) ? -EIO : 0)); +} +EXPORT_SYMBOL_GPL(roccat_common_receive); + +int roccat_common_send(struct usb_device *usb_dev, uint usb_command, + void const *data, uint size) +{ + char *buf; + int len; + + buf = kmalloc(size, GFP_KERNEL); + if (buf == NULL) + return -ENOMEM; + + memcpy(buf, data, size); + + len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0), + USB_REQ_SET_CONFIGURATION, + USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT, + usb_command, 0, buf, size, USB_CTRL_SET_TIMEOUT); + + kfree(buf); + return ((len < 0) ? 
len : ((len != size) ? -EIO : 0)); +} +EXPORT_SYMBOL_GPL(roccat_common_send); + +MODULE_AUTHOR("Stefan Achatz"); +MODULE_DESCRIPTION("USB Roccat common driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/hid/hid-roccat-common.h b/drivers/hid/hid-roccat-common.h new file mode 100644 index 000000000000..fe45fae05bb9 --- /dev/null +++ b/drivers/hid/hid-roccat-common.h @@ -0,0 +1,23 @@ +#ifndef __HID_ROCCAT_COMMON_H +#define __HID_ROCCAT_COMMON_H + +/* + * Copyright (c) 2011 Stefan Achatz <erazor_de@users.sourceforge.net> + */ + +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + */ + +#include <linux/usb.h> +#include <linux/types.h> + +int roccat_common_receive(struct usb_device *usb_dev, uint usb_command, + void *data, uint size); +int roccat_common_send(struct usb_device *usb_dev, uint usb_command, + void const *data, uint size); + +#endif diff --git a/drivers/hid/hid-roccat-kone.c b/drivers/hid/hid-roccat-kone.c index cbd8cc42e75a..a57838d15267 100644 --- a/drivers/hid/hid-roccat-kone.c +++ b/drivers/hid/hid-roccat-kone.c @@ -28,11 +28,11 @@ #include <linux/device.h> #include <linux/input.h> #include <linux/hid.h> -#include <linux/usb.h> #include <linux/module.h> #include <linux/slab.h> +#include <linux/hid-roccat.h> #include "hid-ids.h" -#include "hid-roccat.h" +#include "hid-roccat-common.h" #include "hid-roccat-kone.h" static uint profile_numbers[5] = {0, 1, 2, 3, 4}; @@ -58,12 +58,8 @@ static void kone_set_settings_checksum(struct kone_settings *settings) */ static int kone_check_write(struct usb_device *usb_dev) { - int len; - unsigned char *data; - - data = kmalloc(1, GFP_KERNEL); - if (!data) - return -ENOMEM; + int retval; + uint8_t data; do { /* @@ -72,56 +68,36 @@ static int kone_check_write(struct usb_device *usb_dev) */ msleep(80); - len = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0), - USB_REQ_CLEAR_FEATURE, - USB_TYPE_CLASS | USB_RECIP_INTERFACE | - USB_DIR_IN, - kone_command_confirm_write, 0, data, 1, - USB_CTRL_SET_TIMEOUT); - - if (len != 1) { - kfree(data); - return -EIO; - } + retval = roccat_common_receive(usb_dev, + kone_command_confirm_write, &data, 1); + if (retval) + return retval; /* * value of 3 seems to mean something like * "not finished yet, but it looks good" * So check again after a moment. 
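The confirm-write loop in kone_check_write() below retries for as long as the mouse keeps answering 3, with no upper bound. A bounded variant, as a sketch building on the roccat_common_receive() helper above — the retry cap of 20 is an illustrative addition, not something the patch introduces:

/* Bounded variant of the confirm-write poll; illustrative only. */
static int kone_check_write_bounded(struct usb_device *usb_dev)
{
	uint8_t data = 0;
	int retval, tries;

	for (tries = 0; tries < 20; tries++) {
		msleep(80);	/* same settling delay the driver uses */
		retval = roccat_common_receive(usb_dev,
				kone_command_confirm_write, &data, 1);
		if (retval)
			return retval;
		if (data != 3)	/* 3 == "not finished yet, looks good" */
			break;
	}
	if (data == 1)		/* everything alright */
		return 0;
	return -EIO;		/* timed out or unknown answer */
}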
*/ - } while (*data == 3); + } while (data == 3); - if (*data == 1) { /* everything alright */ - kfree(data); + if (data == 1) /* everything alright */ return 0; - } else { /* unknown answer */ - hid_err(usb_dev, "got retval %d when checking write\n", *data); - kfree(data); - return -EIO; - } + + /* unknown answer */ + hid_err(usb_dev, "got retval %d when checking write\n", data); + return -EIO; } /* * Reads settings from mouse and stores it in @buf - * @buf has to be alloced with GFP_KERNEL * On success returns 0 * On failure returns errno */ static int kone_get_settings(struct usb_device *usb_dev, struct kone_settings *buf) { - int len; - - len = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0), - USB_REQ_CLEAR_FEATURE, - USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN, - kone_command_settings, 0, buf, - sizeof(struct kone_settings), USB_CTRL_SET_TIMEOUT); - - if (len != sizeof(struct kone_settings)) - return -EIO; - - return 0; + return roccat_common_receive(usb_dev, kone_command_settings, buf, + sizeof(struct kone_settings)); } /* @@ -132,22 +108,12 @@ static int kone_get_settings(struct usb_device *usb_dev, static int kone_set_settings(struct usb_device *usb_dev, struct kone_settings const *settings) { - int len; - - len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0), - USB_REQ_SET_CONFIGURATION, - USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT, - kone_command_settings, 0, (char *)settings, - sizeof(struct kone_settings), - USB_CTRL_SET_TIMEOUT); - - if (len != sizeof(struct kone_settings)) - return -EIO; - - if (kone_check_write(usb_dev)) - return -EIO; - - return 0; + int retval; + retval = roccat_common_send(usb_dev, kone_command_settings, + settings, sizeof(struct kone_settings)); + if (retval) + return retval; + return kone_check_write(usb_dev); } /* @@ -193,7 +159,7 @@ static int kone_set_profile(struct usb_device *usb_dev, len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0), USB_REQ_SET_CONFIGURATION, USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT, - kone_command_profile, number, (char *)profile, + kone_command_profile, number, (void *)profile, sizeof(struct kone_profile), USB_CTRL_SET_TIMEOUT); @@ -213,24 +179,15 @@ static int kone_set_profile(struct usb_device *usb_dev, */ static int kone_get_weight(struct usb_device *usb_dev, int *result) { - int len; - uint8_t *data; + int retval; + uint8_t data; - data = kmalloc(1, GFP_KERNEL); - if (!data) - return -ENOMEM; + retval = roccat_common_receive(usb_dev, kone_command_weight, &data, 1); - len = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0), - USB_REQ_CLEAR_FEATURE, - USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN, - kone_command_weight, 0, data, 1, USB_CTRL_SET_TIMEOUT); + if (retval) + return retval; - if (len != 1) { - kfree(data); - return -EIO; - } - *result = (int)*data; - kfree(data); + *result = (int)data; return 0; } @@ -241,25 +198,15 @@ static int kone_get_weight(struct usb_device *usb_dev, int *result) */ static int kone_get_firmware_version(struct usb_device *usb_dev, int *result) { - int len; - unsigned char *data; - - data = kmalloc(2, GFP_KERNEL); - if (!data) - return -ENOMEM; + int retval; + uint16_t data; - len = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0), - USB_REQ_CLEAR_FEATURE, - USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN, - kone_command_firmware_version, 0, data, 2, - USB_CTRL_SET_TIMEOUT); + retval = roccat_common_receive(usb_dev, kone_command_firmware_version, + &data, 2); + if (retval) + return retval; - if (len != 2) { - kfree(data); - return -EIO; 
- } - *result = le16_to_cpu(*data); - kfree(data); + *result = le16_to_cpu(data); return 0; } @@ -435,23 +382,9 @@ static ssize_t kone_sysfs_show_tcu(struct device *dev, static int kone_tcu_command(struct usb_device *usb_dev, int number) { - int len; - char *value; - - value = kmalloc(1, GFP_KERNEL); - if (!value) - return -ENOMEM; - - *value = number; - - len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0), - USB_REQ_SET_CONFIGURATION, - USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT, - kone_command_calibrate, 0, value, 1, - USB_CTRL_SET_TIMEOUT); - - kfree(value); - return ((len != 1) ? -EIO : 0); + unsigned char value; + value = number; + return roccat_common_send(usb_dev, kone_command_calibrate, &value, 1); } /* @@ -727,7 +660,8 @@ static int kone_init_specials(struct hid_device *hdev) goto exit_free; } - retval = roccat_connect(kone_class, hdev); + retval = roccat_connect(kone_class, hdev, + sizeof(struct kone_roccat_report)); if (retval < 0) { hid_err(hdev, "couldn't init char dev\n"); /* be tolerant about not getting chrdev */ @@ -827,8 +761,7 @@ static void kone_report_to_chrdev(struct kone_device const *kone, roccat_report.value = event->value; roccat_report.key = 0; roccat_report_event(kone->chrdev_minor, - (uint8_t *)&roccat_report, - sizeof(struct kone_roccat_report)); + (uint8_t *)&roccat_report); break; case kone_mouse_event_call_overlong_macro: if (event->value == kone_keystroke_action_press) { @@ -836,8 +769,7 @@ static void kone_report_to_chrdev(struct kone_device const *kone, roccat_report.value = kone->actual_profile; roccat_report.key = event->macro_key; roccat_report_event(kone->chrdev_minor, - (uint8_t *)&roccat_report, - sizeof(struct kone_roccat_report)); + (uint8_t *)&roccat_report); } break; } @@ -912,8 +844,8 @@ static int __init kone_init(void) static void __exit kone_exit(void) { - class_destroy(kone_class); hid_unregister_driver(&kone_driver); + class_destroy(kone_class); } module_init(kone_init); diff --git a/drivers/hid/hid-roccat-koneplus.c b/drivers/hid/hid-roccat-koneplus.c index 1608c8d1efd6..33eec74e0615 100644 --- a/drivers/hid/hid-roccat-koneplus.c +++ b/drivers/hid/hid-roccat-koneplus.c @@ -19,11 +19,11 @@ #include <linux/device.h> #include <linux/input.h> #include <linux/hid.h> -#include <linux/usb.h> #include <linux/module.h> #include <linux/slab.h> +#include <linux/hid-roccat.h> #include "hid-ids.h" -#include "hid-roccat.h" +#include "hid-roccat-common.h" #include "hid-roccat-koneplus.h" static uint profile_numbers[5] = {0, 1, 2, 3, 4}; @@ -39,110 +39,63 @@ static void koneplus_profile_activated(struct koneplus_device *koneplus, static int koneplus_send_control(struct usb_device *usb_dev, uint value, enum koneplus_control_requests request) { - int len; - struct koneplus_control *control; + struct koneplus_control control; if ((request == KONEPLUS_CONTROL_REQUEST_PROFILE_SETTINGS || request == KONEPLUS_CONTROL_REQUEST_PROFILE_BUTTONS) && value > 4) return -EINVAL; - control = kmalloc(sizeof(struct koneplus_control), GFP_KERNEL); - if (!control) - return -ENOMEM; + control.command = KONEPLUS_COMMAND_CONTROL; + control.value = value; + control.request = request; - control->command = KONEPLUS_COMMAND_CONTROL; - control->value = value; - control->request = request; - - len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0), - USB_REQ_SET_CONFIGURATION, - USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT, - KONEPLUS_USB_COMMAND_CONTROL, 0, control, - sizeof(struct koneplus_control), - USB_CTRL_SET_TIMEOUT); - - kfree(control); - - if 
(len != sizeof(struct koneplus_control)) - return len; - - return 0; -} - -static int koneplus_receive(struct usb_device *usb_dev, uint usb_command, - void *buf, uint size) { - int len; - - len = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0), - USB_REQ_CLEAR_FEATURE, - USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN, - usb_command, 0, buf, size, USB_CTRL_SET_TIMEOUT); - - return (len != size) ? -EIO : 0; + return roccat_common_send(usb_dev, KONEPLUS_USB_COMMAND_CONTROL, + &control, sizeof(struct koneplus_control)); } static int koneplus_receive_control_status(struct usb_device *usb_dev) { int retval; - struct koneplus_control *control; - - control = kmalloc(sizeof(struct koneplus_control), GFP_KERNEL); - if (!control) - return -ENOMEM; + struct koneplus_control control; do { - retval = koneplus_receive(usb_dev, KONEPLUS_USB_COMMAND_CONTROL, - control, sizeof(struct koneplus_control)); + retval = roccat_common_receive(usb_dev, KONEPLUS_USB_COMMAND_CONTROL, + &control, sizeof(struct koneplus_control)); /* check if we get a completely wrong answer */ if (retval) - goto out; + return retval; - if (control->value == KONEPLUS_CONTROL_REQUEST_STATUS_OK) { - retval = 0; - goto out; - } + if (control.value == KONEPLUS_CONTROL_REQUEST_STATUS_OK) + return 0; /* indicates that hardware needs some more time to complete action */ - if (control->value == KONEPLUS_CONTROL_REQUEST_STATUS_WAIT) { + if (control.value == KONEPLUS_CONTROL_REQUEST_STATUS_WAIT) { msleep(500); /* windows driver uses 1000 */ continue; } /* seems to be critical - replug necessary */ - if (control->value == KONEPLUS_CONTROL_REQUEST_STATUS_OVERLOAD) { - retval = -EINVAL; - goto out; - } - - dev_err(&usb_dev->dev, "koneplus_receive_control_status: " - "unknown response value 0x%x\n", control->value); - retval = -EINVAL; - goto out; + if (control.value == KONEPLUS_CONTROL_REQUEST_STATUS_OVERLOAD) + return -EINVAL; + hid_err(usb_dev, "koneplus_receive_control_status: " + "unknown response value 0x%x\n", control.value); + return -EINVAL; } while (1); -out: - kfree(control); - return retval; } static int koneplus_send(struct usb_device *usb_dev, uint command, - void *buf, uint size) { - int len; - - len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0), - USB_REQ_SET_CONFIGURATION, - USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT, - command, 0, buf, size, USB_CTRL_SET_TIMEOUT); - - if (len != size) - return -EIO; + void const *buf, uint size) +{ + int retval; - if (koneplus_receive_control_status(usb_dev)) - return -EIO; + retval = roccat_common_send(usb_dev, command, buf, size); + if (retval) + return retval; - return 0; + return koneplus_receive_control_status(usb_dev); } static int koneplus_select_profile(struct usb_device *usb_dev, uint number, @@ -167,7 +120,7 @@ static int koneplus_select_profile(struct usb_device *usb_dev, uint number, static int koneplus_get_info(struct usb_device *usb_dev, struct koneplus_info *buf) { - return koneplus_receive(usb_dev, KONEPLUS_USB_COMMAND_INFO, + return roccat_common_receive(usb_dev, KONEPLUS_USB_COMMAND_INFO, buf, sizeof(struct koneplus_info)); } @@ -181,7 +134,7 @@ static int koneplus_get_profile_settings(struct usb_device *usb_dev, if (retval) return retval; - return koneplus_receive(usb_dev, KONEPLUS_USB_COMMAND_PROFILE_SETTINGS, + return roccat_common_receive(usb_dev, KONEPLUS_USB_COMMAND_PROFILE_SETTINGS, buf, sizeof(struct koneplus_profile_settings)); } @@ -189,7 +142,7 @@ static int koneplus_set_profile_settings(struct usb_device *usb_dev, struct 
koneplus_profile_settings const *settings) { return koneplus_send(usb_dev, KONEPLUS_USB_COMMAND_PROFILE_SETTINGS, - (void *)settings, sizeof(struct koneplus_profile_settings)); + settings, sizeof(struct koneplus_profile_settings)); } static int koneplus_get_profile_buttons(struct usb_device *usb_dev, @@ -202,7 +155,7 @@ static int koneplus_get_profile_buttons(struct usb_device *usb_dev, if (retval) return retval; - return koneplus_receive(usb_dev, KONEPLUS_USB_COMMAND_PROFILE_BUTTONS, + return roccat_common_receive(usb_dev, KONEPLUS_USB_COMMAND_PROFILE_BUTTONS, buf, sizeof(struct koneplus_profile_buttons)); } @@ -210,27 +163,19 @@ static int koneplus_set_profile_buttons(struct usb_device *usb_dev, struct koneplus_profile_buttons const *buttons) { return koneplus_send(usb_dev, KONEPLUS_USB_COMMAND_PROFILE_BUTTONS, - (void *)buttons, sizeof(struct koneplus_profile_buttons)); + buttons, sizeof(struct koneplus_profile_buttons)); } /* retval is 0-4 on success, < 0 on error */ static int koneplus_get_startup_profile(struct usb_device *usb_dev) { - struct koneplus_startup_profile *buf; + struct koneplus_startup_profile buf; int retval; - buf = kmalloc(sizeof(struct koneplus_startup_profile), GFP_KERNEL); + retval = roccat_common_receive(usb_dev, KONEPLUS_USB_COMMAND_STARTUP_PROFILE, + &buf, sizeof(struct koneplus_startup_profile)); - retval = koneplus_receive(usb_dev, KONEPLUS_USB_COMMAND_STARTUP_PROFILE, - buf, sizeof(struct koneplus_startup_profile)); - - if (retval) - goto out; - - retval = buf->startup_profile; -out: - kfree(buf); - return retval; + return retval ? retval : buf.startup_profile; } static int koneplus_set_startup_profile(struct usb_device *usb_dev, @@ -243,7 +188,7 @@ static int koneplus_set_startup_profile(struct usb_device *usb_dev, buf.startup_profile = startup_profile; return koneplus_send(usb_dev, KONEPLUS_USB_COMMAND_STARTUP_PROFILE, - (char *)&buf, sizeof(struct koneplus_profile_buttons)); + &buf, sizeof(struct koneplus_startup_profile)); } static ssize_t koneplus_sysfs_read(struct file *fp, struct kobject *kobj, @@ -256,11 +201,14 @@ static ssize_t koneplus_sysfs_read(struct file *fp, struct kobject *kobj, struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev)); int retval; + if (off >= real_size) + return 0; + if (off != 0 || count != real_size) return -EINVAL; mutex_lock(&koneplus->koneplus_lock); - retval = koneplus_receive(usb_dev, command, buf, real_size); + retval = roccat_common_receive(usb_dev, command, buf, real_size); mutex_unlock(&koneplus->koneplus_lock); if (retval) @@ -283,7 +231,7 @@ static ssize_t koneplus_sysfs_write(struct file *fp, struct kobject *kobj, return -EINVAL; mutex_lock(&koneplus->koneplus_lock); - retval = koneplus_send(usb_dev, command, (void *)buf, real_size); + retval = koneplus_send(usb_dev, command, buf, real_size); mutex_unlock(&koneplus->koneplus_lock); if (retval) @@ -347,7 +295,7 @@ static ssize_t koneplus_sysfs_read_profilex_settings(struct file *fp, count = sizeof(struct koneplus_profile_settings) - off; mutex_lock(&koneplus->koneplus_lock); - memcpy(buf, ((void const *)&koneplus->profile_settings[*(uint *)(attr->private)]) + off, + memcpy(buf, ((char const *)&koneplus->profile_settings[*(uint *)(attr->private)]) + off, count); mutex_unlock(&koneplus->koneplus_lock); @@ -406,7 +354,7 @@ static ssize_t koneplus_sysfs_read_profilex_buttons(struct file *fp, count = sizeof(struct koneplus_profile_buttons) - off; mutex_lock(&koneplus->koneplus_lock); - memcpy(buf, ((void const *)&koneplus->profile_buttons[*(uint 
*)(attr->private)]) + off, + memcpy(buf, ((char const *)&koneplus->profile_buttons[*(uint *)(attr->private)]) + off, count); mutex_unlock(&koneplus->koneplus_lock); @@ -512,7 +460,7 @@ static struct device_attribute koneplus_attributes[] = { static struct bin_attribute koneplus_bin_attributes[] = { { - .attr = { .name = "sensor", .mode = 0220 }, + .attr = { .name = "sensor", .mode = 0660 }, .size = sizeof(struct koneplus_sensor), .read = koneplus_sysfs_read_sensor, .write = koneplus_sysfs_write_sensor @@ -609,11 +557,13 @@ static int koneplus_init_koneplus_device_struct(struct usb_device *usb_dev, struct koneplus_device *koneplus) { int retval, i; - static uint wait = 70; /* device will freeze with just 60 */ + static uint wait = 100; /* device will freeze with just 60 */ mutex_init(&koneplus->koneplus_lock); koneplus->startup_profile = koneplus_get_startup_profile(usb_dev); + if (koneplus->startup_profile < 0) + return koneplus->startup_profile; msleep(wait); retval = koneplus_get_info(usb_dev, &koneplus->info); @@ -651,21 +601,21 @@ static int koneplus_init_specials(struct hid_device *hdev) koneplus = kzalloc(sizeof(*koneplus), GFP_KERNEL); if (!koneplus) { - dev_err(&hdev->dev, "can't alloc device descriptor\n"); + hid_err(hdev, "can't alloc device descriptor\n"); return -ENOMEM; } hid_set_drvdata(hdev, koneplus); retval = koneplus_init_koneplus_device_struct(usb_dev, koneplus); if (retval) { - dev_err(&hdev->dev, - "couldn't init struct koneplus_device\n"); + hid_err(hdev, "couldn't init struct koneplus_device\n"); goto exit_free; } - retval = roccat_connect(koneplus_class, hdev); + retval = roccat_connect(koneplus_class, hdev, + sizeof(struct koneplus_roccat_report)); if (retval < 0) { - dev_err(&hdev->dev, "couldn't init char dev\n"); + hid_err(hdev, "couldn't init char dev\n"); } else { koneplus->chrdev_minor = retval; koneplus->roccat_claimed = 1; @@ -701,19 +651,19 @@ static int koneplus_probe(struct hid_device *hdev, retval = hid_parse(hdev); if (retval) { - dev_err(&hdev->dev, "parse failed\n"); + hid_err(hdev, "parse failed\n"); goto exit; } retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT); if (retval) { - dev_err(&hdev->dev, "hw start failed\n"); + hid_err(hdev, "hw start failed\n"); goto exit; } retval = koneplus_init_specials(hdev); if (retval) { - dev_err(&hdev->dev, "couldn't install mouse\n"); + hid_err(hdev, "couldn't install mouse\n"); goto exit_stop; } @@ -769,8 +719,7 @@ static void koneplus_report_to_chrdev(struct koneplus_device const *koneplus, roccat_report.data2 = button_report->data2; roccat_report.profile = koneplus->actual_profile + 1; roccat_report_event(koneplus->chrdev_minor, - (uint8_t const *)&roccat_report, - sizeof(struct koneplus_roccat_report)); + (uint8_t const *)&roccat_report); } static int koneplus_raw_event(struct hid_device *hdev, @@ -825,8 +774,8 @@ static int __init koneplus_init(void) static void __exit koneplus_exit(void) { - class_destroy(koneplus_class); hid_unregister_driver(&koneplus_driver); + class_destroy(koneplus_class); } module_init(koneplus_init); diff --git a/drivers/hid/hid-roccat-kovaplus.c b/drivers/hid/hid-roccat-kovaplus.c new file mode 100644 index 000000000000..984be2f8967e --- /dev/null +++ b/drivers/hid/hid-roccat-kovaplus.c @@ -0,0 +1,715 @@ +/* + * Roccat Kova[+] driver for Linux + * + * Copyright (c) 2011 Stefan Achatz <erazor_de@users.sourceforge.net> + */ + +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + 
* Software Foundation; either version 2 of the License, or (at your option) + * any later version. + */ + +/* + * Roccat Kova[+] is a bigger version of the Pyra with two more side buttons. + */ + +#include <linux/device.h> +#include <linux/input.h> +#include <linux/hid.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/hid-roccat.h> +#include "hid-ids.h" +#include "hid-roccat-common.h" +#include "hid-roccat-kovaplus.h" + +static uint profile_numbers[5] = {0, 1, 2, 3, 4}; + +static struct class *kovaplus_class; + +static uint kovaplus_convert_event_cpi(uint value) +{ + return (value == 7 ? 4 : (value == 4 ? 3 : value)); +} + +static void kovaplus_profile_activated(struct kovaplus_device *kovaplus, + uint new_profile_index) +{ + kovaplus->actual_profile = new_profile_index; + kovaplus->actual_cpi = kovaplus->profile_settings[new_profile_index].cpi_startup_level; + kovaplus->actual_x_sensitivity = kovaplus->profile_settings[new_profile_index].sensitivity_x; + kovaplus->actual_y_sensitivity = kovaplus->profile_settings[new_profile_index].sensitivity_y; +} + +static int kovaplus_send_control(struct usb_device *usb_dev, uint value, + enum kovaplus_control_requests request) +{ + int retval; + struct kovaplus_control control; + + if ((request == KOVAPLUS_CONTROL_REQUEST_PROFILE_SETTINGS || + request == KOVAPLUS_CONTROL_REQUEST_PROFILE_BUTTONS) && + value > 4) + return -EINVAL; + + control.command = KOVAPLUS_COMMAND_CONTROL; + control.value = value; + control.request = request; + + retval = roccat_common_send(usb_dev, KOVAPLUS_USB_COMMAND_CONTROL, + &control, sizeof(struct kovaplus_control)); + + return retval; +} + +static int kovaplus_receive_control_status(struct usb_device *usb_dev) +{ + int retval; + struct kovaplus_control control; + + do { + retval = roccat_common_receive(usb_dev, KOVAPLUS_USB_COMMAND_CONTROL, + &control, sizeof(struct kovaplus_control)); + + /* check if we get a completely wrong answer */ + if (retval) + return retval; + + if (control.value == KOVAPLUS_CONTROL_REQUEST_STATUS_OK) + return 0; + + /* indicates that hardware needs some more time to complete action */ + if (control.value == KOVAPLUS_CONTROL_REQUEST_STATUS_WAIT) { + msleep(500); /* windows driver uses 1000 */ + continue; + } + + /* seems to be critical - replug necessary */ + if (control.value == KOVAPLUS_CONTROL_REQUEST_STATUS_OVERLOAD) + return -EINVAL; + + hid_err(usb_dev, "kovaplus_receive_control_status: " + "unknown response value 0x%x\n", control.value); + return -EINVAL; + } while (1); +} + +static int kovaplus_send(struct usb_device *usb_dev, uint command, + void const *buf, uint size) +{ + int retval; + + retval = roccat_common_send(usb_dev, command, buf, size); + if (retval) + return retval; + + msleep(100); + + return kovaplus_receive_control_status(usb_dev); +} + +static int kovaplus_select_profile(struct usb_device *usb_dev, uint number, + enum kovaplus_control_requests request) +{ + return kovaplus_send_control(usb_dev, number, request); +} + +static int kovaplus_get_info(struct usb_device *usb_dev, + struct kovaplus_info *buf) +{ + return roccat_common_receive(usb_dev, KOVAPLUS_USB_COMMAND_INFO, + buf, sizeof(struct kovaplus_info)); +} + +static int kovaplus_get_profile_settings(struct usb_device *usb_dev, + struct kovaplus_profile_settings *buf, uint number) +{ + int retval; + + retval = kovaplus_select_profile(usb_dev, number, + KOVAPLUS_CONTROL_REQUEST_PROFILE_SETTINGS); + if (retval) + return retval; + + return roccat_common_receive(usb_dev, 
KOVAPLUS_USB_COMMAND_PROFILE_SETTINGS, + buf, sizeof(struct kovaplus_profile_settings)); +} + +static int kovaplus_set_profile_settings(struct usb_device *usb_dev, + struct kovaplus_profile_settings const *settings) +{ + return kovaplus_send(usb_dev, KOVAPLUS_USB_COMMAND_PROFILE_SETTINGS, + settings, sizeof(struct kovaplus_profile_settings)); +} + +static int kovaplus_get_profile_buttons(struct usb_device *usb_dev, + struct kovaplus_profile_buttons *buf, int number) +{ + int retval; + + retval = kovaplus_select_profile(usb_dev, number, + KOVAPLUS_CONTROL_REQUEST_PROFILE_BUTTONS); + if (retval) + return retval; + + return roccat_common_receive(usb_dev, KOVAPLUS_USB_COMMAND_PROFILE_BUTTONS, + buf, sizeof(struct kovaplus_profile_buttons)); +} + +static int kovaplus_set_profile_buttons(struct usb_device *usb_dev, + struct kovaplus_profile_buttons const *buttons) +{ + return kovaplus_send(usb_dev, KOVAPLUS_USB_COMMAND_PROFILE_BUTTONS, + buttons, sizeof(struct kovaplus_profile_buttons)); +} + +/* retval is 0-4 on success, < 0 on error */ +static int kovaplus_get_actual_profile(struct usb_device *usb_dev) +{ + struct kovaplus_actual_profile buf; + int retval; + + retval = roccat_common_receive(usb_dev, KOVAPLUS_USB_COMMAND_ACTUAL_PROFILE, + &buf, sizeof(struct kovaplus_actual_profile)); + + return retval ? retval : buf.actual_profile; +} + +static int kovaplus_set_actual_profile(struct usb_device *usb_dev, + int new_profile) +{ + struct kovaplus_actual_profile buf; + + buf.command = KOVAPLUS_COMMAND_ACTUAL_PROFILE; + buf.size = sizeof(struct kovaplus_actual_profile); + buf.actual_profile = new_profile; + + return kovaplus_send(usb_dev, KOVAPLUS_USB_COMMAND_ACTUAL_PROFILE, + &buf, sizeof(struct kovaplus_actual_profile)); +} + +static ssize_t kovaplus_sysfs_read_profilex_settings(struct file *fp, + struct kobject *kobj, struct bin_attribute *attr, char *buf, + loff_t off, size_t count) +{ + struct device *dev = + container_of(kobj, struct device, kobj)->parent->parent; + struct kovaplus_device *kovaplus = hid_get_drvdata(dev_get_drvdata(dev)); + + if (off >= sizeof(struct kovaplus_profile_settings)) + return 0; + + if (off + count > sizeof(struct kovaplus_profile_settings)) + count = sizeof(struct kovaplus_profile_settings) - off; + + mutex_lock(&kovaplus->kovaplus_lock); + memcpy(buf, ((char const *)&kovaplus->profile_settings[*(uint *)(attr->private)]) + off, + count); + mutex_unlock(&kovaplus->kovaplus_lock); + + return count; +} + +static ssize_t kovaplus_sysfs_write_profile_settings(struct file *fp, + struct kobject *kobj, struct bin_attribute *attr, char *buf, + loff_t off, size_t count) +{ + struct device *dev = + container_of(kobj, struct device, kobj)->parent->parent; + struct kovaplus_device *kovaplus = hid_get_drvdata(dev_get_drvdata(dev)); + struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev)); + int retval = 0; + int difference; + int profile_index; + struct kovaplus_profile_settings *profile_settings; + + if (off != 0 || count != sizeof(struct kovaplus_profile_settings)) + return -EINVAL; + + profile_index = ((struct kovaplus_profile_settings const *)buf)->profile_index; + profile_settings = &kovaplus->profile_settings[profile_index]; + + mutex_lock(&kovaplus->kovaplus_lock); + difference = memcmp(buf, profile_settings, + sizeof(struct kovaplus_profile_settings)); + if (difference) { + retval = kovaplus_set_profile_settings(usb_dev, + (struct kovaplus_profile_settings const *)buf); + if (!retval) + memcpy(profile_settings, buf, + sizeof(struct 
kovaplus_profile_settings)); + } + mutex_unlock(&kovaplus->kovaplus_lock); + + if (retval) + return retval; + + return sizeof(struct kovaplus_profile_settings); +} + +static ssize_t kovaplus_sysfs_read_profilex_buttons(struct file *fp, + struct kobject *kobj, struct bin_attribute *attr, char *buf, + loff_t off, size_t count) +{ + struct device *dev = + container_of(kobj, struct device, kobj)->parent->parent; + struct kovaplus_device *kovaplus = hid_get_drvdata(dev_get_drvdata(dev)); + + if (off >= sizeof(struct kovaplus_profile_buttons)) + return 0; + + if (off + count > sizeof(struct kovaplus_profile_buttons)) + count = sizeof(struct kovaplus_profile_buttons) - off; + + mutex_lock(&kovaplus->kovaplus_lock); + memcpy(buf, ((char const *)&kovaplus->profile_buttons[*(uint *)(attr->private)]) + off, + count); + mutex_unlock(&kovaplus->kovaplus_lock); + + return count; +} + +static ssize_t kovaplus_sysfs_write_profile_buttons(struct file *fp, + struct kobject *kobj, struct bin_attribute *attr, char *buf, + loff_t off, size_t count) +{ + struct device *dev = + container_of(kobj, struct device, kobj)->parent->parent; + struct kovaplus_device *kovaplus = hid_get_drvdata(dev_get_drvdata(dev)); + struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev)); + int retval = 0; + int difference; + uint profile_index; + struct kovaplus_profile_buttons *profile_buttons; + + if (off != 0 || count != sizeof(struct kovaplus_profile_buttons)) + return -EINVAL; + + profile_index = ((struct kovaplus_profile_buttons const *)buf)->profile_index; + profile_buttons = &kovaplus->profile_buttons[profile_index]; + + mutex_lock(&kovaplus->kovaplus_lock); + difference = memcmp(buf, profile_buttons, + sizeof(struct kovaplus_profile_buttons)); + if (difference) { + retval = kovaplus_set_profile_buttons(usb_dev, + (struct kovaplus_profile_buttons const *)buf); + if (!retval) + memcpy(profile_buttons, buf, + sizeof(struct kovaplus_profile_buttons)); + } + mutex_unlock(&kovaplus->kovaplus_lock); + + if (retval) + return retval; + + return sizeof(struct kovaplus_profile_buttons); +} + +static ssize_t kovaplus_sysfs_show_actual_profile(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct kovaplus_device *kovaplus = + hid_get_drvdata(dev_get_drvdata(dev->parent->parent)); + return snprintf(buf, PAGE_SIZE, "%d\n", kovaplus->actual_profile); +} + +static ssize_t kovaplus_sysfs_set_actual_profile(struct device *dev, + struct device_attribute *attr, char const *buf, size_t size) +{ + struct kovaplus_device *kovaplus; + struct usb_device *usb_dev; + unsigned long profile; + int retval; + + dev = dev->parent->parent; + kovaplus = hid_get_drvdata(dev_get_drvdata(dev)); + usb_dev = interface_to_usbdev(to_usb_interface(dev)); + + retval = strict_strtoul(buf, 10, &profile); + if (retval) + return retval; + + if (profile >= 5) + return -EINVAL; + + mutex_lock(&kovaplus->kovaplus_lock); + retval = kovaplus_set_actual_profile(usb_dev, profile); + kovaplus->actual_profile = profile; + mutex_unlock(&kovaplus->kovaplus_lock); + if (retval) + return retval; + + return size; +} + +static ssize_t kovaplus_sysfs_show_actual_cpi(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct kovaplus_device *kovaplus = + hid_get_drvdata(dev_get_drvdata(dev->parent->parent)); + return snprintf(buf, PAGE_SIZE, "%d\n", kovaplus->actual_cpi); +} + +static ssize_t kovaplus_sysfs_show_actual_sensitivity_x(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct kovaplus_device *kovaplus = 
+ hid_get_drvdata(dev_get_drvdata(dev->parent->parent)); + return snprintf(buf, PAGE_SIZE, "%d\n", kovaplus->actual_x_sensitivity); +} + +static ssize_t kovaplus_sysfs_show_actual_sensitivity_y(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct kovaplus_device *kovaplus = + hid_get_drvdata(dev_get_drvdata(dev->parent->parent)); + return snprintf(buf, PAGE_SIZE, "%d\n", kovaplus->actual_y_sensitivity); +} + +static ssize_t kovaplus_sysfs_show_firmware_version(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct kovaplus_device *kovaplus = + hid_get_drvdata(dev_get_drvdata(dev->parent->parent)); + return snprintf(buf, PAGE_SIZE, "%d\n", kovaplus->info.firmware_version); +} + +static struct device_attribute kovaplus_attributes[] = { + __ATTR(actual_cpi, 0440, + kovaplus_sysfs_show_actual_cpi, NULL), + __ATTR(firmware_version, 0440, + kovaplus_sysfs_show_firmware_version, NULL), + __ATTR(actual_profile, 0660, + kovaplus_sysfs_show_actual_profile, + kovaplus_sysfs_set_actual_profile), + __ATTR(actual_sensitivity_x, 0440, + kovaplus_sysfs_show_actual_sensitivity_x, NULL), + __ATTR(actual_sensitivity_y, 0440, + kovaplus_sysfs_show_actual_sensitivity_y, NULL), + __ATTR_NULL +}; + +static struct bin_attribute kovaplus_bin_attributes[] = { + { + .attr = { .name = "profile_settings", .mode = 0220 }, + .size = sizeof(struct kovaplus_profile_settings), + .write = kovaplus_sysfs_write_profile_settings + }, + { + .attr = { .name = "profile1_settings", .mode = 0440 }, + .size = sizeof(struct kovaplus_profile_settings), + .read = kovaplus_sysfs_read_profilex_settings, + .private = &profile_numbers[0] + }, + { + .attr = { .name = "profile2_settings", .mode = 0440 }, + .size = sizeof(struct kovaplus_profile_settings), + .read = kovaplus_sysfs_read_profilex_settings, + .private = &profile_numbers[1] + }, + { + .attr = { .name = "profile3_settings", .mode = 0440 }, + .size = sizeof(struct kovaplus_profile_settings), + .read = kovaplus_sysfs_read_profilex_settings, + .private = &profile_numbers[2] + }, + { + .attr = { .name = "profile4_settings", .mode = 0440 }, + .size = sizeof(struct kovaplus_profile_settings), + .read = kovaplus_sysfs_read_profilex_settings, + .private = &profile_numbers[3] + }, + { + .attr = { .name = "profile5_settings", .mode = 0440 }, + .size = sizeof(struct kovaplus_profile_settings), + .read = kovaplus_sysfs_read_profilex_settings, + .private = &profile_numbers[4] + }, + { + .attr = { .name = "profile_buttons", .mode = 0220 }, + .size = sizeof(struct kovaplus_profile_buttons), + .write = kovaplus_sysfs_write_profile_buttons + }, + { + .attr = { .name = "profile1_buttons", .mode = 0440 }, + .size = sizeof(struct kovaplus_profile_buttons), + .read = kovaplus_sysfs_read_profilex_buttons, + .private = &profile_numbers[0] + }, + { + .attr = { .name = "profile2_buttons", .mode = 0440 }, + .size = sizeof(struct kovaplus_profile_buttons), + .read = kovaplus_sysfs_read_profilex_buttons, + .private = &profile_numbers[1] + }, + { + .attr = { .name = "profile3_buttons", .mode = 0440 }, + .size = sizeof(struct kovaplus_profile_buttons), + .read = kovaplus_sysfs_read_profilex_buttons, + .private = &profile_numbers[2] + }, + { + .attr = { .name = "profile4_buttons", .mode = 0440 }, + .size = sizeof(struct kovaplus_profile_buttons), + .read = kovaplus_sysfs_read_profilex_buttons, + .private = &profile_numbers[3] + }, + { + .attr = { .name = "profile5_buttons", .mode = 0440 }, + .size = sizeof(struct kovaplus_profile_buttons), + .read = 
kovaplus_sysfs_read_profilex_buttons, + .private = &profile_numbers[4] + }, + __ATTR_NULL +}; + +static int kovaplus_init_kovaplus_device_struct(struct usb_device *usb_dev, + struct kovaplus_device *kovaplus) +{ + int retval, i; + static uint wait = 70; /* device will freeze with just 60 */ + + mutex_init(&kovaplus->kovaplus_lock); + + retval = kovaplus_get_info(usb_dev, &kovaplus->info); + if (retval) + return retval; + + for (i = 0; i < 5; ++i) { + msleep(wait); + retval = kovaplus_get_profile_settings(usb_dev, + &kovaplus->profile_settings[i], i); + if (retval) + return retval; + + msleep(wait); + retval = kovaplus_get_profile_buttons(usb_dev, + &kovaplus->profile_buttons[i], i); + if (retval) + return retval; + } + + msleep(wait); + retval = kovaplus_get_actual_profile(usb_dev); + if (retval < 0) + return retval; + kovaplus_profile_activated(kovaplus, retval); + + return 0; +} + +static int kovaplus_init_specials(struct hid_device *hdev) +{ + struct usb_interface *intf = to_usb_interface(hdev->dev.parent); + struct usb_device *usb_dev = interface_to_usbdev(intf); + struct kovaplus_device *kovaplus; + int retval; + + if (intf->cur_altsetting->desc.bInterfaceProtocol + == USB_INTERFACE_PROTOCOL_MOUSE) { + + kovaplus = kzalloc(sizeof(*kovaplus), GFP_KERNEL); + if (!kovaplus) { + hid_err(hdev, "can't alloc device descriptor\n"); + return -ENOMEM; + } + hid_set_drvdata(hdev, kovaplus); + + retval = kovaplus_init_kovaplus_device_struct(usb_dev, kovaplus); + if (retval) { + hid_err(hdev, "couldn't init struct kovaplus_device\n"); + goto exit_free; + } + + retval = roccat_connect(kovaplus_class, hdev, + sizeof(struct kovaplus_roccat_report)); + if (retval < 0) { + hid_err(hdev, "couldn't init char dev\n"); + } else { + kovaplus->chrdev_minor = retval; + kovaplus->roccat_claimed = 1; + } + + } else { + hid_set_drvdata(hdev, NULL); + } + + return 0; +exit_free: + kfree(kovaplus); + return retval; +} + +static void kovaplus_remove_specials(struct hid_device *hdev) +{ + struct usb_interface *intf = to_usb_interface(hdev->dev.parent); + struct kovaplus_device *kovaplus; + + if (intf->cur_altsetting->desc.bInterfaceProtocol + == USB_INTERFACE_PROTOCOL_MOUSE) { + kovaplus = hid_get_drvdata(hdev); + if (kovaplus->roccat_claimed) + roccat_disconnect(kovaplus->chrdev_minor); + kfree(kovaplus); + } +} + +static int kovaplus_probe(struct hid_device *hdev, + const struct hid_device_id *id) +{ + int retval; + + retval = hid_parse(hdev); + if (retval) { + hid_err(hdev, "parse failed\n"); + goto exit; + } + + retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT); + if (retval) { + hid_err(hdev, "hw start failed\n"); + goto exit; + } + + retval = kovaplus_init_specials(hdev); + if (retval) { + hid_err(hdev, "couldn't install mouse\n"); + goto exit_stop; + } + + return 0; + +exit_stop: + hid_hw_stop(hdev); +exit: + return retval; +} + +static void kovaplus_remove(struct hid_device *hdev) +{ + kovaplus_remove_specials(hdev); + hid_hw_stop(hdev); +} + +static void kovaplus_keep_values_up_to_date(struct kovaplus_device *kovaplus, + u8 const *data) +{ + struct kovaplus_mouse_report_button const *button_report; + + if (data[0] != KOVAPLUS_MOUSE_REPORT_NUMBER_BUTTON) + return; + + button_report = (struct kovaplus_mouse_report_button const *)data; + + switch (button_report->type) { + case KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_PROFILE_1: + kovaplus_profile_activated(kovaplus, button_report->data1 - 1); + break; + case KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_CPI: + kovaplus->actual_cpi = 
kovaplus_convert_event_cpi(button_report->data1); + break; + case KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_SENSITIVITY: + kovaplus->actual_x_sensitivity = button_report->data1; + kovaplus->actual_y_sensitivity = button_report->data2; + } +} + +static void kovaplus_report_to_chrdev(struct kovaplus_device const *kovaplus, + u8 const *data) +{ + struct kovaplus_roccat_report roccat_report; + struct kovaplus_mouse_report_button const *button_report; + + if (data[0] != KOVAPLUS_MOUSE_REPORT_NUMBER_BUTTON) + return; + + button_report = (struct kovaplus_mouse_report_button const *)data; + + if (button_report->type == KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_PROFILE_2) + return; + + roccat_report.type = button_report->type; + roccat_report.profile = kovaplus->actual_profile + 1; + + if (roccat_report.type == KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_MACRO || + roccat_report.type == KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_SHORTCUT || + roccat_report.type == KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_QUICKLAUNCH || + roccat_report.type == KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_TIMER) + roccat_report.button = button_report->data1; + else + roccat_report.button = 0; + + if (roccat_report.type == KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_CPI) + roccat_report.data1 = kovaplus_convert_event_cpi(button_report->data1); + else + roccat_report.data1 = button_report->data1; + + roccat_report.data2 = button_report->data2; + + roccat_report_event(kovaplus->chrdev_minor, + (uint8_t const *)&roccat_report); +} + +static int kovaplus_raw_event(struct hid_device *hdev, + struct hid_report *report, u8 *data, int size) +{ + struct usb_interface *intf = to_usb_interface(hdev->dev.parent); + struct kovaplus_device *kovaplus = hid_get_drvdata(hdev); + + if (intf->cur_altsetting->desc.bInterfaceProtocol + != USB_INTERFACE_PROTOCOL_MOUSE) + return 0; + + kovaplus_keep_values_up_to_date(kovaplus, data); + + if (kovaplus->roccat_claimed) + kovaplus_report_to_chrdev(kovaplus, data); + + return 0; +} + +static const struct hid_device_id kovaplus_devices[] = { + { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KOVAPLUS) }, + { } +}; + +MODULE_DEVICE_TABLE(hid, kovaplus_devices); + +static struct hid_driver kovaplus_driver = { + .name = "kovaplus", + .id_table = kovaplus_devices, + .probe = kovaplus_probe, + .remove = kovaplus_remove, + .raw_event = kovaplus_raw_event +}; + +static int __init kovaplus_init(void) +{ + int retval; + + kovaplus_class = class_create(THIS_MODULE, "kovaplus"); + if (IS_ERR(kovaplus_class)) + return PTR_ERR(kovaplus_class); + kovaplus_class->dev_attrs = kovaplus_attributes; + kovaplus_class->dev_bin_attrs = kovaplus_bin_attributes; + + retval = hid_register_driver(&kovaplus_driver); + if (retval) + class_destroy(kovaplus_class); + return retval; +} + +static void __exit kovaplus_exit(void) +{ + hid_unregister_driver(&kovaplus_driver); + class_destroy(kovaplus_class); +} + +module_init(kovaplus_init); +module_exit(kovaplus_exit); + +MODULE_AUTHOR("Stefan Achatz"); +MODULE_DESCRIPTION("USB Roccat Kova[+] driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/hid/hid-roccat-kovaplus.h b/drivers/hid/hid-roccat-kovaplus.h new file mode 100644 index 000000000000..ce40607d21c7 --- /dev/null +++ b/drivers/hid/hid-roccat-kovaplus.h @@ -0,0 +1,157 @@ +#ifndef __HID_ROCCAT_KOVAPLUS_H +#define __HID_ROCCAT_KOVAPLUS_H + +/* + * Copyright (c) 2010 Stefan Achatz <erazor_de@users.sourceforge.net> + */ + +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free 
* Software Foundation; either version 2 of the License, or (at your option) + * any later version. + */ + +#include <linux/types.h> + +struct kovaplus_control { + uint8_t command; /* KOVAPLUS_COMMAND_CONTROL */ + uint8_t value; + uint8_t request; +} __packed; + +enum kovaplus_control_requests { + /* read after write; value = 1 */ + KOVAPLUS_CONTROL_REQUEST_STATUS = 0x0, + /* write; value = profile number range 0-4 */ + KOVAPLUS_CONTROL_REQUEST_PROFILE_SETTINGS = 0x10, + /* write; value = profile number range 0-4 */ + KOVAPLUS_CONTROL_REQUEST_PROFILE_BUTTONS = 0x20, +}; + +enum kovaplus_control_values { + KOVAPLUS_CONTROL_REQUEST_STATUS_OVERLOAD = 0, /* supposed */ + KOVAPLUS_CONTROL_REQUEST_STATUS_OK = 1, + KOVAPLUS_CONTROL_REQUEST_STATUS_WAIT = 3, /* supposed */ +}; + +struct kovaplus_actual_profile { + uint8_t command; /* KOVAPLUS_COMMAND_ACTUAL_PROFILE */ + uint8_t size; /* always 3 */ + uint8_t actual_profile; /* Range 0-4! */ +} __packed; + +struct kovaplus_profile_settings { + uint8_t command; /* KOVAPLUS_COMMAND_PROFILE_SETTINGS */ + uint8_t size; /* 16 */ + uint8_t profile_index; /* range 0-4 */ + uint8_t unknown1; + uint8_t sensitivity_x; /* range 1-10 */ + uint8_t sensitivity_y; /* range 1-10 */ + uint8_t cpi_levels_enabled; + uint8_t cpi_startup_level; /* range 1-4 */ + uint8_t data[8]; +} __packed; + +struct kovaplus_profile_buttons { + uint8_t command; /* KOVAPLUS_COMMAND_PROFILE_BUTTONS */ + uint8_t size; /* 23 */ + uint8_t profile_index; /* range 0-4 */ + uint8_t data[20]; +} __packed; + +struct kovaplus_info { + uint8_t command; /* KOVAPLUS_COMMAND_INFO */ + uint8_t size; /* 6 */ + uint8_t firmware_version; + uint8_t unknown[3]; +} __packed; + +/* writes 1 on plugin */ +struct kovaplus_a { + uint8_t command; /* KOVAPLUS_COMMAND_A */ + uint8_t size; /* 3 */ + uint8_t unknown; +} __packed; + +enum kovaplus_commands { + KOVAPLUS_COMMAND_CONTROL = 0x4, + KOVAPLUS_COMMAND_ACTUAL_PROFILE = 0x5, + KOVAPLUS_COMMAND_PROFILE_SETTINGS = 0x6, + KOVAPLUS_COMMAND_PROFILE_BUTTONS = 0x7, + KOVAPLUS_COMMAND_INFO = 0x9, + KOVAPLUS_COMMAND_A = 0xa, +}; + +enum kovaplus_usb_commands { + KOVAPLUS_USB_COMMAND_CONTROL = 0x304, + KOVAPLUS_USB_COMMAND_ACTUAL_PROFILE = 0x305, + KOVAPLUS_USB_COMMAND_PROFILE_SETTINGS = 0x306, + KOVAPLUS_USB_COMMAND_PROFILE_BUTTONS = 0x307, + KOVAPLUS_USB_COMMAND_INFO = 0x309, + KOVAPLUS_USB_COMMAND_A = 0x30a, +}; + +enum kovaplus_mouse_report_numbers { + KOVAPLUS_MOUSE_REPORT_NUMBER_MOUSE = 1, + KOVAPLUS_MOUSE_REPORT_NUMBER_AUDIO = 2, + KOVAPLUS_MOUSE_REPORT_NUMBER_BUTTON = 3, + KOVAPLUS_MOUSE_REPORT_NUMBER_KBD = 4, +}; + +struct kovaplus_mouse_report_button { + uint8_t report_number; /* KOVAPLUS_MOUSE_REPORT_NUMBER_BUTTON */ + uint8_t unknown1; + uint8_t type; + uint8_t data1; + uint8_t data2; +} __packed; + +enum kovaplus_mouse_report_button_types { + /* data1 = profile_number range 1-5; no release event */ + KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_PROFILE_1 = 0x20, + /* data1 = profile_number range 1-5; no release event */ + KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_PROFILE_2 = 0x30, + /* data1 = button_number range 1-18; data2 = action */ + KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_MACRO = 0x40, + /* data1 = button_number range 1-18; data2 = action */ + KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_SHORTCUT = 0x50, + /* data1 = button_number range 1-18; data2 = action */ + KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_QUICKLAUNCH = 0x60, + /* data1 = button_number range 1-18; data2 = action */ + KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_TIMER = 0x80, + /* data1 = 1 = 400, 2 = 800, 4 = 1600, 7 = 3200; no release event 
*/ + KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_CPI = 0xb0, + /* data1 + data2 = sense range 1-10; no release event */ + KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_SENSITIVITY = 0xc0, + /* data1 = type as in profile_buttons; data2 = action */ + KOVAPLUS_MOUSE_REPORT_BUTTON_TYPE_MULTIMEDIA = 0xf0, +}; + +enum kovaplus_mouse_report_button_actions { + KOVAPLUS_MOUSE_REPORT_BUTTON_ACTION_PRESS = 0, + KOVAPLUS_MOUSE_REPORT_BUTTON_ACTION_RELEASE = 1, +}; + +struct kovaplus_roccat_report { + uint8_t type; + uint8_t profile; + uint8_t button; + uint8_t data1; + uint8_t data2; +} __packed; + +struct kovaplus_device { + int actual_profile; + int actual_cpi; + int actual_x_sensitivity; + int actual_y_sensitivity; + int roccat_claimed; + int chrdev_minor; + struct mutex kovaplus_lock; + struct kovaplus_info info; + struct kovaplus_profile_settings profile_settings[5]; + struct kovaplus_profile_buttons profile_buttons[5]; +}; + +#endif diff --git a/drivers/hid/hid-roccat-pyra.c b/drivers/hid/hid-roccat-pyra.c index 02c58e015bee..160f481344f6 100644 --- a/drivers/hid/hid-roccat-pyra.c +++ b/drivers/hid/hid-roccat-pyra.c @@ -20,11 +20,11 @@ #include <linux/device.h> #include <linux/input.h> #include <linux/hid.h> -#include <linux/usb.h> #include <linux/module.h> #include <linux/slab.h> +#include <linux/hid-roccat.h> #include "hid-ids.h" -#include "hid-roccat.h" +#include "hid-roccat-common.h" #include "hid-roccat-pyra.h" static uint profile_numbers[5] = {0, 1, 2, 3, 4}; @@ -42,7 +42,6 @@ static void profile_activated(struct pyra_device *pyra, static int pyra_send_control(struct usb_device *usb_dev, int value, enum pyra_control_requests request) { - int len; struct pyra_control control; if ((request == PYRA_CONTROL_REQUEST_PROFILE_SETTINGS || @@ -54,47 +53,31 @@ static int pyra_send_control(struct usb_device *usb_dev, int value, control.value = value; control.request = request; - len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0), - USB_REQ_SET_CONFIGURATION, - USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT, - PYRA_USB_COMMAND_CONTROL, 0, (char *)&control, - sizeof(struct pyra_control), - USB_CTRL_SET_TIMEOUT); - - if (len != sizeof(struct pyra_control)) - return len; - - return 0; + return roccat_common_send(usb_dev, PYRA_USB_COMMAND_CONTROL, + &control, sizeof(struct pyra_control)); } static int pyra_receive_control_status(struct usb_device *usb_dev) { - int len; + int retval; struct pyra_control control; do { msleep(10); - - len = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0), - USB_REQ_CLEAR_FEATURE, - USB_TYPE_CLASS | USB_RECIP_INTERFACE | - USB_DIR_IN, - PYRA_USB_COMMAND_CONTROL, 0, (char *)&control, - sizeof(struct pyra_control), - USB_CTRL_SET_TIMEOUT); + retval = roccat_common_receive(usb_dev, PYRA_USB_COMMAND_CONTROL, + &control, sizeof(struct pyra_control)); /* requested too early, try again */ - } while (len == -EPROTO); + } while (retval == -EPROTO); - if (len == sizeof(struct pyra_control) && - control.command == PYRA_COMMAND_CONTROL && + if (!retval && control.command == PYRA_COMMAND_CONTROL && control.request == PYRA_CONTROL_REQUEST_STATUS && control.value == 1) - return 0; + return 0; else { hid_err(usb_dev, "receive control status: unknown response 0x%x 0x%x\n", control.request, control.value); - return -EINVAL; + return retval ? 
retval : -EINVAL; } } @@ -102,125 +85,72 @@ static int pyra_get_profile_settings(struct usb_device *usb_dev, struct pyra_profile_settings *buf, int number) { int retval; - retval = pyra_send_control(usb_dev, number, PYRA_CONTROL_REQUEST_PROFILE_SETTINGS); - if (retval) return retval; - - retval = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0), - USB_REQ_CLEAR_FEATURE, - USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN, - PYRA_USB_COMMAND_PROFILE_SETTINGS, 0, (char *)buf, - sizeof(struct pyra_profile_settings), - USB_CTRL_SET_TIMEOUT); - - if (retval != sizeof(struct pyra_profile_settings)) - return retval; - - return 0; + return roccat_common_receive(usb_dev, PYRA_USB_COMMAND_PROFILE_SETTINGS, + buf, sizeof(struct pyra_profile_settings)); } static int pyra_get_profile_buttons(struct usb_device *usb_dev, struct pyra_profile_buttons *buf, int number) { int retval; - retval = pyra_send_control(usb_dev, number, PYRA_CONTROL_REQUEST_PROFILE_BUTTONS); - if (retval) return retval; - - retval = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0), - USB_REQ_CLEAR_FEATURE, - USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN, - PYRA_USB_COMMAND_PROFILE_BUTTONS, 0, (char *)buf, - sizeof(struct pyra_profile_buttons), - USB_CTRL_SET_TIMEOUT); - - if (retval != sizeof(struct pyra_profile_buttons)) - return retval; - - return 0; + return roccat_common_receive(usb_dev, PYRA_USB_COMMAND_PROFILE_BUTTONS, + buf, sizeof(struct pyra_profile_buttons)); } static int pyra_get_settings(struct usb_device *usb_dev, struct pyra_settings *buf) { - int len; - len = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0), - USB_REQ_CLEAR_FEATURE, - USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN, - PYRA_USB_COMMAND_SETTINGS, 0, buf, - sizeof(struct pyra_settings), USB_CTRL_SET_TIMEOUT); - if (len != sizeof(struct pyra_settings)) - return -EIO; - return 0; + return roccat_common_receive(usb_dev, PYRA_USB_COMMAND_SETTINGS, + buf, sizeof(struct pyra_settings)); } static int pyra_get_info(struct usb_device *usb_dev, struct pyra_info *buf) { - int len; - len = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0), - USB_REQ_CLEAR_FEATURE, - USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN, - PYRA_USB_COMMAND_INFO, 0, buf, - sizeof(struct pyra_info), USB_CTRL_SET_TIMEOUT); - if (len != sizeof(struct pyra_info)) - return -EIO; - return 0; + return roccat_common_receive(usb_dev, PYRA_USB_COMMAND_INFO, + buf, sizeof(struct pyra_info)); +} + +static int pyra_send(struct usb_device *usb_dev, uint command, + void const *buf, uint size) +{ + int retval; + retval = roccat_common_send(usb_dev, command, buf, size); + if (retval) + return retval; + return pyra_receive_control_status(usb_dev); } static int pyra_set_profile_settings(struct usb_device *usb_dev, struct pyra_profile_settings const *settings) { - int len; - len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0), - USB_REQ_SET_CONFIGURATION, - USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT, - PYRA_USB_COMMAND_PROFILE_SETTINGS, 0, (char *)settings, - sizeof(struct pyra_profile_settings), - USB_CTRL_SET_TIMEOUT); - if (len != sizeof(struct pyra_profile_settings)) - return -EIO; - if (pyra_receive_control_status(usb_dev)) - return -EIO; - return 0; + return pyra_send(usb_dev, PYRA_USB_COMMAND_PROFILE_SETTINGS, settings, + sizeof(struct pyra_profile_settings)); } static int pyra_set_profile_buttons(struct usb_device *usb_dev, struct pyra_profile_buttons const *buttons) { - int len; - len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0), - 
USB_REQ_SET_CONFIGURATION, - USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT, - PYRA_USB_COMMAND_PROFILE_BUTTONS, 0, (char *)buttons, - sizeof(struct pyra_profile_buttons), - USB_CTRL_SET_TIMEOUT); - if (len != sizeof(struct pyra_profile_buttons)) - return -EIO; - if (pyra_receive_control_status(usb_dev)) - return -EIO; - return 0; + return pyra_send(usb_dev, PYRA_USB_COMMAND_PROFILE_BUTTONS, buttons, + sizeof(struct pyra_profile_buttons)); } static int pyra_set_settings(struct usb_device *usb_dev, struct pyra_settings const *settings) { - int len; - len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0), - USB_REQ_SET_CONFIGURATION, - USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT, - PYRA_USB_COMMAND_SETTINGS, 0, (char *)settings, - sizeof(struct pyra_settings), USB_CTRL_SET_TIMEOUT); - if (len != sizeof(struct pyra_settings)) - return -EIO; - if (pyra_receive_control_status(usb_dev)) - return -EIO; - return 0; + int retval; + retval = roccat_common_send(usb_dev, PYRA_USB_COMMAND_SETTINGS, settings, + sizeof(struct pyra_settings)); + if (retval) + return retval; + return pyra_receive_control_status(usb_dev); } static ssize_t pyra_sysfs_read_profilex_settings(struct file *fp, @@ -521,21 +451,16 @@ static struct bin_attribute pyra_bin_attributes[] = { static int pyra_init_pyra_device_struct(struct usb_device *usb_dev, struct pyra_device *pyra) { - struct pyra_info *info; + struct pyra_info info; int retval, i; mutex_init(&pyra->pyra_lock); - info = kmalloc(sizeof(struct pyra_info), GFP_KERNEL); - if (!info) - return -ENOMEM; - retval = pyra_get_info(usb_dev, info); - if (retval) { - kfree(info); + retval = pyra_get_info(usb_dev, &info); + if (retval) return retval; - } - pyra->firmware_version = info->firmware_version; - kfree(info); + + pyra->firmware_version = info.firmware_version; retval = pyra_get_settings(usb_dev, &pyra->settings); if (retval) @@ -581,7 +506,8 @@ static int pyra_init_specials(struct hid_device *hdev) goto exit_free; } - retval = roccat_connect(pyra_class, hdev); + retval = roccat_connect(pyra_class, hdev, + sizeof(struct pyra_roccat_report)); if (retval < 0) { hid_err(hdev, "couldn't init char dev\n"); } else { @@ -685,8 +611,7 @@ static void pyra_report_to_chrdev(struct pyra_device const *pyra, roccat_report.value = button_event->data1; roccat_report.key = 0; roccat_report_event(pyra->chrdev_minor, - (uint8_t const *)&roccat_report, - sizeof(struct pyra_roccat_report)); + (uint8_t const *)&roccat_report); break; case PYRA_MOUSE_EVENT_BUTTON_TYPE_MACRO: case PYRA_MOUSE_EVENT_BUTTON_TYPE_SHORTCUT: @@ -700,8 +625,7 @@ static void pyra_report_to_chrdev(struct pyra_device const *pyra, */ roccat_report.value = pyra->actual_profile + 1; roccat_report_event(pyra->chrdev_minor, - (uint8_t const *)&roccat_report, - sizeof(struct pyra_roccat_report)); + (uint8_t const *)&roccat_report); } break; } @@ -761,8 +685,8 @@ static int __init pyra_init(void) static void __exit pyra_exit(void) { - class_destroy(pyra_class); hid_unregister_driver(&pyra_driver); + class_destroy(pyra_class); } module_init(pyra_init); diff --git a/drivers/hid/hid-roccat.c b/drivers/hid/hid-roccat.c index a14c579ea781..5666e7587b18 100644 --- a/drivers/hid/hid-roccat.c +++ b/drivers/hid/hid-roccat.c @@ -26,8 +26,7 @@ #include <linux/cdev.h> #include <linux/poll.h> #include <linux/sched.h> - -#include "hid-roccat.h" +#include <linux/hid-roccat.h> #define ROCCAT_FIRST_MINOR 0 #define ROCCAT_MAX_DEVICES 8 @@ -37,11 +36,11 @@ struct roccat_report { uint8_t *value; - int len; }; struct roccat_device { 
unsigned int minor; + int report_size; int open; int exist; wait_queue_head_t wait; @@ -123,7 +122,7 @@ static ssize_t roccat_read(struct file *file, char __user *buffer, * If report is larger than requested amount of data, rest of report * is lost! */ - len = report->len > count ? count : report->len; + len = device->report_size > count ? count : device->report_size; if (copy_to_user(buffer, report->value, len)) { retval = -EFAULT; @@ -248,26 +247,25 @@ static int roccat_release(struct inode *inode, struct file *file) * * This is called from interrupt handler. */ -int roccat_report_event(int minor, u8 const *data, int len) +int roccat_report_event(int minor, u8 const *data) { struct roccat_device *device; struct roccat_reader *reader; struct roccat_report *report; uint8_t *new_value; - new_value = kmemdup(data, len, GFP_ATOMIC); + device = devices[minor]; + + new_value = kmemdup(data, device->report_size, GFP_ATOMIC); if (!new_value) return -ENOMEM; - device = devices[minor]; - report = &device->cbuf[device->cbuf_end]; /* passing NULL is safe */ kfree(report->value); report->value = new_value; - report->len = len; device->cbuf_end = (device->cbuf_end + 1) % ROCCAT_CBUF_SIZE; list_for_each_entry(reader, &device->readers, node) { @@ -295,7 +293,7 @@ EXPORT_SYMBOL_GPL(roccat_report_event); * Return value is minor device number in Range [0, ROCCAT_MAX_DEVICES] on * success, a negative error code on failure. */ -int roccat_connect(struct class *klass, struct hid_device *hid) +int roccat_connect(struct class *klass, struct hid_device *hid, int report_size) { unsigned int minor; struct roccat_device *device; @@ -343,6 +341,7 @@ int roccat_connect(struct class *klass, struct hid_device *hid) device->hid = hid; device->exist = 1; device->cbuf_end = 0; + device->report_size = report_size; return minor; } @@ -357,13 +356,16 @@ void roccat_disconnect(int minor) mutex_lock(&devices_lock); device = devices[minor]; - devices[minor] = NULL; mutex_unlock(&devices_lock); device->exist = 0; /* TODO exist maybe not needed */ device_destroy(device->dev->class, MKDEV(roccat_major, minor)); + mutex_lock(&devices_lock); + devices[minor] = NULL; + mutex_unlock(&devices_lock); + if (device->open) { hid_hw_close(device->hid); wake_up_interruptible(&device->wait); @@ -373,6 +375,34 @@ void roccat_disconnect(int minor) } EXPORT_SYMBOL_GPL(roccat_disconnect); +static long roccat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct inode *inode = file->f_path.dentry->d_inode; + struct roccat_device *device; + unsigned int minor = iminor(inode); + long retval = 0; + + mutex_lock(&devices_lock); + + device = devices[minor]; + if (!device) { + retval = -ENODEV; + goto out; + } + + switch (cmd) { + case ROCCATIOCGREPSIZE: + if (put_user(device->report_size, (int __user *)arg)) + retval = -EFAULT; + break; + default: + retval = -ENOTTY; + } +out: + mutex_unlock(&devices_lock); + return retval; +} + static const struct file_operations roccat_ops = { .owner = THIS_MODULE, .read = roccat_read, @@ -380,6 +410,7 @@ static const struct file_operations roccat_ops = { .open = roccat_open, .release = roccat_release, .llseek = noop_llseek, + .unlocked_ioctl = roccat_ioctl, }; static int __init roccat_init(void) diff --git a/drivers/hid/hid-roccat.h b/drivers/hid/hid-roccat.h deleted file mode 100644 index 5784281d613f..000000000000 --- a/drivers/hid/hid-roccat.h +++ /dev/null @@ -1,32 +0,0 @@ -#ifndef __HID_ROCCAT_H -#define __HID_ROCCAT_H - -/* - * Copyright (c) 2010 Stefan Achatz 
<erazor_de@users.sourceforge.net> - */ - -/* - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. - */ - -#include <linux/hid.h> -#include <linux/types.h> - -#if defined(CONFIG_HID_ROCCAT) || defined(CONFIG_HID_ROCCAT_MODULE) -int roccat_connect(struct class *klass, struct hid_device *hid); -void roccat_disconnect(int minor); -int roccat_report_event(int minor, u8 const *data, int len); -#else -static inline int roccat_connect(struct class *klass, - struct hid_device *hid) { return -1; } -static inline void roccat_disconnect(int minor) {} -static inline int roccat_report_event(int minor, u8 const *data, int len) -{ - return 0; -} -#endif - -#endif diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c index 68d7b36e31e4..93819a08121a 100644 --- a/drivers/hid/hid-sony.c +++ b/drivers/hid/hid-sony.c @@ -46,6 +46,16 @@ static __u8 *sony_report_fixup(struct hid_device *hdev, __u8 *rdesc, return rdesc; } +/* + * The Sony Sixaxis does not handle HID Output Reports on the Interrupt EP + * like it should according to usbhid/hid-core.c::usbhid_output_raw_report(), + * so we need to override that, forcing HID Output Reports on the Control EP. + * + * There is another issue with HID Output Reports via USB: the Sixaxis + * does not want the report_id as part of the data packet, so we have to + * discard buf[0] when sending the actual control message, even for numbered + * reports, humpf! + */ static int sixaxis_usb_output_raw_report(struct hid_device *hid, __u8 *buf, size_t count, unsigned char report_type) { @@ -55,6 +65,12 @@ static int sixaxis_usb_output_raw_report(struct hid_device *hid, __u8 *buf, int report_id = buf[0]; int ret; + if (report_type == HID_OUTPUT_REPORT) { + /* Don't send the Report ID */ + buf++; + count--; + } + ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), HID_REQ_SET_REPORT, USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, @@ -62,6 +78,10 @@ static int sixaxis_usb_output_raw_report(struct hid_device *hid, __u8 *buf, interface->desc.bInterfaceNumber, buf, count, USB_CTRL_SET_TIMEOUT); + /* Count also the Report ID, in case of an Output report. 
*/ + if (ret > 0 && report_type == HID_OUTPUT_REPORT) + ret++; + return ret; } diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c index 468e87b53ed2..54409cba018c 100644 --- a/drivers/hid/hidraw.c +++ b/drivers/hid/hidraw.c @@ -91,7 +91,7 @@ static ssize_t hidraw_read(struct file *file, char __user *buffer, size_t count, ret = -EFAULT; goto out; } - ret += len; + ret = len; kfree(list->buffer[list->tail].value); list->tail = (list->tail + 1) & (HIDRAW_BUFFER_SIZE - 1); @@ -102,15 +102,14 @@ out: } /* the first byte is expected to be a report number */ -static ssize_t hidraw_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) +/* This function is to be called with the minors_lock mutex held */ +static ssize_t hidraw_send_report(struct file *file, const char __user *buffer, size_t count, unsigned char report_type) { unsigned int minor = iminor(file->f_path.dentry->d_inode); struct hid_device *dev; __u8 *buf; int ret = 0; - mutex_lock(&minors_lock); - if (!hidraw_table[minor]) { ret = -ENODEV; goto out; @@ -148,14 +147,92 @@ static ssize_t hidraw_write(struct file *file, const char __user *buffer, size_t goto out_free; } - ret = dev->hid_output_raw_report(dev, buf, count, HID_OUTPUT_REPORT); + ret = dev->hid_output_raw_report(dev, buf, count, report_type); out_free: kfree(buf); out: + return ret; +} + +/* the first byte is expected to be a report number */ +static ssize_t hidraw_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) +{ + ssize_t ret; + mutex_lock(&minors_lock); + ret = hidraw_send_report(file, buffer, count, HID_OUTPUT_REPORT); mutex_unlock(&minors_lock); return ret; } + +/* This function performs a Get_Report transfer over the control endpoint + per section 7.2.1 of the HID specification, version 1.1. The first byte + of buffer is the report number to request, or 0x0 if the device does not + use numbered reports. The report_type parameter can be HID_FEATURE_REPORT + or HID_INPUT_REPORT. This function is to be called with the minors_lock + mutex held. */ +static ssize_t hidraw_get_report(struct file *file, char __user *buffer, size_t count, unsigned char report_type) +{ + unsigned int minor = iminor(file->f_path.dentry->d_inode); + struct hid_device *dev; + __u8 *buf; + int ret = 0, len; + unsigned char report_number; + + dev = hidraw_table[minor]->hid; + + if (!dev->hid_get_raw_report) { + ret = -ENODEV; + goto out; + } + + if (count > HID_MAX_BUFFER_SIZE) { + printk(KERN_WARNING "hidraw: pid %d passed too large report\n", + task_pid_nr(current)); + ret = -EINVAL; + goto out; + } + + if (count < 2) { + printk(KERN_WARNING "hidraw: pid %d passed too short report\n", + task_pid_nr(current)); + ret = -EINVAL; + goto out; + } + + buf = kmalloc(count * sizeof(__u8), GFP_KERNEL); + if (!buf) { + ret = -ENOMEM; + goto out; + } + + /* Read the first byte from the user. This is the report number, + which is passed to dev->hid_get_raw_report(). */ + if (copy_from_user(&report_number, buffer, 1)) { + ret = -EFAULT; + goto out_free; + } + + ret = dev->hid_get_raw_report(dev, report_number, buf, count, report_type); + + if (ret < 0) + goto out_free; + + len = (ret < count) ? 
ret : count; + + if (copy_to_user(buffer, buf, len)) { + ret = -EFAULT; + goto out_free; + } + + ret = len; + +out_free: + kfree(buf); +out: + return ret; +} + static unsigned int hidraw_poll(struct file *file, poll_table *wait) { struct hidraw_list *list = file->private_data; @@ -295,7 +372,24 @@ static long hidraw_ioctl(struct file *file, unsigned int cmd, default: { struct hid_device *hid = dev->hid; - if (_IOC_TYPE(cmd) != 'H' || _IOC_DIR(cmd) != _IOC_READ) { + if (_IOC_TYPE(cmd) != 'H') { + ret = -EINVAL; + break; + } + + if (_IOC_NR(cmd) == _IOC_NR(HIDIOCSFEATURE(0))) { + int len = _IOC_SIZE(cmd); + ret = hidraw_send_report(file, user_arg, len, HID_FEATURE_REPORT); + break; + } + if (_IOC_NR(cmd) == _IOC_NR(HIDIOCGFEATURE(0))) { + int len = _IOC_SIZE(cmd); + ret = hidraw_get_report(file, user_arg, len, HID_FEATURE_REPORT); + break; + } + + /* Begin Read-only ioctls. */ + if (_IOC_DIR(cmd) != _IOC_READ) { ret = -EINVAL; break; } @@ -327,7 +421,7 @@ static long hidraw_ioctl(struct file *file, unsigned int cmd, -EFAULT : len; break; } - } + } ret = -ENOTTY; } @@ -428,12 +522,12 @@ void hidraw_disconnect(struct hid_device *hid) hidraw->exist = 0; + device_destroy(hidraw_class, MKDEV(hidraw_major, hidraw->minor)); + mutex_lock(&minors_lock); hidraw_table[hidraw->minor] = NULL; mutex_unlock(&minors_lock); - device_destroy(hidraw_class, MKDEV(hidraw_major, hidraw->minor)); - if (hidraw->open) { hid_hw_close(hid); wake_up_interruptible(&hidraw->wait); diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c index b336dd84036f..38c261a40c74 100644 --- a/drivers/hid/usbhid/hid-core.c +++ b/drivers/hid/usbhid/hid-core.c @@ -799,6 +799,40 @@ static int hid_alloc_buffers(struct usb_device *dev, struct hid_device *hid) return 0; } +static int usbhid_get_raw_report(struct hid_device *hid, + unsigned char report_number, __u8 *buf, size_t count, + unsigned char report_type) +{ + struct usbhid_device *usbhid = hid->driver_data; + struct usb_device *dev = hid_to_usb_dev(hid); + struct usb_interface *intf = usbhid->intf; + struct usb_host_interface *interface = intf->cur_altsetting; + int skipped_report_id = 0; + int ret; + + /* Byte 0 is the report number. Report data starts at byte 1.*/ + buf[0] = report_number; + if (report_number == 0x0) { + /* Offset the return buffer by 1, so that the report ID + will remain in byte 0. */ + buf++; + count--; + skipped_report_id = 1; + } + ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), + HID_REQ_GET_REPORT, + USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE, + ((report_type + 1) << 8) | report_number, + interface->desc.bInterfaceNumber, buf, count, + USB_CTRL_SET_TIMEOUT); + + /* count also the report id */ + if (ret > 0 && skipped_report_id) + ret++; + + return ret; +} + static int usbhid_output_raw_report(struct hid_device *hid, __u8 *buf, size_t count, unsigned char report_type) { @@ -1139,6 +1173,7 @@ static int usbhid_probe(struct usb_interface *intf, const struct usb_device_id * usb_set_intfdata(intf, hid); hid->ll_driver = &usb_hid_driver; + hid->hid_get_raw_report = usbhid_get_raw_report; hid->hid_output_raw_report = usbhid_output_raw_report; hid->ff_init = hid_pidff_init; #ifdef CONFIG_USB_HIDDEV diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index 297bc9a7d6e6..1bfb4439e4e1 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -467,6 +467,17 @@ config SENSORS_JC42 This driver can also be built as a module. If so, the module will be called jc42. 
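
Editor's aside, not part of the patch set: the hidraw hunks above route the new HIDIOCSFEATURE()/HIDIOCGFEATURE() ioctls through hidraw_send_report() and hidraw_get_report(). Below is a minimal user-space sketch of how a program might drive them; the device node, report number, and payload bytes are illustrative assumptions, and error handling is trimmed.

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/hidraw.h>

int main(void)
{
	unsigned char buf[17];
	int fd, res;

	fd = open("/dev/hidraw0", O_RDWR);	/* hypothetical device node */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Set a feature report: byte 0 is the report number
	   (0x0 if the device does not use numbered reports),
	   payload follows. Values here are placeholders. */
	memset(buf, 0, sizeof(buf));
	buf[0] = 0x9;
	buf[1] = 0xff;
	res = ioctl(fd, HIDIOCSFEATURE(sizeof(buf)), buf);
	if (res < 0)
		perror("HIDIOCSFEATURE");

	/* Get a feature report: pre-load the report number in
	   byte 0; on success the ioctl returns the length read. */
	buf[0] = 0x9;
	res = ioctl(fd, HIDIOCGFEATURE(sizeof(buf)), buf);
	if (res < 0)
		perror("HIDIOCGFEATURE");
	else
		printf("got %d bytes, report number 0x%02x\n", res, buf[0]);

	close(fd);
	return 0;
}

Note that hidraw_get_report() above rejects count < 2 and takes the report number from byte 0 of the user buffer, so the buffer handed to HIDIOCGFEATURE() must be at least two bytes with the report number pre-loaded.
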
+config SENSORS_LINEAGE + tristate "Lineage Compact Power Line Power Entry Module" + depends on I2C && EXPERIMENTAL + help + If you say yes here you get support for the Lineage Compact Power Line + series of DC/DC and AC/DC converters such as CP1800, CP2000AC, + CP2000DC, CP2725, and others. + + This driver can also be built as a module. If so, the module + will be called lineage-pem. + config SENSORS_LM63 tristate "National Semiconductor LM63 and LM64" depends on I2C @@ -625,6 +636,17 @@ config SENSORS_LM93 This driver can also be built as a module. If so, the module will be called lm93. +config SENSORS_LTC4151 + tristate "Linear Technology LTC4151" + depends on I2C + default n + help + If you say yes here you get support for Linear Technology LTC4151 + High Voltage I2C Current and Voltage Monitor interface. + + This driver can also be built as a module. If so, the module will + be called ltc4151. + config SENSORS_LTC4215 tristate "Linear Technology LTC4215" depends on I2C && EXPERIMENTAL @@ -685,6 +707,16 @@ config SENSORS_MAX1619 This driver can also be built as a module. If so, the module will be called max1619. +config SENSORS_MAX6639 + tristate "Maxim MAX6639 sensor chip" + depends on I2C && EXPERIMENTAL + help + If you say yes here you get support for the MAX6639 + sensor chips. + + This driver can also be built as a module. If so, the module + will be called max6639. + config SENSORS_MAX6650 tristate "Maxim MAX6650 sensor chip" depends on I2C && EXPERIMENTAL @@ -735,6 +767,61 @@ config SENSORS_PCF8591 These devices are hard to detect and rarely found on mainstream hardware. If unsure, say N. +config PMBUS + tristate "PMBus support" + depends on I2C && EXPERIMENTAL + default n + help + Say yes here if you want to enable PMBus support. + + This driver can also be built as a module. If so, the module will + be called pmbus_core. + +if PMBUS + +config SENSORS_PMBUS + tristate "Generic PMBus devices" + default n + help + If you say yes here you get hardware monitoring support for generic + PMBus devices, including but not limited to BMR450, BMR451, BMR453, + BMR454, and LTC2978. + + This driver can also be built as a module. If so, the module will + be called pmbus. + +config SENSORS_MAX16064 + tristate "Maxim MAX16064" + default n + help + If you say yes here you get hardware monitoring support for Maxim + MAX16064. + + This driver can also be built as a module. If so, the module will + be called max16064. + +config SENSORS_MAX34440 + tristate "Maxim MAX34440/MAX34441" + default n + help + If you say yes here you get hardware monitoring support for Maxim + MAX34440 and MAX34441. + + This driver can also be built as a module. If so, the module will + be called max34440. + +config SENSORS_MAX8688 + tristate "Maxim MAX8688" + default n + help + If you say yes here you get hardware monitoring support for Maxim + MAX8688. + + This driver can also be built as a module. If so, the module will + be called max8688. + +endif # PMBUS + config SENSORS_SHT15 tristate "Sensiron humidity and temperature sensors. SHT15 and compat." depends on GENERIC_GPIO @@ -1083,7 +1170,7 @@ config SENSORS_W83627HF will be called w83627hf. config SENSORS_W83627EHF - tristate "Winbond W83627EHF/EHG/DHG, W83667HG" + tristate "Winbond W83627EHF/EHG/DHG, W83667HG, NCT6775F, NCT6776F" select HWMON_VID help If you say yes here you get support for the hardware @@ -1094,7 +1181,8 @@ config SENSORS_W83627EHF chip suited for specific Intel processors that use PECI such as the Core 2 Duo. 
- This driver also supports the W83667HG chip. + This driver also supports Nuvoton W83667HG, W83667HG-B, NCT6775F + (also known as W83667HG-I), and NCT6776F. This driver can also be built as a module. If so, the module will be called w83627ehf. diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile index dde02d99c238..bd0410e4b44f 100644 --- a/drivers/hwmon/Makefile +++ b/drivers/hwmon/Makefile @@ -62,6 +62,7 @@ obj-$(CONFIG_SENSORS_JC42) += jc42.o obj-$(CONFIG_SENSORS_JZ4740) += jz4740-hwmon.o obj-$(CONFIG_SENSORS_K8TEMP) += k8temp.o obj-$(CONFIG_SENSORS_K10TEMP) += k10temp.o +obj-$(CONFIG_SENSORS_LINEAGE) += lineage-pem.o obj-$(CONFIG_SENSORS_LIS3LV02D) += lis3lv02d.o hp_accel.o obj-$(CONFIG_SENSORS_LIS3_SPI) += lis3lv02d.o lis3lv02d_spi.o obj-$(CONFIG_SENSORS_LIS3_I2C) += lis3lv02d.o lis3lv02d_i2c.o @@ -79,11 +80,13 @@ obj-$(CONFIG_SENSORS_LM90) += lm90.o obj-$(CONFIG_SENSORS_LM92) += lm92.o obj-$(CONFIG_SENSORS_LM93) += lm93.o obj-$(CONFIG_SENSORS_LM95241) += lm95241.o +obj-$(CONFIG_SENSORS_LTC4151) += ltc4151.o obj-$(CONFIG_SENSORS_LTC4215) += ltc4215.o obj-$(CONFIG_SENSORS_LTC4245) += ltc4245.o obj-$(CONFIG_SENSORS_LTC4261) += ltc4261.o obj-$(CONFIG_SENSORS_MAX1111) += max1111.o obj-$(CONFIG_SENSORS_MAX1619) += max1619.o +obj-$(CONFIG_SENSORS_MAX6639) += max6639.o obj-$(CONFIG_SENSORS_MAX6650) += max6650.o obj-$(CONFIG_SENSORS_MC13783_ADC)+= mc13783-adc.o obj-$(CONFIG_SENSORS_PC87360) += pc87360.o @@ -112,6 +115,13 @@ obj-$(CONFIG_SENSORS_W83L786NG) += w83l786ng.o obj-$(CONFIG_SENSORS_WM831X) += wm831x-hwmon.o obj-$(CONFIG_SENSORS_WM8350) += wm8350-hwmon.o +# PMBus drivers +obj-$(CONFIG_PMBUS) += pmbus_core.o +obj-$(CONFIG_SENSORS_PMBUS) += pmbus.o +obj-$(CONFIG_SENSORS_MAX16064) += max16064.o +obj-$(CONFIG_SENSORS_MAX34440) += max34440.o +obj-$(CONFIG_SENSORS_MAX8688) += max8688.o + ifeq ($(CONFIG_HWMON_DEBUG_CHIP),y) EXTRA_CFLAGS += -DDEBUG endif diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c index 6e06019015a5..a4d430ee7e20 100644 --- a/drivers/hwmon/f71882fg.c +++ b/drivers/hwmon/f71882fg.c @@ -1,6 +1,6 @@ /*************************************************************************** * Copyright (C) 2006 by Hans Edgington <hans@edgington.nl> * - * Copyright (C) 2007-2009 Hans de Goede <hdegoede@redhat.com> * + * Copyright (C) 2007-2011 Hans de Goede <hdegoede@redhat.com> * * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * @@ -47,22 +47,23 @@ #define SIO_REG_ADDR 0x60 /* Logical device address (2 bytes) */ #define SIO_FINTEK_ID 0x1934 /* Manufacturers ID */ +#define SIO_F71808E_ID 0x0901 /* Chipset ID */ #define SIO_F71858_ID 0x0507 /* Chipset ID */ #define SIO_F71862_ID 0x0601 /* Chipset ID */ +#define SIO_F71869_ID 0x0814 /* Chipset ID */ #define SIO_F71882_ID 0x0541 /* Chipset ID */ #define SIO_F71889_ID 0x0723 /* Chipset ID */ +#define SIO_F71889E_ID 0x0909 /* Chipset ID */ #define SIO_F8000_ID 0x0581 /* Chipset ID */ #define REGION_LENGTH 8 #define ADDR_REG_OFFSET 5 #define DATA_REG_OFFSET 6 -#define F71882FG_REG_PECI 0x0A - -#define F71882FG_REG_IN_STATUS 0x12 /* f71882fg only */ -#define F71882FG_REG_IN_BEEP 0x13 /* f71882fg only */ +#define F71882FG_REG_IN_STATUS 0x12 /* f7188x only */ +#define F71882FG_REG_IN_BEEP 0x13 /* f7188x only */ #define F71882FG_REG_IN(nr) (0x20 + (nr)) -#define F71882FG_REG_IN1_HIGH 0x32 /* f71882fg only */ +#define F71882FG_REG_IN1_HIGH 0x32 /* f7188x only */ #define F71882FG_REG_FAN(nr) (0xA0 + (16 * (nr))) #define 
F71882FG_REG_FAN_TARGET(nr) (0xA2 + (16 * (nr))) @@ -86,28 +87,71 @@ #define F71882FG_REG_FAN_HYST(nr) (0x98 + (nr)) +#define F71882FG_REG_FAN_FAULT_T 0x9F +#define F71882FG_FAN_NEG_TEMP_EN 0x20 +#define F71882FG_FAN_PROG_SEL 0x80 + #define F71882FG_REG_POINT_PWM(pwm, point) (0xAA + (point) + (16 * (pwm))) #define F71882FG_REG_POINT_TEMP(pwm, point) (0xA6 + (point) + (16 * (pwm))) #define F71882FG_REG_POINT_MAPPING(nr) (0xAF + 16 * (nr)) #define F71882FG_REG_START 0x01 +#define F71882FG_MAX_INS 9 + #define FAN_MIN_DETECT 366 /* Lowest detectable fanspeed */ static unsigned short force_id; module_param(force_id, ushort, 0); MODULE_PARM_DESC(force_id, "Override the detected device ID"); -enum chips { f71858fg, f71862fg, f71882fg, f71889fg, f8000 }; +enum chips { f71808e, f71858fg, f71862fg, f71869, f71882fg, f71889fg, + f71889ed, f8000 }; static const char *f71882fg_names[] = { + "f71808e", "f71858fg", "f71862fg", + "f71869", /* Both f71869f and f71869e, reg. compatible and same id */ "f71882fg", "f71889fg", + "f71889ed", "f8000", }; +static const char f71882fg_has_in[8][F71882FG_MAX_INS] = { + { 1, 1, 1, 1, 1, 1, 0, 1, 1 }, /* f71808e */ + { 1, 1, 1, 0, 0, 0, 0, 0, 0 }, /* f71858fg */ + { 1, 1, 1, 1, 1, 1, 1, 1, 1 }, /* f71862fg */ + { 1, 1, 1, 1, 1, 1, 1, 1, 1 }, /* f71869 */ + { 1, 1, 1, 1, 1, 1, 1, 1, 1 }, /* f71882fg */ + { 1, 1, 1, 1, 1, 1, 1, 1, 1 }, /* f71889fg */ + { 1, 1, 1, 1, 1, 1, 1, 1, 1 }, /* f71889ed */ + { 1, 1, 1, 0, 0, 0, 0, 0, 0 }, /* f8000 */ +}; + +static const char f71882fg_has_in1_alarm[8] = { + 0, /* f71808e */ + 0, /* f71858fg */ + 0, /* f71862fg */ + 0, /* f71869 */ + 1, /* f71882fg */ + 1, /* f71889fg */ + 1, /* f71889ed */ + 0, /* f8000 */ +}; + +static const char f71882fg_has_beep[8] = { + 0, /* f71808e */ + 0, /* f71858fg */ + 1, /* f71862fg */ + 1, /* f71869 */ + 1, /* f71882fg */ + 1, /* f71889fg */ + 1, /* f71889ed */ + 0, /* f8000 */ +}; + static struct platform_device *f71882fg_pdev; /* Super-I/O Function prototypes */ @@ -129,11 +173,12 @@ struct f71882fg_data { struct mutex update_lock; int temp_start; /* temp numbering start (0 or 1) */ char valid; /* !=0 if following fields are valid */ + char auto_point_temp_signed; unsigned long last_updated; /* In jiffies */ unsigned long last_limits; /* In jiffies */ /* Register Values */ - u8 in[9]; + u8 in[F71882FG_MAX_INS]; u8 in1_max; u8 in_status; u8 in_beep; @@ -142,7 +187,7 @@ struct f71882fg_data { u16 fan_full_speed[4]; u8 fan_status; u8 fan_beep; - /* Note: all models have only 3 temperature channels, but on some + /* Note: all models have max 3 temperature channels, but on some they are addressed as 0-2 and on others as 1-3, so for coding convenience we reserve space for 4 channels */ u16 temp[4]; @@ -262,13 +307,9 @@ static struct platform_driver f71882fg_driver = { static DEVICE_ATTR(name, S_IRUGO, show_name, NULL); -/* Temp and in attr for the f71858fg, the f71858fg is special as it - has its temperature indexes start at 0 (the others start at 1) and - it only has 3 voltage inputs */ -static struct sensor_device_attribute_2 f71858fg_in_temp_attr[] = { - SENSOR_ATTR_2(in0_input, S_IRUGO, show_in, NULL, 0, 0), - SENSOR_ATTR_2(in1_input, S_IRUGO, show_in, NULL, 0, 1), - SENSOR_ATTR_2(in2_input, S_IRUGO, show_in, NULL, 0, 2), +/* Temp attr for the f71858fg, the f71858fg is special as it has its + temperature indexes start at 0 (the others start at 1) */ +static struct sensor_device_attribute_2 f71858fg_temp_attr[] = { SENSOR_ATTR_2(temp1_input, S_IRUGO, show_temp, NULL, 0, 0), SENSOR_ATTR_2(temp1_max, 
S_IRUGO|S_IWUSR, show_temp_max, store_temp_max, 0, 0), @@ -292,7 +333,6 @@ static struct sensor_device_attribute_2 f71858fg_in_temp_attr[] = { SENSOR_ATTR_2(temp2_crit_hyst, S_IRUGO, show_temp_crit_hyst, NULL, 0, 1), SENSOR_ATTR_2(temp2_crit_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 5), - SENSOR_ATTR_2(temp2_type, S_IRUGO, show_temp_type, NULL, 0, 1), SENSOR_ATTR_2(temp2_fault, S_IRUGO, show_temp_fault, NULL, 0, 1), SENSOR_ATTR_2(temp3_input, S_IRUGO, show_temp, NULL, 0, 2), SENSOR_ATTR_2(temp3_max, S_IRUGO|S_IWUSR, show_temp_max, @@ -308,17 +348,8 @@ static struct sensor_device_attribute_2 f71858fg_in_temp_attr[] = { SENSOR_ATTR_2(temp3_fault, S_IRUGO, show_temp_fault, NULL, 0, 2), }; -/* Temp and in attr common to the f71862fg, f71882fg and f71889fg */ -static struct sensor_device_attribute_2 fxxxx_in_temp_attr[] = { - SENSOR_ATTR_2(in0_input, S_IRUGO, show_in, NULL, 0, 0), - SENSOR_ATTR_2(in1_input, S_IRUGO, show_in, NULL, 0, 1), - SENSOR_ATTR_2(in2_input, S_IRUGO, show_in, NULL, 0, 2), - SENSOR_ATTR_2(in3_input, S_IRUGO, show_in, NULL, 0, 3), - SENSOR_ATTR_2(in4_input, S_IRUGO, show_in, NULL, 0, 4), - SENSOR_ATTR_2(in5_input, S_IRUGO, show_in, NULL, 0, 5), - SENSOR_ATTR_2(in6_input, S_IRUGO, show_in, NULL, 0, 6), - SENSOR_ATTR_2(in7_input, S_IRUGO, show_in, NULL, 0, 7), - SENSOR_ATTR_2(in8_input, S_IRUGO, show_in, NULL, 0, 8), +/* Temp attr for the standard models */ +static struct sensor_device_attribute_2 fxxxx_temp_attr[3][9] = { { SENSOR_ATTR_2(temp1_input, S_IRUGO, show_temp, NULL, 0, 1), SENSOR_ATTR_2(temp1_max, S_IRUGO|S_IWUSR, show_temp_max, store_temp_max, 0, 1), @@ -328,17 +359,14 @@ static struct sensor_device_attribute_2 fxxxx_in_temp_attr[] = { the max and crit alarms separately and lm_sensors v2 depends on the presence of temp#_alarm files. The same goes for temp2/3 _alarm. 
*/ SENSOR_ATTR_2(temp1_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 1), - SENSOR_ATTR_2(temp1_max_beep, S_IRUGO|S_IWUSR, show_temp_beep, - store_temp_beep, 0, 1), SENSOR_ATTR_2(temp1_crit, S_IRUGO|S_IWUSR, show_temp_crit, store_temp_crit, 0, 1), SENSOR_ATTR_2(temp1_crit_hyst, S_IRUGO, show_temp_crit_hyst, NULL, 0, 1), SENSOR_ATTR_2(temp1_crit_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 5), - SENSOR_ATTR_2(temp1_crit_beep, S_IRUGO|S_IWUSR, show_temp_beep, - store_temp_beep, 0, 5), SENSOR_ATTR_2(temp1_type, S_IRUGO, show_temp_type, NULL, 0, 1), SENSOR_ATTR_2(temp1_fault, S_IRUGO, show_temp_fault, NULL, 0, 1), +}, { SENSOR_ATTR_2(temp2_input, S_IRUGO, show_temp, NULL, 0, 2), SENSOR_ATTR_2(temp2_max, S_IRUGO|S_IWUSR, show_temp_max, store_temp_max, 0, 2), @@ -346,17 +374,14 @@ static struct sensor_device_attribute_2 fxxxx_in_temp_attr[] = { store_temp_max_hyst, 0, 2), /* Should be temp2_max_alarm, see temp1_alarm note */ SENSOR_ATTR_2(temp2_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 2), - SENSOR_ATTR_2(temp2_max_beep, S_IRUGO|S_IWUSR, show_temp_beep, - store_temp_beep, 0, 2), SENSOR_ATTR_2(temp2_crit, S_IRUGO|S_IWUSR, show_temp_crit, store_temp_crit, 0, 2), SENSOR_ATTR_2(temp2_crit_hyst, S_IRUGO, show_temp_crit_hyst, NULL, 0, 2), SENSOR_ATTR_2(temp2_crit_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 6), - SENSOR_ATTR_2(temp2_crit_beep, S_IRUGO|S_IWUSR, show_temp_beep, - store_temp_beep, 0, 6), SENSOR_ATTR_2(temp2_type, S_IRUGO, show_temp_type, NULL, 0, 2), SENSOR_ATTR_2(temp2_fault, S_IRUGO, show_temp_fault, NULL, 0, 2), +}, { SENSOR_ATTR_2(temp3_input, S_IRUGO, show_temp, NULL, 0, 3), SENSOR_ATTR_2(temp3_max, S_IRUGO|S_IWUSR, show_temp_max, store_temp_max, 0, 3), @@ -364,37 +389,39 @@ static struct sensor_device_attribute_2 fxxxx_in_temp_attr[] = { store_temp_max_hyst, 0, 3), /* Should be temp3_max_alarm, see temp1_alarm note */ SENSOR_ATTR_2(temp3_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 3), - SENSOR_ATTR_2(temp3_max_beep, S_IRUGO|S_IWUSR, show_temp_beep, - store_temp_beep, 0, 3), SENSOR_ATTR_2(temp3_crit, S_IRUGO|S_IWUSR, show_temp_crit, store_temp_crit, 0, 3), SENSOR_ATTR_2(temp3_crit_hyst, S_IRUGO, show_temp_crit_hyst, NULL, 0, 3), SENSOR_ATTR_2(temp3_crit_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 7), - SENSOR_ATTR_2(temp3_crit_beep, S_IRUGO|S_IWUSR, show_temp_beep, - store_temp_beep, 0, 7), SENSOR_ATTR_2(temp3_type, S_IRUGO, show_temp_type, NULL, 0, 3), SENSOR_ATTR_2(temp3_fault, S_IRUGO, show_temp_fault, NULL, 0, 3), -}; +} }; -/* For models with in1 alarm capability */ -static struct sensor_device_attribute_2 fxxxx_in1_alarm_attr[] = { - SENSOR_ATTR_2(in1_max, S_IRUGO|S_IWUSR, show_in_max, store_in_max, - 0, 1), - SENSOR_ATTR_2(in1_beep, S_IRUGO|S_IWUSR, show_in_beep, store_in_beep, - 0, 1), - SENSOR_ATTR_2(in1_alarm, S_IRUGO, show_in_alarm, NULL, 0, 1), -}; +/* Temp attr for models which can beep on temp alarm */ +static struct sensor_device_attribute_2 fxxxx_temp_beep_attr[3][2] = { { + SENSOR_ATTR_2(temp1_max_beep, S_IRUGO|S_IWUSR, show_temp_beep, + store_temp_beep, 0, 1), + SENSOR_ATTR_2(temp1_crit_beep, S_IRUGO|S_IWUSR, show_temp_beep, + store_temp_beep, 0, 5), +}, { + SENSOR_ATTR_2(temp2_max_beep, S_IRUGO|S_IWUSR, show_temp_beep, + store_temp_beep, 0, 2), + SENSOR_ATTR_2(temp2_crit_beep, S_IRUGO|S_IWUSR, show_temp_beep, + store_temp_beep, 0, 6), +}, { + SENSOR_ATTR_2(temp3_max_beep, S_IRUGO|S_IWUSR, show_temp_beep, + store_temp_beep, 0, 3), + SENSOR_ATTR_2(temp3_crit_beep, S_IRUGO|S_IWUSR, show_temp_beep, + store_temp_beep, 0, 7), +} }; -/* Temp and in attr for the f8000 +/* Temp attr for 
the f8000 Note on the f8000 temp_ovt (crit) is used as max, and temp_high (max) is used as hysteresis value to clear alarms Also like the f71858fg its temperature indexes start at 0 */ -static struct sensor_device_attribute_2 f8000_in_temp_attr[] = { - SENSOR_ATTR_2(in0_input, S_IRUGO, show_in, NULL, 0, 0), - SENSOR_ATTR_2(in1_input, S_IRUGO, show_in, NULL, 0, 1), - SENSOR_ATTR_2(in2_input, S_IRUGO, show_in, NULL, 0, 2), +static struct sensor_device_attribute_2 f8000_temp_attr[] = { SENSOR_ATTR_2(temp1_input, S_IRUGO, show_temp, NULL, 0, 0), SENSOR_ATTR_2(temp1_max, S_IRUGO|S_IWUSR, show_temp_crit, store_temp_crit, 0, 0), @@ -408,7 +435,6 @@ static struct sensor_device_attribute_2 f8000_in_temp_attr[] = { SENSOR_ATTR_2(temp2_max_hyst, S_IRUGO|S_IWUSR, show_temp_max, store_temp_max, 0, 1), SENSOR_ATTR_2(temp2_alarm, S_IRUGO, show_temp_alarm, NULL, 0, 5), - SENSOR_ATTR_2(temp2_type, S_IRUGO, show_temp_type, NULL, 0, 1), SENSOR_ATTR_2(temp2_fault, S_IRUGO, show_temp_fault, NULL, 0, 1), SENSOR_ATTR_2(temp3_input, S_IRUGO, show_temp, NULL, 0, 2), SENSOR_ATTR_2(temp3_max, S_IRUGO|S_IWUSR, show_temp_crit, @@ -419,6 +445,28 @@ static struct sensor_device_attribute_2 f8000_in_temp_attr[] = { SENSOR_ATTR_2(temp3_fault, S_IRUGO, show_temp_fault, NULL, 0, 2), }; +/* in attr for all models */ +static struct sensor_device_attribute_2 fxxxx_in_attr[] = { + SENSOR_ATTR_2(in0_input, S_IRUGO, show_in, NULL, 0, 0), + SENSOR_ATTR_2(in1_input, S_IRUGO, show_in, NULL, 0, 1), + SENSOR_ATTR_2(in2_input, S_IRUGO, show_in, NULL, 0, 2), + SENSOR_ATTR_2(in3_input, S_IRUGO, show_in, NULL, 0, 3), + SENSOR_ATTR_2(in4_input, S_IRUGO, show_in, NULL, 0, 4), + SENSOR_ATTR_2(in5_input, S_IRUGO, show_in, NULL, 0, 5), + SENSOR_ATTR_2(in6_input, S_IRUGO, show_in, NULL, 0, 6), + SENSOR_ATTR_2(in7_input, S_IRUGO, show_in, NULL, 0, 7), + SENSOR_ATTR_2(in8_input, S_IRUGO, show_in, NULL, 0, 8), +}; + +/* For models with in1 alarm capability */ +static struct sensor_device_attribute_2 fxxxx_in1_alarm_attr[] = { + SENSOR_ATTR_2(in1_max, S_IRUGO|S_IWUSR, show_in_max, store_in_max, + 0, 1), + SENSOR_ATTR_2(in1_beep, S_IRUGO|S_IWUSR, show_in_beep, store_in_beep, + 0, 1), + SENSOR_ATTR_2(in1_alarm, S_IRUGO, show_in_alarm, NULL, 0, 1), +}; + /* Fan / PWM attr common to all models */ static struct sensor_device_attribute_2 fxxxx_fan_attr[4][6] = { { SENSOR_ATTR_2(fan1_input, S_IRUGO, show_fan, NULL, 0, 0), @@ -479,7 +527,7 @@ static struct sensor_device_attribute_2 fxxxx_fan_beep_attr[] = { }; /* PWM attr for the f71862fg, fewer pwms and fewer zones per pwm than the - f71858fg / f71882fg / f71889fg */ + standard models */ static struct sensor_device_attribute_2 f71862fg_auto_pwm_attr[] = { SENSOR_ATTR_2(pwm1_auto_channels_temp, S_IRUGO|S_IWUSR, show_pwm_auto_point_channel, @@ -548,7 +596,87 @@ static struct sensor_device_attribute_2 f71862fg_auto_pwm_attr[] = { show_pwm_auto_point_temp_hyst, NULL, 3, 2), }; -/* PWM attr common to the f71858fg, f71882fg and f71889fg */ +/* PWM attr for the f71808e/f71869, almost identical to the f71862fg, but the + pwm setting when the temperature is above the pwmX_auto_point1_temp can be + programmed instead of being hardcoded to 0xff */ +static struct sensor_device_attribute_2 f71869_auto_pwm_attr[] = { + SENSOR_ATTR_2(pwm1_auto_channels_temp, S_IRUGO|S_IWUSR, + show_pwm_auto_point_channel, + store_pwm_auto_point_channel, 0, 0), + SENSOR_ATTR_2(pwm1_auto_point1_pwm, S_IRUGO|S_IWUSR, + show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, + 0, 0), + SENSOR_ATTR_2(pwm1_auto_point2_pwm, S_IRUGO|S_IWUSR, + 
show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, + 1, 0), + SENSOR_ATTR_2(pwm1_auto_point3_pwm, S_IRUGO|S_IWUSR, + show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, + 4, 0), + SENSOR_ATTR_2(pwm1_auto_point1_temp, S_IRUGO|S_IWUSR, + show_pwm_auto_point_temp, store_pwm_auto_point_temp, + 0, 0), + SENSOR_ATTR_2(pwm1_auto_point2_temp, S_IRUGO|S_IWUSR, + show_pwm_auto_point_temp, store_pwm_auto_point_temp, + 3, 0), + SENSOR_ATTR_2(pwm1_auto_point1_temp_hyst, S_IRUGO|S_IWUSR, + show_pwm_auto_point_temp_hyst, + store_pwm_auto_point_temp_hyst, + 0, 0), + SENSOR_ATTR_2(pwm1_auto_point2_temp_hyst, S_IRUGO, + show_pwm_auto_point_temp_hyst, NULL, 3, 0), + + SENSOR_ATTR_2(pwm2_auto_channels_temp, S_IRUGO|S_IWUSR, + show_pwm_auto_point_channel, + store_pwm_auto_point_channel, 0, 1), + SENSOR_ATTR_2(pwm2_auto_point1_pwm, S_IRUGO|S_IWUSR, + show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, + 0, 1), + SENSOR_ATTR_2(pwm2_auto_point2_pwm, S_IRUGO|S_IWUSR, + show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, + 1, 1), + SENSOR_ATTR_2(pwm2_auto_point3_pwm, S_IRUGO|S_IWUSR, + show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, + 4, 1), + SENSOR_ATTR_2(pwm2_auto_point1_temp, S_IRUGO|S_IWUSR, + show_pwm_auto_point_temp, store_pwm_auto_point_temp, + 0, 1), + SENSOR_ATTR_2(pwm2_auto_point2_temp, S_IRUGO|S_IWUSR, + show_pwm_auto_point_temp, store_pwm_auto_point_temp, + 3, 1), + SENSOR_ATTR_2(pwm2_auto_point1_temp_hyst, S_IRUGO|S_IWUSR, + show_pwm_auto_point_temp_hyst, + store_pwm_auto_point_temp_hyst, + 0, 1), + SENSOR_ATTR_2(pwm2_auto_point2_temp_hyst, S_IRUGO, + show_pwm_auto_point_temp_hyst, NULL, 3, 1), + + SENSOR_ATTR_2(pwm3_auto_channels_temp, S_IRUGO|S_IWUSR, + show_pwm_auto_point_channel, + store_pwm_auto_point_channel, 0, 2), + SENSOR_ATTR_2(pwm3_auto_point1_pwm, S_IRUGO|S_IWUSR, + show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, + 0, 2), + SENSOR_ATTR_2(pwm3_auto_point2_pwm, S_IRUGO|S_IWUSR, + show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, + 1, 2), + SENSOR_ATTR_2(pwm3_auto_point3_pwm, S_IRUGO|S_IWUSR, + show_pwm_auto_point_pwm, store_pwm_auto_point_pwm, + 4, 2), + SENSOR_ATTR_2(pwm3_auto_point1_temp, S_IRUGO|S_IWUSR, + show_pwm_auto_point_temp, store_pwm_auto_point_temp, + 0, 2), + SENSOR_ATTR_2(pwm3_auto_point2_temp, S_IRUGO|S_IWUSR, + show_pwm_auto_point_temp, store_pwm_auto_point_temp, + 3, 2), + SENSOR_ATTR_2(pwm3_auto_point1_temp_hyst, S_IRUGO|S_IWUSR, + show_pwm_auto_point_temp_hyst, + store_pwm_auto_point_temp_hyst, + 0, 2), + SENSOR_ATTR_2(pwm3_auto_point2_temp_hyst, S_IRUGO, + show_pwm_auto_point_temp_hyst, NULL, 3, 2), +}; + +/* PWM attr for the standard models */ static struct sensor_device_attribute_2 fxxxx_auto_pwm_attr[4][14] = { { SENSOR_ATTR_2(pwm1_auto_channels_temp, S_IRUGO|S_IWUSR, show_pwm_auto_point_channel, @@ -943,16 +1071,16 @@ static u16 f71882fg_read_temp(struct f71882fg_data *data, int nr) static struct f71882fg_data *f71882fg_update_device(struct device *dev) { struct f71882fg_data *data = dev_get_drvdata(dev); - int nr, reg = 0, reg2; + int nr, reg, point; int nr_fans = (data->type == f71882fg) ? 4 : 3; - int nr_ins = (data->type == f71858fg || data->type == f8000) ? 3 : 9; + int nr_temps = (data->type == f71808e) ? 
2 : 3; mutex_lock(&data->update_lock); /* Update once every 60 seconds */ if (time_after(jiffies, data->last_limits + 60 * HZ) || !data->valid) { - if (data->type == f71882fg || data->type == f71889fg) { + if (f71882fg_has_in1_alarm[data->type]) { data->in1_max = f71882fg_read8(data, F71882FG_REG_IN1_HIGH); data->in_beep = @@ -960,7 +1088,8 @@ static struct f71882fg_data *f71882fg_update_device(struct device *dev) } /* Get High & boundary temps*/ - for (nr = data->temp_start; nr < 3 + data->temp_start; nr++) { + for (nr = data->temp_start; nr < nr_temps + data->temp_start; + nr++) { data->temp_ovt[nr] = f71882fg_read8(data, F71882FG_REG_TEMP_OVT(nr)); data->temp_high[nr] = f71882fg_read8(data, @@ -973,44 +1102,19 @@ static struct f71882fg_data *f71882fg_update_device(struct device *dev) data->temp_hyst[1] = f71882fg_read8(data, F71882FG_REG_TEMP_HYST(1)); } + /* All but the f71858fg / f8000 have this register */ + if ((data->type != f71858fg) && (data->type != f8000)) { + reg = f71882fg_read8(data, F71882FG_REG_TEMP_TYPE); + data->temp_type[1] = (reg & 0x02) ? 2 : 4; + data->temp_type[2] = (reg & 0x04) ? 2 : 4; + data->temp_type[3] = (reg & 0x08) ? 2 : 4; + } - if (data->type == f71862fg || data->type == f71882fg || - data->type == f71889fg) { + if (f71882fg_has_beep[data->type]) { data->fan_beep = f71882fg_read8(data, F71882FG_REG_FAN_BEEP); data->temp_beep = f71882fg_read8(data, F71882FG_REG_TEMP_BEEP); - /* Have to hardcode type, because temp1 is special */ - reg = f71882fg_read8(data, F71882FG_REG_TEMP_TYPE); - data->temp_type[2] = (reg & 0x04) ? 2 : 4; - data->temp_type[3] = (reg & 0x08) ? 2 : 4; - } - /* Determine temp index 1 sensor type */ - if (data->type == f71889fg) { - reg2 = f71882fg_read8(data, F71882FG_REG_START); - switch ((reg2 & 0x60) >> 5) { - case 0x00: /* BJT / Thermistor */ - data->temp_type[1] = (reg & 0x02) ? 2 : 4; - break; - case 0x01: /* AMDSI */ - data->temp_type[1] = 5; - break; - case 0x02: /* PECI */ - case 0x03: /* Ibex Peak ?? Report as PECI for now */ - data->temp_type[1] = 6; - break; - } - } else { - reg2 = f71882fg_read8(data, F71882FG_REG_PECI); - if ((reg2 & 0x03) == 0x01) - data->temp_type[1] = 6; /* PECI */ - else if ((reg2 & 0x03) == 0x02) - data->temp_type[1] = 5; /* AMDSI */ - else if (data->type == f71862fg || - data->type == f71882fg) - data->temp_type[1] = (reg & 0x02) ? 
2 : 4; - else /* f71858fg and f8000 only support BJT */ - data->temp_type[1] = 2; } data->pwm_enable = f71882fg_read8(data, @@ -1025,8 +1129,8 @@ static struct f71882fg_data *f71882fg_update_device(struct device *dev) f71882fg_read8(data, F71882FG_REG_POINT_MAPPING(nr)); - if (data->type != f71862fg) { - int point; + switch (data->type) { + default: for (point = 0; point < 5; point++) { data->pwm_auto_point_pwm[nr][point] = f71882fg_read8(data, @@ -1039,7 +1143,14 @@ static struct f71882fg_data *f71882fg_update_device(struct device *dev) F71882FG_REG_POINT_TEMP (nr, point)); } - } else { + break; + case f71808e: + case f71869: + data->pwm_auto_point_pwm[nr][0] = + f71882fg_read8(data, + F71882FG_REG_POINT_PWM(nr, 0)); + /* Fall through */ + case f71862fg: data->pwm_auto_point_pwm[nr][1] = f71882fg_read8(data, F71882FG_REG_POINT_PWM @@ -1056,6 +1167,7 @@ static struct f71882fg_data *f71882fg_update_device(struct device *dev) f71882fg_read8(data, F71882FG_REG_POINT_TEMP (nr, 3)); + break; } } data->last_limits = jiffies; @@ -1067,7 +1179,8 @@ static struct f71882fg_data *f71882fg_update_device(struct device *dev) F71882FG_REG_TEMP_STATUS); data->temp_diode_open = f71882fg_read8(data, F71882FG_REG_TEMP_DIODE_OPEN); - for (nr = data->temp_start; nr < 3 + data->temp_start; nr++) + for (nr = data->temp_start; nr < nr_temps + data->temp_start; + nr++) data->temp[nr] = f71882fg_read_temp(data, nr); data->fan_status = f71882fg_read8(data, @@ -1083,17 +1196,18 @@ static struct f71882fg_data *f71882fg_update_device(struct device *dev) data->pwm[nr] = f71882fg_read8(data, F71882FG_REG_PWM(nr)); } - /* The f8000 can monitor 1 more fan, but has no pwm for it */ if (data->type == f8000) data->fan[3] = f71882fg_read16(data, F71882FG_REG_FAN(3)); - if (data->type == f71882fg || data->type == f71889fg) + + if (f71882fg_has_in1_alarm[data->type]) data->in_status = f71882fg_read8(data, F71882FG_REG_IN_STATUS); - for (nr = 0; nr < nr_ins; nr++) - data->in[nr] = f71882fg_read8(data, - F71882FG_REG_IN(nr)); + for (nr = 0; nr < F71882FG_MAX_INS; nr++) + if (f71882fg_has_in[data->type][nr]) + data->in[nr] = f71882fg_read8(data, + F71882FG_REG_IN(nr)); data->last_updated = jiffies; data->valid = 1; @@ -1882,7 +1996,7 @@ static ssize_t store_pwm_auto_point_temp(struct device *dev, val /= 1000; - if (data->type == f71889fg) + if (data->auto_point_temp_signed) val = SENSORS_LIMIT(val, -128, 127); else val = SENSORS_LIMIT(val, 0, 127); @@ -1929,7 +2043,8 @@ static int __devinit f71882fg_probe(struct platform_device *pdev) struct f71882fg_data *data; struct f71882fg_sio_data *sio_data = pdev->dev.platform_data; int err, i, nr_fans = (sio_data->type == f71882fg) ? 4 : 3; - u8 start_reg; + int nr_temps = (sio_data->type == f71808e) ? 2 : 3; + u8 start_reg, reg; data = kzalloc(sizeof(struct f71882fg_data), GFP_KERNEL); if (!data) @@ -1968,37 +2083,72 @@ static int __devinit f71882fg_probe(struct platform_device *pdev) /* The f71858fg temperature alarms behave as the f8000 alarms in this mode */ err = f71882fg_create_sysfs_files(pdev, - f8000_in_temp_attr, - ARRAY_SIZE(f8000_in_temp_attr)); + f8000_temp_attr, + ARRAY_SIZE(f8000_temp_attr)); else err = f71882fg_create_sysfs_files(pdev, - f71858fg_in_temp_attr, - ARRAY_SIZE(f71858fg_in_temp_attr)); - break; - case f71882fg: - case f71889fg: - err = f71882fg_create_sysfs_files(pdev, - fxxxx_in1_alarm_attr, - ARRAY_SIZE(fxxxx_in1_alarm_attr)); - if (err) - goto exit_unregister_sysfs; - /* fall through! 
*/ - case f71862fg: - err = f71882fg_create_sysfs_files(pdev, - fxxxx_in_temp_attr, - ARRAY_SIZE(fxxxx_in_temp_attr)); + f71858fg_temp_attr, + ARRAY_SIZE(f71858fg_temp_attr)); break; case f8000: err = f71882fg_create_sysfs_files(pdev, - f8000_in_temp_attr, - ARRAY_SIZE(f8000_in_temp_attr)); + f8000_temp_attr, + ARRAY_SIZE(f8000_temp_attr)); break; + default: + err = f71882fg_create_sysfs_files(pdev, + &fxxxx_temp_attr[0][0], + ARRAY_SIZE(fxxxx_temp_attr[0]) * nr_temps); } if (err) goto exit_unregister_sysfs; + + if (f71882fg_has_beep[data->type]) { + err = f71882fg_create_sysfs_files(pdev, + &fxxxx_temp_beep_attr[0][0], + ARRAY_SIZE(fxxxx_temp_beep_attr[0]) + * nr_temps); + if (err) + goto exit_unregister_sysfs; + } + + for (i = 0; i < F71882FG_MAX_INS; i++) { + if (f71882fg_has_in[data->type][i]) { + err = device_create_file(&pdev->dev, + &fxxxx_in_attr[i].dev_attr); + if (err) + goto exit_unregister_sysfs; + } + } + if (f71882fg_has_in1_alarm[data->type]) { + err = f71882fg_create_sysfs_files(pdev, + fxxxx_in1_alarm_attr, + ARRAY_SIZE(fxxxx_in1_alarm_attr)); + if (err) + goto exit_unregister_sysfs; + } } if (start_reg & 0x02) { + switch (data->type) { + case f71808e: + case f71869: + /* These always have signed auto point temps */ + data->auto_point_temp_signed = 1; + /* Fall through to select correct fan/pwm reg bank! */ + case f71889fg: + case f71889ed: + reg = f71882fg_read8(data, F71882FG_REG_FAN_FAULT_T); + if (reg & F71882FG_FAN_NEG_TEMP_EN) + data->auto_point_temp_signed = 1; + /* Ensure banked pwm registers point to right bank */ + reg &= ~F71882FG_FAN_PROG_SEL; + f71882fg_write8(data, F71882FG_REG_FAN_FAULT_T, reg); + break; + default: + break; + } + data->pwm_enable = f71882fg_read8(data, F71882FG_REG_PWM_ENABLE); @@ -2013,8 +2163,11 @@ static int __devinit f71882fg_probe(struct platform_device *pdev) case f71862fg: err = (data->pwm_enable & 0x15) != 0x15; break; + case f71808e: + case f71869: case f71882fg: case f71889fg: + case f71889ed: err = 0; break; case f8000: @@ -2034,8 +2187,7 @@ static int __devinit f71882fg_probe(struct platform_device *pdev) if (err) goto exit_unregister_sysfs; - if (data->type == f71862fg || data->type == f71882fg || - data->type == f71889fg) { + if (f71882fg_has_beep[data->type]) { err = f71882fg_create_sysfs_files(pdev, fxxxx_fan_beep_attr, nr_fans); if (err) @@ -2043,11 +2195,42 @@ static int __devinit f71882fg_probe(struct platform_device *pdev) } switch (data->type) { + case f71808e: + case f71869: + case f71889fg: + case f71889ed: + for (i = 0; i < nr_fans; i++) { + data->pwm_auto_point_mapping[i] = + f71882fg_read8(data, + F71882FG_REG_POINT_MAPPING(i)); + if ((data->pwm_auto_point_mapping[i] & 0x80) || + (data->pwm_auto_point_mapping[i] & 3) == 0) + break; + } + if (i != nr_fans) { + dev_warn(&pdev->dev, + "Auto pwm controlled by raw digital " + "data, disabling pwm auto_point " + "sysfs attributes\n"); + goto no_pwm_auto_point; + } + break; + default: + break; + } + + switch (data->type) { case f71862fg: err = f71882fg_create_sysfs_files(pdev, f71862fg_auto_pwm_attr, ARRAY_SIZE(f71862fg_auto_pwm_attr)); break; + case f71808e: + case f71869: + err = f71882fg_create_sysfs_files(pdev, + f71869_auto_pwm_attr, + ARRAY_SIZE(f71869_auto_pwm_attr)); + break; case f8000: err = f71882fg_create_sysfs_files(pdev, f8000_fan_attr, @@ -2058,23 +2241,7 @@ static int __devinit f71882fg_probe(struct platform_device *pdev) f8000_auto_pwm_attr, ARRAY_SIZE(f8000_auto_pwm_attr)); break; - case f71889fg: - for (i = 0; i < nr_fans; i++) { - 
data->pwm_auto_point_mapping[i] = - f71882fg_read8(data, - F71882FG_REG_POINT_MAPPING(i)); - if (data->pwm_auto_point_mapping[i] & 0x80) - break; - } - if (i != nr_fans) { - dev_warn(&pdev->dev, - "Auto pwm controlled by raw digital " - "data, disabling pwm auto_point " - "sysfs attributes\n"); - break; - } - /* fall through */ - default: /* f71858fg / f71882fg */ + default: err = f71882fg_create_sysfs_files(pdev, &fxxxx_auto_pwm_attr[0][0], ARRAY_SIZE(fxxxx_auto_pwm_attr[0]) * nr_fans); @@ -2082,6 +2249,7 @@ static int __devinit f71882fg_probe(struct platform_device *pdev) if (err) goto exit_unregister_sysfs; +no_pwm_auto_point: for (i = 0; i < nr_fans; i++) dev_info(&pdev->dev, "Fan: %d is in %s mode\n", i + 1, (data->pwm_enable & (1 << 2 * i)) ? @@ -2108,7 +2276,8 @@ exit_free: static int f71882fg_remove(struct platform_device *pdev) { struct f71882fg_data *data = platform_get_drvdata(pdev); - int nr_fans = (data->type == f71882fg) ? 4 : 3; + int i, nr_fans = (data->type == f71882fg) ? 4 : 3; + int nr_temps = (data->type == f71808e) ? 2 : 3; u8 start_reg = f71882fg_read8(data, F71882FG_REG_START); if (data->hwmon_dev) @@ -2121,29 +2290,39 @@ static int f71882fg_remove(struct platform_device *pdev) case f71858fg: if (data->temp_config & 0x10) f71882fg_remove_sysfs_files(pdev, - f8000_in_temp_attr, - ARRAY_SIZE(f8000_in_temp_attr)); + f8000_temp_attr, + ARRAY_SIZE(f8000_temp_attr)); else f71882fg_remove_sysfs_files(pdev, - f71858fg_in_temp_attr, - ARRAY_SIZE(f71858fg_in_temp_attr)); - break; - case f71882fg: - case f71889fg: - f71882fg_remove_sysfs_files(pdev, - fxxxx_in1_alarm_attr, - ARRAY_SIZE(fxxxx_in1_alarm_attr)); - /* fall through! */ - case f71862fg: - f71882fg_remove_sysfs_files(pdev, - fxxxx_in_temp_attr, - ARRAY_SIZE(fxxxx_in_temp_attr)); + f71858fg_temp_attr, + ARRAY_SIZE(f71858fg_temp_attr)); break; case f8000: f71882fg_remove_sysfs_files(pdev, - f8000_in_temp_attr, - ARRAY_SIZE(f8000_in_temp_attr)); + f8000_temp_attr, + ARRAY_SIZE(f8000_temp_attr)); break; + default: + f71882fg_remove_sysfs_files(pdev, + &fxxxx_temp_attr[0][0], + ARRAY_SIZE(fxxxx_temp_attr[0]) * nr_temps); + } + if (f71882fg_has_beep[data->type]) { + f71882fg_remove_sysfs_files(pdev, + &fxxxx_temp_beep_attr[0][0], + ARRAY_SIZE(fxxxx_temp_beep_attr[0]) * nr_temps); + } + + for (i = 0; i < F71882FG_MAX_INS; i++) { + if (f71882fg_has_in[data->type][i]) { + device_remove_file(&pdev->dev, + &fxxxx_in_attr[i].dev_attr); + } + } + if (f71882fg_has_in1_alarm[data->type]) { + f71882fg_remove_sysfs_files(pdev, + fxxxx_in1_alarm_attr, + ARRAY_SIZE(fxxxx_in1_alarm_attr)); } } @@ -2151,10 +2330,10 @@ static int f71882fg_remove(struct platform_device *pdev) f71882fg_remove_sysfs_files(pdev, &fxxxx_fan_attr[0][0], ARRAY_SIZE(fxxxx_fan_attr[0]) * nr_fans); - if (data->type == f71862fg || data->type == f71882fg || - data->type == f71889fg) + if (f71882fg_has_beep[data->type]) { f71882fg_remove_sysfs_files(pdev, fxxxx_fan_beep_attr, nr_fans); + } switch (data->type) { case f71862fg: @@ -2162,6 +2341,12 @@ static int f71882fg_remove(struct platform_device *pdev) f71862fg_auto_pwm_attr, ARRAY_SIZE(f71862fg_auto_pwm_attr)); break; + case f71808e: + case f71869: + f71882fg_remove_sysfs_files(pdev, + f71869_auto_pwm_attr, + ARRAY_SIZE(f71869_auto_pwm_attr)); + break; case f8000: f71882fg_remove_sysfs_files(pdev, f8000_fan_attr, @@ -2170,7 +2355,7 @@ static int f71882fg_remove(struct platform_device *pdev) f8000_auto_pwm_attr, ARRAY_SIZE(f8000_auto_pwm_attr)); break; - default: /* f71858fg / f71882fg / f71889fg */ + default: 
f71882fg_remove_sysfs_files(pdev, &fxxxx_auto_pwm_attr[0][0], ARRAY_SIZE(fxxxx_auto_pwm_attr[0]) * nr_fans); @@ -2200,18 +2385,27 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address, devid = force_id ? force_id : superio_inw(sioaddr, SIO_REG_DEVID); switch (devid) { + case SIO_F71808E_ID: + sio_data->type = f71808e; + break; case SIO_F71858_ID: sio_data->type = f71858fg; break; case SIO_F71862_ID: sio_data->type = f71862fg; break; + case SIO_F71869_ID: + sio_data->type = f71869; + break; case SIO_F71882_ID: sio_data->type = f71882fg; break; case SIO_F71889_ID: sio_data->type = f71889fg; break; + case SIO_F71889E_ID: + sio_data->type = f71889ed; + break; case SIO_F8000_ID: sio_data->type = f8000; break; diff --git a/drivers/hwmon/lineage-pem.c b/drivers/hwmon/lineage-pem.c new file mode 100644 index 000000000000..58eded27f385 --- /dev/null +++ b/drivers/hwmon/lineage-pem.c @@ -0,0 +1,586 @@ +/* + * Driver for Lineage Compact Power Line series of power entry modules. + * + * Copyright (C) 2010, 2011 Ericsson AB. + * + * Documentation: + * http://www.lineagepower.com/oem/pdf/CPLI2C.pdf + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/err.h> +#include <linux/slab.h> +#include <linux/i2c.h> +#include <linux/hwmon.h> +#include <linux/hwmon-sysfs.h> + +/* + * This driver supports various Lineage Compact Power Line DC/DC and AC/DC + * converters such as CP1800, CP2000AC, CP2000DC, CP2100DC, and others. + * + * The devices are nominally PMBus compliant. However, most standard PMBus + * commands are not supported. Specifically, all hardware monitoring and + * status reporting commands are non-standard. For this reason, a standard + * PMBus driver can not be used. + * + * All Lineage CPL devices have a built-in I2C bus master selector (PCA9541). + * To ensure device access, this driver should only be used as client driver + * to the pca9541 I2C master selector driver. 
+ */ + +/* Command codes */ +#define PEM_OPERATION 0x01 +#define PEM_CLEAR_INFO_FLAGS 0x03 +#define PEM_VOUT_COMMAND 0x21 +#define PEM_VOUT_OV_FAULT_LIMIT 0x40 +#define PEM_READ_DATA_STRING 0xd0 +#define PEM_READ_INPUT_STRING 0xdc +#define PEM_READ_FIRMWARE_REV 0xdd +#define PEM_READ_RUN_TIMER 0xde +#define PEM_FAN_HI_SPEED 0xdf +#define PEM_FAN_NORMAL_SPEED 0xe0 +#define PEM_READ_FAN_SPEED 0xe1 + +/* offsets in data string */ +#define PEM_DATA_STATUS_2 0 +#define PEM_DATA_STATUS_1 1 +#define PEM_DATA_ALARM_2 2 +#define PEM_DATA_ALARM_1 3 +#define PEM_DATA_VOUT_LSB 4 +#define PEM_DATA_VOUT_MSB 5 +#define PEM_DATA_CURRENT 6 +#define PEM_DATA_TEMP 7 + +/* Virtual entries, to report constants */ +#define PEM_DATA_TEMP_MAX 10 +#define PEM_DATA_TEMP_CRIT 11 + +/* offsets in input string */ +#define PEM_INPUT_VOLTAGE 0 +#define PEM_INPUT_POWER_LSB 1 +#define PEM_INPUT_POWER_MSB 2 + +/* offsets in fan data */ +#define PEM_FAN_ADJUSTMENT 0 +#define PEM_FAN_FAN1 1 +#define PEM_FAN_FAN2 2 +#define PEM_FAN_FAN3 3 + +/* Status register bits */ +#define STS1_OUTPUT_ON (1 << 0) +#define STS1_LEDS_FLASHING (1 << 1) +#define STS1_EXT_FAULT (1 << 2) +#define STS1_SERVICE_LED_ON (1 << 3) +#define STS1_SHUTDOWN_OCCURRED (1 << 4) +#define STS1_INT_FAULT (1 << 5) +#define STS1_ISOLATION_TEST_OK (1 << 6) + +#define STS2_ENABLE_PIN_HI (1 << 0) +#define STS2_DATA_OUT_RANGE (1 << 1) +#define STS2_RESTARTED_OK (1 << 1) +#define STS2_ISOLATION_TEST_FAIL (1 << 3) +#define STS2_HIGH_POWER_CAP (1 << 4) +#define STS2_INVALID_INSTR (1 << 5) +#define STS2_WILL_RESTART (1 << 6) +#define STS2_PEC_ERR (1 << 7) + +/* Alarm register bits */ +#define ALRM1_VIN_OUT_LIMIT (1 << 0) +#define ALRM1_VOUT_OUT_LIMIT (1 << 1) +#define ALRM1_OV_VOLT_SHUTDOWN (1 << 2) +#define ALRM1_VIN_OVERCURRENT (1 << 3) +#define ALRM1_TEMP_WARNING (1 << 4) +#define ALRM1_TEMP_SHUTDOWN (1 << 5) +#define ALRM1_PRIMARY_FAULT (1 << 6) +#define ALRM1_POWER_LIMIT (1 << 7) + +#define ALRM2_5V_OUT_LIMIT (1 << 1) +#define ALRM2_TEMP_FAULT (1 << 2) +#define ALRM2_OV_LOW (1 << 3) +#define ALRM2_DCDC_TEMP_HIGH (1 << 4) +#define ALRM2_PRI_TEMP_HIGH (1 << 5) +#define ALRM2_NO_PRIMARY (1 << 6) +#define ALRM2_FAN_FAULT (1 << 7) + +#define FIRMWARE_REV_LEN 4 +#define DATA_STRING_LEN 9 +#define INPUT_STRING_LEN 5 /* 4 for most devices */ +#define FAN_SPEED_LEN 5 + +struct pem_data { + struct device *hwmon_dev; + + struct mutex update_lock; + bool valid; + bool fans_supported; + int input_length; + unsigned long last_updated; /* in jiffies */ + + u8 firmware_rev[FIRMWARE_REV_LEN]; + u8 data_string[DATA_STRING_LEN]; + u8 input_string[INPUT_STRING_LEN]; + u8 fan_speed[FAN_SPEED_LEN]; +}; + +static int pem_read_block(struct i2c_client *client, u8 command, u8 *data, + int data_len) +{ + u8 block_buffer[I2C_SMBUS_BLOCK_MAX]; + int result; + + result = i2c_smbus_read_block_data(client, command, block_buffer); + if (unlikely(result < 0)) + goto abort; + if (unlikely(result == 0xff || result != data_len)) { + result = -EIO; + goto abort; + } + memcpy(data, block_buffer, data_len); + result = 0; +abort: + return result; +} + +static struct pem_data *pem_update_device(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct pem_data *data = i2c_get_clientdata(client); + struct pem_data *ret = data; + + mutex_lock(&data->update_lock); + + if (time_after(jiffies, data->last_updated + HZ) || !data->valid) { + int result; + + /* Read data string */ + result = pem_read_block(client, PEM_READ_DATA_STRING, + data->data_string, + sizeof(data->data_string)); + if 
(unlikely(result < 0)) { + ret = ERR_PTR(result); + goto abort; + } + + /* Read input string */ + if (data->input_length) { + result = pem_read_block(client, PEM_READ_INPUT_STRING, + data->input_string, + data->input_length); + if (unlikely(result < 0)) { + ret = ERR_PTR(result); + goto abort; + } + } + + /* Read fan speeds */ + if (data->fans_supported) { + result = pem_read_block(client, PEM_READ_FAN_SPEED, + data->fan_speed, + sizeof(data->fan_speed)); + if (unlikely(result < 0)) { + ret = ERR_PTR(result); + goto abort; + } + } + + i2c_smbus_write_byte(client, PEM_CLEAR_INFO_FLAGS); + + data->last_updated = jiffies; + data->valid = 1; + } +abort: + mutex_unlock(&data->update_lock); + return ret; +} + +static long pem_get_data(u8 *data, int len, int index) +{ + long val; + + switch (index) { + case PEM_DATA_VOUT_LSB: + val = (data[index] + (data[index+1] << 8)) * 5 / 2; + break; + case PEM_DATA_CURRENT: + val = data[index] * 200; + break; + case PEM_DATA_TEMP: + val = data[index] * 1000; + break; + case PEM_DATA_TEMP_MAX: + val = 97 * 1000; /* 97 degrees C per datasheet */ + break; + case PEM_DATA_TEMP_CRIT: + val = 107 * 1000; /* 107 degrees C per datasheet */ + break; + default: + WARN_ON_ONCE(1); + val = 0; + } + return val; +} + +static long pem_get_input(u8 *data, int len, int index) +{ + long val; + + switch (index) { + case PEM_INPUT_VOLTAGE: + if (len == INPUT_STRING_LEN) + val = (data[index] + (data[index+1] << 8) - 75) * 1000; + else + val = (data[index] - 75) * 1000; + break; + case PEM_INPUT_POWER_LSB: + if (len == INPUT_STRING_LEN) + index++; + val = (data[index] + (data[index+1] << 8)) * 1000000L; + break; + default: + WARN_ON_ONCE(1); + val = 0; + } + return val; +} + +static long pem_get_fan(u8 *data, int len, int index) +{ + long val; + + switch (index) { + case PEM_FAN_FAN1: + case PEM_FAN_FAN2: + case PEM_FAN_FAN3: + val = data[index] * 100; + break; + default: + WARN_ON_ONCE(1); + val = 0; + } + return val; +} + +/* + * Show boolean, either a fault or an alarm. 
+ * .nr points to the register, .index is the bit mask to check + */ +static ssize_t pem_show_bool(struct device *dev, + struct device_attribute *da, char *buf) +{ + struct sensor_device_attribute_2 *attr = to_sensor_dev_attr_2(da); + struct pem_data *data = pem_update_device(dev); + u8 status; + + if (IS_ERR(data)) + return PTR_ERR(data); + + status = data->data_string[attr->nr] & attr->index; + return snprintf(buf, PAGE_SIZE, "%d\n", !!status); +} + +static ssize_t pem_show_data(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct pem_data *data = pem_update_device(dev); + long value; + + if (IS_ERR(data)) + return PTR_ERR(data); + + value = pem_get_data(data->data_string, sizeof(data->data_string), + attr->index); + + return snprintf(buf, PAGE_SIZE, "%ld\n", value); +} + +static ssize_t pem_show_input(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct pem_data *data = pem_update_device(dev); + long value; + + if (IS_ERR(data)) + return PTR_ERR(data); + + value = pem_get_input(data->input_string, sizeof(data->input_string), + attr->index); + + return snprintf(buf, PAGE_SIZE, "%ld\n", value); +} + +static ssize_t pem_show_fan(struct device *dev, struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct pem_data *data = pem_update_device(dev); + long value; + + if (IS_ERR(data)) + return PTR_ERR(data); + + value = pem_get_fan(data->fan_speed, sizeof(data->fan_speed), + attr->index); + + return snprintf(buf, PAGE_SIZE, "%ld\n", value); +} + +/* Voltages */ +static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, pem_show_data, NULL, + PEM_DATA_VOUT_LSB); +static SENSOR_DEVICE_ATTR_2(in1_alarm, S_IRUGO, pem_show_bool, NULL, + PEM_DATA_ALARM_1, ALRM1_VOUT_OUT_LIMIT); +static SENSOR_DEVICE_ATTR_2(in1_crit_alarm, S_IRUGO, pem_show_bool, NULL, + PEM_DATA_ALARM_1, ALRM1_OV_VOLT_SHUTDOWN); +static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, pem_show_input, NULL, + PEM_INPUT_VOLTAGE); +static SENSOR_DEVICE_ATTR_2(in2_alarm, S_IRUGO, pem_show_bool, NULL, + PEM_DATA_ALARM_1, + ALRM1_VIN_OUT_LIMIT | ALRM1_PRIMARY_FAULT); + +/* Currents */ +static SENSOR_DEVICE_ATTR(curr1_input, S_IRUGO, pem_show_data, NULL, + PEM_DATA_CURRENT); +static SENSOR_DEVICE_ATTR_2(curr1_alarm, S_IRUGO, pem_show_bool, NULL, + PEM_DATA_ALARM_1, ALRM1_VIN_OVERCURRENT); + +/* Power */ +static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, pem_show_input, NULL, + PEM_INPUT_POWER_LSB); +static SENSOR_DEVICE_ATTR_2(power1_alarm, S_IRUGO, pem_show_bool, NULL, + PEM_DATA_ALARM_1, ALRM1_POWER_LIMIT); + +/* Fans */ +static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, pem_show_fan, NULL, + PEM_FAN_FAN1); +static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, pem_show_fan, NULL, + PEM_FAN_FAN2); +static SENSOR_DEVICE_ATTR(fan3_input, S_IRUGO, pem_show_fan, NULL, + PEM_FAN_FAN3); +static SENSOR_DEVICE_ATTR_2(fan1_alarm, S_IRUGO, pem_show_bool, NULL, + PEM_DATA_ALARM_2, ALRM2_FAN_FAULT); + +/* Temperatures */ +static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, pem_show_data, NULL, + PEM_DATA_TEMP); +static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, pem_show_data, NULL, + PEM_DATA_TEMP_MAX); +static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, pem_show_data, NULL, + PEM_DATA_TEMP_CRIT); +static SENSOR_DEVICE_ATTR_2(temp1_alarm, S_IRUGO, pem_show_bool, NULL, + PEM_DATA_ALARM_1, ALRM1_TEMP_WARNING); +static SENSOR_DEVICE_ATTR_2(temp1_crit_alarm, S_IRUGO, 
pem_show_bool, NULL, + PEM_DATA_ALARM_1, ALRM1_TEMP_SHUTDOWN); +static SENSOR_DEVICE_ATTR_2(temp1_fault, S_IRUGO, pem_show_bool, NULL, + PEM_DATA_ALARM_2, ALRM2_TEMP_FAULT); + +static struct attribute *pem_attributes[] = { + &sensor_dev_attr_in1_input.dev_attr.attr, + &sensor_dev_attr_in1_alarm.dev_attr.attr, + &sensor_dev_attr_in1_crit_alarm.dev_attr.attr, + &sensor_dev_attr_in2_alarm.dev_attr.attr, + + &sensor_dev_attr_curr1_alarm.dev_attr.attr, + + &sensor_dev_attr_power1_alarm.dev_attr.attr, + + &sensor_dev_attr_fan1_alarm.dev_attr.attr, + + &sensor_dev_attr_temp1_input.dev_attr.attr, + &sensor_dev_attr_temp1_max.dev_attr.attr, + &sensor_dev_attr_temp1_crit.dev_attr.attr, + &sensor_dev_attr_temp1_alarm.dev_attr.attr, + &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr, + &sensor_dev_attr_temp1_fault.dev_attr.attr, + + NULL, +}; + +static const struct attribute_group pem_group = { + .attrs = pem_attributes, +}; + +static struct attribute *pem_input_attributes[] = { + &sensor_dev_attr_in2_input.dev_attr.attr, + &sensor_dev_attr_curr1_input.dev_attr.attr, + &sensor_dev_attr_power1_input.dev_attr.attr, +}; + +static const struct attribute_group pem_input_group = { + .attrs = pem_input_attributes, +}; + +static struct attribute *pem_fan_attributes[] = { + &sensor_dev_attr_fan1_input.dev_attr.attr, + &sensor_dev_attr_fan2_input.dev_attr.attr, + &sensor_dev_attr_fan3_input.dev_attr.attr, +}; + +static const struct attribute_group pem_fan_group = { + .attrs = pem_fan_attributes, +}; + +static int pem_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct i2c_adapter *adapter = client->adapter; + struct pem_data *data; + int ret; + + if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BLOCK_DATA + | I2C_FUNC_SMBUS_WRITE_BYTE)) + return -ENODEV; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + i2c_set_clientdata(client, data); + mutex_init(&data->update_lock); + + /* + * We use the next two commands to determine if the device is really + * there. + */ + ret = pem_read_block(client, PEM_READ_FIRMWARE_REV, + data->firmware_rev, sizeof(data->firmware_rev)); + if (ret < 0) + goto out_kfree; + + ret = i2c_smbus_write_byte(client, PEM_CLEAR_INFO_FLAGS); + if (ret < 0) + goto out_kfree; + + dev_info(&client->dev, "Firmware revision %d.%d.%d\n", + data->firmware_rev[0], data->firmware_rev[1], + data->firmware_rev[2]); + + /* Register sysfs hooks */ + ret = sysfs_create_group(&client->dev.kobj, &pem_group); + if (ret) + goto out_kfree; + + /* + * Check if input readings are supported. + * This is the case if we can read input data, + * and if the returned data is not all zeros. + * Note that input alarms are always supported. + */ + ret = pem_read_block(client, PEM_READ_INPUT_STRING, + data->input_string, + sizeof(data->input_string) - 1); + if (!ret && (data->input_string[0] || data->input_string[1] || + data->input_string[2])) + data->input_length = sizeof(data->input_string) - 1; + else if (ret < 0) { + /* Input string is one byte longer for some devices */ + ret = pem_read_block(client, PEM_READ_INPUT_STRING, + data->input_string, + sizeof(data->input_string)); + if (!ret && (data->input_string[0] || data->input_string[1] || + data->input_string[2] || data->input_string[3])) + data->input_length = sizeof(data->input_string); + } + ret = 0; + if (data->input_length) { + ret = sysfs_create_group(&client->dev.kobj, &pem_input_group); + if (ret) + goto out_remove_groups; + } + + /* + * Check if fan speed readings are supported. 
+ * This is the case if we can read fan speed data, + * and if the returned data is not all zeros. + * Note that the fan alarm is always supported. + */ + ret = pem_read_block(client, PEM_READ_FAN_SPEED, + data->fan_speed, + sizeof(data->fan_speed)); + if (!ret && (data->fan_speed[0] || data->fan_speed[1] || + data->fan_speed[2] || data->fan_speed[3])) { + data->fans_supported = true; + ret = sysfs_create_group(&client->dev.kobj, &pem_fan_group); + if (ret) + goto out_remove_groups; + } + + data->hwmon_dev = hwmon_device_register(&client->dev); + if (IS_ERR(data->hwmon_dev)) { + ret = PTR_ERR(data->hwmon_dev); + goto out_remove_groups; + } + + return 0; + +out_remove_groups: + sysfs_remove_group(&client->dev.kobj, &pem_input_group); + sysfs_remove_group(&client->dev.kobj, &pem_fan_group); + sysfs_remove_group(&client->dev.kobj, &pem_group); +out_kfree: + kfree(data); + return ret; +} + +static int pem_remove(struct i2c_client *client) +{ + struct pem_data *data = i2c_get_clientdata(client); + + hwmon_device_unregister(data->hwmon_dev); + + sysfs_remove_group(&client->dev.kobj, &pem_input_group); + sysfs_remove_group(&client->dev.kobj, &pem_fan_group); + sysfs_remove_group(&client->dev.kobj, &pem_group); + + kfree(data); + return 0; +} + +static const struct i2c_device_id pem_id[] = { + {"lineage_pem", 0}, + {} +}; +MODULE_DEVICE_TABLE(i2c, pem_id); + +static struct i2c_driver pem_driver = { + .driver = { + .name = "lineage_pem", + }, + .probe = pem_probe, + .remove = pem_remove, + .id_table = pem_id, +}; + +static int __init pem_init(void) +{ + return i2c_add_driver(&pem_driver); +} + +static void __exit pem_exit(void) +{ + i2c_del_driver(&pem_driver); +} + +MODULE_AUTHOR("Guenter Roeck <guenter.roeck@ericsson.com>"); +MODULE_DESCRIPTION("Lineage CPL PEM hardware monitoring driver"); +MODULE_LICENSE("GPL"); + +module_init(pem_init); +module_exit(pem_exit); diff --git a/drivers/hwmon/lis3lv02d_spi.c b/drivers/hwmon/lis3lv02d_spi.c index 2549de1de4e2..c1f8a8fbf694 100644 --- a/drivers/hwmon/lis3lv02d_spi.c +++ b/drivers/hwmon/lis3lv02d_spi.c @@ -16,6 +16,7 @@ #include <linux/interrupt.h> #include <linux/workqueue.h> #include <linux/spi/spi.h> +#include <linux/pm.h> #include "lis3lv02d.h" @@ -88,9 +89,10 @@ static int __devexit lis302dl_spi_remove(struct spi_device *spi) return lis3lv02d_remove_fs(&lis3_dev); } -#ifdef CONFIG_PM -static int lis3lv02d_spi_suspend(struct spi_device *spi, pm_message_t mesg) +#ifdef CONFIG_PM_SLEEP +static int lis3lv02d_spi_suspend(struct device *dev) { + struct spi_device *spi = to_spi_device(dev); struct lis3lv02d *lis3 = spi_get_drvdata(spi); if (!lis3->pdata || !lis3->pdata->wakeup_flags) @@ -99,8 +101,9 @@ static int lis3lv02d_spi_suspend(struct spi_device *spi, pm_message_t mesg) return 0; } -static int lis3lv02d_spi_resume(struct spi_device *spi) +static int lis3lv02d_spi_resume(struct device *dev) { + struct spi_device *spi = to_spi_device(dev); struct lis3lv02d *lis3 = spi_get_drvdata(spi); if (!lis3->pdata || !lis3->pdata->wakeup_flags) @@ -108,21 +111,19 @@ static int lis3lv02d_spi_resume(struct spi_device *spi) return 0; } - -#else -#define lis3lv02d_spi_suspend NULL -#define lis3lv02d_spi_resume NULL #endif +static SIMPLE_DEV_PM_OPS(lis3lv02d_spi_pm, lis3lv02d_spi_suspend, + lis3lv02d_spi_resume); + static struct spi_driver lis302dl_spi_driver = { .driver = { .name = DRV_NAME, .owner = THIS_MODULE, + .pm = &lis3lv02d_spi_pm, }, .probe = lis302dl_spi_probe, .remove = __devexit_p(lis302dl_spi_remove), - .suspend = lis3lv02d_spi_suspend, - .resume = 
lis3lv02d_spi_resume, }; static int __init lis302dl_init(void) diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c index d2cc28660816..cf47e6e476ed 100644 --- a/drivers/hwmon/lm85.c +++ b/drivers/hwmon/lm85.c @@ -41,7 +41,7 @@ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END }; enum chips { any_chip, lm85b, lm85c, adm1027, adt7463, adt7468, - emc6d100, emc6d102, emc6d103 + emc6d100, emc6d102, emc6d103, emc6d103s }; /* The LM85 registers */ @@ -283,10 +283,6 @@ struct lm85_zone { u8 hyst; /* Low limit hysteresis. (0-15) */ u8 range; /* Temp range, encoded */ s8 critical; /* "All fans ON" temp limit */ - u8 off_desired; /* Actual "off" temperature specified. Preserved - * to prevent "drift" as other autofan control - * values change. - */ u8 max_desired; /* Actual "max" temperature specified. Preserved * to prevent "drift" as other autofan control * values change. @@ -306,6 +302,8 @@ struct lm85_data { const int *freq_map; enum chips type; + bool has_vid5; /* true if VID5 is configured for ADT7463 or ADT7468 */ + struct mutex update_lock; int valid; /* !=0 if following fields are valid */ unsigned long last_reading; /* In jiffies */ @@ -352,6 +350,7 @@ static const struct i2c_device_id lm85_id[] = { { "emc6d101", emc6d100 }, { "emc6d102", emc6d102 }, { "emc6d103", emc6d103 }, + { "emc6d103s", emc6d103s }, { } }; MODULE_DEVICE_TABLE(i2c, lm85_id); @@ -420,8 +419,7 @@ static ssize_t show_vid_reg(struct device *dev, struct device_attribute *attr, struct lm85_data *data = lm85_update_device(dev); int vid; - if ((data->type == adt7463 || data->type == adt7468) && - (data->vid & 0x80)) { + if (data->has_vid5) { /* 6-pin VID (VRM 10) */ vid = vid_from_reg(data->vid & 0x3f, data->vrm); } else { @@ -891,7 +889,6 @@ static ssize_t set_temp_auto_temp_off(struct device *dev, mutex_lock(&data->update_lock); min = TEMP_FROM_REG(data->zone[nr].limit); - data->zone[nr].off_desired = TEMP_TO_REG(val); data->zone[nr].hyst = HYST_TO_REG(min - val); if (nr == 0 || nr == 1) { lm85_write_value(client, LM85_REG_AFAN_HYST1, @@ -934,18 +931,6 @@ static ssize_t set_temp_auto_temp_min(struct device *dev, ((data->zone[nr].range & 0x0f) << 4) | (data->pwm_freq[nr] & 0x07)); -/* Update temp_auto_hyst and temp_auto_off */ - data->zone[nr].hyst = HYST_TO_REG(TEMP_FROM_REG( - data->zone[nr].limit) - TEMP_FROM_REG( - data->zone[nr].off_desired)); - if (nr == 0 || nr == 1) { - lm85_write_value(client, LM85_REG_AFAN_HYST1, - (data->zone[0].hyst << 4) - | data->zone[1].hyst); - } else { - lm85_write_value(client, LM85_REG_AFAN_HYST2, - (data->zone[2].hyst << 4)); - } mutex_unlock(&data->update_lock); return count; } @@ -1084,13 +1069,7 @@ static struct attribute *lm85_attributes[] = { &sensor_dev_attr_pwm1_auto_pwm_min.dev_attr.attr, &sensor_dev_attr_pwm2_auto_pwm_min.dev_attr.attr, &sensor_dev_attr_pwm3_auto_pwm_min.dev_attr.attr, - &sensor_dev_attr_pwm1_auto_pwm_minctl.dev_attr.attr, - &sensor_dev_attr_pwm2_auto_pwm_minctl.dev_attr.attr, - &sensor_dev_attr_pwm3_auto_pwm_minctl.dev_attr.attr, - &sensor_dev_attr_temp1_auto_temp_off.dev_attr.attr, - &sensor_dev_attr_temp2_auto_temp_off.dev_attr.attr, - &sensor_dev_attr_temp3_auto_temp_off.dev_attr.attr, &sensor_dev_attr_temp1_auto_temp_min.dev_attr.attr, &sensor_dev_attr_temp2_auto_temp_min.dev_attr.attr, &sensor_dev_attr_temp3_auto_temp_min.dev_attr.attr, @@ -1111,6 +1090,26 @@ static const struct attribute_group lm85_group = { .attrs = lm85_attributes, }; +static struct attribute *lm85_attributes_minctl[] = { + 
&sensor_dev_attr_pwm1_auto_pwm_minctl.dev_attr.attr, + &sensor_dev_attr_pwm2_auto_pwm_minctl.dev_attr.attr, + &sensor_dev_attr_pwm3_auto_pwm_minctl.dev_attr.attr, +}; + +static const struct attribute_group lm85_group_minctl = { + .attrs = lm85_attributes_minctl, +}; + +static struct attribute *lm85_attributes_temp_off[] = { + &sensor_dev_attr_temp1_auto_temp_off.dev_attr.attr, + &sensor_dev_attr_temp2_auto_temp_off.dev_attr.attr, + &sensor_dev_attr_temp3_auto_temp_off.dev_attr.attr, +}; + +static const struct attribute_group lm85_group_temp_off = { + .attrs = lm85_attributes_temp_off, +}; + static struct attribute *lm85_attributes_in4[] = { &sensor_dev_attr_in4_input.dev_attr.attr, &sensor_dev_attr_in4_min.dev_attr.attr, @@ -1258,16 +1257,9 @@ static int lm85_detect(struct i2c_client *client, struct i2c_board_info *info) case LM85_VERSTEP_EMC6D103_A1: type_name = "emc6d103"; break; - /* - * Registers apparently missing in EMC6D103S/EMC6D103:A2 - * compared to EMC6D103:A0, EMC6D103:A1, and EMC6D102 - * (according to the data sheets), but used unconditionally - * in the driver: 62[5:7], 6D[0:7], and 6E[0:7]. - * So skip EMC6D103S for now. case LM85_VERSTEP_EMC6D103S: type_name = "emc6d103s"; break; - */ } } else { dev_dbg(&adapter->dev, @@ -1280,6 +1272,19 @@ static int lm85_detect(struct i2c_client *client, struct i2c_board_info *info) return 0; } +static void lm85_remove_files(struct i2c_client *client, struct lm85_data *data) +{ + sysfs_remove_group(&client->dev.kobj, &lm85_group); + if (data->type != emc6d103s) { + sysfs_remove_group(&client->dev.kobj, &lm85_group_minctl); + sysfs_remove_group(&client->dev.kobj, &lm85_group_temp_off); + } + if (!data->has_vid5) + sysfs_remove_group(&client->dev.kobj, &lm85_group_in4); + if (data->type == emc6d100) + sysfs_remove_group(&client->dev.kobj, &lm85_group_in567); +} + static int lm85_probe(struct i2c_client *client, const struct i2c_device_id *id) { @@ -1302,6 +1307,7 @@ static int lm85_probe(struct i2c_client *client, case emc6d100: case emc6d102: case emc6d103: + case emc6d103s: data->freq_map = adm1027_freq_map; break; default: @@ -1319,11 +1325,26 @@ static int lm85_probe(struct i2c_client *client, if (err) goto err_kfree; + /* minctl and temp_off exist on all chips except emc6d103s */ + if (data->type != emc6d103s) { + err = sysfs_create_group(&client->dev.kobj, &lm85_group_minctl); + if (err) + goto err_kfree; + err = sysfs_create_group(&client->dev.kobj, + &lm85_group_temp_off); + if (err) + goto err_kfree; + } + /* The ADT7463/68 have an optional VRM 10 mode where pin 21 is used as a sixth digital VID input rather than an analog input. 
*/ - data->vid = lm85_read_value(client, LM85_REG_VID); - if (!((data->type == adt7463 || data->type == adt7468) && - (data->vid & 0x80))) + if (data->type == adt7463 || data->type == adt7468) { + u8 vid = lm85_read_value(client, LM85_REG_VID); + if (vid & 0x80) + data->has_vid5 = true; + } + + if (!data->has_vid5) if ((err = sysfs_create_group(&client->dev.kobj, &lm85_group_in4))) goto err_remove_files; @@ -1344,10 +1365,7 @@ static int lm85_probe(struct i2c_client *client, /* Error out and cleanup code */ err_remove_files: - sysfs_remove_group(&client->dev.kobj, &lm85_group); - sysfs_remove_group(&client->dev.kobj, &lm85_group_in4); - if (data->type == emc6d100) - sysfs_remove_group(&client->dev.kobj, &lm85_group_in567); + lm85_remove_files(client, data); err_kfree: kfree(data); return err; @@ -1357,10 +1375,7 @@ static int lm85_remove(struct i2c_client *client) { struct lm85_data *data = i2c_get_clientdata(client); hwmon_device_unregister(data->hwmon_dev); - sysfs_remove_group(&client->dev.kobj, &lm85_group); - sysfs_remove_group(&client->dev.kobj, &lm85_group_in4); - if (data->type == emc6d100) - sysfs_remove_group(&client->dev.kobj, &lm85_group_in567); + lm85_remove_files(client, data); kfree(data); return 0; } @@ -1457,11 +1472,8 @@ static struct lm85_data *lm85_update_device(struct device *dev) lm85_read_value(client, LM85_REG_FAN(i)); } - if (!((data->type == adt7463 || data->type == adt7468) && - (data->vid & 0x80))) { - data->in[4] = lm85_read_value(client, - LM85_REG_IN(4)); - } + if (!data->has_vid5) + data->in[4] = lm85_read_value(client, LM85_REG_IN(4)); if (data->type == adt7468) data->cfg5 = lm85_read_value(client, ADT7468_REG_CFG5); @@ -1487,7 +1499,8 @@ static struct lm85_data *lm85_update_device(struct device *dev) /* More alarm bits */ data->alarms |= lm85_read_value(client, EMC6D100_REG_ALARM3) << 16; - } else if (data->type == emc6d102 || data->type == emc6d103) { + } else if (data->type == emc6d102 || data->type == emc6d103 || + data->type == emc6d103s) { /* Have to read LSB bits after the MSB ones because the reading of the MSB bits has frozen the LSBs (backward from the ADM1027). 
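[Editor's illustration, not part of the patch] The autofan hysteresis registers pack two 4-bit zone values per byte: zone 0 lives in the high nibble of LM85_REG_AFAN_HYST1 and zone 1 in the low nibble, while zone 2 occupies the high nibble of LM85_REG_AFAN_HYST2. That is why the emc6d103s guard in the next hunk skips the HYST reads as a block. A minimal sketch of the packing the driver open-codes, with hypothetical helper names:

	static inline u8 lm85_pack_hyst(u8 zone0, u8 zone1)
	{
		return (zone0 << 4) | (zone1 & 0x0f);	/* zone 0 in high nibble */
	}

	static inline void lm85_unpack_hyst(u8 reg, u8 *zone0, u8 *zone1)
	{
		*zone0 = reg >> 4;	/* high nibble */
		*zone1 = reg & 0x0f;	/* low nibble */
	}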
@@ -1528,8 +1541,7 @@ static struct lm85_data *lm85_update_device(struct device *dev) lm85_read_value(client, LM85_REG_FAN_MIN(i)); } - if (!((data->type == adt7463 || data->type == adt7468) && - (data->vid & 0x80))) { + if (!data->has_vid5) { data->in_min[4] = lm85_read_value(client, LM85_REG_IN_MIN(4)); data->in_max[4] = lm85_read_value(client, @@ -1573,17 +1585,19 @@ static struct lm85_data *lm85_update_device(struct device *dev) } } - i = lm85_read_value(client, LM85_REG_AFAN_SPIKE1); - data->autofan[0].min_off = (i & 0x20) != 0; - data->autofan[1].min_off = (i & 0x40) != 0; - data->autofan[2].min_off = (i & 0x80) != 0; + if (data->type != emc6d103s) { + i = lm85_read_value(client, LM85_REG_AFAN_SPIKE1); + data->autofan[0].min_off = (i & 0x20) != 0; + data->autofan[1].min_off = (i & 0x40) != 0; + data->autofan[2].min_off = (i & 0x80) != 0; - i = lm85_read_value(client, LM85_REG_AFAN_HYST1); - data->zone[0].hyst = i >> 4; - data->zone[1].hyst = i & 0x0f; + i = lm85_read_value(client, LM85_REG_AFAN_HYST1); + data->zone[0].hyst = i >> 4; + data->zone[1].hyst = i & 0x0f; - i = lm85_read_value(client, LM85_REG_AFAN_HYST2); - data->zone[2].hyst = i >> 4; + i = lm85_read_value(client, LM85_REG_AFAN_HYST2); + data->zone[2].hyst = i >> 4; + } data->last_config = jiffies; } /* last_config */ diff --git a/drivers/hwmon/ltc4151.c b/drivers/hwmon/ltc4151.c new file mode 100644 index 000000000000..4ac06b75aa60 --- /dev/null +++ b/drivers/hwmon/ltc4151.c @@ -0,0 +1,256 @@ +/* + * Driver for Linear Technology LTC4151 High Voltage I2C Current + * and Voltage Monitor + * + * Copyright (C) 2011 AppearTV AS + * + * Derived from: + * + * Driver for Linear Technology LTC4261 I2C Negative Voltage Hot + * Swap Controller + * Copyright (C) 2010 Ericsson AB. + * + * Datasheet: http://www.linear.com/docs/Datasheet/4151fc.pdf + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/err.h> +#include <linux/slab.h> +#include <linux/i2c.h> +#include <linux/hwmon.h> +#include <linux/hwmon-sysfs.h> + +/* chip registers */ +#define LTC4151_SENSE_H 0x00 +#define LTC4151_SENSE_L 0x01 +#define LTC4151_VIN_H 0x02 +#define LTC4151_VIN_L 0x03 +#define LTC4151_ADIN_H 0x04 +#define LTC4151_ADIN_L 0x05 + +struct ltc4151_data { + struct device *hwmon_dev; + + struct mutex update_lock; + bool valid; + unsigned long last_updated; /* in jiffies */ + + /* Registers */ + u8 regs[6]; +}; + +static struct ltc4151_data *ltc4151_update_device(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct ltc4151_data *data = i2c_get_clientdata(client); + struct ltc4151_data *ret = data; + + mutex_lock(&data->update_lock); + + /* + * The chip's A/D updates 6 times per second + * (Conversion Rate 6 - 9 Hz) + */ + if (time_after(jiffies, data->last_updated + HZ / 6) || !data->valid) { + int i; + + dev_dbg(&client->dev, "Starting ltc4151 update\n"); + + /* Read all registers */ + for (i = 0; i < ARRAY_SIZE(data->regs); i++) { + int val; + + val = i2c_smbus_read_byte_data(client, i); + if (unlikely(val < 0)) { + dev_dbg(dev, + "Failed to read ADC value: error %d\n", + val); + ret = ERR_PTR(val); + goto abort; + } + data->regs[i] = val; + } + data->last_updated = jiffies; + data->valid = 1; + } +abort: + mutex_unlock(&data->update_lock); + return ret; +} + +/* Return the voltage from the given register in mV */ +static int ltc4151_get_value(struct ltc4151_data *data, u8 reg) +{ + u32 val; + + val = (data->regs[reg] << 4) + (data->regs[reg + 1] >> 4); + + switch (reg) { + case LTC4151_ADIN_H: + /* 500uV resolution. Convert to mV. */ + val = val * 500 / 1000; + break; + case LTC4151_SENSE_H: + /* + * 20uV resolution. Convert to current as measured with + * an 1 mOhm sense resistor, in mA. + */ + val = val * 20; + break; + case LTC4151_VIN_H: + /* 25 mV per increment */ + val = val * 25; + break; + default: + /* If we get here, the developer messed up */ + WARN_ON_ONCE(1); + val = 0; + break; + } + + return val; +} + +static ssize_t ltc4151_show_value(struct device *dev, + struct device_attribute *da, char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct ltc4151_data *data = ltc4151_update_device(dev); + int value; + + if (IS_ERR(data)) + return PTR_ERR(data); + + value = ltc4151_get_value(data, attr->index); + return snprintf(buf, PAGE_SIZE, "%d\n", value); +} + +/* + * Input voltages. 
+ */ +static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, \ + ltc4151_show_value, NULL, LTC4151_VIN_H); +static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, \ + ltc4151_show_value, NULL, LTC4151_ADIN_H); + +/* Currents (via sense resistor) */ +static SENSOR_DEVICE_ATTR(curr1_input, S_IRUGO, \ + ltc4151_show_value, NULL, LTC4151_SENSE_H); + +/* Finally, construct an array of pointers to members of the above objects, + * as required for sysfs_create_group() + */ +static struct attribute *ltc4151_attributes[] = { + &sensor_dev_attr_in1_input.dev_attr.attr, + &sensor_dev_attr_in2_input.dev_attr.attr, + + &sensor_dev_attr_curr1_input.dev_attr.attr, + + NULL, +}; + +static const struct attribute_group ltc4151_group = { + .attrs = ltc4151_attributes, +}; + +static int ltc4151_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct i2c_adapter *adapter = client->adapter; + struct ltc4151_data *data; + int ret; + + if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) + return -ENODEV; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) { + ret = -ENOMEM; + goto out_kzalloc; + } + + i2c_set_clientdata(client, data); + mutex_init(&data->update_lock); + + /* Register sysfs hooks */ + ret = sysfs_create_group(&client->dev.kobj, <c4151_group); + if (ret) + goto out_sysfs_create_group; + + data->hwmon_dev = hwmon_device_register(&client->dev); + if (IS_ERR(data->hwmon_dev)) { + ret = PTR_ERR(data->hwmon_dev); + goto out_hwmon_device_register; + } + + return 0; + +out_hwmon_device_register: + sysfs_remove_group(&client->dev.kobj, <c4151_group); +out_sysfs_create_group: + kfree(data); +out_kzalloc: + return ret; +} + +static int ltc4151_remove(struct i2c_client *client) +{ + struct ltc4151_data *data = i2c_get_clientdata(client); + + hwmon_device_unregister(data->hwmon_dev); + sysfs_remove_group(&client->dev.kobj, <c4151_group); + + kfree(data); + + return 0; +} + +static const struct i2c_device_id ltc4151_id[] = { + { "ltc4151", 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, ltc4151_id); + +/* This is the driver that will be inserted */ +static struct i2c_driver ltc4151_driver = { + .driver = { + .name = "ltc4151", + }, + .probe = ltc4151_probe, + .remove = ltc4151_remove, + .id_table = ltc4151_id, +}; + +static int __init ltc4151_init(void) +{ + return i2c_add_driver(<c4151_driver); +} + +static void __exit ltc4151_exit(void) +{ + i2c_del_driver(<c4151_driver); +} + +MODULE_AUTHOR("Per Dalen <per.dalen@appeartv.com>"); +MODULE_DESCRIPTION("LTC4151 driver"); +MODULE_LICENSE("GPL"); + +module_init(ltc4151_init); +module_exit(ltc4151_exit); diff --git a/drivers/hwmon/max16064.c b/drivers/hwmon/max16064.c new file mode 100644 index 000000000000..1d6d717060d3 --- /dev/null +++ b/drivers/hwmon/max16064.c @@ -0,0 +1,91 @@ +/* + * Hardware monitoring driver for Maxim MAX16064 + * + * Copyright (c) 2011 Ericsson AB. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
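[Illustrative aside, not part of the patch] Each LTC4151 measurement is a 12-bit sample split across a register pair: the high register carries bits [11:4] and the upper nibble of the low register carries bits [3:0], which is what ltc4151_get_value() reassembles before scaling. A worked example with made-up register contents:

	u8 msb = 0x19, lsb = 0xa0;		/* LTC4151_SENSE_H / _L, assumed */
	u16 raw = (msb << 4) | (lsb >> 4);	/* 0x19a = 410 counts */
	/* 20 uV per count across the assumed 1 mOhm shunt: 410 * 20 = 8200 mA */
	unsigned int curr_ma = raw * 20;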
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/err.h> +#include <linux/i2c.h> +#include "pmbus.h" + +static struct pmbus_driver_info max16064_info = { + .pages = 4, + .direct[PSC_VOLTAGE_IN] = true, + .direct[PSC_VOLTAGE_OUT] = true, + .direct[PSC_TEMPERATURE] = true, + .m[PSC_VOLTAGE_IN] = 19995, + .b[PSC_VOLTAGE_IN] = 0, + .R[PSC_VOLTAGE_IN] = -1, + .m[PSC_VOLTAGE_OUT] = 19995, + .b[PSC_VOLTAGE_OUT] = 0, + .R[PSC_VOLTAGE_OUT] = -1, + .m[PSC_TEMPERATURE] = -7612, + .b[PSC_TEMPERATURE] = 335, + .R[PSC_TEMPERATURE] = -3, + .func[0] = PMBUS_HAVE_VOUT | PMBUS_HAVE_TEMP + | PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_STATUS_TEMP, + .func[1] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT, + .func[2] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT, + .func[3] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT, +}; + +static int max16064_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + return pmbus_do_probe(client, id, &max16064_info); +} + +static int max16064_remove(struct i2c_client *client) +{ + return pmbus_do_remove(client); +} + +static const struct i2c_device_id max16064_id[] = { + {"max16064", 0}, + {} +}; + +MODULE_DEVICE_TABLE(i2c, max16064_id); + +/* This is the driver that will be inserted */ +static struct i2c_driver max16064_driver = { + .driver = { + .name = "max16064", + }, + .probe = max16064_probe, + .remove = max16064_remove, + .id_table = max16064_id, +}; + +static int __init max16064_init(void) +{ + return i2c_add_driver(&max16064_driver); +} + +static void __exit max16064_exit(void) +{ + i2c_del_driver(&max16064_driver); +} + +MODULE_AUTHOR("Guenter Roeck"); +MODULE_DESCRIPTION("PMBus driver for Maxim MAX16064"); +MODULE_LICENSE("GPL"); +module_init(max16064_init); +module_exit(max16064_exit); diff --git a/drivers/hwmon/max34440.c b/drivers/hwmon/max34440.c new file mode 100644 index 000000000000..992b701b4c5e --- /dev/null +++ b/drivers/hwmon/max34440.c @@ -0,0 +1,199 @@ +/* + * Hardware monitoring driver for Maxim MAX34440/MAX34441 + * + * Copyright (c) 2011 Ericsson AB. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
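[Editor's sketch, not part of the patch] The m/b/R coefficients above describe the PMBus "direct" data format: a register reading Y encodes a real-world value X as Y = (m * X + b) * 10^R, so the host decodes X = (Y * 10^-R - b) / m. A minimal decode sketch in milli-units; the function name is hypothetical and the real conversion lives in pmbus_core.c:

	static long pmbus_direct_decode_milli(long y, long m, long b, int R)
	{
		long val = y;

		for (; R > 0; R--)	/* apply 10^-R to the reading */
			val /= 10;
		for (; R < 0; R++)
			val *= 10;
		return ((val - b) * 1000L) / m;
	}

With the voltage coefficients above (m = 19995, b = 0, R = -1), a raw reading of 2400 decodes to 2400 * 10 * 1000 / 19995, about 1200 mV.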
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/err.h> +#include <linux/i2c.h> +#include "pmbus.h" + +enum chips { max34440, max34441 }; + +#define MAX34440_STATUS_OC_WARN (1 << 0) +#define MAX34440_STATUS_OC_FAULT (1 << 1) +#define MAX34440_STATUS_OT_FAULT (1 << 5) +#define MAX34440_STATUS_OT_WARN (1 << 6) + +static int max34440_get_status(struct i2c_client *client, int page, int reg) +{ + int ret; + int mfg_status; + + ret = pmbus_set_page(client, page); + if (ret < 0) + return ret; + + switch (reg) { + case PMBUS_STATUS_IOUT: + mfg_status = pmbus_read_word_data(client, 0, + PMBUS_STATUS_MFR_SPECIFIC); + if (mfg_status < 0) + return mfg_status; + if (mfg_status & MAX34440_STATUS_OC_WARN) + ret |= PB_IOUT_OC_WARNING; + if (mfg_status & MAX34440_STATUS_OC_FAULT) + ret |= PB_IOUT_OC_FAULT; + break; + case PMBUS_STATUS_TEMPERATURE: + mfg_status = pmbus_read_word_data(client, 0, + PMBUS_STATUS_MFR_SPECIFIC); + if (mfg_status < 0) + return mfg_status; + if (mfg_status & MAX34440_STATUS_OT_WARN) + ret |= PB_TEMP_OT_WARNING; + if (mfg_status & MAX34440_STATUS_OT_FAULT) + ret |= PB_TEMP_OT_FAULT; + break; + default: + ret = -ENODATA; + break; + } + return ret; +} + +static struct pmbus_driver_info max34440_info[] = { + [max34440] = { + .pages = 14, + .direct[PSC_VOLTAGE_IN] = true, + .direct[PSC_VOLTAGE_OUT] = true, + .direct[PSC_TEMPERATURE] = true, + .direct[PSC_CURRENT_OUT] = true, + .m[PSC_VOLTAGE_IN] = 1, + .b[PSC_VOLTAGE_IN] = 0, + .R[PSC_VOLTAGE_IN] = 3, /* R = 0 in datasheet reflects mV */ + .m[PSC_VOLTAGE_OUT] = 1, + .b[PSC_VOLTAGE_OUT] = 0, + .R[PSC_VOLTAGE_OUT] = 3, /* R = 0 in datasheet reflects mV */ + .m[PSC_CURRENT_OUT] = 1, + .b[PSC_CURRENT_OUT] = 0, + .R[PSC_CURRENT_OUT] = 3, /* R = 0 in datasheet reflects mA */ + .m[PSC_TEMPERATURE] = 1, + .b[PSC_TEMPERATURE] = 0, + .R[PSC_TEMPERATURE] = 2, + .func[0] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT + | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT, + .func[1] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT + | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT, + .func[2] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT + | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT, + .func[3] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT + | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT, + .func[4] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT + | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT, + .func[5] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT + | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT, + .func[6] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, + .func[7] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, + .func[8] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, + .func[9] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, + .func[10] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, + .func[11] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, + .func[12] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, + .func[13] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, + .get_status = max34440_get_status, + }, + [max34441] = { + .pages = 12, + .direct[PSC_VOLTAGE_IN] = true, + .direct[PSC_VOLTAGE_OUT] = true, + .direct[PSC_TEMPERATURE] = true, + .direct[PSC_CURRENT_OUT] = true, + .direct[PSC_FAN] = true, + .m[PSC_VOLTAGE_IN] = 1, + .b[PSC_VOLTAGE_IN] = 0, + .R[PSC_VOLTAGE_IN] = 3, + .m[PSC_VOLTAGE_OUT] = 1, + .b[PSC_VOLTAGE_OUT] = 0, + .R[PSC_VOLTAGE_OUT] = 3, + .m[PSC_CURRENT_OUT] = 1, + .b[PSC_CURRENT_OUT] = 0, + .R[PSC_CURRENT_OUT] = 3, + .m[PSC_TEMPERATURE] = 1, + .b[PSC_TEMPERATURE] = 0, + .R[PSC_TEMPERATURE] = 2, + .m[PSC_FAN] = 1, + .b[PSC_FAN] = 0, + .R[PSC_FAN] = 0, + 
.func[0] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT + | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT, + .func[1] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT + | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT, + .func[2] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT + | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT, + .func[3] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT + | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT, + .func[4] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT + | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT, + .func[5] = PMBUS_HAVE_FAN12 | PMBUS_HAVE_STATUS_FAN12, + .func[6] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, + .func[7] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, + .func[8] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, + .func[9] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, + .func[10] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, + .func[11] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, + .get_status = max34440_get_status, + }, +}; + +static int max34440_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + return pmbus_do_probe(client, id, &max34440_info[id->driver_data]); +} + +static int max34440_remove(struct i2c_client *client) +{ + return pmbus_do_remove(client); +} + +static const struct i2c_device_id max34440_id[] = { + {"max34440", max34440}, + {"max34441", max34441}, + {} +}; + +MODULE_DEVICE_TABLE(i2c, max34440_id); + +/* This is the driver that will be inserted */ +static struct i2c_driver max34440_driver = { + .driver = { + .name = "max34440", + }, + .probe = max34440_probe, + .remove = max34440_remove, + .id_table = max34440_id, +}; + +static int __init max34440_init(void) +{ + return i2c_add_driver(&max34440_driver); +} + +static void __exit max34440_exit(void) +{ + i2c_del_driver(&max34440_driver); +} + +MODULE_AUTHOR("Guenter Roeck"); +MODULE_DESCRIPTION("PMBus driver for Maxim MAX34440/MAX34441"); +MODULE_LICENSE("GPL"); +module_init(max34440_init); +module_exit(max34440_exit); diff --git a/drivers/hwmon/max6639.c b/drivers/hwmon/max6639.c new file mode 100644 index 000000000000..f20d9978ee78 --- /dev/null +++ b/drivers/hwmon/max6639.c @@ -0,0 +1,653 @@ +/* + * max6639.c - Support for Maxim MAX6639 + * + * 2-Channel Temperature Monitor with Dual PWM Fan-Speed Controller + * + * Copyright (C) 2010, 2011 Roland Stigge <stigge@antcom.de> + * + * based on the initial MAX6639 support from semptian.net + * by He Changqing <hechangqing@semptian.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/jiffies.h> +#include <linux/i2c.h> +#include <linux/hwmon.h> +#include <linux/hwmon-sysfs.h> +#include <linux/err.h> +#include <linux/mutex.h> +#include <linux/i2c/max6639.h> + +/* Addresses to scan */ +static unsigned short normal_i2c[] = { 0x2c, 0x2e, 0x2f, I2C_CLIENT_END }; + +/* The MAX6639 registers, valid channel numbers: 0, 1 */ +#define MAX6639_REG_TEMP(ch) (0x00 + (ch)) +#define MAX6639_REG_STATUS 0x02 +#define MAX6639_REG_OUTPUT_MASK 0x03 +#define MAX6639_REG_GCONFIG 0x04 +#define MAX6639_REG_TEMP_EXT(ch) (0x05 + (ch)) +#define MAX6639_REG_ALERT_LIMIT(ch) (0x08 + (ch)) +#define MAX6639_REG_OT_LIMIT(ch) (0x0A + (ch)) +#define MAX6639_REG_THERM_LIMIT(ch) (0x0C + (ch)) +#define MAX6639_REG_FAN_CONFIG1(ch) (0x10 + (ch) * 4) +#define MAX6639_REG_FAN_CONFIG2a(ch) (0x11 + (ch) * 4) +#define MAX6639_REG_FAN_CONFIG2b(ch) (0x12 + (ch) * 4) +#define MAX6639_REG_FAN_CONFIG3(ch) (0x13 + (ch) * 4) +#define MAX6639_REG_FAN_CNT(ch) (0x20 + (ch)) +#define MAX6639_REG_TARGET_CNT(ch) (0x22 + (ch)) +#define MAX6639_REG_FAN_PPR(ch) (0x24 + (ch)) +#define MAX6639_REG_TARGTDUTY(ch) (0x26 + (ch)) +#define MAX6639_REG_FAN_START_TEMP(ch) (0x28 + (ch)) +#define MAX6639_REG_DEVID 0x3D +#define MAX6639_REG_MANUID 0x3E +#define MAX6639_REG_DEVREV 0x3F + +/* Register bits */ +#define MAX6639_GCONFIG_STANDBY 0x80 +#define MAX6639_GCONFIG_POR 0x40 +#define MAX6639_GCONFIG_DISABLE_TIMEOUT 0x20 +#define MAX6639_GCONFIG_CH2_LOCAL 0x10 +#define MAX6639_GCONFIG_PWM_FREQ_HI 0x08 + +#define MAX6639_FAN_CONFIG1_PWM 0x80 + +#define MAX6639_FAN_CONFIG3_THERM_FULL_SPEED 0x40 + +static const int rpm_ranges[] = { 2000, 4000, 8000, 16000 }; + +#define FAN_FROM_REG(val, div, rpm_range) ((val) == 0 ? -1 : \ + (val) == 255 ? 
0 : (rpm_ranges[rpm_range] * 30) / ((div + 1) * (val))) +#define TEMP_LIMIT_TO_REG(val) SENSORS_LIMIT((val) / 1000, 0, 255) + +/* + * Client data (each client gets its own) + */ +struct max6639_data { + struct device *hwmon_dev; + struct mutex update_lock; + char valid; /* !=0 if following fields are valid */ + unsigned long last_updated; /* In jiffies */ + + /* Register values sampled regularly */ + u16 temp[2]; /* Temperature, in 1/8 C, 0..255 C */ + bool temp_fault[2]; /* Detected temperature diode failure */ + u8 fan[2]; /* Register value: TACH count for fans >=30 */ + u8 status; /* Detected channel alarms and fan failures */ + + /* Register values only written to */ + u8 pwm[2]; /* Register value: Duty cycle 0..120 */ + u8 temp_therm[2]; /* THERM Temperature, 0..255 C (->_max) */ + u8 temp_alert[2]; /* ALERT Temperature, 0..255 C (->_crit) */ + u8 temp_ot[2]; /* OT Temperature, 0..255 C (->_emergency) */ + + /* Register values initialized only once */ + u8 ppr; /* Pulses per rotation 0..3 for 1..4 ppr */ + u8 rpm_range; /* Index in above rpm_ranges table */ +}; + +static struct max6639_data *max6639_update_device(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct max6639_data *data = i2c_get_clientdata(client); + struct max6639_data *ret = data; + int i; + int status_reg; + + mutex_lock(&data->update_lock); + + if (time_after(jiffies, data->last_updated + 2 * HZ) || !data->valid) { + int res; + + dev_dbg(&client->dev, "Starting max6639 update\n"); + + status_reg = i2c_smbus_read_byte_data(client, + MAX6639_REG_STATUS); + if (status_reg < 0) { + ret = ERR_PTR(status_reg); + goto abort; + } + + data->status = status_reg; + + for (i = 0; i < 2; i++) { + res = i2c_smbus_read_byte_data(client, + MAX6639_REG_FAN_CNT(i)); + if (res < 0) { + ret = ERR_PTR(res); + goto abort; + } + data->fan[i] = res; + + res = i2c_smbus_read_byte_data(client, + MAX6639_REG_TEMP_EXT(i)); + if (res < 0) { + ret = ERR_PTR(res); + goto abort; + } + data->temp[i] = res >> 5; + data->temp_fault[i] = res & 0x01; + + res = i2c_smbus_read_byte_data(client, + MAX6639_REG_TEMP(i)); + if (res < 0) { + ret = ERR_PTR(res); + goto abort; + } + data->temp[i] |= res << 3; + } + + data->last_updated = jiffies; + data->valid = 1; + } +abort: + mutex_unlock(&data->update_lock); + + return ret; +} + +static ssize_t show_temp_input(struct device *dev, + struct device_attribute *dev_attr, char *buf) +{ + long temp; + struct max6639_data *data = max6639_update_device(dev); + struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr); + + if (IS_ERR(data)) + return PTR_ERR(data); + + temp = data->temp[attr->index] * 125; + return sprintf(buf, "%ld\n", temp); +} + +static ssize_t show_temp_fault(struct device *dev, + struct device_attribute *dev_attr, char *buf) +{ + struct max6639_data *data = max6639_update_device(dev); + struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr); + + if (IS_ERR(data)) + return PTR_ERR(data); + + return sprintf(buf, "%d\n", data->temp_fault[attr->index]); +} + +static ssize_t show_temp_max(struct device *dev, + struct device_attribute *dev_attr, char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + struct max6639_data *data = i2c_get_clientdata(client); + struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr); + + return sprintf(buf, "%d\n", (data->temp_therm[attr->index] * 1000)); +} + +static ssize_t set_temp_max(struct device *dev, + struct device_attribute *dev_attr, + const char *buf, size_t count) +{ + struct i2c_client 
*client = to_i2c_client(dev); + struct max6639_data *data = i2c_get_clientdata(client); + struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr); + unsigned long val; + int res; + + res = strict_strtoul(buf, 10, &val); + if (res) + return res; + + mutex_lock(&data->update_lock); + data->temp_therm[attr->index] = TEMP_LIMIT_TO_REG(val); + i2c_smbus_write_byte_data(client, + MAX6639_REG_THERM_LIMIT(attr->index), + data->temp_therm[attr->index]); + mutex_unlock(&data->update_lock); + return count; +} + +static ssize_t show_temp_crit(struct device *dev, + struct device_attribute *dev_attr, char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + struct max6639_data *data = i2c_get_clientdata(client); + struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr); + + return sprintf(buf, "%d\n", (data->temp_alert[attr->index] * 1000)); +} + +static ssize_t set_temp_crit(struct device *dev, + struct device_attribute *dev_attr, + const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + struct max6639_data *data = i2c_get_clientdata(client); + struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr); + unsigned long val; + int res; + + res = strict_strtoul(buf, 10, &val); + if (res) + return res; + + mutex_lock(&data->update_lock); + data->temp_alert[attr->index] = TEMP_LIMIT_TO_REG(val); + i2c_smbus_write_byte_data(client, + MAX6639_REG_ALERT_LIMIT(attr->index), + data->temp_alert[attr->index]); + mutex_unlock(&data->update_lock); + return count; +} + +static ssize_t show_temp_emergency(struct device *dev, + struct device_attribute *dev_attr, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + struct max6639_data *data = i2c_get_clientdata(client); + struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr); + + return sprintf(buf, "%d\n", (data->temp_ot[attr->index] * 1000)); +} + +static ssize_t set_temp_emergency(struct device *dev, + struct device_attribute *dev_attr, + const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + struct max6639_data *data = i2c_get_clientdata(client); + struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr); + unsigned long val; + int res; + + res = strict_strtoul(buf, 10, &val); + if (res) + return res; + + mutex_lock(&data->update_lock); + data->temp_ot[attr->index] = TEMP_LIMIT_TO_REG(val); + i2c_smbus_write_byte_data(client, + MAX6639_REG_OT_LIMIT(attr->index), + data->temp_ot[attr->index]); + mutex_unlock(&data->update_lock); + return count; +} + +static ssize_t show_pwm(struct device *dev, + struct device_attribute *dev_attr, char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + struct max6639_data *data = i2c_get_clientdata(client); + struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr); + + return sprintf(buf, "%d\n", data->pwm[attr->index] * 255 / 120); +} + +static ssize_t set_pwm(struct device *dev, + struct device_attribute *dev_attr, + const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + struct max6639_data *data = i2c_get_clientdata(client); + struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr); + unsigned long val; + int res; + + res = strict_strtoul(buf, 10, &val); + if (res) + return res; + + val = SENSORS_LIMIT(val, 0, 255); + + mutex_lock(&data->update_lock); + data->pwm[attr->index] = (u8)(val * 120 / 255); + i2c_smbus_write_byte_data(client, + MAX6639_REG_TARGTDUTY(attr->index), + data->pwm[attr->index]); + 
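[Worked examples, not part of the patch] Two scale conversions in this driver are easy to misread: the sysfs pwm files use the conventional 0..255 range while the chip's duty-cycle registers run 0..120, and FAN_FROM_REG(), used by show_fan_input() just below, derives RPM from the TACH count, the pulses-per-revolution register value, and the selected RPM range. With assumed values:

	u8 reg  = 128 * 120 / 255;	/* store: 128 -> 60, i.e. 50% duty */
	u8 back = reg * 255 / 120;	/* show back: 60 -> 127, truncation loses 1 */

	/* rpm_range 1 selects 4000 RPM, ppr register value 1 means 2 pulses:
	 * 4000 * 30 / ((1 + 1) * 60) = 1000 RPM for a TACH count of 60
	 */
	int rpm = FAN_FROM_REG(60, 1, 1);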
mutex_unlock(&data->update_lock); + return count; +} + +static ssize_t show_fan_input(struct device *dev, + struct device_attribute *dev_attr, char *buf) +{ + struct max6639_data *data = max6639_update_device(dev); + struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr); + + if (IS_ERR(data)) + return PTR_ERR(data); + + return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan[attr->index], + data->ppr, data->rpm_range)); +} + +static ssize_t show_alarm(struct device *dev, + struct device_attribute *dev_attr, char *buf) +{ + struct max6639_data *data = max6639_update_device(dev); + struct sensor_device_attribute *attr = to_sensor_dev_attr(dev_attr); + + if (IS_ERR(data)) + return PTR_ERR(data); + + return sprintf(buf, "%d\n", !!(data->status & (1 << attr->index))); +} + +static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp_input, NULL, 0); +static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp_input, NULL, 1); +static SENSOR_DEVICE_ATTR(temp1_fault, S_IRUGO, show_temp_fault, NULL, 0); +static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_temp_fault, NULL, 1); +static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp_max, + set_temp_max, 0); +static SENSOR_DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_temp_max, + set_temp_max, 1); +static SENSOR_DEVICE_ATTR(temp1_crit, S_IWUSR | S_IRUGO, show_temp_crit, + set_temp_crit, 0); +static SENSOR_DEVICE_ATTR(temp2_crit, S_IWUSR | S_IRUGO, show_temp_crit, + set_temp_crit, 1); +static SENSOR_DEVICE_ATTR(temp1_emergency, S_IWUSR | S_IRUGO, + show_temp_emergency, set_temp_emergency, 0); +static SENSOR_DEVICE_ATTR(temp2_emergency, S_IWUSR | S_IRUGO, + show_temp_emergency, set_temp_emergency, 1); +static SENSOR_DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm, set_pwm, 0); +static SENSOR_DEVICE_ATTR(pwm2, S_IWUSR | S_IRUGO, show_pwm, set_pwm, 1); +static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, show_fan_input, NULL, 0); +static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, show_fan_input, NULL, 1); +static SENSOR_DEVICE_ATTR(fan1_fault, S_IRUGO, show_alarm, NULL, 1); +static SENSOR_DEVICE_ATTR(fan2_fault, S_IRUGO, show_alarm, NULL, 0); +static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 3); +static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_alarm, NULL, 2); +static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 7); +static SENSOR_DEVICE_ATTR(temp2_crit_alarm, S_IRUGO, show_alarm, NULL, 6); +static SENSOR_DEVICE_ATTR(temp1_emergency_alarm, S_IRUGO, show_alarm, NULL, 5); +static SENSOR_DEVICE_ATTR(temp2_emergency_alarm, S_IRUGO, show_alarm, NULL, 4); + + +static struct attribute *max6639_attributes[] = { + &sensor_dev_attr_temp1_input.dev_attr.attr, + &sensor_dev_attr_temp2_input.dev_attr.attr, + &sensor_dev_attr_temp1_fault.dev_attr.attr, + &sensor_dev_attr_temp2_fault.dev_attr.attr, + &sensor_dev_attr_temp1_max.dev_attr.attr, + &sensor_dev_attr_temp2_max.dev_attr.attr, + &sensor_dev_attr_temp1_crit.dev_attr.attr, + &sensor_dev_attr_temp2_crit.dev_attr.attr, + &sensor_dev_attr_temp1_emergency.dev_attr.attr, + &sensor_dev_attr_temp2_emergency.dev_attr.attr, + &sensor_dev_attr_pwm1.dev_attr.attr, + &sensor_dev_attr_pwm2.dev_attr.attr, + &sensor_dev_attr_fan1_input.dev_attr.attr, + &sensor_dev_attr_fan2_input.dev_attr.attr, + &sensor_dev_attr_fan1_fault.dev_attr.attr, + &sensor_dev_attr_fan2_fault.dev_attr.attr, + &sensor_dev_attr_temp1_max_alarm.dev_attr.attr, + &sensor_dev_attr_temp2_max_alarm.dev_attr.attr, + &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr, + 
&sensor_dev_attr_temp2_crit_alarm.dev_attr.attr, + &sensor_dev_attr_temp1_emergency_alarm.dev_attr.attr, + &sensor_dev_attr_temp2_emergency_alarm.dev_attr.attr, + NULL +}; + +static const struct attribute_group max6639_group = { + .attrs = max6639_attributes, +}; + +/* + * returns respective index in rpm_ranges table + * 1 by default on invalid range + */ +static int rpm_range_to_reg(int range) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(rpm_ranges); i++) { + if (rpm_ranges[i] == range) + return i; + } + + return 1; /* default: 4000 RPM */ +} + +static int max6639_init_client(struct i2c_client *client) +{ + struct max6639_data *data = i2c_get_clientdata(client); + struct max6639_platform_data *max6639_info = + client->dev.platform_data; + int i = 0; + int rpm_range = 1; /* default: 4000 RPM */ + int err = 0; + + /* Reset chip to default values, see below for GCONFIG setup */ + err = i2c_smbus_write_byte_data(client, MAX6639_REG_GCONFIG, + MAX6639_GCONFIG_POR); + if (err) + goto exit; + + /* Fans pulse per revolution is 2 by default */ + if (max6639_info && max6639_info->ppr > 0 && + max6639_info->ppr < 5) + data->ppr = max6639_info->ppr; + else + data->ppr = 2; + data->ppr -= 1; + err = i2c_smbus_write_byte_data(client, + MAX6639_REG_FAN_PPR(i), + data->ppr << 5); + if (err) + goto exit; + + if (max6639_info) + rpm_range = rpm_range_to_reg(max6639_info->rpm_range); + data->rpm_range = rpm_range; + + for (i = 0; i < 2; i++) { + + /* Fans config PWM, RPM */ + err = i2c_smbus_write_byte_data(client, + MAX6639_REG_FAN_CONFIG1(i), + MAX6639_FAN_CONFIG1_PWM | rpm_range); + if (err) + goto exit; + + /* Fans PWM polarity high by default */ + if (max6639_info && max6639_info->pwm_polarity == 0) + err = i2c_smbus_write_byte_data(client, + MAX6639_REG_FAN_CONFIG2a(i), 0x00); + else + err = i2c_smbus_write_byte_data(client, + MAX6639_REG_FAN_CONFIG2a(i), 0x02); + if (err) + goto exit; + + /* + * /THERM full speed enable, + * PWM frequency 25kHz, see also GCONFIG below + */ + err = i2c_smbus_write_byte_data(client, + MAX6639_REG_FAN_CONFIG3(i), + MAX6639_FAN_CONFIG3_THERM_FULL_SPEED | 0x03); + if (err) + goto exit; + + /* Max. temp. 80C/90C/100C */ + data->temp_therm[i] = 80; + data->temp_alert[i] = 90; + data->temp_ot[i] = 100; + err = i2c_smbus_write_byte_data(client, + MAX6639_REG_THERM_LIMIT(i), + data->temp_therm[i]); + if (err) + goto exit; + err = i2c_smbus_write_byte_data(client, + MAX6639_REG_ALERT_LIMIT(i), + data->temp_alert[i]); + if (err) + goto exit; + err = i2c_smbus_write_byte_data(client, + MAX6639_REG_OT_LIMIT(i), data->temp_ot[i]); + if (err) + goto exit; + + /* PWM 120/120 (i.e. 
100%) */ + data->pwm[i] = 120; + err = i2c_smbus_write_byte_data(client, + MAX6639_REG_TARGTDUTY(i), data->pwm[i]); + if (err) + goto exit; + } + /* Start monitoring */ + err = i2c_smbus_write_byte_data(client, MAX6639_REG_GCONFIG, + MAX6639_GCONFIG_DISABLE_TIMEOUT | MAX6639_GCONFIG_CH2_LOCAL | + MAX6639_GCONFIG_PWM_FREQ_HI); +exit: + return err; +} + +/* Return 0 if detection is successful, -ENODEV otherwise */ +static int max6639_detect(struct i2c_client *client, + struct i2c_board_info *info) +{ + struct i2c_adapter *adapter = client->adapter; + int dev_id, manu_id; + + if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) + return -ENODEV; + + /* Actual detection via device and manufacturer ID */ + dev_id = i2c_smbus_read_byte_data(client, MAX6639_REG_DEVID); + manu_id = i2c_smbus_read_byte_data(client, MAX6639_REG_MANUID); + if (dev_id != 0x58 || manu_id != 0x4D) + return -ENODEV; + + strlcpy(info->type, "max6639", I2C_NAME_SIZE); + + return 0; +} + +static int max6639_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct max6639_data *data; + int err; + + data = kzalloc(sizeof(struct max6639_data), GFP_KERNEL); + if (!data) { + err = -ENOMEM; + goto exit; + } + + i2c_set_clientdata(client, data); + mutex_init(&data->update_lock); + + /* Initialize the max6639 chip */ + err = max6639_init_client(client); + if (err < 0) + goto error_free; + + /* Register sysfs hooks */ + err = sysfs_create_group(&client->dev.kobj, &max6639_group); + if (err) + goto error_free; + + data->hwmon_dev = hwmon_device_register(&client->dev); + if (IS_ERR(data->hwmon_dev)) { + err = PTR_ERR(data->hwmon_dev); + goto error_remove; + } + + dev_info(&client->dev, "temperature sensor and fan control found\n"); + + return 0; + +error_remove: + sysfs_remove_group(&client->dev.kobj, &max6639_group); +error_free: + kfree(data); +exit: + return err; +} + +static int max6639_remove(struct i2c_client *client) +{ + struct max6639_data *data = i2c_get_clientdata(client); + + hwmon_device_unregister(data->hwmon_dev); + sysfs_remove_group(&client->dev.kobj, &max6639_group); + + kfree(data); + return 0; +} + +static int max6639_suspend(struct i2c_client *client, pm_message_t mesg) +{ + int data = i2c_smbus_read_byte_data(client, MAX6639_REG_GCONFIG); + if (data < 0) + return data; + + return i2c_smbus_write_byte_data(client, + MAX6639_REG_GCONFIG, data | MAX6639_GCONFIG_STANDBY); +} + +static int max6639_resume(struct i2c_client *client) +{ + int data = i2c_smbus_read_byte_data(client, MAX6639_REG_GCONFIG); + if (data < 0) + return data; + + return i2c_smbus_write_byte_data(client, + MAX6639_REG_GCONFIG, data & ~MAX6639_GCONFIG_STANDBY); +} + +static const struct i2c_device_id max6639_id[] = { + {"max6639", 0}, + { } +}; + +MODULE_DEVICE_TABLE(i2c, max6639_id); + +static struct i2c_driver max6639_driver = { + .class = I2C_CLASS_HWMON, + .driver = { + .name = "max6639", + }, + .probe = max6639_probe, + .remove = max6639_remove, + .suspend = max6639_suspend, + .resume = max6639_resume, + .id_table = max6639_id, + .detect = max6639_detect, + .address_list = normal_i2c, +}; + +static int __init max6639_init(void) +{ + return i2c_add_driver(&max6639_driver); +} + +static void __exit max6639_exit(void) +{ + i2c_del_driver(&max6639_driver); +} + +MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>"); +MODULE_DESCRIPTION("max6639 driver"); +MODULE_LICENSE("GPL"); + +module_init(max6639_init); +module_exit(max6639_exit); diff --git a/drivers/hwmon/max8688.c b/drivers/hwmon/max8688.c new file mode 100644 
index 000000000000..8ebfef2ecf26 --- /dev/null +++ b/drivers/hwmon/max8688.c @@ -0,0 +1,158 @@ +/* + * Hardware monitoring driver for Maxim MAX8688 + * + * Copyright (c) 2011 Ericsson AB. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/err.h> +#include <linux/i2c.h> +#include "pmbus.h" + +#define MAX8688_MFG_STATUS 0xd8 + +#define MAX8688_STATUS_OC_FAULT (1 << 4) +#define MAX8688_STATUS_OV_FAULT (1 << 5) +#define MAX8688_STATUS_OV_WARNING (1 << 8) +#define MAX8688_STATUS_UV_FAULT (1 << 9) +#define MAX8688_STATUS_UV_WARNING (1 << 10) +#define MAX8688_STATUS_UC_FAULT (1 << 11) +#define MAX8688_STATUS_OC_WARNING (1 << 12) +#define MAX8688_STATUS_OT_FAULT (1 << 13) +#define MAX8688_STATUS_OT_WARNING (1 << 14) + +static int max8688_get_status(struct i2c_client *client, int page, int reg) +{ + int ret = 0; + int mfg_status; + + if (page) + return -EINVAL; + + switch (reg) { + case PMBUS_STATUS_VOUT: + mfg_status = pmbus_read_word_data(client, 0, + MAX8688_MFG_STATUS); + if (mfg_status < 0) + return mfg_status; + if (mfg_status & MAX8688_STATUS_UV_WARNING) + ret |= PB_VOLTAGE_UV_WARNING; + if (mfg_status & MAX8688_STATUS_UV_FAULT) + ret |= PB_VOLTAGE_UV_FAULT; + if (mfg_status & MAX8688_STATUS_OV_WARNING) + ret |= PB_VOLTAGE_OV_WARNING; + if (mfg_status & MAX8688_STATUS_OV_FAULT) + ret |= PB_VOLTAGE_OV_FAULT; + break; + case PMBUS_STATUS_IOUT: + mfg_status = pmbus_read_word_data(client, 0, + MAX8688_MFG_STATUS); + if (mfg_status < 0) + return mfg_status; + if (mfg_status & MAX8688_STATUS_UC_FAULT) + ret |= PB_IOUT_UC_FAULT; + if (mfg_status & MAX8688_STATUS_OC_WARNING) + ret |= PB_IOUT_OC_WARNING; + if (mfg_status & MAX8688_STATUS_OC_FAULT) + ret |= PB_IOUT_OC_FAULT; + break; + case PMBUS_STATUS_TEMPERATURE: + mfg_status = pmbus_read_word_data(client, 0, + MAX8688_MFG_STATUS); + if (mfg_status < 0) + return mfg_status; + if (mfg_status & MAX8688_STATUS_OT_WARNING) + ret |= PB_TEMP_OT_WARNING; + if (mfg_status & MAX8688_STATUS_OT_FAULT) + ret |= PB_TEMP_OT_FAULT; + break; + default: + ret = -ENODATA; + break; + } + return ret; +} + +static struct pmbus_driver_info max8688_info = { + .pages = 1, + .direct[PSC_VOLTAGE_IN] = true, + .direct[PSC_VOLTAGE_OUT] = true, + .direct[PSC_TEMPERATURE] = true, + .direct[PSC_CURRENT_OUT] = true, + .m[PSC_VOLTAGE_IN] = 19995, + .b[PSC_VOLTAGE_IN] = 0, + .R[PSC_VOLTAGE_IN] = -1, + .m[PSC_VOLTAGE_OUT] = 19995, + .b[PSC_VOLTAGE_OUT] = 0, + .R[PSC_VOLTAGE_OUT] = -1, + .m[PSC_CURRENT_OUT] = 23109, + .b[PSC_CURRENT_OUT] = 0, + .R[PSC_CURRENT_OUT] = -2, + .m[PSC_TEMPERATURE] = -7612, + .b[PSC_TEMPERATURE] = 335, + .R[PSC_TEMPERATURE] = -3, + .func[0] = PMBUS_HAVE_VOUT | PMBUS_HAVE_IOUT | PMBUS_HAVE_TEMP + | PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_STATUS_IOUT + | PMBUS_HAVE_STATUS_TEMP, + .get_status = max8688_get_status, 
+}; + +static int max8688_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + return pmbus_do_probe(client, id, &max8688_info); +} + +static int max8688_remove(struct i2c_client *client) +{ + return pmbus_do_remove(client); +} + +static const struct i2c_device_id max8688_id[] = { + {"max8688", 0}, + { } +}; + +MODULE_DEVICE_TABLE(i2c, max8688_id); + +/* This is the driver that will be inserted */ +static struct i2c_driver max8688_driver = { + .driver = { + .name = "max8688", + }, + .probe = max8688_probe, + .remove = max8688_remove, + .id_table = max8688_id, +}; + +static int __init max8688_init(void) +{ + return i2c_add_driver(&max8688_driver); +} + +static void __exit max8688_exit(void) +{ + i2c_del_driver(&max8688_driver); +} + +MODULE_AUTHOR("Guenter Roeck"); +MODULE_DESCRIPTION("PMBus driver for Maxim MAX8688"); +MODULE_LICENSE("GPL"); +module_init(max8688_init); +module_exit(max8688_exit); diff --git a/drivers/hwmon/pmbus.c b/drivers/hwmon/pmbus.c new file mode 100644 index 000000000000..98e2e28899e2 --- /dev/null +++ b/drivers/hwmon/pmbus.c @@ -0,0 +1,203 @@ +/* + * Hardware monitoring driver for PMBus devices + * + * Copyright (c) 2010, 2011 Ericsson AB. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/err.h> +#include <linux/slab.h> +#include <linux/mutex.h> +#include <linux/i2c.h> +#include "pmbus.h" + +/* + * Find sensor groups and status registers on each page. 
+ */ +static void pmbus_find_sensor_groups(struct i2c_client *client, + struct pmbus_driver_info *info) +{ + int page; + + /* Sensors detected on page 0 only */ + if (pmbus_check_word_register(client, 0, PMBUS_READ_VIN)) + info->func[0] |= PMBUS_HAVE_VIN; + if (pmbus_check_word_register(client, 0, PMBUS_READ_VCAP)) + info->func[0] |= PMBUS_HAVE_VCAP; + if (pmbus_check_word_register(client, 0, PMBUS_READ_IIN)) + info->func[0] |= PMBUS_HAVE_IIN; + if (pmbus_check_word_register(client, 0, PMBUS_READ_PIN)) + info->func[0] |= PMBUS_HAVE_PIN; + if (info->func[0] + && pmbus_check_byte_register(client, 0, PMBUS_STATUS_INPUT)) + info->func[0] |= PMBUS_HAVE_STATUS_INPUT; + if (pmbus_check_word_register(client, 0, PMBUS_READ_FAN_SPEED_1)) { + info->func[0] |= PMBUS_HAVE_FAN12; + if (pmbus_check_byte_register(client, 0, PMBUS_STATUS_FAN_12)) + info->func[0] |= PMBUS_HAVE_STATUS_FAN12; + } + if (pmbus_check_word_register(client, 0, PMBUS_READ_FAN_SPEED_3)) { + info->func[0] |= PMBUS_HAVE_FAN34; + if (pmbus_check_byte_register(client, 0, PMBUS_STATUS_FAN_34)) + info->func[0] |= PMBUS_HAVE_STATUS_FAN34; + } + if (pmbus_check_word_register(client, 0, PMBUS_READ_TEMPERATURE_1)) { + info->func[0] |= PMBUS_HAVE_TEMP; + if (pmbus_check_byte_register(client, 0, + PMBUS_STATUS_TEMPERATURE)) + info->func[0] |= PMBUS_HAVE_STATUS_TEMP; + } + + /* Sensors detected on all pages */ + for (page = 0; page < info->pages; page++) { + if (pmbus_check_word_register(client, page, PMBUS_READ_VOUT)) { + info->func[page] |= PMBUS_HAVE_VOUT; + if (pmbus_check_byte_register(client, page, + PMBUS_STATUS_VOUT)) + info->func[page] |= PMBUS_HAVE_STATUS_VOUT; + } + if (pmbus_check_word_register(client, page, PMBUS_READ_IOUT)) { + info->func[page] |= PMBUS_HAVE_IOUT; + if (pmbus_check_byte_register(client, 0, + PMBUS_STATUS_IOUT)) + info->func[page] |= PMBUS_HAVE_STATUS_IOUT; + } + if (pmbus_check_word_register(client, page, PMBUS_READ_POUT)) + info->func[page] |= PMBUS_HAVE_POUT; + } +} + +/* + * Identify chip parameters. + */ +static int pmbus_identify(struct i2c_client *client, + struct pmbus_driver_info *info) +{ + if (!info->pages) { + /* + * Check if the PAGE command is supported. If it is, + * keep setting the page number until it fails or until the + * maximum number of pages has been reached. Assume that + * this is the number of pages supported by the chip. + */ + if (pmbus_check_byte_register(client, 0, PMBUS_PAGE)) { + int page; + + for (page = 1; page < PMBUS_PAGES; page++) { + if (pmbus_set_page(client, page) < 0) + break; + } + pmbus_set_page(client, 0); + info->pages = page; + } else { + info->pages = 1; + } + } + + /* + * We should check if the COEFFICIENTS register is supported. + * If it is, and the chip is configured for direct mode, we can read + * the coefficients from the chip, one set per group of sensor + * registers. + * + * To do this, we will need access to a chip which actually supports the + * COEFFICIENTS command, since the command is too complex to implement + * without testing it. 
+ */ + + /* Try to find sensor groups */ + pmbus_find_sensor_groups(client, info); + + return 0; +} + +static int pmbus_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct pmbus_driver_info *info; + int ret; + + info = kzalloc(sizeof(struct pmbus_driver_info), GFP_KERNEL); + if (!info) + return -ENOMEM; + + info->pages = id->driver_data; + info->identify = pmbus_identify; + + ret = pmbus_do_probe(client, id, info); + if (ret < 0) + goto out; + return 0; + +out: + kfree(info); + return ret; +} + +static int pmbus_remove(struct i2c_client *client) +{ + int ret; + const struct pmbus_driver_info *info; + + info = pmbus_get_driver_info(client); + ret = pmbus_do_remove(client); + kfree(info); + return ret; +} + +/* + * Use driver_data to set the number of pages supported by the chip. + */ +static const struct i2c_device_id pmbus_id[] = { + {"bmr450", 1}, + {"bmr451", 1}, + {"bmr453", 1}, + {"bmr454", 1}, + {"ltc2978", 8}, + {"pmbus", 0}, + {} +}; + +MODULE_DEVICE_TABLE(i2c, pmbus_id); + +/* This is the driver that will be inserted */ +static struct i2c_driver pmbus_driver = { + .driver = { + .name = "pmbus", + }, + .probe = pmbus_probe, + .remove = pmbus_remove, + .id_table = pmbus_id, +}; + +static int __init pmbus_init(void) +{ + return i2c_add_driver(&pmbus_driver); +} + +static void __exit pmbus_exit(void) +{ + i2c_del_driver(&pmbus_driver); +} + +MODULE_AUTHOR("Guenter Roeck"); +MODULE_DESCRIPTION("Generic PMBus driver"); +MODULE_LICENSE("GPL"); +module_init(pmbus_init); +module_exit(pmbus_exit); diff --git a/drivers/hwmon/pmbus.h b/drivers/hwmon/pmbus.h new file mode 100644 index 000000000000..a81f7f228762 --- /dev/null +++ b/drivers/hwmon/pmbus.h @@ -0,0 +1,313 @@ +/* + * pmbus.h - Common defines and structures for PMBus devices + * + * Copyright (c) 2010, 2011 Ericsson AB. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
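[Usage sketch, not part of the patch] Binding the generic driver from board code is an ordinary I2C device declaration: the name selects an entry in pmbus_id[] above and the driver_data there supplies the page count, with 0 deferring to pmbus_identify(). Bus number and addresses below are assumptions:

	static struct i2c_board_info pmbus_board_info[] __initdata = {
		{ I2C_BOARD_INFO("pmbus", 0x58) },	/* pages probed at runtime */
		{ I2C_BOARD_INFO("ltc2978", 0x5c) },	/* 8 pages, per pmbus_id[] */
	};

	/* from machine init code:
	 *	i2c_register_board_info(0, pmbus_board_info,
	 *				ARRAY_SIZE(pmbus_board_info));
	 */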
+ */ + +#ifndef PMBUS_H +#define PMBUS_H + +/* + * Registers + */ +#define PMBUS_PAGE 0x00 +#define PMBUS_OPERATION 0x01 +#define PMBUS_ON_OFF_CONFIG 0x02 +#define PMBUS_CLEAR_FAULTS 0x03 +#define PMBUS_PHASE 0x04 + +#define PMBUS_CAPABILITY 0x19 +#define PMBUS_QUERY 0x1A + +#define PMBUS_VOUT_MODE 0x20 +#define PMBUS_VOUT_COMMAND 0x21 +#define PMBUS_VOUT_TRIM 0x22 +#define PMBUS_VOUT_CAL_OFFSET 0x23 +#define PMBUS_VOUT_MAX 0x24 +#define PMBUS_VOUT_MARGIN_HIGH 0x25 +#define PMBUS_VOUT_MARGIN_LOW 0x26 +#define PMBUS_VOUT_TRANSITION_RATE 0x27 +#define PMBUS_VOUT_DROOP 0x28 +#define PMBUS_VOUT_SCALE_LOOP 0x29 +#define PMBUS_VOUT_SCALE_MONITOR 0x2A + +#define PMBUS_COEFFICIENTS 0x30 +#define PMBUS_POUT_MAX 0x31 + +#define PMBUS_FAN_CONFIG_12 0x3A +#define PMBUS_FAN_COMMAND_1 0x3B +#define PMBUS_FAN_COMMAND_2 0x3C +#define PMBUS_FAN_CONFIG_34 0x3D +#define PMBUS_FAN_COMMAND_3 0x3E +#define PMBUS_FAN_COMMAND_4 0x3F + +#define PMBUS_VOUT_OV_FAULT_LIMIT 0x40 +#define PMBUS_VOUT_OV_FAULT_RESPONSE 0x41 +#define PMBUS_VOUT_OV_WARN_LIMIT 0x42 +#define PMBUS_VOUT_UV_WARN_LIMIT 0x43 +#define PMBUS_VOUT_UV_FAULT_LIMIT 0x44 +#define PMBUS_VOUT_UV_FAULT_RESPONSE 0x45 +#define PMBUS_IOUT_OC_FAULT_LIMIT 0x46 +#define PMBUS_IOUT_OC_FAULT_RESPONSE 0x47 +#define PMBUS_IOUT_OC_LV_FAULT_LIMIT 0x48 +#define PMBUS_IOUT_OC_LV_FAULT_RESPONSE 0x49 +#define PMBUS_IOUT_OC_WARN_LIMIT 0x4A +#define PMBUS_IOUT_UC_FAULT_LIMIT 0x4B +#define PMBUS_IOUT_UC_FAULT_RESPONSE 0x4C + +#define PMBUS_OT_FAULT_LIMIT 0x4F +#define PMBUS_OT_FAULT_RESPONSE 0x50 +#define PMBUS_OT_WARN_LIMIT 0x51 +#define PMBUS_UT_WARN_LIMIT 0x52 +#define PMBUS_UT_FAULT_LIMIT 0x53 +#define PMBUS_UT_FAULT_RESPONSE 0x54 +#define PMBUS_VIN_OV_FAULT_LIMIT 0x55 +#define PMBUS_VIN_OV_FAULT_RESPONSE 0x56 +#define PMBUS_VIN_OV_WARN_LIMIT 0x57 +#define PMBUS_VIN_UV_WARN_LIMIT 0x58 +#define PMBUS_VIN_UV_FAULT_LIMIT 0x59 + +#define PMBUS_IIN_OC_FAULT_LIMIT 0x5B +#define PMBUS_IIN_OC_WARN_LIMIT 0x5D + +#define PMBUS_POUT_OP_FAULT_LIMIT 0x68 +#define PMBUS_POUT_OP_WARN_LIMIT 0x6A +#define PMBUS_PIN_OP_WARN_LIMIT 0x6B + +#define PMBUS_STATUS_BYTE 0x78 +#define PMBUS_STATUS_WORD 0x79 +#define PMBUS_STATUS_VOUT 0x7A +#define PMBUS_STATUS_IOUT 0x7B +#define PMBUS_STATUS_INPUT 0x7C +#define PMBUS_STATUS_TEMPERATURE 0x7D +#define PMBUS_STATUS_CML 0x7E +#define PMBUS_STATUS_OTHER 0x7F +#define PMBUS_STATUS_MFR_SPECIFIC 0x80 +#define PMBUS_STATUS_FAN_12 0x81 +#define PMBUS_STATUS_FAN_34 0x82 + +#define PMBUS_READ_VIN 0x88 +#define PMBUS_READ_IIN 0x89 +#define PMBUS_READ_VCAP 0x8A +#define PMBUS_READ_VOUT 0x8B +#define PMBUS_READ_IOUT 0x8C +#define PMBUS_READ_TEMPERATURE_1 0x8D +#define PMBUS_READ_TEMPERATURE_2 0x8E +#define PMBUS_READ_TEMPERATURE_3 0x8F +#define PMBUS_READ_FAN_SPEED_1 0x90 +#define PMBUS_READ_FAN_SPEED_2 0x91 +#define PMBUS_READ_FAN_SPEED_3 0x92 +#define PMBUS_READ_FAN_SPEED_4 0x93 +#define PMBUS_READ_DUTY_CYCLE 0x94 +#define PMBUS_READ_FREQUENCY 0x95 +#define PMBUS_READ_POUT 0x96 +#define PMBUS_READ_PIN 0x97 + +#define PMBUS_REVISION 0x98 +#define PMBUS_MFR_ID 0x99 +#define PMBUS_MFR_MODEL 0x9A +#define PMBUS_MFR_REVISION 0x9B +#define PMBUS_MFR_LOCATION 0x9C +#define PMBUS_MFR_DATE 0x9D +#define PMBUS_MFR_SERIAL 0x9E + +/* + * CAPABILITY + */ +#define PB_CAPABILITY_SMBALERT (1<<4) +#define PB_CAPABILITY_ERROR_CHECK (1<<7) + +/* + * VOUT_MODE + */ +#define PB_VOUT_MODE_MODE_MASK 0xe0 +#define PB_VOUT_MODE_PARAM_MASK 0x1f + +#define PB_VOUT_MODE_LINEAR 0x00 +#define PB_VOUT_MODE_VID 0x20 +#define PB_VOUT_MODE_DIRECT 0x40 + +/* + * Fan configuration + */ 
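[Illustration, not part of the patch] Decoding a FAN_CONFIG_12 byte with the PB_FAN_* masks defined just below; per the PMBus specification the two pulse bits encode 1 to 4 tach pulses per revolution, and the register value here is an assumption:

	u8 cfg = 0xd0;					/* assumed reading: 1101 0000 */
	bool fan1_installed = cfg & PB_FAN_1_INSTALLED;	/* bit 7 set: yes */
	bool fan1_rpm_mode = cfg & PB_FAN_1_RPM;	/* bit 6 set: RPM mode */
	int fan1_pulses = ((cfg & PB_FAN_1_PULSE_MASK) >> 4) + 1;	/* 01 -> 2 */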
+#define PB_FAN_2_PULSE_MASK ((1 << 0) | (1 << 1)) +#define PB_FAN_2_RPM (1 << 2) +#define PB_FAN_2_INSTALLED (1 << 3) +#define PB_FAN_1_PULSE_MASK ((1 << 4) | (1 << 5)) +#define PB_FAN_1_RPM (1 << 6) +#define PB_FAN_1_INSTALLED (1 << 7) + +/* + * STATUS_BYTE, STATUS_WORD (lower) + */ +#define PB_STATUS_NONE_ABOVE (1<<0) +#define PB_STATUS_CML (1<<1) +#define PB_STATUS_TEMPERATURE (1<<2) +#define PB_STATUS_VIN_UV (1<<3) +#define PB_STATUS_IOUT_OC (1<<4) +#define PB_STATUS_VOUT_OV (1<<5) +#define PB_STATUS_OFF (1<<6) +#define PB_STATUS_BUSY (1<<7) + +/* + * STATUS_WORD (upper) + */ +#define PB_STATUS_UNKNOWN (1<<8) +#define PB_STATUS_OTHER (1<<9) +#define PB_STATUS_FANS (1<<10) +#define PB_STATUS_POWER_GOOD_N (1<<11) +#define PB_STATUS_WORD_MFR (1<<12) +#define PB_STATUS_INPUT (1<<13) +#define PB_STATUS_IOUT_POUT (1<<14) +#define PB_STATUS_VOUT (1<<15) + +/* + * STATUS_IOUT + */ +#define PB_POUT_OP_WARNING (1<<0) +#define PB_POUT_OP_FAULT (1<<1) +#define PB_POWER_LIMITING (1<<2) +#define PB_CURRENT_SHARE_FAULT (1<<3) +#define PB_IOUT_UC_FAULT (1<<4) +#define PB_IOUT_OC_WARNING (1<<5) +#define PB_IOUT_OC_LV_FAULT (1<<6) +#define PB_IOUT_OC_FAULT (1<<7) + +/* + * STATUS_VOUT, STATUS_INPUT + */ +#define PB_VOLTAGE_UV_FAULT (1<<4) +#define PB_VOLTAGE_UV_WARNING (1<<5) +#define PB_VOLTAGE_OV_WARNING (1<<6) +#define PB_VOLTAGE_OV_FAULT (1<<7) + +/* + * STATUS_INPUT + */ +#define PB_PIN_OP_WARNING (1<<0) +#define PB_IIN_OC_WARNING (1<<1) +#define PB_IIN_OC_FAULT (1<<2) + +/* + * STATUS_TEMPERATURE + */ +#define PB_TEMP_UT_FAULT (1<<4) +#define PB_TEMP_UT_WARNING (1<<5) +#define PB_TEMP_OT_WARNING (1<<6) +#define PB_TEMP_OT_FAULT (1<<7) + +/* + * STATUS_FAN + */ +#define PB_FAN_AIRFLOW_WARNING (1<<0) +#define PB_FAN_AIRFLOW_FAULT (1<<1) +#define PB_FAN_FAN2_SPEED_OVERRIDE (1<<2) +#define PB_FAN_FAN1_SPEED_OVERRIDE (1<<3) +#define PB_FAN_FAN2_WARNING (1<<4) +#define PB_FAN_FAN1_WARNING (1<<5) +#define PB_FAN_FAN2_FAULT (1<<6) +#define PB_FAN_FAN1_FAULT (1<<7) + +/* + * CML_FAULT_STATUS + */ +#define PB_CML_FAULT_OTHER_MEM_LOGIC (1<<0) +#define PB_CML_FAULT_OTHER_COMM (1<<1) +#define PB_CML_FAULT_PROCESSOR (1<<3) +#define PB_CML_FAULT_MEMORY (1<<4) +#define PB_CML_FAULT_PACKET_ERROR (1<<5) +#define PB_CML_FAULT_INVALID_DATA (1<<6) +#define PB_CML_FAULT_INVALID_COMMAND (1<<7) + +enum pmbus_sensor_classes { + PSC_VOLTAGE_IN = 0, + PSC_VOLTAGE_OUT, + PSC_CURRENT_IN, + PSC_CURRENT_OUT, + PSC_POWER, + PSC_TEMPERATURE, + PSC_FAN, + PSC_NUM_CLASSES /* Number of power sensor classes */ +}; + +#define PMBUS_PAGES 32 /* Per PMBus specification */ + +/* Functionality bit mask */ +#define PMBUS_HAVE_VIN (1 << 0) +#define PMBUS_HAVE_VCAP (1 << 1) +#define PMBUS_HAVE_VOUT (1 << 2) +#define PMBUS_HAVE_IIN (1 << 3) +#define PMBUS_HAVE_IOUT (1 << 4) +#define PMBUS_HAVE_PIN (1 << 5) +#define PMBUS_HAVE_POUT (1 << 6) +#define PMBUS_HAVE_FAN12 (1 << 7) +#define PMBUS_HAVE_FAN34 (1 << 8) +#define PMBUS_HAVE_TEMP (1 << 9) +#define PMBUS_HAVE_TEMP2 (1 << 10) +#define PMBUS_HAVE_TEMP3 (1 << 11) +#define PMBUS_HAVE_STATUS_VOUT (1 << 12) +#define PMBUS_HAVE_STATUS_IOUT (1 << 13) +#define PMBUS_HAVE_STATUS_INPUT (1 << 14) +#define PMBUS_HAVE_STATUS_TEMP (1 << 15) +#define PMBUS_HAVE_STATUS_FAN12 (1 << 16) +#define PMBUS_HAVE_STATUS_FAN34 (1 << 17) + +struct pmbus_driver_info { + int pages; /* Total number of pages */ + bool direct[PSC_NUM_CLASSES]; + /* true if device uses direct data format + for the given sensor class */ + /* + * Support one set of coefficients for each sensor type + * Used for chips providing data in direct 
mode. + */ + int m[PSC_NUM_CLASSES]; /* mantissa for direct data format */ + int b[PSC_NUM_CLASSES]; /* offset */ + int R[PSC_NUM_CLASSES]; /* exponent */ + + u32 func[PMBUS_PAGES]; /* Functionality, per page */ + /* + * The get_status function maps manufacturing specific status values + * into PMBus standard status values. + * This function is optional and only necessary if chip specific status + * register values have to be mapped into standard PMBus status register + * values. + */ + int (*get_status)(struct i2c_client *client, int page, int reg); + /* + * The identify function determines supported PMBus functionality. + * This function is only necessary if a chip driver supports multiple + * chips, and the chip functionality is not pre-determined. + */ + int (*identify)(struct i2c_client *client, + struct pmbus_driver_info *info); +}; + +/* Function declarations */ + +int pmbus_set_page(struct i2c_client *client, u8 page); +int pmbus_read_word_data(struct i2c_client *client, u8 page, u8 reg); +void pmbus_clear_faults(struct i2c_client *client); +bool pmbus_check_byte_register(struct i2c_client *client, int page, int reg); +bool pmbus_check_word_register(struct i2c_client *client, int page, int reg); +int pmbus_do_probe(struct i2c_client *client, const struct i2c_device_id *id, + struct pmbus_driver_info *info); +int pmbus_do_remove(struct i2c_client *client); +const struct pmbus_driver_info *pmbus_get_driver_info(struct i2c_client + *client); + +#endif /* PMBUS_H */ diff --git a/drivers/hwmon/pmbus_core.c b/drivers/hwmon/pmbus_core.c new file mode 100644 index 000000000000..6474512f49b0 --- /dev/null +++ b/drivers/hwmon/pmbus_core.c @@ -0,0 +1,1658 @@ +/* + * Hardware monitoring driver for PMBus devices + * + * Copyright (c) 2010, 2011 Ericsson AB. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/err.h> +#include <linux/slab.h> +#include <linux/i2c.h> +#include <linux/hwmon.h> +#include <linux/hwmon-sysfs.h> +#include <linux/delay.h> +#include <linux/i2c/pmbus.h> +#include "pmbus.h" + +/* + * Constants needed to determine number of sensors, booleans, and labels. 
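 *
 * For orientation, a minimal single-page chip driver built on the pmbus.h
 * interface above might look like the sketch below. This is illustrative
 * only and not part of this patch; the "example" name and the direct-format
 * coefficients are invented.
 *
 *	static struct pmbus_driver_info example_info = {
 *		.pages = 1,
 *		.direct = { [PSC_VOLTAGE_OUT] = true },
 *		.m = { [PSC_VOLTAGE_OUT] = 1 },	/* Y = (1 * X + 0) * 10^2 */
 *		.b = { [PSC_VOLTAGE_OUT] = 0 },
 *		.R = { [PSC_VOLTAGE_OUT] = 2 },
 *		.func = { PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT },
 *	};
 *
 *	static int example_probe(struct i2c_client *client,
 *				 const struct i2c_device_id *id)
 *	{
 *		return pmbus_do_probe(client, id, &example_info);
 *	}
 *
 *	static int example_remove(struct i2c_client *client)
 *	{
 *		return pmbus_do_remove(client);
 *	}
 *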
+ */ +#define PMBUS_MAX_INPUT_SENSORS 11 /* 6*volt, 3*curr, 2*power */ +#define PMBUS_VOUT_SENSORS_PER_PAGE 5 /* input, min, max, lcrit, + crit */ +#define PMBUS_IOUT_SENSORS_PER_PAGE 4 /* input, min, max, crit */ +#define PMBUS_POUT_SENSORS_PER_PAGE 4 /* input, cap, max, crit */ +#define PMBUS_MAX_SENSORS_PER_FAN 1 /* input */ +#define PMBUS_MAX_SENSORS_PER_TEMP 5 /* input, min, max, lcrit, + crit */ + +#define PMBUS_MAX_INPUT_BOOLEANS 7 /* v: min_alarm, max_alarm, + lcrit_alarm, crit_alarm; + c: alarm, crit_alarm; + p: crit_alarm */ +#define PMBUS_VOUT_BOOLEANS_PER_PAGE 4 /* min_alarm, max_alarm, + lcrit_alarm, crit_alarm */ +#define PMBUS_IOUT_BOOLEANS_PER_PAGE 3 /* alarm, lcrit_alarm, + crit_alarm */ +#define PMBUS_POUT_BOOLEANS_PER_PAGE 2 /* alarm, crit_alarm */ +#define PMBUS_MAX_BOOLEANS_PER_FAN 2 /* alarm, fault */ +#define PMBUS_MAX_BOOLEANS_PER_TEMP 4 /* min_alarm, max_alarm, + lcrit_alarm, crit_alarm */ + +#define PMBUS_MAX_INPUT_LABELS 4 /* vin, vcap, iin, pin */ + +/* + * status, status_vout, status_iout, status_fans, status_fan34, and status_temp + * are paged. status_input is unpaged. + */ +#define PB_NUM_STATUS_REG (PMBUS_PAGES * 6 + 1) + +/* + * Index into status register array, per status register group + */ +#define PB_STATUS_BASE 0 +#define PB_STATUS_VOUT_BASE (PB_STATUS_BASE + PMBUS_PAGES) +#define PB_STATUS_IOUT_BASE (PB_STATUS_VOUT_BASE + PMBUS_PAGES) +#define PB_STATUS_FAN_BASE (PB_STATUS_IOUT_BASE + PMBUS_PAGES) +#define PB_STATUS_FAN34_BASE (PB_STATUS_FAN_BASE + PMBUS_PAGES) +#define PB_STATUS_INPUT_BASE (PB_STATUS_FAN34_BASE + PMBUS_PAGES) +#define PB_STATUS_TEMP_BASE (PB_STATUS_INPUT_BASE + 1) + +struct pmbus_sensor { + char name[I2C_NAME_SIZE]; /* sysfs sensor name */ + struct sensor_device_attribute attribute; + u8 page; /* page number */ + u8 reg; /* register */ + enum pmbus_sensor_classes class; /* sensor class */ + bool update; /* runtime sensor update needed */ + int data; /* Sensor data. + Negative if there was a read error */ +}; + +struct pmbus_boolean { + char name[I2C_NAME_SIZE]; /* sysfs boolean name */ + struct sensor_device_attribute attribute; +}; + +struct pmbus_label { + char name[I2C_NAME_SIZE]; /* sysfs label name */ + struct sensor_device_attribute attribute; + char label[I2C_NAME_SIZE]; /* label */ +}; + +struct pmbus_data { + struct device *hwmon_dev; + + u32 flags; /* from platform data */ + + int exponent; /* linear mode: exponent for output voltages */ + + const struct pmbus_driver_info *info; + + int max_attributes; + int num_attributes; + struct attribute **attributes; + struct attribute_group group; + + /* + * Sensors cover both sensor and limit registers. + */ + int max_sensors; + int num_sensors; + struct pmbus_sensor *sensors; + /* + * Booleans are used for alarms. + * Values are determined from status registers. + */ + int max_booleans; + int num_booleans; + struct pmbus_boolean *booleans; + /* + * Labels are used to map generic names (e.g., "in1") + * to PMBus specific names (e.g., "vin" or "vout1"). + */ + int max_labels; + int num_labels; + struct pmbus_label *labels; + + struct mutex update_lock; + bool valid; + unsigned long last_updated; /* in jiffies */ + + /* + * A single status register covers multiple attributes, + * so we keep them all together. 
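+ *
+ * Layout check for the status array that follows: with PMBUS_PAGES = 32,
+ * the five paged groups ahead of STATUS_INPUT occupy 5 * 32 = 160 slots,
+ * STATUS_INPUT is unpaged and takes index 160, and the paged temperature
+ * group spans indices 161..192; PB_NUM_STATUS_REG = 32 * 6 + 1 = 193
+ * covers exactly that.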
+ */ + u8 status_bits; + u8 status[PB_NUM_STATUS_REG]; + + u8 currpage; +}; + +int pmbus_set_page(struct i2c_client *client, u8 page) +{ + struct pmbus_data *data = i2c_get_clientdata(client); + int rv = 0; + int newpage; + + if (page != data->currpage) { + rv = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page); + newpage = i2c_smbus_read_byte_data(client, PMBUS_PAGE); + if (newpage != page) + rv = -EINVAL; + else + data->currpage = page; + } + return rv; +} +EXPORT_SYMBOL_GPL(pmbus_set_page); + +static int pmbus_write_byte(struct i2c_client *client, u8 page, u8 value) +{ + int rv; + + rv = pmbus_set_page(client, page); + if (rv < 0) + return rv; + + return i2c_smbus_write_byte(client, value); +} + +static int pmbus_write_word_data(struct i2c_client *client, u8 page, u8 reg, + u16 word) +{ + int rv; + + rv = pmbus_set_page(client, page); + if (rv < 0) + return rv; + + return i2c_smbus_write_word_data(client, reg, word); +} + +int pmbus_read_word_data(struct i2c_client *client, u8 page, u8 reg) +{ + int rv; + + rv = pmbus_set_page(client, page); + if (rv < 0) + return rv; + + return i2c_smbus_read_word_data(client, reg); +} +EXPORT_SYMBOL_GPL(pmbus_read_word_data); + +static int pmbus_read_byte_data(struct i2c_client *client, u8 page, u8 reg) +{ + int rv; + + rv = pmbus_set_page(client, page); + if (rv < 0) + return rv; + + return i2c_smbus_read_byte_data(client, reg); +} + +static void pmbus_clear_fault_page(struct i2c_client *client, int page) +{ + pmbus_write_byte(client, page, PMBUS_CLEAR_FAULTS); +} + +void pmbus_clear_faults(struct i2c_client *client) +{ + struct pmbus_data *data = i2c_get_clientdata(client); + int i; + + for (i = 0; i < data->info->pages; i++) + pmbus_clear_fault_page(client, i); +} +EXPORT_SYMBOL_GPL(pmbus_clear_faults); + +static int pmbus_check_status_cml(struct i2c_client *client, int page) +{ + int status, status2; + + status = pmbus_read_byte_data(client, page, PMBUS_STATUS_BYTE); + if (status < 0 || (status & PB_STATUS_CML)) { + status2 = pmbus_read_byte_data(client, page, PMBUS_STATUS_CML); + if (status2 < 0 || (status2 & PB_CML_FAULT_INVALID_COMMAND)) + return -EINVAL; + } + return 0; +} + +bool pmbus_check_byte_register(struct i2c_client *client, int page, int reg) +{ + int rv; + struct pmbus_data *data = i2c_get_clientdata(client); + + rv = pmbus_read_byte_data(client, page, reg); + if (rv >= 0 && !(data->flags & PMBUS_SKIP_STATUS_CHECK)) + rv = pmbus_check_status_cml(client, page); + pmbus_clear_fault_page(client, page); + return rv >= 0; +} +EXPORT_SYMBOL_GPL(pmbus_check_byte_register); + +bool pmbus_check_word_register(struct i2c_client *client, int page, int reg) +{ + int rv; + struct pmbus_data *data = i2c_get_clientdata(client); + + rv = pmbus_read_word_data(client, page, reg); + if (rv >= 0 && !(data->flags & PMBUS_SKIP_STATUS_CHECK)) + rv = pmbus_check_status_cml(client, page); + pmbus_clear_fault_page(client, page); + return rv >= 0; +} +EXPORT_SYMBOL_GPL(pmbus_check_word_register); + +const struct pmbus_driver_info *pmbus_get_driver_info(struct i2c_client *client) +{ + struct pmbus_data *data = i2c_get_clientdata(client); + + return data->info; +} +EXPORT_SYMBOL_GPL(pmbus_get_driver_info); + +static int pmbus_get_status(struct i2c_client *client, int page, int reg) +{ + struct pmbus_data *data = i2c_get_clientdata(client); + const struct pmbus_driver_info *info = data->info; + int status; + + if (info->get_status) { + status = info->get_status(client, page, reg); + if (status != -ENODATA) + return status; + } + return 
pmbus_read_byte_data(client, page, reg); +} + +static struct pmbus_data *pmbus_update_device(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct pmbus_data *data = i2c_get_clientdata(client); + const struct pmbus_driver_info *info = data->info; + + mutex_lock(&data->update_lock); + if (time_after(jiffies, data->last_updated + HZ) || !data->valid) { + int i; + + for (i = 0; i < info->pages; i++) + data->status[PB_STATUS_BASE + i] + = pmbus_read_byte_data(client, i, + PMBUS_STATUS_BYTE); + for (i = 0; i < info->pages; i++) { + if (!(info->func[i] & PMBUS_HAVE_STATUS_VOUT)) + continue; + data->status[PB_STATUS_VOUT_BASE + i] + = pmbus_get_status(client, i, PMBUS_STATUS_VOUT); + } + for (i = 0; i < info->pages; i++) { + if (!(info->func[i] & PMBUS_HAVE_STATUS_IOUT)) + continue; + data->status[PB_STATUS_IOUT_BASE + i] + = pmbus_get_status(client, i, PMBUS_STATUS_IOUT); + } + for (i = 0; i < info->pages; i++) { + if (!(info->func[i] & PMBUS_HAVE_STATUS_TEMP)) + continue; + data->status[PB_STATUS_TEMP_BASE + i] + = pmbus_get_status(client, i, + PMBUS_STATUS_TEMPERATURE); + } + for (i = 0; i < info->pages; i++) { + if (!(info->func[i] & PMBUS_HAVE_STATUS_FAN12)) + continue; + data->status[PB_STATUS_FAN_BASE + i] + = pmbus_get_status(client, i, PMBUS_STATUS_FAN_12); + } + + for (i = 0; i < info->pages; i++) { + if (!(info->func[i] & PMBUS_HAVE_STATUS_FAN34)) + continue; + data->status[PB_STATUS_FAN34_BASE + i] + = pmbus_get_status(client, i, PMBUS_STATUS_FAN_34); + } + + if (info->func[0] & PMBUS_HAVE_STATUS_INPUT) + data->status[PB_STATUS_INPUT_BASE] + = pmbus_get_status(client, 0, PMBUS_STATUS_INPUT); + + for (i = 0; i < data->num_sensors; i++) { + struct pmbus_sensor *sensor = &data->sensors[i]; + + if (!data->valid || sensor->update) + sensor->data + = pmbus_read_word_data(client, sensor->page, + sensor->reg); + } + pmbus_clear_faults(client); + data->last_updated = jiffies; + data->valid = 1; + } + mutex_unlock(&data->update_lock); + return data; +} + +/* + * Convert linear sensor values to milli- or micro-units + * depending on sensor type. + */ +static int pmbus_reg2data_linear(struct pmbus_data *data, + struct pmbus_sensor *sensor) +{ + s16 exponent; + s32 mantissa; + long val; + + if (sensor->class == PSC_VOLTAGE_OUT) { /* LINEAR16 */ + exponent = data->exponent; + mantissa = (u16) sensor->data; + } else { /* LINEAR11 */ + exponent = (sensor->data >> 11) & 0x001f; + mantissa = sensor->data & 0x07ff; + + if (exponent > 0x0f) + exponent |= 0xffe0; /* sign extend exponent */ + if (mantissa > 0x03ff) + mantissa |= 0xfffff800; /* sign extend mantissa */ + } + + val = mantissa; + + /* scale result to milli-units for all sensors except fans */ + if (sensor->class != PSC_FAN) + val = val * 1000L; + + /* scale result to micro-units for power sensors */ + if (sensor->class == PSC_POWER) + val = val * 1000L; + + if (exponent >= 0) + val <<= exponent; + else + val >>= -exponent; + + return (int)val; +} + +/* + * Convert direct sensor values to milli- or micro-units + * depending on sensor type. 
+ */
+static int pmbus_reg2data_direct(struct pmbus_data *data,
+				 struct pmbus_sensor *sensor)
+{
+	long val = (s16) sensor->data;
+	long m, b, R;
+
+	m = data->info->m[sensor->class];
+	b = data->info->b[sensor->class];
+	R = data->info->R[sensor->class];
+
+	if (m == 0)
+		return 0;
+
+	/* X = 1/m * (Y * 10^-R - b) */
+	R = -R;
+	/* scale result to milli-units for everything but fans */
+	if (sensor->class != PSC_FAN) {
+		R += 3;
+		b *= 1000;
+	}
+
+	/* scale result to micro-units for power sensors */
+	if (sensor->class == PSC_POWER) {
+		R += 3;
+		b *= 1000;
+	}
+
+	while (R > 0) {
+		val *= 10;
+		R--;
+	}
+	while (R < 0) {
+		val = DIV_ROUND_CLOSEST(val, 10);
+		R++;
+	}
+
+	return (int)((val - b) / m);
+}
+
+static int pmbus_reg2data(struct pmbus_data *data, struct pmbus_sensor *sensor)
+{
+	int val;
+
+	if (data->info->direct[sensor->class])
+		val = pmbus_reg2data_direct(data, sensor);
+	else
+		val = pmbus_reg2data_linear(data, sensor);
+
+	return val;
+}
+
+#define MAX_MANTISSA	(1023 * 1000)
+#define MIN_MANTISSA	(511 * 1000)
+
+static u16 pmbus_data2reg_linear(struct pmbus_data *data,
+				 enum pmbus_sensor_classes class, long val)
+{
+	s16 exponent = 0, mantissa;
+	bool negative = false;
+
+	/* simple case */
+	if (val == 0)
+		return 0;
+
+	if (class == PSC_VOLTAGE_OUT) {
+		/* LINEAR16 does not support negative voltages */
+		if (val < 0)
+			return 0;
+
+		/*
+		 * For a static exponent, we don't have a choice
+		 * but to adjust the value to it.
+		 */
+		if (data->exponent < 0)
+			val <<= -data->exponent;
+		else
+			val >>= data->exponent;
+		val = DIV_ROUND_CLOSEST(val, 1000);
+		return val & 0xffff;
+	}
+
+	if (val < 0) {
+		negative = true;
+		val = -val;
+	}
+
+	/* Power is in uW. Convert to mW before encoding. */
+	if (class == PSC_POWER)
+		val = DIV_ROUND_CLOSEST(val, 1000L);
+
+	/*
+	 * For simplicity, convert fan data to milli-units
+	 * before calculating the exponent.
+	 */
+	if (class == PSC_FAN)
+		val = val * 1000;
+
+	/* Reduce large mantissa until it fits into 10 bit */
+	while (val >= MAX_MANTISSA && exponent < 15) {
+		exponent++;
+		val >>= 1;
+	}
+	/* Increase small mantissa to improve precision */
+	while (val < MIN_MANTISSA && exponent > -15) {
+		exponent--;
+		val <<= 1;
+	}
+
+	/* Convert mantissa from milli-units to units */
+	mantissa = DIV_ROUND_CLOSEST(val, 1000);
+
+	/* Ensure that resulting number is within range */
+	if (mantissa > 0x3ff)
+		mantissa = 0x3ff;
+
+	/* restore sign */
+	if (negative)
+		mantissa = -mantissa;
+
+	/* Convert to 5 bit exponent, 11 bit mantissa */
+	return (mantissa & 0x7ff) | ((exponent << 11) & 0xf800);
+}
+
+static u16 pmbus_data2reg_direct(struct pmbus_data *data,
+				 enum pmbus_sensor_classes class, long val)
+{
+	long m, b, R;
+
+	m = data->info->m[class];
+	b = data->info->b[class];
+	R = data->info->R[class];
+
+	/* Power is in uW. Adjust R and b. */
+	if (class == PSC_POWER) {
+		R -= 3;
+		b *= 1000;
+	}
+
+	/* Calculate Y = (m * X + b) * 10^R */
+	if (class != PSC_FAN) {
+		R -= 3;		/* Adjust R and b for data in milli-units */
+		b *= 1000;
+	}
+	val = val * m + b;
+
+	while (R > 0) {
+		val *= 10;
+		R--;
+	}
+	while (R < 0) {
+		val = DIV_ROUND_CLOSEST(val, 10);
+		R++;
+	}
+
+	return val;
+}
+
+static u16 pmbus_data2reg(struct pmbus_data *data,
+			  enum pmbus_sensor_classes class, long val)
+{
+	u16 regval;
+
+	if (data->info->direct[class])
+		regval = pmbus_data2reg_direct(data, class, val);
+	else
+		regval = pmbus_data2reg_linear(data, class, val);
+
+	return regval;
+}
+
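+/*
+ * Worked examples for the two data formats above (values invented for
+ * illustration):
+ *
+ * LINEAR11: a raw word of 0xe320 splits into exponent 0b11100 = -4 (after
+ * sign extension) and mantissa 0x320 = 800, i.e. 800 * 2^-4 = 50 units.
+ * For a current sensor, pmbus_reg2data_linear() first scales to milli-units
+ * (800 * 1000 = 800000) and then applies the exponent: 800000 >> 4 = 50000.
+ *
+ * Direct, with hypothetical coefficients m = 1, b = 0, R = 2: a limit of
+ * 1.5 A arrives as val = 1500 milli-units; pmbus_data2reg_direct() adjusts
+ * R to 2 - 3 = -1, computes 1500 * 1 + 0 = 1500, divides once by 10, and
+ * stores Y = 150. Decoding reverses this: pmbus_reg2data_direct() negates
+ * R to -2, adds 3 for milli-units, and returns 150 * 10 = 1500.
+ */
+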
+/*
+ * Return boolean calculated from converted data.
+ * <index> defines a status register index and mask, and optionally
+ * two sensor indices.
+ * The upper half-word references the two optional sensors,
+ * the lower half-word references the status register and mask.
+ *
+ * For booleans created with pmbus_add_boolean_reg(), only the lower 16 bits of
+ * index are set. s1 and s2 (the sensor index values) are zero in this case.
+ * The function returns true if (status[reg] & mask) is true.
+ *
+ * If the boolean was created with pmbus_add_boolean_cmp(), a comparison against
+ * a specified limit has to be performed to determine the boolean result.
+ * In this case, the function returns true if v1 >= v2 (where v1 and v2 are
+ * sensor values referenced by sensor indices s1 and s2).
+ *
+ * To determine if an object exceeds upper limits, specify <s1,s2> = <v,limit>.
+ * To determine if an object exceeds lower limits, specify <s1,s2> = <limit,v>.
+ *
+ * If a negative value is stored in any of the referenced registers, this value
+ * reflects an error code which will be returned.
+ */
+static int pmbus_get_boolean(struct pmbus_data *data, int index, int *val)
+{
+	u8 s1 = (index >> 24) & 0xff;
+	u8 s2 = (index >> 16) & 0xff;
+	u8 reg = (index >> 8) & 0xff;
+	u8 mask = index & 0xff;
+	int status;
+	u8 regval;
+
+	status = data->status[reg];
+	if (status < 0)
+		return status;
+
+	regval = status & mask;
+	if (!s1 && !s2)
+		*val = !!regval;
+	else {
+		int v1, v2;
+		struct pmbus_sensor *sensor1, *sensor2;
+
+		sensor1 = &data->sensors[s1];
+		if (sensor1->data < 0)
+			return sensor1->data;
+		sensor2 = &data->sensors[s2];
+		if (sensor2->data < 0)
+			return sensor2->data;
+
+		v1 = pmbus_reg2data(data, sensor1);
+		v2 = pmbus_reg2data(data, sensor2);
+		*val = !!(regval && v1 >= v2);
+	}
+	return 0;
+}
+
+static ssize_t pmbus_show_boolean(struct device *dev,
+				  struct device_attribute *da, char *buf)
+{
+	struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+	struct pmbus_data *data = pmbus_update_device(dev);
+	int val;
+	int err;
+
+	err = pmbus_get_boolean(data, attr->index, &val);
+	if (err)
+		return err;
+	return snprintf(buf, PAGE_SIZE, "%d\n", val);
+}
+
+static ssize_t pmbus_show_sensor(struct device *dev,
+				 struct device_attribute *da, char *buf)
+{
+	struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+	struct pmbus_data *data = pmbus_update_device(dev);
+	struct pmbus_sensor *sensor;
+
+	sensor = &data->sensors[attr->index];
+	if (sensor->data < 0)
+		return sensor->data;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", pmbus_reg2data(data, sensor));
+}
+
+static ssize_t pmbus_set_sensor(struct device *dev,
+				struct device_attribute *devattr,
+				const char *buf, size_t count)
+{
+	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+	struct i2c_client *client = to_i2c_client(dev);
+	struct pmbus_data *data = i2c_get_clientdata(client);
+	struct pmbus_sensor *sensor = &data->sensors[attr->index];
+	ssize_t rv = count;
+	long val = 0;
+	int ret;
+	u16 regval;
+
+	if (strict_strtol(buf, 10, &val) < 0)
+		return -EINVAL;
+
+	mutex_lock(&data->update_lock);
+	regval = pmbus_data2reg(data, sensor->class, val);
+	ret = pmbus_write_word_data(client, sensor->page, sensor->reg, regval);
+	if (ret < 0)
+		rv = ret;
+	
else + data->sensors[attr->index].data = regval; + mutex_unlock(&data->update_lock); + return rv; +} + +static ssize_t pmbus_show_label(struct device *dev, + struct device_attribute *da, char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + struct pmbus_data *data = i2c_get_clientdata(client); + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + + return snprintf(buf, PAGE_SIZE, "%s\n", + data->labels[attr->index].label); +} + +#define PMBUS_ADD_ATTR(data, _name, _idx, _mode, _type, _show, _set) \ +do { \ + struct sensor_device_attribute *a \ + = &data->_type##s[data->num_##_type##s].attribute; \ + BUG_ON(data->num_attributes >= data->max_attributes); \ + a->dev_attr.attr.name = _name; \ + a->dev_attr.attr.mode = _mode; \ + a->dev_attr.show = _show; \ + a->dev_attr.store = _set; \ + a->index = _idx; \ + data->attributes[data->num_attributes] = &a->dev_attr.attr; \ + data->num_attributes++; \ +} while (0) + +#define PMBUS_ADD_GET_ATTR(data, _name, _type, _idx) \ + PMBUS_ADD_ATTR(data, _name, _idx, S_IRUGO, _type, \ + pmbus_show_##_type, NULL) + +#define PMBUS_ADD_SET_ATTR(data, _name, _type, _idx) \ + PMBUS_ADD_ATTR(data, _name, _idx, S_IWUSR | S_IRUGO, _type, \ + pmbus_show_##_type, pmbus_set_##_type) + +static void pmbus_add_boolean(struct pmbus_data *data, + const char *name, const char *type, int seq, + int idx) +{ + struct pmbus_boolean *boolean; + + BUG_ON(data->num_booleans >= data->max_booleans); + + boolean = &data->booleans[data->num_booleans]; + + snprintf(boolean->name, sizeof(boolean->name), "%s%d_%s", + name, seq, type); + PMBUS_ADD_GET_ATTR(data, boolean->name, boolean, idx); + data->num_booleans++; +} + +static void pmbus_add_boolean_reg(struct pmbus_data *data, + const char *name, const char *type, + int seq, int reg, int bit) +{ + pmbus_add_boolean(data, name, type, seq, (reg << 8) | bit); +} + +static void pmbus_add_boolean_cmp(struct pmbus_data *data, + const char *name, const char *type, + int seq, int i1, int i2, int reg, int mask) +{ + pmbus_add_boolean(data, name, type, seq, + (i1 << 24) | (i2 << 16) | (reg << 8) | mask); +} + +static void pmbus_add_sensor(struct pmbus_data *data, + const char *name, const char *type, int seq, + int page, int reg, enum pmbus_sensor_classes class, + bool update) +{ + struct pmbus_sensor *sensor; + + BUG_ON(data->num_sensors >= data->max_sensors); + + sensor = &data->sensors[data->num_sensors]; + snprintf(sensor->name, sizeof(sensor->name), "%s%d_%s", + name, seq, type); + sensor->page = page; + sensor->reg = reg; + sensor->class = class; + sensor->update = update; + if (update) + PMBUS_ADD_GET_ATTR(data, sensor->name, sensor, + data->num_sensors); + else + PMBUS_ADD_SET_ATTR(data, sensor->name, sensor, + data->num_sensors); + data->num_sensors++; +} + +static void pmbus_add_label(struct pmbus_data *data, + const char *name, int seq, + const char *lstring, int index) +{ + struct pmbus_label *label; + + BUG_ON(data->num_labels >= data->max_labels); + + label = &data->labels[data->num_labels]; + snprintf(label->name, sizeof(label->name), "%s%d_label", name, seq); + if (!index) + strncpy(label->label, lstring, sizeof(label->label) - 1); + else + snprintf(label->label, sizeof(label->label), "%s%d", lstring, + index); + + PMBUS_ADD_GET_ATTR(data, label->name, label, data->num_labels); + data->num_labels++; +} + +static const int pmbus_temp_registers[] = { + PMBUS_READ_TEMPERATURE_1, + PMBUS_READ_TEMPERATURE_2, + PMBUS_READ_TEMPERATURE_3 +}; + +static const int pmbus_temp_flags[] = { + PMBUS_HAVE_TEMP, + 
PMBUS_HAVE_TEMP2, + PMBUS_HAVE_TEMP3 +}; + +static const int pmbus_fan_registers[] = { + PMBUS_READ_FAN_SPEED_1, + PMBUS_READ_FAN_SPEED_2, + PMBUS_READ_FAN_SPEED_3, + PMBUS_READ_FAN_SPEED_4 +}; + +static const int pmbus_fan_config_registers[] = { + PMBUS_FAN_CONFIG_12, + PMBUS_FAN_CONFIG_12, + PMBUS_FAN_CONFIG_34, + PMBUS_FAN_CONFIG_34 +}; + +static const int pmbus_fan_status_registers[] = { + PMBUS_STATUS_FAN_12, + PMBUS_STATUS_FAN_12, + PMBUS_STATUS_FAN_34, + PMBUS_STATUS_FAN_34 +}; + +static const u32 pmbus_fan_flags[] = { + PMBUS_HAVE_FAN12, + PMBUS_HAVE_FAN12, + PMBUS_HAVE_FAN34, + PMBUS_HAVE_FAN34 +}; + +static const u32 pmbus_fan_status_flags[] = { + PMBUS_HAVE_STATUS_FAN12, + PMBUS_HAVE_STATUS_FAN12, + PMBUS_HAVE_STATUS_FAN34, + PMBUS_HAVE_STATUS_FAN34 +}; + +/* + * Determine maximum number of sensors, booleans, and labels. + * To keep things simple, only make a rough high estimate. + */ +static void pmbus_find_max_attr(struct i2c_client *client, + struct pmbus_data *data) +{ + const struct pmbus_driver_info *info = data->info; + int page, max_sensors, max_booleans, max_labels; + + max_sensors = PMBUS_MAX_INPUT_SENSORS; + max_booleans = PMBUS_MAX_INPUT_BOOLEANS; + max_labels = PMBUS_MAX_INPUT_LABELS; + + for (page = 0; page < info->pages; page++) { + if (info->func[page] & PMBUS_HAVE_VOUT) { + max_sensors += PMBUS_VOUT_SENSORS_PER_PAGE; + max_booleans += PMBUS_VOUT_BOOLEANS_PER_PAGE; + max_labels++; + } + if (info->func[page] & PMBUS_HAVE_IOUT) { + max_sensors += PMBUS_IOUT_SENSORS_PER_PAGE; + max_booleans += PMBUS_IOUT_BOOLEANS_PER_PAGE; + max_labels++; + } + if (info->func[page] & PMBUS_HAVE_POUT) { + max_sensors += PMBUS_POUT_SENSORS_PER_PAGE; + max_booleans += PMBUS_POUT_BOOLEANS_PER_PAGE; + max_labels++; + } + if (info->func[page] & PMBUS_HAVE_FAN12) { + max_sensors += 2 * PMBUS_MAX_SENSORS_PER_FAN; + max_booleans += 2 * PMBUS_MAX_BOOLEANS_PER_FAN; + } + if (info->func[page] & PMBUS_HAVE_FAN34) { + max_sensors += 2 * PMBUS_MAX_SENSORS_PER_FAN; + max_booleans += 2 * PMBUS_MAX_BOOLEANS_PER_FAN; + } + if (info->func[page] & PMBUS_HAVE_TEMP) { + max_sensors += PMBUS_MAX_SENSORS_PER_TEMP; + max_booleans += PMBUS_MAX_BOOLEANS_PER_TEMP; + } + if (info->func[page] & PMBUS_HAVE_TEMP2) { + max_sensors += PMBUS_MAX_SENSORS_PER_TEMP; + max_booleans += PMBUS_MAX_BOOLEANS_PER_TEMP; + } + if (info->func[page] & PMBUS_HAVE_TEMP3) { + max_sensors += PMBUS_MAX_SENSORS_PER_TEMP; + max_booleans += PMBUS_MAX_BOOLEANS_PER_TEMP; + } + } + data->max_sensors = max_sensors; + data->max_booleans = max_booleans; + data->max_labels = max_labels; + data->max_attributes = max_sensors + max_booleans + max_labels; +} + +/* + * Search for attributes. Allocate sensors, booleans, and labels as needed. 
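+ *
+ * As a worked example of the estimate above: a hypothetical single-page
+ * chip with VOUT, IOUT and one TEMP yields max_sensors = 11 + 5 + 4 + 5
+ * = 25, max_booleans = 7 + 4 + 3 + 4 = 18 and max_labels = 4 + 1 + 1 = 6,
+ * so up to 49 attributes are provisioned even if probing later creates
+ * fewer.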
+ */ +static void pmbus_find_attributes(struct i2c_client *client, + struct pmbus_data *data) +{ + const struct pmbus_driver_info *info = data->info; + int page, i0, i1, in_index; + + /* + * Input voltage sensors + */ + in_index = 1; + if (info->func[0] & PMBUS_HAVE_VIN) { + bool have_alarm = false; + + i0 = data->num_sensors; + pmbus_add_label(data, "in", in_index, "vin", 0); + pmbus_add_sensor(data, "in", "input", in_index, + 0, PMBUS_READ_VIN, PSC_VOLTAGE_IN, true); + if (pmbus_check_word_register(client, 0, + PMBUS_VIN_UV_WARN_LIMIT)) { + i1 = data->num_sensors; + pmbus_add_sensor(data, "in", "min", in_index, + 0, PMBUS_VIN_UV_WARN_LIMIT, + PSC_VOLTAGE_IN, false); + if (info->func[0] & PMBUS_HAVE_STATUS_INPUT) { + pmbus_add_boolean_reg(data, "in", "min_alarm", + in_index, + PB_STATUS_INPUT_BASE, + PB_VOLTAGE_UV_WARNING); + have_alarm = true; + } + } + if (pmbus_check_word_register(client, 0, + PMBUS_VIN_UV_FAULT_LIMIT)) { + i1 = data->num_sensors; + pmbus_add_sensor(data, "in", "lcrit", in_index, + 0, PMBUS_VIN_UV_FAULT_LIMIT, + PSC_VOLTAGE_IN, false); + if (info->func[0] & PMBUS_HAVE_STATUS_INPUT) { + pmbus_add_boolean_reg(data, "in", "lcrit_alarm", + in_index, + PB_STATUS_INPUT_BASE, + PB_VOLTAGE_UV_FAULT); + have_alarm = true; + } + } + if (pmbus_check_word_register(client, 0, + PMBUS_VIN_OV_WARN_LIMIT)) { + i1 = data->num_sensors; + pmbus_add_sensor(data, "in", "max", in_index, + 0, PMBUS_VIN_OV_WARN_LIMIT, + PSC_VOLTAGE_IN, false); + if (info->func[0] & PMBUS_HAVE_STATUS_INPUT) { + pmbus_add_boolean_reg(data, "in", "max_alarm", + in_index, + PB_STATUS_INPUT_BASE, + PB_VOLTAGE_OV_WARNING); + have_alarm = true; + } + } + if (pmbus_check_word_register(client, 0, + PMBUS_VIN_OV_FAULT_LIMIT)) { + i1 = data->num_sensors; + pmbus_add_sensor(data, "in", "crit", in_index, + 0, PMBUS_VIN_OV_FAULT_LIMIT, + PSC_VOLTAGE_IN, false); + if (info->func[0] & PMBUS_HAVE_STATUS_INPUT) { + pmbus_add_boolean_reg(data, "in", "crit_alarm", + in_index, + PB_STATUS_INPUT_BASE, + PB_VOLTAGE_OV_FAULT); + have_alarm = true; + } + } + /* + * Add generic alarm attribute only if there are no individual + * attributes. 
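+ *
+ * Note that the individual limit attributes above only exist if probing
+ * with pmbus_check_word_register() succeeded. Boards whose devices flag
+ * spurious CML errors during such probing can opt out of the status check
+ * through platform data (a sketch, assuming the flag declared in
+ * <linux/i2c/pmbus.h>):
+ *
+ *	static struct pmbus_platform_data pdata = {
+ *		.flags = PMBUS_SKIP_STATUS_CHECK,
+ *	};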
+ */ + if (!have_alarm) + pmbus_add_boolean_reg(data, "in", "alarm", + in_index, + PB_STATUS_BASE, + PB_STATUS_VIN_UV); + in_index++; + } + if (info->func[0] & PMBUS_HAVE_VCAP) { + pmbus_add_label(data, "in", in_index, "vcap", 0); + pmbus_add_sensor(data, "in", "input", in_index, 0, + PMBUS_READ_VCAP, PSC_VOLTAGE_IN, true); + in_index++; + } + + /* + * Output voltage sensors + */ + for (page = 0; page < info->pages; page++) { + bool have_alarm = false; + + if (!(info->func[page] & PMBUS_HAVE_VOUT)) + continue; + + i0 = data->num_sensors; + pmbus_add_label(data, "in", in_index, "vout", page + 1); + pmbus_add_sensor(data, "in", "input", in_index, page, + PMBUS_READ_VOUT, PSC_VOLTAGE_OUT, true); + if (pmbus_check_word_register(client, page, + PMBUS_VOUT_UV_WARN_LIMIT)) { + i1 = data->num_sensors; + pmbus_add_sensor(data, "in", "min", in_index, page, + PMBUS_VOUT_UV_WARN_LIMIT, + PSC_VOLTAGE_OUT, false); + if (info->func[page] & PMBUS_HAVE_STATUS_VOUT) { + pmbus_add_boolean_reg(data, "in", "min_alarm", + in_index, + PB_STATUS_VOUT_BASE + + page, + PB_VOLTAGE_UV_WARNING); + have_alarm = true; + } + } + if (pmbus_check_word_register(client, page, + PMBUS_VOUT_UV_FAULT_LIMIT)) { + i1 = data->num_sensors; + pmbus_add_sensor(data, "in", "lcrit", in_index, page, + PMBUS_VOUT_UV_FAULT_LIMIT, + PSC_VOLTAGE_OUT, false); + if (info->func[page] & PMBUS_HAVE_STATUS_VOUT) { + pmbus_add_boolean_reg(data, "in", "lcrit_alarm", + in_index, + PB_STATUS_VOUT_BASE + + page, + PB_VOLTAGE_UV_FAULT); + have_alarm = true; + } + } + if (pmbus_check_word_register(client, page, + PMBUS_VOUT_OV_WARN_LIMIT)) { + i1 = data->num_sensors; + pmbus_add_sensor(data, "in", "max", in_index, page, + PMBUS_VOUT_OV_WARN_LIMIT, + PSC_VOLTAGE_OUT, false); + if (info->func[page] & PMBUS_HAVE_STATUS_VOUT) { + pmbus_add_boolean_reg(data, "in", "max_alarm", + in_index, + PB_STATUS_VOUT_BASE + + page, + PB_VOLTAGE_OV_WARNING); + have_alarm = true; + } + } + if (pmbus_check_word_register(client, page, + PMBUS_VOUT_OV_FAULT_LIMIT)) { + i1 = data->num_sensors; + pmbus_add_sensor(data, "in", "crit", in_index, page, + PMBUS_VOUT_OV_FAULT_LIMIT, + PSC_VOLTAGE_OUT, false); + if (info->func[page] & PMBUS_HAVE_STATUS_VOUT) { + pmbus_add_boolean_reg(data, "in", "crit_alarm", + in_index, + PB_STATUS_VOUT_BASE + + page, + PB_VOLTAGE_OV_FAULT); + have_alarm = true; + } + } + /* + * Add generic alarm attribute only if there are no individual + * attributes. 
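+ *
+ * The resulting sysfs naming for a hypothetical two-page chip with VIN
+ * and two outputs: in1_label = "vin" with in1_input and its limits, then
+ * in2_label = "vout1" and in3_label = "vout2", each with the limit and
+ * alarm attributes created above.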
+ */ + if (!have_alarm) + pmbus_add_boolean_reg(data, "in", "alarm", + in_index, + PB_STATUS_BASE + page, + PB_STATUS_VOUT_OV); + in_index++; + } + + /* + * Current sensors + */ + + /* + * Input current sensors + */ + in_index = 1; + if (info->func[0] & PMBUS_HAVE_IIN) { + i0 = data->num_sensors; + pmbus_add_label(data, "curr", in_index, "iin", 0); + pmbus_add_sensor(data, "curr", "input", in_index, + 0, PMBUS_READ_IIN, PSC_CURRENT_IN, true); + if (pmbus_check_word_register(client, 0, + PMBUS_IIN_OC_WARN_LIMIT)) { + i1 = data->num_sensors; + pmbus_add_sensor(data, "curr", "max", in_index, + 0, PMBUS_IIN_OC_WARN_LIMIT, + PSC_CURRENT_IN, false); + if (info->func[0] & PMBUS_HAVE_STATUS_INPUT) { + pmbus_add_boolean_reg(data, "curr", "max_alarm", + in_index, + PB_STATUS_INPUT_BASE, + PB_IIN_OC_WARNING); + } + } + if (pmbus_check_word_register(client, 0, + PMBUS_IIN_OC_FAULT_LIMIT)) { + i1 = data->num_sensors; + pmbus_add_sensor(data, "curr", "crit", in_index, + 0, PMBUS_IIN_OC_FAULT_LIMIT, + PSC_CURRENT_IN, false); + if (info->func[0] & PMBUS_HAVE_STATUS_INPUT) + pmbus_add_boolean_reg(data, "curr", + "crit_alarm", + in_index, + PB_STATUS_INPUT_BASE, + PB_IIN_OC_FAULT); + } + in_index++; + } + + /* + * Output current sensors + */ + for (page = 0; page < info->pages; page++) { + bool have_alarm = false; + + if (!(info->func[page] & PMBUS_HAVE_IOUT)) + continue; + + i0 = data->num_sensors; + pmbus_add_label(data, "curr", in_index, "iout", page + 1); + pmbus_add_sensor(data, "curr", "input", in_index, page, + PMBUS_READ_IOUT, PSC_CURRENT_OUT, true); + if (pmbus_check_word_register(client, page, + PMBUS_IOUT_OC_WARN_LIMIT)) { + i1 = data->num_sensors; + pmbus_add_sensor(data, "curr", "max", in_index, page, + PMBUS_IOUT_OC_WARN_LIMIT, + PSC_CURRENT_OUT, false); + if (info->func[page] & PMBUS_HAVE_STATUS_IOUT) { + pmbus_add_boolean_reg(data, "curr", "max_alarm", + in_index, + PB_STATUS_IOUT_BASE + + page, PB_IOUT_OC_WARNING); + have_alarm = true; + } + } + if (pmbus_check_word_register(client, page, + PMBUS_IOUT_UC_FAULT_LIMIT)) { + i1 = data->num_sensors; + pmbus_add_sensor(data, "curr", "lcrit", in_index, page, + PMBUS_IOUT_UC_FAULT_LIMIT, + PSC_CURRENT_OUT, false); + if (info->func[page] & PMBUS_HAVE_STATUS_IOUT) { + pmbus_add_boolean_reg(data, "curr", + "lcrit_alarm", + in_index, + PB_STATUS_IOUT_BASE + + page, PB_IOUT_UC_FAULT); + have_alarm = true; + } + } + if (pmbus_check_word_register(client, page, + PMBUS_IOUT_OC_FAULT_LIMIT)) { + i1 = data->num_sensors; + pmbus_add_sensor(data, "curr", "crit", in_index, page, + PMBUS_IOUT_OC_FAULT_LIMIT, + PSC_CURRENT_OUT, false); + if (info->func[page] & PMBUS_HAVE_STATUS_IOUT) { + pmbus_add_boolean_reg(data, "curr", + "crit_alarm", + in_index, + PB_STATUS_IOUT_BASE + + page, PB_IOUT_OC_FAULT); + have_alarm = true; + } + } + /* + * Add generic alarm attribute only if there are no individual + * attributes. 
+ */ + if (!have_alarm) + pmbus_add_boolean_reg(data, "curr", "alarm", + in_index, + PB_STATUS_BASE + page, + PB_STATUS_IOUT_OC); + in_index++; + } + + /* + * Power sensors + */ + /* + * Input Power sensors + */ + in_index = 1; + if (info->func[0] & PMBUS_HAVE_PIN) { + i0 = data->num_sensors; + pmbus_add_label(data, "power", in_index, "pin", 0); + pmbus_add_sensor(data, "power", "input", in_index, + 0, PMBUS_READ_PIN, PSC_POWER, true); + if (pmbus_check_word_register(client, 0, + PMBUS_PIN_OP_WARN_LIMIT)) { + i1 = data->num_sensors; + pmbus_add_sensor(data, "power", "max", in_index, + 0, PMBUS_PIN_OP_WARN_LIMIT, PSC_POWER, + false); + if (info->func[0] & PMBUS_HAVE_STATUS_INPUT) + pmbus_add_boolean_reg(data, "power", + "alarm", + in_index, + PB_STATUS_INPUT_BASE, + PB_PIN_OP_WARNING); + } + in_index++; + } + + /* + * Output Power sensors + */ + for (page = 0; page < info->pages; page++) { + bool need_alarm = false; + + if (!(info->func[page] & PMBUS_HAVE_POUT)) + continue; + + i0 = data->num_sensors; + pmbus_add_label(data, "power", in_index, "pout", page + 1); + pmbus_add_sensor(data, "power", "input", in_index, page, + PMBUS_READ_POUT, PSC_POWER, true); + /* + * Per hwmon sysfs API, power_cap is to be used to limit output + * power. + * We have two registers related to maximum output power, + * PMBUS_POUT_MAX and PMBUS_POUT_OP_WARN_LIMIT. + * PMBUS_POUT_MAX matches the powerX_cap attribute definition. + * There is no attribute in the API to match + * PMBUS_POUT_OP_WARN_LIMIT. We use powerX_max for now. + */ + if (pmbus_check_word_register(client, page, PMBUS_POUT_MAX)) { + i1 = data->num_sensors; + pmbus_add_sensor(data, "power", "cap", in_index, page, + PMBUS_POUT_MAX, PSC_POWER, false); + need_alarm = true; + } + if (pmbus_check_word_register(client, page, + PMBUS_POUT_OP_WARN_LIMIT)) { + i1 = data->num_sensors; + pmbus_add_sensor(data, "power", "max", in_index, page, + PMBUS_POUT_OP_WARN_LIMIT, PSC_POWER, + false); + need_alarm = true; + } + if (need_alarm && (info->func[page] & PMBUS_HAVE_STATUS_IOUT)) + pmbus_add_boolean_reg(data, "power", "alarm", + in_index, + PB_STATUS_IOUT_BASE + page, + PB_POUT_OP_WARNING + | PB_POWER_LIMITING); + + if (pmbus_check_word_register(client, page, + PMBUS_POUT_OP_FAULT_LIMIT)) { + i1 = data->num_sensors; + pmbus_add_sensor(data, "power", "crit", in_index, page, + PMBUS_POUT_OP_FAULT_LIMIT, PSC_POWER, + false); + if (info->func[page] & PMBUS_HAVE_STATUS_IOUT) + pmbus_add_boolean_reg(data, "power", + "crit_alarm", + in_index, + PB_STATUS_IOUT_BASE + + page, + PB_POUT_OP_FAULT); + } + in_index++; + } + + /* + * Temperature sensors + */ + in_index = 1; + for (page = 0; page < info->pages; page++) { + int t; + + for (t = 0; t < ARRAY_SIZE(pmbus_temp_registers); t++) { + bool have_alarm = false; + + /* + * A PMBus chip may support any combination of + * temperature registers on any page. So we can not + * abort after a failure to detect a register, but have + * to continue checking for all registers on all pages. + */ + if (!(info->func[page] & pmbus_temp_flags[t])) + continue; + + if (!pmbus_check_word_register + (client, page, pmbus_temp_registers[t])) + continue; + + i0 = data->num_sensors; + pmbus_add_sensor(data, "temp", "input", in_index, page, + pmbus_temp_registers[t], + PSC_TEMPERATURE, true); + + /* + * PMBus provides only one status register for TEMP1-3. + * Thus, we can not use the status register to determine + * which of the three sensors actually caused an alarm. 
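+ *
+ * (Encoding example for the comparison booleans used here: with input
+ * sensor index i0 and limit sensor index i1, min_alarm packs
+ * <s1,s2> = <i1,i0>, so pmbus_get_boolean() reports an alarm when
+ * limit >= input, while max_alarm packs <i0,i1>, i.e. input >= limit.)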
+ * Always compare current temperature against the limit + * registers to determine alarm conditions for a + * specific sensor. + * + * Since there is only one set of limit registers for + * up to three temperature sensors, we need to update + * all limit registers after the limit was changed for + * one of the sensors. This ensures that correct limits + * are reported for all temperature sensors. + */ + if (pmbus_check_word_register + (client, page, PMBUS_UT_WARN_LIMIT)) { + i1 = data->num_sensors; + pmbus_add_sensor(data, "temp", "min", in_index, + page, PMBUS_UT_WARN_LIMIT, + PSC_TEMPERATURE, true); + if (info->func[page] & PMBUS_HAVE_STATUS_TEMP) { + pmbus_add_boolean_cmp(data, "temp", + "min_alarm", in_index, i1, i0, + PB_STATUS_TEMP_BASE + page, + PB_TEMP_UT_WARNING); + have_alarm = true; + } + } + if (pmbus_check_word_register(client, page, + PMBUS_UT_FAULT_LIMIT)) { + i1 = data->num_sensors; + pmbus_add_sensor(data, "temp", "lcrit", + in_index, page, + PMBUS_UT_FAULT_LIMIT, + PSC_TEMPERATURE, true); + if (info->func[page] & PMBUS_HAVE_STATUS_TEMP) { + pmbus_add_boolean_cmp(data, "temp", + "lcrit_alarm", in_index, i1, i0, + PB_STATUS_TEMP_BASE + page, + PB_TEMP_UT_FAULT); + have_alarm = true; + } + } + if (pmbus_check_word_register + (client, page, PMBUS_OT_WARN_LIMIT)) { + i1 = data->num_sensors; + pmbus_add_sensor(data, "temp", "max", in_index, + page, PMBUS_OT_WARN_LIMIT, + PSC_TEMPERATURE, true); + if (info->func[page] & PMBUS_HAVE_STATUS_TEMP) { + pmbus_add_boolean_cmp(data, "temp", + "max_alarm", in_index, i0, i1, + PB_STATUS_TEMP_BASE + page, + PB_TEMP_OT_WARNING); + have_alarm = true; + } + } + if (pmbus_check_word_register(client, page, + PMBUS_OT_FAULT_LIMIT)) { + i1 = data->num_sensors; + pmbus_add_sensor(data, "temp", "crit", in_index, + page, PMBUS_OT_FAULT_LIMIT, + PSC_TEMPERATURE, true); + if (info->func[page] & PMBUS_HAVE_STATUS_TEMP) { + pmbus_add_boolean_cmp(data, "temp", + "crit_alarm", in_index, i0, i1, + PB_STATUS_TEMP_BASE + page, + PB_TEMP_OT_FAULT); + have_alarm = true; + } + } + /* + * Last resort - we were not able to create any alarm + * registers. Report alarm for all sensors using the + * status register temperature alarm bit. + */ + if (!have_alarm) + pmbus_add_boolean_reg(data, "temp", "alarm", + in_index, + PB_STATUS_BASE + page, + PB_STATUS_TEMPERATURE); + in_index++; + } + } + + /* + * Fans + */ + in_index = 1; + for (page = 0; page < info->pages; page++) { + int f; + + for (f = 0; f < ARRAY_SIZE(pmbus_fan_registers); f++) { + int regval; + + if (!(info->func[page] & pmbus_fan_flags[f])) + break; + + if (!pmbus_check_word_register(client, page, + pmbus_fan_registers[f]) + || !pmbus_check_byte_register(client, page, + pmbus_fan_config_registers[f])) + break; + + /* + * Skip fan if not installed. + * Each fan configuration register covers multiple fans, + * so we have to do some magic. + */ + regval = pmbus_read_byte_data(client, page, + pmbus_fan_config_registers[f]); + if (regval < 0 || + (!(regval & (PB_FAN_1_INSTALLED >> ((f & 1) * 4))))) + continue; + + i0 = data->num_sensors; + pmbus_add_sensor(data, "fan", "input", in_index, page, + pmbus_fan_registers[f], PSC_FAN, true); + + /* + * Each fan status register covers multiple fans, + * so we have to do some magic. 
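+ *
+ * Worked example of that magic for f = 1 (the second fan covered by
+ * FAN_CONFIG_12): PB_FAN_1_INSTALLED >> ((1 & 1) * 4) moves bit 7 to
+ * bit 3, which is PB_FAN_2_INSTALLED, and PB_FAN_FAN1_WARNING >> (1 & 1)
+ * moves bit 5 to bit 4, which is PB_FAN_FAN2_WARNING.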
+ */ + if ((info->func[page] & pmbus_fan_status_flags[f]) && + pmbus_check_byte_register(client, + page, pmbus_fan_status_registers[f])) { + int base; + + if (f > 1) /* fan 3, 4 */ + base = PB_STATUS_FAN34_BASE + page; + else + base = PB_STATUS_FAN_BASE + page; + pmbus_add_boolean_reg(data, "fan", "alarm", + in_index, base, + PB_FAN_FAN1_WARNING >> (f & 1)); + pmbus_add_boolean_reg(data, "fan", "fault", + in_index, base, + PB_FAN_FAN1_FAULT >> (f & 1)); + } + in_index++; + } + } +} + +/* + * Identify chip parameters. + * This function is called for all chips. + */ +static int pmbus_identify_common(struct i2c_client *client, + struct pmbus_data *data) +{ + int vout_mode = -1, exponent; + + if (pmbus_check_byte_register(client, 0, PMBUS_VOUT_MODE)) + vout_mode = pmbus_read_byte_data(client, 0, PMBUS_VOUT_MODE); + if (vout_mode >= 0 && vout_mode != 0xff) { + /* + * Not all chips support the VOUT_MODE command, + * so a failure to read it is not an error. + */ + switch (vout_mode >> 5) { + case 0: /* linear mode */ + if (data->info->direct[PSC_VOLTAGE_OUT]) + return -ENODEV; + + exponent = vout_mode & 0x1f; + /* and sign-extend it */ + if (exponent & 0x10) + exponent |= ~0x1f; + data->exponent = exponent; + break; + case 2: /* direct mode */ + if (!data->info->direct[PSC_VOLTAGE_OUT]) + return -ENODEV; + break; + default: + return -ENODEV; + } + } + + /* Determine maximum number of sensors, booleans, and labels */ + pmbus_find_max_attr(client, data); + pmbus_clear_fault_page(client, 0); + return 0; +} + +int pmbus_do_probe(struct i2c_client *client, const struct i2c_device_id *id, + struct pmbus_driver_info *info) +{ + const struct pmbus_platform_data *pdata = client->dev.platform_data; + struct pmbus_data *data; + int ret; + + if (!info) { + dev_err(&client->dev, "Missing chip information"); + return -ENODEV; + } + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WRITE_BYTE + | I2C_FUNC_SMBUS_BYTE_DATA + | I2C_FUNC_SMBUS_WORD_DATA)) + return -ENODEV; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) { + dev_err(&client->dev, "No memory to allocate driver data\n"); + return -ENOMEM; + } + + i2c_set_clientdata(client, data); + mutex_init(&data->update_lock); + + /* + * Bail out if status register or PMBus revision register + * does not exist. + */ + if (i2c_smbus_read_byte_data(client, PMBUS_STATUS_BYTE) < 0 + || i2c_smbus_read_byte_data(client, PMBUS_REVISION) < 0) { + dev_err(&client->dev, + "Status or revision register not found\n"); + ret = -ENODEV; + goto out_data; + } + + if (pdata) + data->flags = pdata->flags; + data->info = info; + + pmbus_clear_faults(client); + + if (info->identify) { + ret = (*info->identify)(client, info); + if (ret < 0) { + dev_err(&client->dev, "Chip identification failed\n"); + goto out_data; + } + } + + if (info->pages <= 0 || info->pages > PMBUS_PAGES) { + dev_err(&client->dev, "Bad number of PMBus pages: %d\n", + info->pages); + ret = -EINVAL; + goto out_data; + } + /* + * Bail out if more than one page was configured, but we can not + * select the highest page. This is an indication that the wrong + * chip type was selected. Better bail out now than keep + * returning errors later on. 
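+ *
+ * (A worked VOUT_MODE example for pmbus_identify_common() above: a value
+ * of 0x17 has the upper three bits 000, i.e. linear mode, and a 5-bit
+ * two's-complement exponent of 0b10111 = -9, so LINEAR16 output voltages
+ * are read as mantissa * 2^-9 volts.)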
+ */ + if (info->pages > 1 && pmbus_set_page(client, info->pages - 1) < 0) { + dev_err(&client->dev, "Failed to select page %d\n", + info->pages - 1); + ret = -EINVAL; + goto out_data; + } + + ret = pmbus_identify_common(client, data); + if (ret < 0) { + dev_err(&client->dev, "Failed to identify chip capabilities\n"); + goto out_data; + } + + ret = -ENOMEM; + data->sensors = kzalloc(sizeof(struct pmbus_sensor) * data->max_sensors, + GFP_KERNEL); + if (!data->sensors) { + dev_err(&client->dev, "No memory to allocate sensor data\n"); + goto out_data; + } + + data->booleans = kzalloc(sizeof(struct pmbus_boolean) + * data->max_booleans, GFP_KERNEL); + if (!data->booleans) { + dev_err(&client->dev, "No memory to allocate boolean data\n"); + goto out_sensors; + } + + data->labels = kzalloc(sizeof(struct pmbus_label) * data->max_labels, + GFP_KERNEL); + if (!data->labels) { + dev_err(&client->dev, "No memory to allocate label data\n"); + goto out_booleans; + } + + data->attributes = kzalloc(sizeof(struct attribute *) + * data->max_attributes, GFP_KERNEL); + if (!data->attributes) { + dev_err(&client->dev, "No memory to allocate attribute data\n"); + goto out_labels; + } + + pmbus_find_attributes(client, data); + + /* + * If there are no attributes, something is wrong. + * Bail out instead of trying to register nothing. + */ + if (!data->num_attributes) { + dev_err(&client->dev, "No attributes found\n"); + ret = -ENODEV; + goto out_attributes; + } + + /* Register sysfs hooks */ + data->group.attrs = data->attributes; + ret = sysfs_create_group(&client->dev.kobj, &data->group); + if (ret) { + dev_err(&client->dev, "Failed to create sysfs entries\n"); + goto out_attributes; + } + data->hwmon_dev = hwmon_device_register(&client->dev); + if (IS_ERR(data->hwmon_dev)) { + ret = PTR_ERR(data->hwmon_dev); + dev_err(&client->dev, "Failed to register hwmon device\n"); + goto out_hwmon_device_register; + } + return 0; + +out_hwmon_device_register: + sysfs_remove_group(&client->dev.kobj, &data->group); +out_attributes: + kfree(data->attributes); +out_labels: + kfree(data->labels); +out_booleans: + kfree(data->booleans); +out_sensors: + kfree(data->sensors); +out_data: + kfree(data); + return ret; +} +EXPORT_SYMBOL_GPL(pmbus_do_probe); + +int pmbus_do_remove(struct i2c_client *client) +{ + struct pmbus_data *data = i2c_get_clientdata(client); + hwmon_device_unregister(data->hwmon_dev); + sysfs_remove_group(&client->dev.kobj, &data->group); + kfree(data->attributes); + kfree(data->labels); + kfree(data->booleans); + kfree(data->sensors); + kfree(data); + return 0; +} +EXPORT_SYMBOL_GPL(pmbus_do_remove); + +MODULE_AUTHOR("Guenter Roeck"); +MODULE_DESCRIPTION("PMBus core driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c index 073eabedc432..f2b377c56a3a 100644 --- a/drivers/hwmon/w83627ehf.c +++ b/drivers/hwmon/w83627ehf.c @@ -1,11 +1,12 @@ /* w83627ehf - Driver for the hardware monitoring functionality of - the Winbond W83627EHF Super-I/O chip + the Winbond W83627EHF Super-I/O chip Copyright (C) 2005 Jean Delvare <khali@linux-fr.org> Copyright (C) 2006 Yuan Mu (Winbond), - Rudolf Marek <r.marek@assembler.cz> - David Hubbard <david.c.hubbard@gmail.com> + Rudolf Marek <r.marek@assembler.cz> + David Hubbard <david.c.hubbard@gmail.com> Daniel J Blueman <daniel.blueman@gmail.com> + Copyright (C) 2010 Sheng-Yuan Huang (Nuvoton) (PS00) Shamelessly ripped from the w83627hf driver Copyright (C) 2003 Mark Studebaker @@ -35,11 +36,13 @@ Chip #vin #fan #pwm #temp chip IDs 
man ID w83627ehf 10 5 4 3 0x8850 0x88 0x5ca3 - 0x8860 0xa1 + 0x8860 0xa1 w83627dhg 9 5 4 3 0xa020 0xc1 0x5ca3 w83627dhg-p 9 5 4 3 0xb070 0xc1 0x5ca3 w83667hg 9 5 3 3 0xa510 0xc1 0x5ca3 - w83667hg-b 9 5 3 3 0xb350 0xc1 0x5ca3 + w83667hg-b 9 5 3 4 0xb350 0xc1 0x5ca3 + nct6775f 9 4 3 9 0xb470 0xc1 0x5ca3 + nct6776f 9 5 3 9 0xC330 0xc1 0x5ca3 */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -58,21 +61,28 @@ #include <linux/io.h> #include "lm75.h" -enum kinds { w83627ehf, w83627dhg, w83627dhg_p, w83667hg, w83667hg_b }; +enum kinds { w83627ehf, w83627dhg, w83627dhg_p, w83667hg, w83667hg_b, nct6775, + nct6776 }; /* used to set data->name = w83627ehf_device_names[data->sio_kind] */ -static const char * w83627ehf_device_names[] = { +static const char * const w83627ehf_device_names[] = { "w83627ehf", "w83627dhg", "w83627dhg", "w83667hg", "w83667hg", + "nct6775", + "nct6776", }; static unsigned short force_id; module_param(force_id, ushort, 0); MODULE_PARM_DESC(force_id, "Override the detected device ID"); +static unsigned short fan_debounce; +module_param(fan_debounce, ushort, 0); +MODULE_PARM_DESC(fan_debounce, "Enable debouncing for fan RPM signal"); + #define DRVNAME "w83627ehf" /* @@ -80,7 +90,7 @@ MODULE_PARM_DESC(force_id, "Override the detected device ID"); */ #define W83627EHF_LD_HWM 0x0b -#define W83667HG_LD_VID 0x0d +#define W83667HG_LD_VID 0x0d #define SIO_REG_LDSEL 0x07 /* Logical device select */ #define SIO_REG_DEVID 0x20 /* Device ID (2 bytes) */ @@ -94,8 +104,10 @@ MODULE_PARM_DESC(force_id, "Override the detected device ID"); #define SIO_W83627EHG_ID 0x8860 #define SIO_W83627DHG_ID 0xa020 #define SIO_W83627DHG_P_ID 0xb070 -#define SIO_W83667HG_ID 0xa510 +#define SIO_W83667HG_ID 0xa510 #define SIO_W83667HG_B_ID 0xb350 +#define SIO_NCT6775_ID 0xb470 +#define SIO_NCT6776_ID 0xc330 #define SIO_ID_MASK 0xFFF0 static inline void @@ -138,7 +150,7 @@ superio_exit(int ioreg) * ISA constants */ -#define IOREGION_ALIGNMENT ~7 +#define IOREGION_ALIGNMENT (~7) #define IOREGION_OFFSET 5 #define IOREGION_LENGTH 2 #define ADDR_REG_OFFSET 0 @@ -164,13 +176,10 @@ static const u16 W83627EHF_REG_FAN_MIN[] = { 0x3b, 0x3c, 0x3d, 0x3e, 0x55c }; #define W83627EHF_REG_IN(nr) ((nr < 7) ? 
(0x20 + (nr)) : \ (0x550 + (nr) - 7)) -#define W83627EHF_REG_TEMP1 0x27 -#define W83627EHF_REG_TEMP1_HYST 0x3a -#define W83627EHF_REG_TEMP1_OVER 0x39 -static const u16 W83627EHF_REG_TEMP[] = { 0x150, 0x250 }; -static const u16 W83627EHF_REG_TEMP_HYST[] = { 0x153, 0x253 }; -static const u16 W83627EHF_REG_TEMP_OVER[] = { 0x155, 0x255 }; -static const u16 W83627EHF_REG_TEMP_CONFIG[] = { 0x152, 0x252 }; +static const u16 W83627EHF_REG_TEMP[] = { 0x27, 0x150, 0x250, 0x7e }; +static const u16 W83627EHF_REG_TEMP_HYST[] = { 0x3a, 0x153, 0x253, 0 }; +static const u16 W83627EHF_REG_TEMP_OVER[] = { 0x39, 0x155, 0x255, 0 }; +static const u16 W83627EHF_REG_TEMP_CONFIG[] = { 0, 0x152, 0x252, 0 }; /* Fan clock dividers are spread over the following five registers */ #define W83627EHF_REG_FANDIV1 0x47 @@ -179,6 +188,11 @@ static const u16 W83627EHF_REG_TEMP_CONFIG[] = { 0x152, 0x252 }; #define W83627EHF_REG_DIODE 0x59 #define W83627EHF_REG_SMI_OVT 0x4C +/* NCT6775F has its own fan divider registers */ +#define NCT6775_REG_FANDIV1 0x506 +#define NCT6775_REG_FANDIV2 0x507 +#define NCT6775_REG_FAN_DEBOUNCE 0xf0 + #define W83627EHF_REG_ALARM1 0x459 #define W83627EHF_REG_ALARM2 0x45A #define W83627EHF_REG_ALARM3 0x45B @@ -199,22 +213,123 @@ static const u8 W83627EHF_PWM_MODE_SHIFT[] = { 0, 1, 0, 6 }; static const u8 W83627EHF_PWM_ENABLE_SHIFT[] = { 2, 4, 1, 4 }; /* FAN Duty Cycle, be used to control */ -static const u8 W83627EHF_REG_PWM[] = { 0x01, 0x03, 0x11, 0x61 }; -static const u8 W83627EHF_REG_TARGET[] = { 0x05, 0x06, 0x13, 0x63 }; +static const u16 W83627EHF_REG_PWM[] = { 0x01, 0x03, 0x11, 0x61 }; +static const u16 W83627EHF_REG_TARGET[] = { 0x05, 0x06, 0x13, 0x63 }; static const u8 W83627EHF_REG_TOLERANCE[] = { 0x07, 0x07, 0x14, 0x62 }; /* Advanced Fan control, some values are common for all fans */ -static const u8 W83627EHF_REG_FAN_START_OUTPUT[] = { 0x0a, 0x0b, 0x16, 0x65 }; -static const u8 W83627EHF_REG_FAN_STOP_OUTPUT[] = { 0x08, 0x09, 0x15, 0x64 }; -static const u8 W83627EHF_REG_FAN_STOP_TIME[] = { 0x0c, 0x0d, 0x17, 0x66 }; +static const u16 W83627EHF_REG_FAN_START_OUTPUT[] = { 0x0a, 0x0b, 0x16, 0x65 }; +static const u16 W83627EHF_REG_FAN_STOP_OUTPUT[] = { 0x08, 0x09, 0x15, 0x64 }; +static const u16 W83627EHF_REG_FAN_STOP_TIME[] = { 0x0c, 0x0d, 0x17, 0x66 }; -static const u8 W83627EHF_REG_FAN_MAX_OUTPUT_COMMON[] +static const u16 W83627EHF_REG_FAN_MAX_OUTPUT_COMMON[] = { 0xff, 0x67, 0xff, 0x69 }; -static const u8 W83627EHF_REG_FAN_STEP_OUTPUT_COMMON[] +static const u16 W83627EHF_REG_FAN_STEP_OUTPUT_COMMON[] = { 0xff, 0x68, 0xff, 0x6a }; -static const u8 W83627EHF_REG_FAN_MAX_OUTPUT_W83667_B[] = { 0x67, 0x69, 0x6b }; -static const u8 W83627EHF_REG_FAN_STEP_OUTPUT_W83667_B[] = { 0x68, 0x6a, 0x6c }; +static const u16 W83627EHF_REG_FAN_MAX_OUTPUT_W83667_B[] = { 0x67, 0x69, 0x6b }; +static const u16 W83627EHF_REG_FAN_STEP_OUTPUT_W83667_B[] + = { 0x68, 0x6a, 0x6c }; + +static const u16 NCT6775_REG_TARGET[] = { 0x101, 0x201, 0x301 }; +static const u16 NCT6775_REG_FAN_MODE[] = { 0x102, 0x202, 0x302 }; +static const u16 NCT6775_REG_FAN_STOP_OUTPUT[] = { 0x105, 0x205, 0x305 }; +static const u16 NCT6775_REG_FAN_START_OUTPUT[] = { 0x106, 0x206, 0x306 }; +static const u16 NCT6775_REG_FAN_STOP_TIME[] = { 0x107, 0x207, 0x307 }; +static const u16 NCT6775_REG_PWM[] = { 0x109, 0x209, 0x309 }; +static const u16 NCT6775_REG_FAN_MAX_OUTPUT[] = { 0x10a, 0x20a, 0x30a }; +static const u16 NCT6775_REG_FAN_STEP_OUTPUT[] = { 0x10b, 0x20b, 0x30b }; +static const u16 NCT6775_REG_FAN[] = { 0x630, 0x632, 0x634, 0x636, 0x638 }; 
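+/*
+ * An illustrative (assumed) usage pattern for the parallel register tables
+ * above and below: probe points the indirection pointers in struct
+ * w83627ehf_data at one set, e.g.
+ *
+ *	data->REG_PWM = NCT6775_REG_PWM;
+ *	data->REG_FAN = NCT6775_REG_FAN;
+ *
+ * on the Nuvoton chips, versus the W83627EHF_REG_* tables otherwise, so
+ * that the update and sysfs code stays chip-agnostic.
+ */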
+static const u16 NCT6776_REG_FAN_MIN[] = { 0x63a, 0x63c, 0x63e, 0x640, 0x642}; + +static const u16 NCT6775_REG_TEMP[] + = { 0x27, 0x150, 0x250, 0x73, 0x75, 0x77, 0x62b, 0x62c, 0x62d }; +static const u16 NCT6775_REG_TEMP_CONFIG[] + = { 0, 0x152, 0x252, 0, 0, 0, 0x628, 0x629, 0x62A }; +static const u16 NCT6775_REG_TEMP_HYST[] + = { 0x3a, 0x153, 0x253, 0, 0, 0, 0x673, 0x678, 0x67D }; +static const u16 NCT6775_REG_TEMP_OVER[] + = { 0x39, 0x155, 0x255, 0, 0, 0, 0x672, 0x677, 0x67C }; +static const u16 NCT6775_REG_TEMP_SOURCE[] + = { 0x621, 0x622, 0x623, 0x100, 0x200, 0x300, 0x624, 0x625, 0x626 }; + +static const char *const w83667hg_b_temp_label[] = { + "SYSTIN", + "CPUTIN", + "AUXTIN", + "AMDTSI", + "PECI Agent 1", + "PECI Agent 2", + "PECI Agent 3", + "PECI Agent 4" +}; + +static const char *const nct6775_temp_label[] = { + "", + "SYSTIN", + "CPUTIN", + "AUXTIN", + "AMD SB-TSI", + "PECI Agent 0", + "PECI Agent 1", + "PECI Agent 2", + "PECI Agent 3", + "PECI Agent 4", + "PECI Agent 5", + "PECI Agent 6", + "PECI Agent 7", + "PCH_CHIP_CPU_MAX_TEMP", + "PCH_CHIP_TEMP", + "PCH_CPU_TEMP", + "PCH_MCH_TEMP", + "PCH_DIM0_TEMP", + "PCH_DIM1_TEMP", + "PCH_DIM2_TEMP", + "PCH_DIM3_TEMP" +}; + +static const char *const nct6776_temp_label[] = { + "", + "SYSTIN", + "CPUTIN", + "AUXTIN", + "SMBUSMASTER 0", + "SMBUSMASTER 1", + "SMBUSMASTER 2", + "SMBUSMASTER 3", + "SMBUSMASTER 4", + "SMBUSMASTER 5", + "SMBUSMASTER 6", + "SMBUSMASTER 7", + "PECI Agent 0", + "PECI Agent 1", + "PCH_CHIP_CPU_MAX_TEMP", + "PCH_CHIP_TEMP", + "PCH_CPU_TEMP", + "PCH_MCH_TEMP", + "PCH_DIM0_TEMP", + "PCH_DIM1_TEMP", + "PCH_DIM2_TEMP", + "PCH_DIM3_TEMP", + "BYTE_TEMP" +}; + +#define NUM_REG_TEMP ARRAY_SIZE(NCT6775_REG_TEMP) + +static inline int is_word_sized(u16 reg) +{ + return ((((reg & 0xff00) == 0x100 + || (reg & 0xff00) == 0x200) + && ((reg & 0x00ff) == 0x50 + || (reg & 0x00ff) == 0x53 + || (reg & 0x00ff) == 0x55)) + || (reg & 0xfff0) == 0x630 + || reg == 0x640 || reg == 0x642 + || ((reg & 0xfff0) == 0x650 + && (reg & 0x000f) >= 0x06) + || reg == 0x73 || reg == 0x75 || reg == 0x77 + ); +} /* * Conversions @@ -232,12 +347,36 @@ static inline u8 step_time_to_reg(unsigned int msec, u8 mode) (msec + 200) / 400), 1, 255); } -static inline unsigned int -fan_from_reg(u8 reg, unsigned int div) +static unsigned int fan_from_reg8(u16 reg, unsigned int divreg) { if (reg == 0 || reg == 255) return 0; - return 1350000U / (reg * div); + return 1350000U / (reg << divreg); +} + +static unsigned int fan_from_reg13(u16 reg, unsigned int divreg) +{ + if ((reg & 0xff1f) == 0xff1f) + return 0; + + reg = (reg & 0x1f) | ((reg & 0xff00) >> 3); + + if (reg == 0) + return 0; + + return 1350000U / reg; +} + +static unsigned int fan_from_reg16(u16 reg, unsigned int divreg) +{ + if (reg == 0 || reg == 0xffff) + return 0; + + /* + * Even though the registers are 16 bit wide, the fan divisor + * still applies. 
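+ *
+ * Worked example (illustrative count): reg = 105 with divreg = 2 gives
+ * 1350000 / (105 << 2) = 1350000 / 420 = 3214 RPM, matching fan_from_reg8()
+ * for the same count and divider.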
+ */ + return 1350000U / (reg << divreg); } static inline unsigned int @@ -247,21 +386,19 @@ div_from_reg(u8 reg) } static inline int -temp1_from_reg(s8 reg) +temp_from_reg(u16 reg, s16 regval) { - return reg * 1000; + if (is_word_sized(reg)) + return LM75_TEMP_FROM_REG(regval); + return regval * 1000; } -static inline s8 -temp1_to_reg(long temp, int min, int max) +static inline u16 +temp_to_reg(u16 reg, long temp) { - if (temp <= min) - return min / 1000; - if (temp >= max) - return max / 1000; - if (temp < 0) - return (temp - 500) / 1000; - return (temp + 500) / 1000; + if (is_word_sized(reg)) + return LM75_TEMP_TO_REG(temp); + return DIV_ROUND_CLOSEST(SENSORS_LIMIT(temp, -127000, 128000), 1000); } /* Some of analog inputs have internal scaling (2x), 8mV is ADC LSB */ @@ -275,7 +412,8 @@ static inline long in_from_reg(u8 reg, u8 nr) static inline u8 in_to_reg(u32 val, u8 nr) { - return SENSORS_LIMIT(((val + (scale_in[nr] / 2)) / scale_in[nr]), 0, 255); + return SENSORS_LIMIT(((val + (scale_in[nr] / 2)) / scale_in[nr]), 0, + 255); } /* @@ -289,38 +427,57 @@ struct w83627ehf_data { struct device *hwmon_dev; struct mutex lock; - const u8 *REG_FAN_START_OUTPUT; - const u8 *REG_FAN_STOP_OUTPUT; - const u8 *REG_FAN_MAX_OUTPUT; - const u8 *REG_FAN_STEP_OUTPUT; + u16 reg_temp[NUM_REG_TEMP]; + u16 reg_temp_over[NUM_REG_TEMP]; + u16 reg_temp_hyst[NUM_REG_TEMP]; + u16 reg_temp_config[NUM_REG_TEMP]; + u8 temp_src[NUM_REG_TEMP]; + const char * const *temp_label; + + const u16 *REG_PWM; + const u16 *REG_TARGET; + const u16 *REG_FAN; + const u16 *REG_FAN_MIN; + const u16 *REG_FAN_START_OUTPUT; + const u16 *REG_FAN_STOP_OUTPUT; + const u16 *REG_FAN_STOP_TIME; + const u16 *REG_FAN_MAX_OUTPUT; + const u16 *REG_FAN_STEP_OUTPUT; + + unsigned int (*fan_from_reg)(u16 reg, unsigned int divreg); + unsigned int (*fan_from_reg_min)(u16 reg, unsigned int divreg); struct mutex update_lock; char valid; /* !=0 if following fields are valid */ unsigned long last_updated; /* In jiffies */ /* Register values */ + u8 bank; /* current register bank */ u8 in_num; /* number of in inputs we have */ u8 in[10]; /* Register value */ u8 in_max[10]; /* Register value */ u8 in_min[10]; /* Register value */ - u8 fan[5]; - u8 fan_min[5]; + unsigned int rpm[5]; + u16 fan_min[5]; u8 fan_div[5]; u8 has_fan; /* some fan inputs can be disabled */ + u8 has_fan_min; /* some fans don't have min register */ + bool has_fan_div; u8 temp_type[3]; - s8 temp1; - s8 temp1_max; - s8 temp1_max_hyst; - s16 temp[2]; - s16 temp_max[2]; - s16 temp_max_hyst[2]; + s16 temp[9]; + s16 temp_max[9]; + s16 temp_max_hyst[9]; u32 alarms; u8 pwm_mode[4]; /* 0->DC variable voltage, 1->PWM variable duty cycle */ u8 pwm_enable[4]; /* 1->manual 2->thermal cruise mode (also called SmartFan I) 3->fan speed cruise mode - 4->variable thermal cruise (also called SmartFan III) */ + 4->variable thermal cruise (also called + SmartFan III) + 5->enhanced variable thermal cruise (also called + SmartFan IV) */ + u8 pwm_enable_orig[4]; /* original value of pwm_enable */ u8 pwm_num; /* number of pwm */ u8 pwm[4]; u8 target_temp[4]; @@ -335,7 +492,7 @@ struct w83627ehf_data { u8 vid; u8 vrm; - u8 temp3_disable; + u16 have_temp; u8 in6_skip; }; @@ -344,30 +501,19 @@ struct w83627ehf_sio_data { enum kinds kind; }; -static inline int is_word_sized(u16 reg) -{ - return (((reg & 0xff00) == 0x100 - || (reg & 0xff00) == 0x200) - && ((reg & 0x00ff) == 0x50 - || (reg & 0x00ff) == 0x53 - || (reg & 0x00ff) == 0x55)); -} - -/* Registers 0x50-0x5f are banked */ +/* + * On older chips, only 
registers 0x50-0x5f are banked. + * On more recent chips, all registers are banked. + * Assume that is the case and set the bank number for each access. + * Cache the bank number so it only needs to be set if it changes. + */ static inline void w83627ehf_set_bank(struct w83627ehf_data *data, u16 reg) { - if ((reg & 0x00f0) == 0x50) { + u8 bank = reg >> 8; + if (data->bank != bank) { outb_p(W83627EHF_REG_BANK, data->addr + ADDR_REG_OFFSET); - outb_p(reg >> 8, data->addr + DATA_REG_OFFSET); - } -} - -/* Not strictly necessary, but play it safe for now */ -static inline void w83627ehf_reset_bank(struct w83627ehf_data *data, u16 reg) -{ - if (reg & 0xff00) { - outb_p(W83627EHF_REG_BANK, data->addr + ADDR_REG_OFFSET); - outb_p(0, data->addr + DATA_REG_OFFSET); + outb_p(bank, data->addr + DATA_REG_OFFSET); + data->bank = bank; } } @@ -385,14 +531,13 @@ static u16 w83627ehf_read_value(struct w83627ehf_data *data, u16 reg) data->addr + ADDR_REG_OFFSET); res = (res << 8) + inb_p(data->addr + DATA_REG_OFFSET); } - w83627ehf_reset_bank(data, reg); mutex_unlock(&data->lock); - return res; } -static int w83627ehf_write_value(struct w83627ehf_data *data, u16 reg, u16 value) +static int w83627ehf_write_value(struct w83627ehf_data *data, u16 reg, + u16 value) { int word_sized = is_word_sized(reg); @@ -406,13 +551,40 @@ static int w83627ehf_write_value(struct w83627ehf_data *data, u16 reg, u16 value data->addr + ADDR_REG_OFFSET); } outb_p(value & 0xff, data->addr + DATA_REG_OFFSET); - w83627ehf_reset_bank(data, reg); mutex_unlock(&data->lock); return 0; } /* This function assumes that the caller holds data->update_lock */ +static void nct6775_write_fan_div(struct w83627ehf_data *data, int nr) +{ + u8 reg; + + switch (nr) { + case 0: + reg = (w83627ehf_read_value(data, NCT6775_REG_FANDIV1) & 0x70) + | (data->fan_div[0] & 0x7); + w83627ehf_write_value(data, NCT6775_REG_FANDIV1, reg); + break; + case 1: + reg = (w83627ehf_read_value(data, NCT6775_REG_FANDIV1) & 0x7) + | ((data->fan_div[1] << 4) & 0x70); + w83627ehf_write_value(data, NCT6775_REG_FANDIV1, reg); + break; + case 2: + reg = (w83627ehf_read_value(data, NCT6775_REG_FANDIV2) & 0x70) + | (data->fan_div[2] & 0x7); + w83627ehf_write_value(data, NCT6775_REG_FANDIV2, reg); + break; + case 3: + reg = (w83627ehf_read_value(data, NCT6775_REG_FANDIV2) & 0x7) + | ((data->fan_div[3] << 4) & 0x70); + w83627ehf_write_value(data, NCT6775_REG_FANDIV2, reg); + break; + } +} + +/* This function assumes that the caller holds data->update_lock */ static void w83627ehf_write_fan_div(struct w83627ehf_data *data, int nr) { u8 reg; @@ -463,6 +635,32 @@ static void w83627ehf_write_fan_div(struct w83627ehf_data *data, int nr) } } +static void w83627ehf_write_fan_div_common(struct device *dev, + struct w83627ehf_data *data, int nr) +{ + struct w83627ehf_sio_data *sio_data = dev->platform_data; + + if (sio_data->kind == nct6776) + ; /* no dividers, do nothing */ + else if (sio_data->kind == nct6775) + nct6775_write_fan_div(data, nr); + else + w83627ehf_write_fan_div(data, nr); +} + +static void nct6775_update_fan_div(struct w83627ehf_data *data) +{ + u8 i; + + i = w83627ehf_read_value(data, NCT6775_REG_FANDIV1); + data->fan_div[0] = i & 0x7; + data->fan_div[1] = (i & 0x70) >> 4; + i = w83627ehf_read_value(data, NCT6775_REG_FANDIV2); + data->fan_div[2] = i & 0x7; + if (data->has_fan & (1<<3)) + data->fan_div[3] = (i & 0x70) >> 4; +} + static void w83627ehf_update_fan_div(struct w83627ehf_data *data) { int i; @@ -488,10 +686,79 @@ static void w83627ehf_update_fan_div(struct
w83627ehf_data *data) } } +static void w83627ehf_update_fan_div_common(struct device *dev, + struct w83627ehf_data *data) +{ + struct w83627ehf_sio_data *sio_data = dev->platform_data; + + if (sio_data->kind == nct6776) + ; /* no dividers, do nothing */ + else if (sio_data->kind == nct6775) + nct6775_update_fan_div(data); + else + w83627ehf_update_fan_div(data); +} + +static void nct6775_update_pwm(struct w83627ehf_data *data) +{ + int i; + int pwmcfg, fanmodecfg; + + for (i = 0; i < data->pwm_num; i++) { + pwmcfg = w83627ehf_read_value(data, + W83627EHF_REG_PWM_ENABLE[i]); + fanmodecfg = w83627ehf_read_value(data, + NCT6775_REG_FAN_MODE[i]); + data->pwm_mode[i] = + ((pwmcfg >> W83627EHF_PWM_MODE_SHIFT[i]) & 1) ? 0 : 1; + data->pwm_enable[i] = ((fanmodecfg >> 4) & 7) + 1; + data->tolerance[i] = fanmodecfg & 0x0f; + data->pwm[i] = w83627ehf_read_value(data, data->REG_PWM[i]); + } +} + +static void w83627ehf_update_pwm(struct w83627ehf_data *data) +{ + int i; + int pwmcfg = 0, tolerance = 0; /* shut up the compiler */ + + for (i = 0; i < data->pwm_num; i++) { + if (!(data->has_fan & (1 << i))) + continue; + + /* pwmcfg, tolerance mapped for i=0, i=1 to same reg */ + if (i != 1) { + pwmcfg = w83627ehf_read_value(data, + W83627EHF_REG_PWM_ENABLE[i]); + tolerance = w83627ehf_read_value(data, + W83627EHF_REG_TOLERANCE[i]); + } + data->pwm_mode[i] = + ((pwmcfg >> W83627EHF_PWM_MODE_SHIFT[i]) & 1) ? 0 : 1; + data->pwm_enable[i] = ((pwmcfg >> W83627EHF_PWM_ENABLE_SHIFT[i]) + & 3) + 1; + data->pwm[i] = w83627ehf_read_value(data, data->REG_PWM[i]); + + data->tolerance[i] = (tolerance >> (i == 1 ? 4 : 0)) & 0x0f; + } +} + +static void w83627ehf_update_pwm_common(struct device *dev, + struct w83627ehf_data *data) +{ + struct w83627ehf_sio_data *sio_data = dev->platform_data; + + if (sio_data->kind == nct6775 || sio_data->kind == nct6776) + nct6775_update_pwm(data); + else + w83627ehf_update_pwm(data); +} + static struct w83627ehf_data *w83627ehf_update_device(struct device *dev) { struct w83627ehf_data *data = dev_get_drvdata(dev); - int pwmcfg = 0, tolerance = 0; /* shut up the compiler */ + struct w83627ehf_sio_data *sio_data = dev->platform_data; + int i; mutex_lock(&data->update_lock); @@ -499,7 +766,7 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev) if (time_after(jiffies, data->last_updated + HZ + HZ/2) || !data->valid) { /* Fan clock dividers */ - w83627ehf_update_fan_div(data); + w83627ehf_update_fan_div_common(dev, data); /* Measured voltages and limits */ for (i = 0; i < data->in_num; i++) { @@ -513,92 +780,90 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev) /* Measured fan speeds and limits */ for (i = 0; i < 5; i++) { + u16 reg; + if (!(data->has_fan & (1 << i))) continue; - data->fan[i] = w83627ehf_read_value(data, - W83627EHF_REG_FAN[i]); - data->fan_min[i] = w83627ehf_read_value(data, - W83627EHF_REG_FAN_MIN[i]); + reg = w83627ehf_read_value(data, data->REG_FAN[i]); + data->rpm[i] = data->fan_from_reg(reg, + data->fan_div[i]); + + if (data->has_fan_min & (1 << i)) + data->fan_min[i] = w83627ehf_read_value(data, + data->REG_FAN_MIN[i]); /* If we failed to measure the fan speed and clock divider can be increased, let's try that for next time */ - if (data->fan[i] == 0xff - && data->fan_div[i] < 0x07) { - dev_dbg(dev, "Increasing fan%d " + if (data->has_fan_div + && (reg >= 0xff || (sio_data->kind == nct6775 + && reg == 0x00)) + && data->fan_div[i] < 0x07) { + dev_dbg(dev, "Increasing fan%d " "clock divider from %u to %u\n", i + 1, 
div_from_reg(data->fan_div[i]), div_from_reg(data->fan_div[i] + 1)); data->fan_div[i]++; - w83627ehf_write_fan_div(data, i); + w83627ehf_write_fan_div_common(dev, data, i); /* Preserve min limit if possible */ - if (data->fan_min[i] >= 2 + if ((data->has_fan_min & (1 << i)) + && data->fan_min[i] >= 2 && data->fan_min[i] != 255) w83627ehf_write_value(data, - W83627EHF_REG_FAN_MIN[i], + data->REG_FAN_MIN[i], (data->fan_min[i] /= 2)); } } + w83627ehf_update_pwm_common(dev, data); + for (i = 0; i < data->pwm_num; i++) { if (!(data->has_fan & (1 << i))) continue; - /* pwmcfg, tolerance mapped for i=0, i=1 to same reg */ - if (i != 1) { - pwmcfg = w83627ehf_read_value(data, - W83627EHF_REG_PWM_ENABLE[i]); - tolerance = w83627ehf_read_value(data, - W83627EHF_REG_TOLERANCE[i]); - } - data->pwm_mode[i] = - ((pwmcfg >> W83627EHF_PWM_MODE_SHIFT[i]) & 1) - ? 0 : 1; - data->pwm_enable[i] = - ((pwmcfg >> W83627EHF_PWM_ENABLE_SHIFT[i]) - & 3) + 1; - data->pwm[i] = w83627ehf_read_value(data, - W83627EHF_REG_PWM[i]); - data->fan_start_output[i] = w83627ehf_read_value(data, - W83627EHF_REG_FAN_START_OUTPUT[i]); - data->fan_stop_output[i] = w83627ehf_read_value(data, - W83627EHF_REG_FAN_STOP_OUTPUT[i]); - data->fan_stop_time[i] = w83627ehf_read_value(data, - W83627EHF_REG_FAN_STOP_TIME[i]); - - if (data->REG_FAN_MAX_OUTPUT[i] != 0xff) + data->fan_start_output[i] = + w83627ehf_read_value(data, + data->REG_FAN_START_OUTPUT[i]); + data->fan_stop_output[i] = + w83627ehf_read_value(data, + data->REG_FAN_STOP_OUTPUT[i]); + data->fan_stop_time[i] = + w83627ehf_read_value(data, + data->REG_FAN_STOP_TIME[i]); + + if (data->REG_FAN_MAX_OUTPUT && + data->REG_FAN_MAX_OUTPUT[i] != 0xff) data->fan_max_output[i] = w83627ehf_read_value(data, - data->REG_FAN_MAX_OUTPUT[i]); + data->REG_FAN_MAX_OUTPUT[i]); - if (data->REG_FAN_STEP_OUTPUT[i] != 0xff) + if (data->REG_FAN_STEP_OUTPUT && + data->REG_FAN_STEP_OUTPUT[i] != 0xff) data->fan_step_output[i] = w83627ehf_read_value(data, - data->REG_FAN_STEP_OUTPUT[i]); + data->REG_FAN_STEP_OUTPUT[i]); data->target_temp[i] = w83627ehf_read_value(data, - W83627EHF_REG_TARGET[i]) & + data->REG_TARGET[i]) & (data->pwm_mode[i] == 1 ? 0x7f : 0xff); - data->tolerance[i] = (tolerance >> (i == 1 ? 
4 : 0)) - & 0x0f; } /* Measured temperatures and limits */ - data->temp1 = w83627ehf_read_value(data, - W83627EHF_REG_TEMP1); - data->temp1_max = w83627ehf_read_value(data, - W83627EHF_REG_TEMP1_OVER); - data->temp1_max_hyst = w83627ehf_read_value(data, - W83627EHF_REG_TEMP1_HYST); - for (i = 0; i < 2; i++) { + for (i = 0; i < NUM_REG_TEMP; i++) { + if (!(data->have_temp & (1 << i))) + continue; data->temp[i] = w83627ehf_read_value(data, - W83627EHF_REG_TEMP[i]); - data->temp_max[i] = w83627ehf_read_value(data, - W83627EHF_REG_TEMP_OVER[i]); - data->temp_max_hyst[i] = w83627ehf_read_value(data, - W83627EHF_REG_TEMP_HYST[i]); + data->reg_temp[i]); + if (data->reg_temp_over[i]) + data->temp_max[i] + = w83627ehf_read_value(data, + data->reg_temp_over[i]); + if (data->reg_temp_hyst[i]) + data->temp_max_hyst[i] + = w83627ehf_read_value(data, + data->reg_temp_hyst[i]); } data->alarms = w83627ehf_read_value(data, @@ -625,7 +890,8 @@ show_##reg(struct device *dev, struct device_attribute *attr, \ char *buf) \ { \ struct w83627ehf_data *data = w83627ehf_update_device(dev); \ - struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); \ + struct sensor_device_attribute *sensor_attr = \ + to_sensor_dev_attr(attr); \ int nr = sensor_attr->index; \ return sprintf(buf, "%ld\n", in_from_reg(data->reg[nr], nr)); \ } @@ -635,14 +901,18 @@ show_in_reg(in_max) #define store_in_reg(REG, reg) \ static ssize_t \ -store_in_##reg (struct device *dev, struct device_attribute *attr, \ - const char *buf, size_t count) \ +store_in_##reg(struct device *dev, struct device_attribute *attr, \ + const char *buf, size_t count) \ { \ struct w83627ehf_data *data = dev_get_drvdata(dev); \ - struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); \ + struct sensor_device_attribute *sensor_attr = \ + to_sensor_dev_attr(attr); \ int nr = sensor_attr->index; \ - u32 val = simple_strtoul(buf, NULL, 10); \ - \ + unsigned long val; \ + int err; \ + err = strict_strtoul(buf, 10, &val); \ + if (err < 0) \ + return err; \ mutex_lock(&data->update_lock); \ data->in_##reg[nr] = in_to_reg(val, nr); \ w83627ehf_write_value(data, W83627EHF_REG_IN_##REG(nr), \ @@ -654,7 +924,8 @@ store_in_##reg (struct device *dev, struct device_attribute *attr, \ store_in_reg(MIN, min) store_in_reg(MAX, max) -static ssize_t show_alarm(struct device *dev, struct device_attribute *attr, char *buf) +static ssize_t show_alarm(struct device *dev, struct device_attribute *attr, + char *buf) { struct w83627ehf_data *data = w83627ehf_update_device(dev); struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); @@ -689,45 +960,50 @@ static struct sensor_device_attribute sda_in_alarm[] = { }; static struct sensor_device_attribute sda_in_min[] = { - SENSOR_ATTR(in0_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 0), - SENSOR_ATTR(in1_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 1), - SENSOR_ATTR(in2_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 2), - SENSOR_ATTR(in3_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 3), - SENSOR_ATTR(in4_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 4), - SENSOR_ATTR(in5_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 5), - SENSOR_ATTR(in6_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 6), - SENSOR_ATTR(in7_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 7), - SENSOR_ATTR(in8_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 8), - SENSOR_ATTR(in9_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 9), + SENSOR_ATTR(in0_min, S_IWUSR | S_IRUGO, show_in_min, 
store_in_min, 0), + SENSOR_ATTR(in1_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 1), + SENSOR_ATTR(in2_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 2), + SENSOR_ATTR(in3_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 3), + SENSOR_ATTR(in4_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 4), + SENSOR_ATTR(in5_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 5), + SENSOR_ATTR(in6_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 6), + SENSOR_ATTR(in7_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 7), + SENSOR_ATTR(in8_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 8), + SENSOR_ATTR(in9_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 9), }; static struct sensor_device_attribute sda_in_max[] = { - SENSOR_ATTR(in0_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 0), - SENSOR_ATTR(in1_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 1), - SENSOR_ATTR(in2_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 2), - SENSOR_ATTR(in3_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 3), - SENSOR_ATTR(in4_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 4), - SENSOR_ATTR(in5_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 5), - SENSOR_ATTR(in6_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 6), - SENSOR_ATTR(in7_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 7), - SENSOR_ATTR(in8_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 8), - SENSOR_ATTR(in9_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 9), + SENSOR_ATTR(in0_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 0), + SENSOR_ATTR(in1_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 1), + SENSOR_ATTR(in2_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 2), + SENSOR_ATTR(in3_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 3), + SENSOR_ATTR(in4_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 4), + SENSOR_ATTR(in5_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 5), + SENSOR_ATTR(in6_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 6), + SENSOR_ATTR(in7_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 7), + SENSOR_ATTR(in8_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 8), + SENSOR_ATTR(in9_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 9), }; -#define show_fan_reg(reg) \ -static ssize_t \ -show_##reg(struct device *dev, struct device_attribute *attr, \ - char *buf) \ -{ \ - struct w83627ehf_data *data = w83627ehf_update_device(dev); \ - struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); \ - int nr = sensor_attr->index; \ - return sprintf(buf, "%d\n", \ - fan_from_reg(data->reg[nr], \ - div_from_reg(data->fan_div[nr]))); \ +static ssize_t +show_fan(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct w83627ehf_data *data = w83627ehf_update_device(dev); + struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); + int nr = sensor_attr->index; + return sprintf(buf, "%d\n", data->rpm[nr]); +} + +static ssize_t +show_fan_min(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct w83627ehf_data *data = w83627ehf_update_device(dev); + struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); + int nr = sensor_attr->index; + return sprintf(buf, "%d\n", + data->fan_from_reg_min(data->fan_min[nr], + data->fan_div[nr])); } -show_fan_reg(fan); -show_fan_reg(fan_min); static ssize_t show_fan_div(struct device *dev, struct device_attribute *attr, @@ -746,11 +1022,32 @@ store_fan_min(struct device *dev, struct device_attribute *attr, struct w83627ehf_data *data = dev_get_drvdata(dev); struct 
sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; - unsigned int val = simple_strtoul(buf, NULL, 10); + unsigned long val; + int err; unsigned int reg; u8 new_div; + err = strict_strtoul(buf, 10, &val); + if (err < 0) + return err; + mutex_lock(&data->update_lock); + if (!data->has_fan_div) { + /* + * Only NCT6776F for now, so we know that this is a 13 bit + * register + */ + if (!val) { + val = 0xff1f; + } else { + if (val > 1350000U) + val = 135000U; + val = 1350000U / val; + val = (val & 0x1f) | ((val << 3) & 0xff00); + } + data->fan_min[nr] = val; + goto done; /* Leave fan divider alone */ + } if (!val) { /* No min limit, alarm disabled */ data->fan_min[nr] = 255; @@ -761,15 +1058,17 @@ store_fan_min(struct device *dev, struct device_attribute *attr, even with the highest divider (128) */ data->fan_min[nr] = 254; new_div = 7; /* 128 == (1 << 7) */ - dev_warn(dev, "fan%u low limit %u below minimum %u, set to " - "minimum\n", nr + 1, val, fan_from_reg(254, 128)); + dev_warn(dev, "fan%u low limit %lu below minimum %u, set to " + "minimum\n", nr + 1, val, + data->fan_from_reg_min(254, 7)); } else if (!reg) { /* Speed above this value cannot possibly be represented, even with the lowest divider (1) */ data->fan_min[nr] = 1; new_div = 0; /* 1 == (1 << 0) */ - dev_warn(dev, "fan%u low limit %u above maximum %u, set to " - "maximum\n", nr + 1, val, fan_from_reg(1, 1)); + dev_warn(dev, "fan%u low limit %lu above maximum %u, set to " + "maximum\n", nr + 1, val, + data->fan_from_reg_min(1, 0)); } else { /* Automatically pick the best divider, i.e. the one such that the min limit will correspond to a register value @@ -785,25 +1084,16 @@ store_fan_min(struct device *dev, struct device_attribute *attr, /* Write both the fan clock divider (if it changed) and the new fan min (unconditionally) */ if (new_div != data->fan_div[nr]) { - /* Preserve the fan speed reading */ - if (data->fan[nr] != 0xff) { - if (new_div > data->fan_div[nr]) - data->fan[nr] >>= new_div - data->fan_div[nr]; - else if (data->fan[nr] & 0x80) - data->fan[nr] = 0xff; - else - data->fan[nr] <<= data->fan_div[nr] - new_div; - } - dev_dbg(dev, "fan%u clock divider changed from %u to %u\n", nr + 1, div_from_reg(data->fan_div[nr]), div_from_reg(new_div)); data->fan_div[nr] = new_div; - w83627ehf_write_fan_div(data, nr); + w83627ehf_write_fan_div_common(dev, data, nr); /* Give the chip time to sample a new speed value */ data->last_updated = jiffies; } - w83627ehf_write_value(data, W83627EHF_REG_FAN_MIN[nr], +done: + w83627ehf_write_value(data, data->REG_FAN_MIN[nr], data->fan_min[nr]); mutex_unlock(&data->update_lock); @@ -847,70 +1137,54 @@ static struct sensor_device_attribute sda_fan_div[] = { SENSOR_ATTR(fan5_div, S_IRUGO, show_fan_div, NULL, 4), }; -#define show_temp1_reg(reg) \ -static ssize_t \ -show_##reg(struct device *dev, struct device_attribute *attr, \ - char *buf) \ -{ \ - struct w83627ehf_data *data = w83627ehf_update_device(dev); \ - return sprintf(buf, "%d\n", temp1_from_reg(data->reg)); \ -} -show_temp1_reg(temp1); -show_temp1_reg(temp1_max); -show_temp1_reg(temp1_max_hyst); - -#define store_temp1_reg(REG, reg) \ -static ssize_t \ -store_temp1_##reg(struct device *dev, struct device_attribute *attr, \ - const char *buf, size_t count) \ -{ \ - struct w83627ehf_data *data = dev_get_drvdata(dev); \ - long val = simple_strtol(buf, NULL, 10); \ - \ - mutex_lock(&data->update_lock); \ - data->temp1_##reg = temp1_to_reg(val, -128000, 127000); \ - w83627ehf_write_value(data, 
W83627EHF_REG_TEMP1_##REG, \ - data->temp1_##reg); \ - mutex_unlock(&data->update_lock); \ - return count; \ +static ssize_t +show_temp_label(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct w83627ehf_data *data = w83627ehf_update_device(dev); + struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); + int nr = sensor_attr->index; + return sprintf(buf, "%s\n", data->temp_label[data->temp_src[nr]]); } -store_temp1_reg(OVER, max); -store_temp1_reg(HYST, max_hyst); -#define show_temp_reg(reg) \ +#define show_temp_reg(addr, reg) \ static ssize_t \ show_##reg(struct device *dev, struct device_attribute *attr, \ char *buf) \ { \ struct w83627ehf_data *data = w83627ehf_update_device(dev); \ - struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); \ + struct sensor_device_attribute *sensor_attr = \ + to_sensor_dev_attr(attr); \ int nr = sensor_attr->index; \ return sprintf(buf, "%d\n", \ - LM75_TEMP_FROM_REG(data->reg[nr])); \ + temp_from_reg(data->addr[nr], data->reg[nr])); \ } -show_temp_reg(temp); -show_temp_reg(temp_max); -show_temp_reg(temp_max_hyst); +show_temp_reg(reg_temp, temp); +show_temp_reg(reg_temp_over, temp_max); +show_temp_reg(reg_temp_hyst, temp_max_hyst); -#define store_temp_reg(REG, reg) \ +#define store_temp_reg(addr, reg) \ static ssize_t \ store_##reg(struct device *dev, struct device_attribute *attr, \ const char *buf, size_t count) \ { \ struct w83627ehf_data *data = dev_get_drvdata(dev); \ - struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); \ + struct sensor_device_attribute *sensor_attr = \ + to_sensor_dev_attr(attr); \ int nr = sensor_attr->index; \ - long val = simple_strtol(buf, NULL, 10); \ - \ + int err; \ + long val; \ + err = strict_strtol(buf, 10, &val); \ + if (err < 0) \ + return err; \ mutex_lock(&data->update_lock); \ - data->reg[nr] = LM75_TEMP_TO_REG(val); \ - w83627ehf_write_value(data, W83627EHF_REG_TEMP_##REG[nr], \ + data->reg[nr] = temp_to_reg(data->addr[nr], val); \ + w83627ehf_write_value(data, data->addr[nr], \ data->reg[nr]); \ mutex_unlock(&data->update_lock); \ return count; \ } -store_temp_reg(OVER, temp_max); -store_temp_reg(HYST, temp_max_hyst); +store_temp_reg(reg_temp_over, temp_max); +store_temp_reg(reg_temp_hyst, temp_max_hyst); static ssize_t show_temp_type(struct device *dev, struct device_attribute *attr, char *buf) @@ -922,27 +1196,69 @@ show_temp_type(struct device *dev, struct device_attribute *attr, char *buf) } static struct sensor_device_attribute sda_temp_input[] = { - SENSOR_ATTR(temp1_input, S_IRUGO, show_temp1, NULL, 0), - SENSOR_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 0), - SENSOR_ATTR(temp3_input, S_IRUGO, show_temp, NULL, 1), + SENSOR_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0), + SENSOR_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 1), + SENSOR_ATTR(temp3_input, S_IRUGO, show_temp, NULL, 2), + SENSOR_ATTR(temp4_input, S_IRUGO, show_temp, NULL, 3), + SENSOR_ATTR(temp5_input, S_IRUGO, show_temp, NULL, 4), + SENSOR_ATTR(temp6_input, S_IRUGO, show_temp, NULL, 5), + SENSOR_ATTR(temp7_input, S_IRUGO, show_temp, NULL, 6), + SENSOR_ATTR(temp8_input, S_IRUGO, show_temp, NULL, 7), + SENSOR_ATTR(temp9_input, S_IRUGO, show_temp, NULL, 8), +}; + +static struct sensor_device_attribute sda_temp_label[] = { + SENSOR_ATTR(temp1_label, S_IRUGO, show_temp_label, NULL, 0), + SENSOR_ATTR(temp2_label, S_IRUGO, show_temp_label, NULL, 1), + SENSOR_ATTR(temp3_label, S_IRUGO, show_temp_label, NULL, 2), + SENSOR_ATTR(temp4_label, S_IRUGO, show_temp_label, NULL, 3), + 
SENSOR_ATTR(temp5_label, S_IRUGO, show_temp_label, NULL, 4), + SENSOR_ATTR(temp6_label, S_IRUGO, show_temp_label, NULL, 5), + SENSOR_ATTR(temp7_label, S_IRUGO, show_temp_label, NULL, 6), + SENSOR_ATTR(temp8_label, S_IRUGO, show_temp_label, NULL, 7), + SENSOR_ATTR(temp9_label, S_IRUGO, show_temp_label, NULL, 8), }; static struct sensor_device_attribute sda_temp_max[] = { - SENSOR_ATTR(temp1_max, S_IRUGO | S_IWUSR, show_temp1_max, - store_temp1_max, 0), - SENSOR_ATTR(temp2_max, S_IRUGO | S_IWUSR, show_temp_max, + SENSOR_ATTR(temp1_max, S_IRUGO | S_IWUSR, show_temp_max, store_temp_max, 0), - SENSOR_ATTR(temp3_max, S_IRUGO | S_IWUSR, show_temp_max, + SENSOR_ATTR(temp2_max, S_IRUGO | S_IWUSR, show_temp_max, store_temp_max, 1), + SENSOR_ATTR(temp3_max, S_IRUGO | S_IWUSR, show_temp_max, + store_temp_max, 2), + SENSOR_ATTR(temp4_max, S_IRUGO | S_IWUSR, show_temp_max, + store_temp_max, 3), + SENSOR_ATTR(temp5_max, S_IRUGO | S_IWUSR, show_temp_max, + store_temp_max, 4), + SENSOR_ATTR(temp6_max, S_IRUGO | S_IWUSR, show_temp_max, + store_temp_max, 5), + SENSOR_ATTR(temp7_max, S_IRUGO | S_IWUSR, show_temp_max, + store_temp_max, 6), + SENSOR_ATTR(temp8_max, S_IRUGO | S_IWUSR, show_temp_max, + store_temp_max, 7), + SENSOR_ATTR(temp9_max, S_IRUGO | S_IWUSR, show_temp_max, + store_temp_max, 8), }; static struct sensor_device_attribute sda_temp_max_hyst[] = { - SENSOR_ATTR(temp1_max_hyst, S_IRUGO | S_IWUSR, show_temp1_max_hyst, - store_temp1_max_hyst, 0), - SENSOR_ATTR(temp2_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst, + SENSOR_ATTR(temp1_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst, store_temp_max_hyst, 0), - SENSOR_ATTR(temp3_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst, + SENSOR_ATTR(temp2_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst, store_temp_max_hyst, 1), + SENSOR_ATTR(temp3_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst, + store_temp_max_hyst, 2), + SENSOR_ATTR(temp4_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst, + store_temp_max_hyst, 3), + SENSOR_ATTR(temp5_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst, + store_temp_max_hyst, 4), + SENSOR_ATTR(temp6_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst, + store_temp_max_hyst, 5), + SENSOR_ATTR(temp7_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst, + store_temp_max_hyst, 6), + SENSOR_ATTR(temp8_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst, + store_temp_max_hyst, 7), + SENSOR_ATTR(temp9_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst, + store_temp_max_hyst, 8), }; static struct sensor_device_attribute sda_temp_alarm[] = { @@ -958,11 +1274,12 @@ static struct sensor_device_attribute sda_temp_type[] = { }; #define show_pwm_reg(reg) \ -static ssize_t show_##reg (struct device *dev, struct device_attribute *attr, \ - char *buf) \ +static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \ + char *buf) \ { \ struct w83627ehf_data *data = w83627ehf_update_device(dev); \ - struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); \ + struct sensor_device_attribute *sensor_attr = \ + to_sensor_dev_attr(attr); \ int nr = sensor_attr->index; \ return sprintf(buf, "%d\n", data->reg[nr]); \ } @@ -978,9 +1295,14 @@ store_pwm_mode(struct device *dev, struct device_attribute *attr, struct w83627ehf_data *data = dev_get_drvdata(dev); struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; - u32 val = simple_strtoul(buf, NULL, 10); + unsigned long val; + int err; u16 reg; + err = strict_strtoul(buf, 10, &val); + if (err < 0) + return err; + if (val > 1) return -EINVAL; 
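/* pwm_mode semantics (see pwm_mode[] in struct w83627ehf_data): 0 selects DC output, 1 selects PWM output */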
mutex_lock(&data->update_lock); @@ -1001,11 +1323,18 @@ store_pwm(struct device *dev, struct device_attribute *attr, struct w83627ehf_data *data = dev_get_drvdata(dev); struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; - u32 val = SENSORS_LIMIT(simple_strtoul(buf, NULL, 10), 0, 255); + unsigned long val; + int err; + + err = strict_strtoul(buf, 10, &val); + if (err < 0) + return err; + + val = SENSORS_LIMIT(val, 0, 255); mutex_lock(&data->update_lock); data->pwm[nr] = val; - w83627ehf_write_value(data, W83627EHF_REG_PWM[nr], val); + w83627ehf_write_value(data, data->REG_PWM[nr], val); mutex_unlock(&data->update_lock); return count; } @@ -1015,19 +1344,38 @@ store_pwm_enable(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct w83627ehf_data *data = dev_get_drvdata(dev); + struct w83627ehf_sio_data *sio_data = dev->platform_data; struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; - u32 val = simple_strtoul(buf, NULL, 10); + unsigned long val; + int err; u16 reg; - if (!val || (val > 4)) + err = strict_strtoul(buf, 10, &val); + if (err < 0) + return err; + + if (!val || (val > 4 && val != data->pwm_enable_orig[nr])) return -EINVAL; + /* SmartFan III mode is not supported on NCT6776F */ + if (sio_data->kind == nct6776 && val == 4) + return -EINVAL; + mutex_lock(&data->update_lock); - reg = w83627ehf_read_value(data, W83627EHF_REG_PWM_ENABLE[nr]); data->pwm_enable[nr] = val; - reg &= ~(0x03 << W83627EHF_PWM_ENABLE_SHIFT[nr]); - reg |= (val - 1) << W83627EHF_PWM_ENABLE_SHIFT[nr]; - w83627ehf_write_value(data, W83627EHF_REG_PWM_ENABLE[nr], reg); + if (sio_data->kind == nct6775 || sio_data->kind == nct6776) { + reg = w83627ehf_read_value(data, + NCT6775_REG_FAN_MODE[nr]); + reg &= 0x0f; + reg |= (val - 1) << 4; + w83627ehf_write_value(data, + NCT6775_REG_FAN_MODE[nr], reg); + } else { + reg = w83627ehf_read_value(data, W83627EHF_REG_PWM_ENABLE[nr]); + reg &= ~(0x03 << W83627EHF_PWM_ENABLE_SHIFT[nr]); + reg |= (val - 1) << W83627EHF_PWM_ENABLE_SHIFT[nr]; + w83627ehf_write_value(data, W83627EHF_REG_PWM_ENABLE[nr], reg); + } mutex_unlock(&data->update_lock); return count; } @@ -1038,9 +1386,10 @@ static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \ char *buf) \ { \ struct w83627ehf_data *data = w83627ehf_update_device(dev); \ - struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); \ + struct sensor_device_attribute *sensor_attr = \ + to_sensor_dev_attr(attr); \ int nr = sensor_attr->index; \ - return sprintf(buf, "%d\n", temp1_from_reg(data->reg[nr])); \ + return sprintf(buf, "%d\n", data->reg[nr] * 1000); \ } show_tol_temp(tolerance) @@ -1053,11 +1402,18 @@ store_target_temp(struct device *dev, struct device_attribute *attr, struct w83627ehf_data *data = dev_get_drvdata(dev); struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; - u8 val = temp1_to_reg(simple_strtoul(buf, NULL, 10), 0, 127000); + long val; + int err; + + err = strict_strtol(buf, 10, &val); + if (err < 0) + return err; + + val = SENSORS_LIMIT(DIV_ROUND_CLOSEST(val, 1000), 0, 127); mutex_lock(&data->update_lock); data->target_temp[nr] = val; - w83627ehf_write_value(data, W83627EHF_REG_TARGET[nr], val); + w83627ehf_write_value(data, data->REG_TARGET[nr], val); mutex_unlock(&data->update_lock); return count; } @@ -1067,20 +1423,37 @@ store_tolerance(struct device *dev, struct device_attribute *attr, const char 
*buf, size_t count) { struct w83627ehf_data *data = dev_get_drvdata(dev); + struct w83627ehf_sio_data *sio_data = dev->platform_data; struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; u16 reg; + long val; + int err; + + err = strict_strtol(buf, 10, &val); + if (err < 0) + return err; + /* Limit the temp to 0C - 15C */ - u8 val = temp1_to_reg(simple_strtoul(buf, NULL, 10), 0, 15000); + val = SENSORS_LIMIT(DIV_ROUND_CLOSEST(val, 1000), 0, 15); mutex_lock(&data->update_lock); - reg = w83627ehf_read_value(data, W83627EHF_REG_TOLERANCE[nr]); - data->tolerance[nr] = val; - if (nr == 1) - reg = (reg & 0x0f) | (val << 4); - else + if (sio_data->kind == nct6775 || sio_data->kind == nct6776) { + /* Limit tolerance further for NCT6776F */ + if (sio_data->kind == nct6776 && val > 7) + val = 7; + reg = w83627ehf_read_value(data, NCT6775_REG_FAN_MODE[nr]); reg = (reg & 0xf0) | val; - w83627ehf_write_value(data, W83627EHF_REG_TOLERANCE[nr], reg); + w83627ehf_write_value(data, NCT6775_REG_FAN_MODE[nr], reg); + } else { + reg = w83627ehf_read_value(data, W83627EHF_REG_TOLERANCE[nr]); + if (nr == 1) + reg = (reg & 0x0f) | (val << 4); + else + reg = (reg & 0xf0) | val; + w83627ehf_write_value(data, W83627EHF_REG_TOLERANCE[nr], reg); + } + data->tolerance[nr] = val; mutex_unlock(&data->update_lock); return count; } @@ -1143,18 +1516,25 @@ static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \ char *buf) \ { \ struct w83627ehf_data *data = w83627ehf_update_device(dev); \ - struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); \ + struct sensor_device_attribute *sensor_attr = \ + to_sensor_dev_attr(attr); \ int nr = sensor_attr->index; \ return sprintf(buf, "%d\n", data->reg[nr]); \ -}\ +} \ static ssize_t \ store_##reg(struct device *dev, struct device_attribute *attr, \ const char *buf, size_t count) \ -{\ +{ \ struct w83627ehf_data *data = dev_get_drvdata(dev); \ - struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); \ + struct sensor_device_attribute *sensor_attr = \ + to_sensor_dev_attr(attr); \ int nr = sensor_attr->index; \ - u32 val = SENSORS_LIMIT(simple_strtoul(buf, NULL, 10), 1, 255); \ + unsigned long val; \ + int err; \ + err = strict_strtoul(buf, 10, &val); \ + if (err < 0) \ + return err; \ + val = SENSORS_LIMIT(val, 1, 255); \ mutex_lock(&data->update_lock); \ data->reg[nr] = val; \ w83627ehf_write_value(data, data->REG_##REG[nr], val); \ @@ -1172,10 +1552,12 @@ static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \ char *buf) \ { \ struct w83627ehf_data *data = w83627ehf_update_device(dev); \ - struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); \ + struct sensor_device_attribute *sensor_attr = \ + to_sensor_dev_attr(attr); \ int nr = sensor_attr->index; \ return sprintf(buf, "%d\n", \ - step_time_from_reg(data->reg[nr], data->pwm_mode[nr])); \ + step_time_from_reg(data->reg[nr], \ + data->pwm_mode[nr])); \ } \ \ static ssize_t \ @@ -1183,10 +1565,15 @@ store_##reg(struct device *dev, struct device_attribute *attr, \ const char *buf, size_t count) \ { \ struct w83627ehf_data *data = dev_get_drvdata(dev); \ - struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); \ + struct sensor_device_attribute *sensor_attr = \ + to_sensor_dev_attr(attr); \ int nr = sensor_attr->index; \ - u8 val = step_time_to_reg(simple_strtoul(buf, NULL, 10), \ - data->pwm_mode[nr]); \ + unsigned long val; \ + int err; \ + err = strict_strtoul(buf, 10, 
&val); \ + if (err < 0) \ + return err; \ + val = step_time_to_reg(val, data->pwm_mode[nr]); \ mutex_lock(&data->update_lock); \ data->reg[nr] = val; \ w83627ehf_write_value(data, W83627EHF_REG_##REG[nr], val); \ @@ -1283,7 +1670,8 @@ static void w83627ehf_device_remove_files(struct device *dev) for (i = 0; i < ARRAY_SIZE(sda_sf3_max_step_arrays); i++) { struct sensor_device_attribute *attr = &sda_sf3_max_step_arrays[i]; - if (data->REG_FAN_STEP_OUTPUT[attr->index] != 0xff) + if (data->REG_FAN_STEP_OUTPUT && + data->REG_FAN_STEP_OUTPUT[attr->index] != 0xff) device_remove_file(dev, &attr->dev_attr); } for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays_fan4); i++) @@ -1309,12 +1697,15 @@ static void w83627ehf_device_remove_files(struct device *dev) device_remove_file(dev, &sda_target_temp[i].dev_attr); device_remove_file(dev, &sda_tolerance[i].dev_attr); } - for (i = 0; i < 3; i++) { - if ((i == 2) && data->temp3_disable) + for (i = 0; i < NUM_REG_TEMP; i++) { + if (!(data->have_temp & (1 << i))) continue; device_remove_file(dev, &sda_temp_input[i].dev_attr); + device_remove_file(dev, &sda_temp_label[i].dev_attr); device_remove_file(dev, &sda_temp_max[i].dev_attr); device_remove_file(dev, &sda_temp_max_hyst[i].dev_attr); + if (i > 2) + continue; device_remove_file(dev, &sda_temp_alarm[i].dev_attr); device_remove_file(dev, &sda_temp_type[i].dev_attr); } @@ -1335,15 +1726,17 @@ static inline void __devinit w83627ehf_init_device(struct w83627ehf_data *data) w83627ehf_write_value(data, W83627EHF_REG_CONFIG, tmp | 0x01); - /* Enable temp2 and temp3 if needed */ - for (i = 0; i < 2; i++) { - tmp = w83627ehf_read_value(data, - W83627EHF_REG_TEMP_CONFIG[i]); - if ((i == 1) && data->temp3_disable) + /* Enable temperature sensors if needed */ + for (i = 0; i < NUM_REG_TEMP; i++) { + if (!(data->have_temp & (1 << i))) + continue; + if (!data->reg_temp_config[i]) continue; + tmp = w83627ehf_read_value(data, + data->reg_temp_config[i]); if (tmp & 0x01) w83627ehf_write_value(data, - W83627EHF_REG_TEMP_CONFIG[i], + data->reg_temp_config[i], tmp & 0xfe); } @@ -1362,13 +1755,39 @@ static inline void __devinit w83627ehf_init_device(struct w83627ehf_data *data) } } +static void w82627ehf_swap_tempreg(struct w83627ehf_data *data, + int r1, int r2) +{ + u16 tmp; + + tmp = data->temp_src[r1]; + data->temp_src[r1] = data->temp_src[r2]; + data->temp_src[r2] = tmp; + + tmp = data->reg_temp[r1]; + data->reg_temp[r1] = data->reg_temp[r2]; + data->reg_temp[r2] = tmp; + + tmp = data->reg_temp_over[r1]; + data->reg_temp_over[r1] = data->reg_temp_over[r2]; + data->reg_temp_over[r2] = tmp; + + tmp = data->reg_temp_hyst[r1]; + data->reg_temp_hyst[r1] = data->reg_temp_hyst[r2]; + data->reg_temp_hyst[r2] = tmp; + + tmp = data->reg_temp_config[r1]; + data->reg_temp_config[r1] = data->reg_temp_config[r2]; + data->reg_temp_config[r2] = tmp; +} + static int __devinit w83627ehf_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct w83627ehf_sio_data *sio_data = dev->platform_data; struct w83627ehf_data *data; struct resource *res; - u8 fan4pin, fan5pin, en_vrm10; + u8 fan3pin, fan4pin, fan4min, fan5pin, en_vrm10; int i, err = 0; res = platform_get_resource(pdev, IORESOURCE_IO, 0); @@ -1380,7 +1799,8 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev) goto exit; } - if (!(data = kzalloc(sizeof(struct w83627ehf_data), GFP_KERNEL))) { + data = kzalloc(sizeof(struct w83627ehf_data), GFP_KERNEL); + if (!data) { err = -ENOMEM; goto exit_release; } @@ -1393,25 +1813,202 @@ static int __devinit 
w83627ehf_probe(struct platform_device *pdev) /* 627EHG and 627EHF have 10 voltage inputs; 627DHG and 667HG have 9 */ data->in_num = (sio_data->kind == w83627ehf) ? 10 : 9; - /* 667HG has 3 pwms */ + /* 667HG, NCT6775F, and NCT6776F have 3 pwms */ data->pwm_num = (sio_data->kind == w83667hg - || sio_data->kind == w83667hg_b) ? 3 : 4; + || sio_data->kind == w83667hg_b + || sio_data->kind == nct6775 + || sio_data->kind == nct6776) ? 3 : 4; + data->have_temp = 0x07; /* Check temp3 configuration bit for 667HG */ - if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b) { - data->temp3_disable = w83627ehf_read_value(data, - W83627EHF_REG_TEMP_CONFIG[1]) & 0x01; - data->in6_skip = !data->temp3_disable; + if (sio_data->kind == w83667hg) { + u8 reg; + + reg = w83627ehf_read_value(data, W83627EHF_REG_TEMP_CONFIG[2]); + if (reg & 0x01) + data->have_temp &= ~(1 << 2); + else + data->in6_skip = 1; /* either temp3 or in6 */ + } + + /* Deal with temperature register setup first. */ + if (sio_data->kind == nct6775 || sio_data->kind == nct6776) { + int mask = 0; + + /* + * Display temperature sensor output only if it monitors + * a source other than one already reported. Always display + * first three temperature registers, though. + */ + for (i = 0; i < NUM_REG_TEMP; i++) { + u8 src; + + data->reg_temp[i] = NCT6775_REG_TEMP[i]; + data->reg_temp_over[i] = NCT6775_REG_TEMP_OVER[i]; + data->reg_temp_hyst[i] = NCT6775_REG_TEMP_HYST[i]; + data->reg_temp_config[i] = NCT6775_REG_TEMP_CONFIG[i]; + + src = w83627ehf_read_value(data, + NCT6775_REG_TEMP_SOURCE[i]); + src &= 0x1f; + if (src && !(mask & (1 << src))) { + data->have_temp |= 1 << i; + mask |= 1 << src; + } + + data->temp_src[i] = src; + + /* + * Now do some register swapping if index 0..2 don't + * point to SYSTIN(1), CPUIN(2), and AUXIN(3). + * Idea is to have the first three attributes + * report SYSTIN, CPUIN, and AUXIN if possible + * without overriding the basic system configuration. + */ + if (i > 0 && data->temp_src[0] != 1 + && data->temp_src[i] == 1) + w82627ehf_swap_tempreg(data, 0, i); + if (i > 1 && data->temp_src[1] != 2 + && data->temp_src[i] == 2) + w82627ehf_swap_tempreg(data, 1, i); + if (i > 2 && data->temp_src[2] != 3 + && data->temp_src[i] == 3) + w82627ehf_swap_tempreg(data, 2, i); + } + if (sio_data->kind == nct6776) { + /* + * On NCT6776, AUXTIN and VIN3 pins are shared. + * Only way to detect it is to check if AUXTIN is used + * as a temperature source, and if that source is + * enabled. + * + * If that is the case, disable in6, which reports VIN3. + * Otherwise disable temp3. + */ + if (data->temp_src[2] == 3) { + u8 reg; + + if (data->reg_temp_config[2]) + reg = w83627ehf_read_value(data, + data->reg_temp_config[2]); + else + reg = 0; /* Assume AUXTIN is used */ + + if (reg & 0x01) + data->have_temp &= ~(1 << 2); + else + data->in6_skip = 1; + } + data->temp_label = nct6776_temp_label; + } else { + data->temp_label = nct6775_temp_label; + } + } else if (sio_data->kind == w83667hg_b) { + u8 reg; + + /* + * Temperature sources are selected with bank 0, registers 0x49 + * and 0x4a. 
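+ * Bits 7:5 of register 0x4a select the temp1 source, while bits 2:0 + * and 6:4 of register 0x49 select the temp2 and temp3 sources; the + * reads below decode them in that order.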
+ */ + for (i = 0; i < ARRAY_SIZE(W83627EHF_REG_TEMP); i++) { + data->reg_temp[i] = W83627EHF_REG_TEMP[i]; + data->reg_temp_over[i] = W83627EHF_REG_TEMP_OVER[i]; + data->reg_temp_hyst[i] = W83627EHF_REG_TEMP_HYST[i]; + data->reg_temp_config[i] = W83627EHF_REG_TEMP_CONFIG[i]; + } + reg = w83627ehf_read_value(data, 0x4a); + data->temp_src[0] = reg >> 5; + reg = w83627ehf_read_value(data, 0x49); + data->temp_src[1] = reg & 0x07; + data->temp_src[2] = (reg >> 4) & 0x07; + + /* + * W83667HG-B has another temperature register at 0x7e. + * The temperature source is selected with register 0x7d. + * Support it if the source differs from already reported + * sources. + */ + reg = w83627ehf_read_value(data, 0x7d); + reg &= 0x07; + if (reg != data->temp_src[0] && reg != data->temp_src[1] + && reg != data->temp_src[2]) { + data->temp_src[3] = reg; + data->have_temp |= 1 << 3; + } + + /* + * Chip supports either AUXTIN or VIN3. Try to find out which + * one. + */ + reg = w83627ehf_read_value(data, W83627EHF_REG_TEMP_CONFIG[2]); + if (data->temp_src[2] == 2 && (reg & 0x01)) + data->have_temp &= ~(1 << 2); + + if ((data->temp_src[2] == 2 && (data->have_temp & (1 << 2))) + || (data->temp_src[3] == 2 && (data->have_temp & (1 << 3)))) + data->in6_skip = 1; + + data->temp_label = w83667hg_b_temp_label; + } else { + /* Temperature sources are fixed */ + for (i = 0; i < 3; i++) { + data->reg_temp[i] = W83627EHF_REG_TEMP[i]; + data->reg_temp_over[i] = W83627EHF_REG_TEMP_OVER[i]; + data->reg_temp_hyst[i] = W83627EHF_REG_TEMP_HYST[i]; + data->reg_temp_config[i] = W83627EHF_REG_TEMP_CONFIG[i]; + } } - data->REG_FAN_START_OUTPUT = W83627EHF_REG_FAN_START_OUTPUT; - data->REG_FAN_STOP_OUTPUT = W83627EHF_REG_FAN_STOP_OUTPUT; - if (sio_data->kind == w83667hg_b) { + if (sio_data->kind == nct6775) { + data->has_fan_div = true; + data->fan_from_reg = fan_from_reg16; + data->fan_from_reg_min = fan_from_reg8; + data->REG_PWM = NCT6775_REG_PWM; + data->REG_TARGET = NCT6775_REG_TARGET; + data->REG_FAN = NCT6775_REG_FAN; + data->REG_FAN_MIN = W83627EHF_REG_FAN_MIN; + data->REG_FAN_START_OUTPUT = NCT6775_REG_FAN_START_OUTPUT; + data->REG_FAN_STOP_OUTPUT = NCT6775_REG_FAN_STOP_OUTPUT; + data->REG_FAN_STOP_TIME = NCT6775_REG_FAN_STOP_TIME; + data->REG_FAN_MAX_OUTPUT = NCT6775_REG_FAN_MAX_OUTPUT; + data->REG_FAN_STEP_OUTPUT = NCT6775_REG_FAN_STEP_OUTPUT; + } else if (sio_data->kind == nct6776) { + data->has_fan_div = false; + data->fan_from_reg = fan_from_reg13; + data->fan_from_reg_min = fan_from_reg13; + data->REG_PWM = NCT6775_REG_PWM; + data->REG_TARGET = NCT6775_REG_TARGET; + data->REG_FAN = NCT6775_REG_FAN; + data->REG_FAN_MIN = NCT6776_REG_FAN_MIN; + data->REG_FAN_START_OUTPUT = NCT6775_REG_FAN_START_OUTPUT; + data->REG_FAN_STOP_OUTPUT = NCT6775_REG_FAN_STOP_OUTPUT; + data->REG_FAN_STOP_TIME = NCT6775_REG_FAN_STOP_TIME; + } else if (sio_data->kind == w83667hg_b) { + data->has_fan_div = true; + data->fan_from_reg = fan_from_reg8; + data->fan_from_reg_min = fan_from_reg8; + data->REG_PWM = W83627EHF_REG_PWM; + data->REG_TARGET = W83627EHF_REG_TARGET; + data->REG_FAN = W83627EHF_REG_FAN; + data->REG_FAN_MIN = W83627EHF_REG_FAN_MIN; + data->REG_FAN_START_OUTPUT = W83627EHF_REG_FAN_START_OUTPUT; + data->REG_FAN_STOP_OUTPUT = W83627EHF_REG_FAN_STOP_OUTPUT; + data->REG_FAN_STOP_TIME = W83627EHF_REG_FAN_STOP_TIME; data->REG_FAN_MAX_OUTPUT = W83627EHF_REG_FAN_MAX_OUTPUT_W83667_B; data->REG_FAN_STEP_OUTPUT = W83627EHF_REG_FAN_STEP_OUTPUT_W83667_B; } else { + data->has_fan_div = true; + data->fan_from_reg = fan_from_reg8; + 
data->fan_from_reg_min = fan_from_reg8; + data->REG_PWM = W83627EHF_REG_PWM; + data->REG_TARGET = W83627EHF_REG_TARGET; + data->REG_FAN = W83627EHF_REG_FAN; + data->REG_FAN_MIN = W83627EHF_REG_FAN_MIN; + data->REG_FAN_START_OUTPUT = W83627EHF_REG_FAN_START_OUTPUT; + data->REG_FAN_STOP_OUTPUT = W83627EHF_REG_FAN_STOP_OUTPUT; + data->REG_FAN_STOP_TIME = W83627EHF_REG_FAN_STOP_TIME; data->REG_FAN_MAX_OUTPUT = W83627EHF_REG_FAN_MAX_OUTPUT_COMMON; data->REG_FAN_STEP_OUTPUT = @@ -1424,7 +2021,8 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev) data->vrm = vid_which_vrm(); superio_enter(sio_data->sioreg); /* Read VID value */ - if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b) { + if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b || + sio_data->kind == nct6775 || sio_data->kind == nct6776) { /* W83667HG has different pins for VID input and output, so we can get the VID input values directly at logical device D 0xe3. */ @@ -1475,13 +2073,44 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev) } /* fan4 and fan5 share some pins with the GPIO and serial flash */ - if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b) { - fan5pin = superio_inb(sio_data->sioreg, 0x27) & 0x20; + if (sio_data->kind == nct6775) { + /* On NCT6775, fan4 shares pins with the fdc interface */ + fan3pin = 1; + fan4pin = !(superio_inb(sio_data->sioreg, 0x2A) & 0x80); + fan4min = 0; + fan5pin = 0; + } else if (sio_data->kind == nct6776) { + fan3pin = !(superio_inb(sio_data->sioreg, 0x24) & 0x40); + fan4pin = !!(superio_inb(sio_data->sioreg, 0x1C) & 0x01); + fan5pin = !!(superio_inb(sio_data->sioreg, 0x1C) & 0x02); + fan4min = fan4pin; + } else if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b) { + fan3pin = 1; fan4pin = superio_inb(sio_data->sioreg, 0x27) & 0x40; + fan5pin = superio_inb(sio_data->sioreg, 0x27) & 0x20; + fan4min = fan4pin; } else { - fan5pin = !(superio_inb(sio_data->sioreg, 0x24) & 0x02); + fan3pin = 1; fan4pin = !(superio_inb(sio_data->sioreg, 0x29) & 0x06); + fan5pin = !(superio_inb(sio_data->sioreg, 0x24) & 0x02); + fan4min = fan4pin; } + + if (fan_debounce && + (sio_data->kind == nct6775 || sio_data->kind == nct6776)) { + u8 tmp; + + superio_select(sio_data->sioreg, W83627EHF_LD_HWM); + tmp = superio_inb(sio_data->sioreg, NCT6775_REG_FAN_DEBOUNCE); + if (sio_data->kind == nct6776) + superio_outb(sio_data->sioreg, NCT6775_REG_FAN_DEBOUNCE, + 0x3e | tmp); + else + superio_outb(sio_data->sioreg, NCT6775_REG_FAN_DEBOUNCE, + 0x1e | tmp); + pr_info("Enabled fan debounce for chip %s\n", data->name); + } + superio_exit(sio_data->sioreg); /* It looks like fan4 and fan5 pins can be alternatively used @@ -1490,26 +2119,54 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev) connected fan5 as input unless they are emitting log 1, which is not the default. 
*/ - data->has_fan = 0x07; /* fan1, fan2 and fan3 */ - i = w83627ehf_read_value(data, W83627EHF_REG_FANDIV1); - if ((i & (1 << 2)) && fan4pin) - data->has_fan |= (1 << 3); - if (!(i & (1 << 1)) && fan5pin) - data->has_fan |= (1 << 4); + data->has_fan = data->has_fan_min = 0x03; /* fan1 and fan2 */ + + data->has_fan |= (fan3pin << 2); + data->has_fan_min |= (fan3pin << 2); + + /* + * NCT6775F and NCT6776F don't have the W83627EHF_REG_FANDIV1 register + */ + if (sio_data->kind == nct6775 || sio_data->kind == nct6776) { + data->has_fan |= (fan4pin << 3) | (fan5pin << 4); + data->has_fan_min |= (fan4min << 3) | (fan5pin << 4); + } else { + i = w83627ehf_read_value(data, W83627EHF_REG_FANDIV1); + if ((i & (1 << 2)) && fan4pin) { + data->has_fan |= (1 << 3); + data->has_fan_min |= (1 << 3); + } + if (!(i & (1 << 1)) && fan5pin) { + data->has_fan |= (1 << 4); + data->has_fan_min |= (1 << 4); + } + } /* Read fan clock dividers immediately */ - w83627ehf_update_fan_div(data); + w83627ehf_update_fan_div_common(dev, data); + + /* Read pwm data to save original values */ + w83627ehf_update_pwm_common(dev, data); + for (i = 0; i < data->pwm_num; i++) + data->pwm_enable_orig[i] = data->pwm_enable[i]; /* Register sysfs hooks */ - for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays); i++) - if ((err = device_create_file(dev, - &sda_sf3_arrays[i].dev_attr))) + for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays); i++) { + err = device_create_file(dev, &sda_sf3_arrays[i].dev_attr); + if (err) goto exit_remove; + } for (i = 0; i < ARRAY_SIZE(sda_sf3_max_step_arrays); i++) { struct sensor_device_attribute *attr = &sda_sf3_max_step_arrays[i]; - if (data->REG_FAN_STEP_OUTPUT[attr->index] != 0xff) { + if (data->REG_FAN_STEP_OUTPUT && + data->REG_FAN_STEP_OUTPUT[attr->index] != 0xff) { err = device_create_file(dev, &attr->dev_attr); if (err) goto exit_remove; @@ -1518,8 +2175,9 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev) /* if fan4 is enabled create the sf3 files for it */ if ((data->has_fan & (1 << 3)) && data->pwm_num >= 4) for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays_fan4); i++) { - if ((err = device_create_file(dev, - &sda_sf3_arrays_fan4[i].dev_attr))) + err = device_create_file(dev, + &sda_sf3_arrays_fan4[i].dev_attr); + if (err) goto exit_remove; } @@ -1541,12 +2199,20 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev) if ((err = device_create_file(dev, &sda_fan_input[i].dev_attr)) || (err = device_create_file(dev, - &sda_fan_alarm[i].dev_attr)) - || (err = device_create_file(dev, - &sda_fan_div[i].dev_attr)) - || (err = device_create_file(dev, - &sda_fan_min[i].dev_attr))) + &sda_fan_alarm[i].dev_attr))) goto exit_remove; + if (sio_data->kind != nct6776) { + err = device_create_file(dev, + &sda_fan_div[i].dev_attr); + if (err) + goto exit_remove; + } + if (data->has_fan_min & (1 << i)) { + err = device_create_file(dev, + &sda_fan_min[i].dev_attr); + if (err) + goto exit_remove; + } if (i < data->pwm_num && ((err = device_create_file(dev, &sda_pwm[i].dev_attr)) @@ -1562,16 +2228,33 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev) } } - for (i = 0; i < 3; i++) { - if ((i == 2) && data->temp3_disable) + for (i = 0; i < NUM_REG_TEMP; i++) { + if (!(data->have_temp & (1 << i))) + continue; + err = device_create_file(dev, &sda_temp_input[i].dev_attr); + if (err) + goto exit_remove; + if
(data->temp_label) { + err = device_create_file(dev, + &sda_temp_label[i].dev_attr); + if (err) + goto exit_remove; + } + if (data->reg_temp_over[i]) { + err = device_create_file(dev, + &sda_temp_max[i].dev_attr); + if (err) + goto exit_remove; + } + if (data->reg_temp_hyst[i]) { + err = device_create_file(dev, + &sda_temp_max_hyst[i].dev_attr); + if (err) + goto exit_remove; + } + if (i > 2) continue; if ((err = device_create_file(dev, - &sda_temp_input[i].dev_attr)) - || (err = device_create_file(dev, - &sda_temp_max[i].dev_attr)) - || (err = device_create_file(dev, - &sda_temp_max_hyst[i].dev_attr)) - || (err = device_create_file(dev, &sda_temp_alarm[i].dev_attr)) || (err = device_create_file(dev, &sda_temp_type[i].dev_attr))) @@ -1632,6 +2315,8 @@ static int __init w83627ehf_find(int sioaddr, unsigned short *addr, static const char __initdata sio_name_W83627DHG_P[] = "W83627DHG-P"; static const char __initdata sio_name_W83667HG[] = "W83667HG"; static const char __initdata sio_name_W83667HG_B[] = "W83667HG-B"; + static const char __initdata sio_name_NCT6775[] = "NCT6775F"; + static const char __initdata sio_name_NCT6776[] = "NCT6776F"; u16 val; const char *sio_name; @@ -1668,6 +2353,14 @@ static int __init w83627ehf_find(int sioaddr, unsigned short *addr, sio_data->kind = w83667hg_b; sio_name = sio_name_W83667HG_B; break; + case SIO_NCT6775_ID: + sio_data->kind = nct6775; + sio_name = sio_name_NCT6775; + break; + case SIO_NCT6776_ID: + sio_data->kind = nct6776; + sio_name = sio_name_NCT6776; + break; default: if (val != 0xffff) pr_debug("unsupported chip ID: 0x%04x\n", val); @@ -1689,7 +2382,8 @@ static int __init w83627ehf_find(int sioaddr, unsigned short *addr, /* Activate logical device if needed */ val = superio_inb(sioaddr, SIO_REG_ENABLE); if (!(val & 0x01)) { - pr_warn("Forcibly enabling Super-I/O. Sensor is probably unusable.\n"); + pr_warn("Forcibly enabling Super-I/O. " + "Sensor is probably unusable.\n"); superio_outb(sioaddr, SIO_REG_ENABLE, val | 0x01); } @@ -1726,7 +2420,8 @@ static int __init sensors_w83627ehf_init(void) if (err) goto exit; - if (!(pdev = platform_device_alloc(DRVNAME, address))) { + pdev = platform_device_alloc(DRVNAME, address); + if (!pdev) { err = -ENOMEM; pr_err("Device allocation failed\n"); goto exit_unregister; diff --git a/drivers/hwspinlock/Kconfig b/drivers/hwspinlock/Kconfig new file mode 100644 index 000000000000..eb4af28f8567 --- /dev/null +++ b/drivers/hwspinlock/Kconfig @@ -0,0 +1,22 @@ +# +# Generic HWSPINLOCK framework +# + +config HWSPINLOCK + tristate "Generic Hardware Spinlock framework" + help + Say y here to support the generic hardware spinlock framework. + You only need to enable this if you have hardware spinlock module + on your system (usually only relevant if your system has remote slave + coprocessors). + + If unsure, say N. + +config HWSPINLOCK_OMAP + tristate "OMAP Hardware Spinlock device" + depends on HWSPINLOCK && ARCH_OMAP4 + help + Say y here to support the OMAP Hardware Spinlock device (firstly + introduced in OMAP4). + + If unsure, say N. 
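A minimal usage sketch, assuming only the API that hwspinlock_core.c below exports (hwspin_lock_request, hwspin_lock_timeout, hwspin_unlock, hwspin_lock_free); the 100 msecs timeout, the error handling, and the function name are illustrative only:

#include <linux/hwspinlock.h>

static int hwspinlock_usage_sketch(void)
{
	struct hwspinlock *hwlock;
	int ret;

	/* dynamically assign one of the unused hwspinlock instances */
	hwlock = hwspin_lock_request();
	if (!hwlock)
		return -EBUSY;

	/*
	 * Busy-wait for the lock for up to 100 msecs. On success,
	 * preemption stays disabled until hwspin_unlock(), so the
	 * critical section must be short and must not sleep.
	 */
	ret = hwspin_lock_timeout(hwlock, 100);
	if (!ret) {
		/* ... access the resource shared with the remote core ... */
		hwspin_unlock(hwlock);
	}

	hwspin_lock_free(hwlock);
	return ret;
}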
diff --git a/drivers/hwspinlock/Makefile b/drivers/hwspinlock/Makefile new file mode 100644 index 000000000000..5729a3f7ed3d --- /dev/null +++ b/drivers/hwspinlock/Makefile @@ -0,0 +1,6 @@ +# +# Generic Hardware Spinlock framework +# + +obj-$(CONFIG_HWSPINLOCK) += hwspinlock_core.o +obj-$(CONFIG_HWSPINLOCK_OMAP) += omap_hwspinlock.o diff --git a/drivers/hwspinlock/hwspinlock_core.c b/drivers/hwspinlock/hwspinlock_core.c new file mode 100644 index 000000000000..43a62714b4fb --- /dev/null +++ b/drivers/hwspinlock/hwspinlock_core.c @@ -0,0 +1,548 @@ +/* + * Hardware spinlock framework + * + * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com + * + * Contact: Ohad Ben-Cohen <ohad@wizery.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#define pr_fmt(fmt) "%s: " fmt, __func__ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/spinlock.h> +#include <linux/types.h> +#include <linux/err.h> +#include <linux/jiffies.h> +#include <linux/radix-tree.h> +#include <linux/hwspinlock.h> +#include <linux/pm_runtime.h> + +#include "hwspinlock_internal.h" + +/* radix tree tags */ +#define HWSPINLOCK_UNUSED (0) /* tags an hwspinlock as unused */ + +/* + * A radix tree is used to maintain the available hwspinlock instances. + * The tree associates hwspinlock pointers with their integer key id, + * and provides easy-to-use API which makes the hwspinlock core code simple + * and easy to read. + * + * Radix trees are quick on lookups, and reasonably efficient in terms of + * storage, especially with high density usages such as this framework + * requires (a continuous range of integer keys, beginning with zero, is + * used as the ID's of the hwspinlock instances). + * + * The radix tree API supports tagging items in the tree, which this + * framework uses to mark unused hwspinlock instances (see the + * HWSPINLOCK_UNUSED tag above). As a result, the process of querying the + * tree, looking for an unused hwspinlock instance, is now reduced to a + * single radix tree API call. + */ +static RADIX_TREE(hwspinlock_tree, GFP_KERNEL); + +/* + * Synchronization of access to the tree is achieved using this spinlock, + * as the radix-tree API requires that users provide all synchronisation. + */ +static DEFINE_SPINLOCK(hwspinlock_tree_lock); + +/** + * __hwspin_trylock() - attempt to lock a specific hwspinlock + * @hwlock: an hwspinlock which we want to trylock + * @mode: controls whether local interrupts are disabled or not + * @flags: a pointer where the caller's interrupt state will be saved at (if + * requested) + * + * This function attempts to lock an hwspinlock, and will immediately + * fail if the hwspinlock is already taken. + * + * Upon a successful return from this function, preemption (and possibly + * interrupts) is disabled, so the caller must not sleep, and is advised to + * release the hwspinlock as soon as possible. This is required in order to + * minimize remote cores polling on the hardware interconnect. + * + * The user decides whether local interrupts are disabled or not, and if yes, + * whether he wants their previous state to be saved. 
It is up to the user + * to choose the appropriate @mode of operation, exactly the same way users + * should decide between spin_trylock, spin_trylock_irq and + * spin_trylock_irqsave. + * + * Returns 0 if we successfully locked the hwspinlock or -EBUSY if + * the hwspinlock was already taken. + * This function will never sleep. + */ +int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags) +{ + int ret; + + BUG_ON(!hwlock); + BUG_ON(!flags && mode == HWLOCK_IRQSTATE); + + /* + * This spin_lock{_irq, _irqsave} serves three purposes: + * + * 1. Disable preemption, in order to minimize the period of time + * in which the hwspinlock is taken. This is important in order + * to minimize the possible polling on the hardware interconnect + * by a remote user of this lock. + * 2. Make the hwspinlock SMP-safe (so we can take it from + * additional contexts on the local host). + * 3. Ensure that in_atomic/might_sleep checks catch potential + * problems with hwspinlock usage (e.g. scheduler checks like + * 'scheduling while atomic' etc.) + */ + if (mode == HWLOCK_IRQSTATE) + ret = spin_trylock_irqsave(&hwlock->lock, *flags); + else if (mode == HWLOCK_IRQ) + ret = spin_trylock_irq(&hwlock->lock); + else + ret = spin_trylock(&hwlock->lock); + + /* is lock already taken by another context on the local cpu? */ + if (!ret) + return -EBUSY; + + /* try to take the hwspinlock device */ + ret = hwlock->ops->trylock(hwlock); + + /* if hwlock is already taken, undo spin_trylock_* and exit */ + if (!ret) { + if (mode == HWLOCK_IRQSTATE) + spin_unlock_irqrestore(&hwlock->lock, *flags); + else if (mode == HWLOCK_IRQ) + spin_unlock_irq(&hwlock->lock); + else + spin_unlock(&hwlock->lock); + + return -EBUSY; + } + + /* + * We can be sure the other core's memory operations + * are observable to us only _after_ we successfully take + * the hwspinlock, and we must make sure that subsequent memory + * operations (both reads and writes) will not be reordered before + * we actually took the hwspinlock. + * + * Note: the implicit memory barrier of the spinlock above is too + * early, so we need this additional explicit memory barrier. + */ + mb(); + + return 0; +} +EXPORT_SYMBOL_GPL(__hwspin_trylock); + +/** + * __hwspin_lock_timeout() - lock an hwspinlock with timeout limit + * @hwlock: the hwspinlock to be locked + * @timeout: timeout value in msecs + * @mode: mode which controls whether local interrupts are disabled or not + * @flags: a pointer to where the caller's interrupt state will be saved (if + * requested) + * + * This function locks the given @hwlock. If the @hwlock + * is already taken, the function will busy loop waiting for it to + * be released, but give up after @timeout msecs have elapsed. + * + * Upon a successful return from this function, preemption is disabled + * (and possibly local interrupts, too), so the caller must not sleep, + * and is advised to release the hwspinlock as soon as possible. + * This is required in order to minimize remote cores polling on the + * hardware interconnect. + * + * The user decides whether local interrupts are disabled or not, and if yes, + * whether he wants their previous state to be saved. It is up to the user + * to choose the appropriate @mode of operation, exactly the same way users + * should decide between spin_lock, spin_lock_irq and spin_lock_irqsave. + * + * Returns 0 when the @hwlock was successfully taken, and an appropriate + * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still + * busy after @timeout msecs).
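To make the @mode parameter concrete, here is a sketch of the kind of static inline wrappers <linux/hwspinlock.h> is expected to provide around __hwspin_trylock(); the wrapper names and exact form are assumptions, only the mode constants come from the code above.

static inline int hwspin_trylock_irqsave(struct hwspinlock *hwlock,
					 unsigned long *flags)
{
	/* disable local interrupts and save their previous state in *flags */
	return __hwspin_trylock(hwlock, HWLOCK_IRQSTATE, flags);
}

static inline int hwspin_trylock_irq(struct hwspinlock *hwlock)
{
	/* disable local interrupts without saving their previous state */
	return __hwspin_trylock(hwlock, HWLOCK_IRQ, NULL);
}

static inline int hwspin_trylock(struct hwspinlock *hwlock)
{
	/* leave interrupt state alone; only preemption is disabled */
	return __hwspin_trylock(hwlock, 0, NULL);
}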
The function will never sleep. + */ +int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to, + int mode, unsigned long *flags) +{ + int ret; + unsigned long expire; + + expire = msecs_to_jiffies(to) + jiffies; + + for (;;) { + /* Try to take the hwspinlock */ + ret = __hwspin_trylock(hwlock, mode, flags); + if (ret != -EBUSY) + break; + + /* + * The lock is already taken, let's check if the user wants + * us to try again + */ + if (time_is_before_eq_jiffies(expire)) + return -ETIMEDOUT; + + /* + * Allow platform-specific relax handlers to prevent + * hogging the interconnect (no sleeping, though) + */ + if (hwlock->ops->relax) + hwlock->ops->relax(hwlock); + } + + return ret; +} +EXPORT_SYMBOL_GPL(__hwspin_lock_timeout); + +/** + * __hwspin_unlock() - unlock a specific hwspinlock + * @hwlock: a previously-acquired hwspinlock which we want to unlock + * @mode: controls whether local interrupts need to be restored or not + * @flags: previous caller's interrupt state to restore (if requested) + * + * This function will unlock a specific hwspinlock, enable preemption and + * (possibly) enable interrupts or restore their previous state. + * @hwlock must be already locked before calling this function: it is a bug + * to call unlock on a @hwlock that is already unlocked. + * + * The user decides whether local interrupts should be enabled or not, and + * if yes, whether he wants their previous state to be restored. It is up + * to the user to choose the appropriate @mode of operation, exactly the + * same way users decide between spin_unlock, spin_unlock_irq and + * spin_unlock_irqrestore. + * + * The function will never sleep. + */ +void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags) +{ + BUG_ON(!hwlock); + BUG_ON(!flags && mode == HWLOCK_IRQSTATE); + + /* + * We must make sure that memory operations (both reads and writes), + * done before unlocking the hwspinlock, will not be reordered + * after the lock is released. + * + * That's the purpose of this explicit memory barrier. + * + * Note: the memory barrier induced by the spin_unlock below is too + * late; the other core is going to access memory soon after it takes + * the hwspinlock, and by then we want to be sure our memory + * operations are already observable. + */ + mb(); + + hwlock->ops->unlock(hwlock); + + /* Undo the spin_trylock{_irq, _irqsave} called while locking */ + if (mode == HWLOCK_IRQSTATE) + spin_unlock_irqrestore(&hwlock->lock, *flags); + else if (mode == HWLOCK_IRQ) + spin_unlock_irq(&hwlock->lock); + else + spin_unlock(&hwlock->lock); +} +EXPORT_SYMBOL_GPL(__hwspin_unlock); + +/** + * hwspin_lock_register() - register a new hw spinlock + * @hwlock: hwspinlock to register. + * + * This function should be called from the underlying platform-specific + * implementation, to register a new hwspinlock instance. + * + * Can be called from an atomic context (will not sleep) but not from + * within interrupt context.
+ * + * Returns 0 on success, or an appropriate error code on failure + */ +int hwspin_lock_register(struct hwspinlock *hwlock) +{ + struct hwspinlock *tmp; + int ret; + + if (!hwlock || !hwlock->ops || + !hwlock->ops->trylock || !hwlock->ops->unlock) { + pr_err("invalid parameters\n"); + return -EINVAL; + } + + spin_lock_init(&hwlock->lock); + + spin_lock(&hwspinlock_tree_lock); + + ret = radix_tree_insert(&hwspinlock_tree, hwlock->id, hwlock); + if (ret) + goto out; + + /* mark this hwspinlock as available */ + tmp = radix_tree_tag_set(&hwspinlock_tree, hwlock->id, + HWSPINLOCK_UNUSED); + + /* self-sanity check which should never fail */ + WARN_ON(tmp != hwlock); + +out: + spin_unlock(&hwspinlock_tree_lock); + return ret; +} +EXPORT_SYMBOL_GPL(hwspin_lock_register); + +/** + * hwspin_lock_unregister() - unregister an hw spinlock + * @id: index of the specific hwspinlock to unregister + * + * This function should be called from the underlying platform-specific + * implementation, to unregister an existing (and unused) hwspinlock. + * + * Can be called from an atomic context (will not sleep) but not from + * within interrupt context. + * + * Returns the address of hwspinlock @id on success, or NULL on failure + */ +struct hwspinlock *hwspin_lock_unregister(unsigned int id) +{ + struct hwspinlock *hwlock = NULL; + int ret; + + spin_lock(&hwspinlock_tree_lock); + + /* make sure the hwspinlock is not in use (tag is set) */ + ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED); + if (ret == 0) { + pr_err("hwspinlock %d still in use (or not present)\n", id); + goto out; + } + + hwlock = radix_tree_delete(&hwspinlock_tree, id); + if (!hwlock) { + pr_err("failed to delete hwspinlock %d\n", id); + goto out; + } + +out: + spin_unlock(&hwspinlock_tree_lock); + return hwlock; +} +EXPORT_SYMBOL_GPL(hwspin_lock_unregister); + +/** + * __hwspin_lock_request() - tag an hwspinlock as used and power it up + * + * This is an internal function that prepares an hwspinlock instance + * before it is given to the user. The function assumes that + * hwspinlock_tree_lock is taken. + * + * Returns 0 or positive to indicate success, and a negative value to + * indicate an error (with the appropriate error code) + */ +static int __hwspin_lock_request(struct hwspinlock *hwlock) +{ + struct hwspinlock *tmp; + int ret; + + /* prevent underlying implementation from being removed */ + if (!try_module_get(hwlock->owner)) { + dev_err(hwlock->dev, "%s: can't get owner\n", __func__); + return -EINVAL; + } + + /* notify PM core that power is now needed */ + ret = pm_runtime_get_sync(hwlock->dev); + if (ret < 0) { + dev_err(hwlock->dev, "%s: can't power on device\n", __func__); + return ret; + } + + /* mark hwspinlock as used, should not fail */ + tmp = radix_tree_tag_clear(&hwspinlock_tree, hwlock->id, + HWSPINLOCK_UNUSED); + + /* self-sanity check that should never fail */ + WARN_ON(tmp != hwlock); + + return ret; +} + +/** + * hwspin_lock_get_id() - retrieve id number of a given hwspinlock + * @hwlock: a valid hwspinlock instance + * + * Returns the id number of a given @hwlock, or -EINVAL if @hwlock is invalid. + */ +int hwspin_lock_get_id(struct hwspinlock *hwlock) +{ + if (!hwlock) { + pr_err("invalid hwlock\n"); + return -EINVAL; + } + + return hwlock->id; +} +EXPORT_SYMBOL_GPL(hwspin_lock_get_id); + +/** + * hwspin_lock_request() - request an hwspinlock + * + * This function should be called by users of the hwspinlock device, + * in order to dynamically assign them an unused hwspinlock. 
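The registration half just shown is driven by a platform backend. A minimal, hypothetical backend would populate each lock and hand it to hwspin_lock_register() roughly as follows; the struct hwspinlock members used here are the ones defined in hwspinlock_internal.h further down, and all foo_* names are placeholders.

static const struct hwspinlock_ops foo_hwspinlock_ops = {
	.trylock	= foo_hwspinlock_trylock,	/* assumed backend helpers */
	.unlock		= foo_hwspinlock_unlock,
};

static int foo_register_bank(struct device *dev, struct hwspinlock *locks,
			     int num_locks)
{
	int i, ret;

	for (i = 0; i < num_locks; i++) {
		locks[i].dev = dev;
		locks[i].ops = &foo_hwspinlock_ops;
		locks[i].id = i;
		locks[i].owner = THIS_MODULE;

		ret = hwspin_lock_register(&locks[i]);
		if (ret) {
			/* real code would also unregister ids 0..i-1 here */
			return ret;
		}
	}

	return 0;
}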
+ * Usually the user of this lock will then have to communicate the lock's id + * to the remote core before it can be used for synchronization (to get the + * id of a given hwlock, use hwspin_lock_get_id()). + * + * Can be called from an atomic context (will not sleep) but not from + * within interrupt context (simply because there is no use case for + * that yet). + * + * Returns the address of the assigned hwspinlock, or NULL on error + */ +struct hwspinlock *hwspin_lock_request(void) +{ + struct hwspinlock *hwlock; + int ret; + + spin_lock(&hwspinlock_tree_lock); + + /* look for an unused lock */ + ret = radix_tree_gang_lookup_tag(&hwspinlock_tree, (void **)&hwlock, + 0, 1, HWSPINLOCK_UNUSED); + if (ret == 0) { + pr_warn("a free hwspinlock is not available\n"); + hwlock = NULL; + goto out; + } + + /* sanity check that should never fail */ + WARN_ON(ret > 1); + + /* mark as used and power up */ + ret = __hwspin_lock_request(hwlock); + if (ret < 0) + hwlock = NULL; + +out: + spin_unlock(&hwspinlock_tree_lock); + return hwlock; +} +EXPORT_SYMBOL_GPL(hwspin_lock_request); + +/** + * hwspin_lock_request_specific() - request a specific hwspinlock + * @id: index of the specific hwspinlock that is requested + * + * This function should be called by users of the hwspinlock module, + * in order to assign them a specific hwspinlock. + * Usually early board code will be calling this function in order to + * reserve specific hwspinlock ids for predefined purposes. + * + * Can be called from an atomic context (will not sleep) but not from + * within interrupt context (simply because there is no use case for + * that yet). + * + * Returns the address of the assigned hwspinlock, or NULL on error + */ +struct hwspinlock *hwspin_lock_request_specific(unsigned int id) +{ + struct hwspinlock *hwlock; + int ret; + + spin_lock(&hwspinlock_tree_lock); + + /* make sure this hwspinlock exists */ + hwlock = radix_tree_lookup(&hwspinlock_tree, id); + if (!hwlock) { + pr_warn("hwspinlock %u does not exist\n", id); + goto out; + } + + /* sanity check (this shouldn't happen) */ + WARN_ON(hwlock->id != id); + + /* make sure this hwspinlock is unused */ + ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED); + if (ret == 0) { + pr_warn("hwspinlock %u is already in use\n", id); + hwlock = NULL; + goto out; + } + + /* mark as used and power up */ + ret = __hwspin_lock_request(hwlock); + if (ret < 0) + hwlock = NULL; + +out: + spin_unlock(&hwspinlock_tree_lock); + return hwlock; +} +EXPORT_SYMBOL_GPL(hwspin_lock_request_specific); + +/** + * hwspin_lock_free() - free a specific hwspinlock + * @hwlock: the specific hwspinlock to free + * + * This function marks @hwlock as free again. + * Should only be called with an @hwlock that was retrieved from + * an earlier call to hwspin_lock_request{_specific}. + * + * Can be called from an atomic context (will not sleep) but not from + * within interrupt context (simply because there is no use case for + * that yet).
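The "early board code" case mentioned in the kernel-doc above could look like the following sketch; hwspin_lock_request_specific() and hwspin_lock_get_id() are the functions just shown, while the choice of id 0 and the shared-memory handoff are placeholders.

#include <linux/init.h>
#include <linux/hwspinlock.h>

struct ipc_boot_info {
	u32 ipc_lock_id;			/* hypothetical shared-memory layout */
};

static struct ipc_boot_info *shared_area;	/* mapped by earlier board code (assumed) */
static struct hwspinlock *ipc_lock;

static int __init board_reserve_ipc_lock(void)
{
	/* both sides agree by convention that lock 0 guards the IPC area */
	ipc_lock = hwspin_lock_request_specific(0);
	if (!ipc_lock)
		return -EBUSY;

	/* publish the id where the coprocessor will look for it at boot */
	shared_area->ipc_lock_id = hwspin_lock_get_id(ipc_lock);
	return 0;
}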
+ * + * Returns 0 on success, or an appropriate error code on failure + */ +int hwspin_lock_free(struct hwspinlock *hwlock) +{ + struct hwspinlock *tmp; + int ret; + + if (!hwlock) { + pr_err("invalid hwlock\n"); + return -EINVAL; + } + + spin_lock(&hwspinlock_tree_lock); + + /* make sure the hwspinlock is used */ + ret = radix_tree_tag_get(&hwspinlock_tree, hwlock->id, + HWSPINLOCK_UNUSED); + if (ret == 1) { + dev_err(hwlock->dev, "%s: hwlock is already free\n", __func__); + dump_stack(); + ret = -EINVAL; + goto out; + } + + /* notify the underlying device that power is not needed */ + ret = pm_runtime_put(hwlock->dev); + if (ret < 0) + goto out; + + /* mark this hwspinlock as available */ + tmp = radix_tree_tag_set(&hwspinlock_tree, hwlock->id, + HWSPINLOCK_UNUSED); + + /* sanity check (this shouldn't happen) */ + WARN_ON(tmp != hwlock); + + module_put(hwlock->owner); + +out: + spin_unlock(&hwspinlock_tree_lock); + return ret; +} +EXPORT_SYMBOL_GPL(hwspin_lock_free); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Hardware spinlock interface"); +MODULE_AUTHOR("Ohad Ben-Cohen <ohad@wizery.com>"); diff --git a/drivers/hwspinlock/hwspinlock_internal.h b/drivers/hwspinlock/hwspinlock_internal.h new file mode 100644 index 000000000000..69935e6b93e5 --- /dev/null +++ b/drivers/hwspinlock/hwspinlock_internal.h @@ -0,0 +1,61 @@ +/* + * Hardware spinlocks internal header + * + * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com + * + * Contact: Ohad Ben-Cohen <ohad@wizery.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __HWSPINLOCK_HWSPINLOCK_H +#define __HWSPINLOCK_HWSPINLOCK_H + +#include <linux/spinlock.h> +#include <linux/device.h> + +/** + * struct hwspinlock_ops - platform-specific hwspinlock handlers + * + * @trylock: make a single attempt to take the lock. returns 0 on + * failure and true on success. may _not_ sleep. + * @unlock: release the lock. always succeeds. may _not_ sleep. + * @relax: optional, platform-specific relax handler, called by hwspinlock + * core while spinning on a lock, between two successive + * invocations of @trylock. may _not_ sleep. + */ +struct hwspinlock_ops { + int (*trylock)(struct hwspinlock *lock); + void (*unlock)(struct hwspinlock *lock); + void (*relax)(struct hwspinlock *lock); +}; + +/** + * struct hwspinlock - this struct represents a single hwspinlock instance + * + * @dev: underlying device, will be used to invoke runtime PM api + * @ops: platform-specific hwspinlock handlers + * @id: a global, unique, system-wide index of the lock. + * @lock: initialized and used by hwspinlock core + * @owner: underlying implementation module, used to maintain module ref count + * + * Note: for now we opted for simplicity, but later we can squeeze some + * memory bytes by grouping the dev, ops and owner members in a single + * per-platform struct, and have all hwspinlocks point at it.
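The note just above about squeezing memory could take roughly this shape; this is purely a sketch of the suggested follow-up, with invented names, not code from this patch.

/* shared once per lock bank instead of being replicated per lock */
struct hwspinlock_bank {
	struct device *dev;
	const struct hwspinlock_ops *ops;
	struct module *owner;
};

/* each lock would then carry only its truly per-lock state */
struct hwspinlock_slim {
	struct hwspinlock_bank *bank;	/* shared by all locks of a bank */
	int id;
	spinlock_t lock;
};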
+ */ +struct hwspinlock { + struct device *dev; + const struct hwspinlock_ops *ops; + int id; + spinlock_t lock; + struct module *owner; +}; + +#endif /* __HWSPINLOCK_HWSPINLOCK_H */ diff --git a/drivers/hwspinlock/omap_hwspinlock.c b/drivers/hwspinlock/omap_hwspinlock.c new file mode 100644 index 000000000000..a8f02734c026 --- /dev/null +++ b/drivers/hwspinlock/omap_hwspinlock.c @@ -0,0 +1,231 @@ +/* + * OMAP hardware spinlock driver + * + * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com + * + * Contact: Simon Que <sque@ti.com> + * Hari Kanigeri <h-kanigeri2@ti.com> + * Ohad Ben-Cohen <ohad@wizery.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/bitops.h> +#include <linux/pm_runtime.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/hwspinlock.h> +#include <linux/platform_device.h> + +#include "hwspinlock_internal.h" + +/* Spinlock register offsets */ +#define SYSSTATUS_OFFSET 0x0014 +#define LOCK_BASE_OFFSET 0x0800 + +#define SPINLOCK_NUMLOCKS_BIT_OFFSET (24) + +/* Possible values of SPINLOCK_LOCK_REG */ +#define SPINLOCK_NOTTAKEN (0) /* free */ +#define SPINLOCK_TAKEN (1) /* locked */ + +#define to_omap_hwspinlock(lock) \ + container_of(lock, struct omap_hwspinlock, lock) + +struct omap_hwspinlock { + struct hwspinlock lock; + void __iomem *addr; +}; + +struct omap_hwspinlock_state { + int num_locks; /* Total number of locks in system */ + void __iomem *io_base; /* Mapped base address */ +}; + +static int omap_hwspinlock_trylock(struct hwspinlock *lock) +{ + struct omap_hwspinlock *omap_lock = to_omap_hwspinlock(lock); + + /* attempt to acquire the lock by reading its value */ + return (SPINLOCK_NOTTAKEN == readl(omap_lock->addr)); +} + +static void omap_hwspinlock_unlock(struct hwspinlock *lock) +{ + struct omap_hwspinlock *omap_lock = to_omap_hwspinlock(lock); + + /* release the lock by writing 0 to it */ + writel(SPINLOCK_NOTTAKEN, omap_lock->addr); +} + +/* + * relax the OMAP interconnect while spinning on it. + * + * The specs recommend that the retry delay be + * just over half of the time that a requester is + * expected to hold the lock. + * + * The number below is taken from a hardware specs example, + * obviously it is somewhat arbitrary.
+ */ +static void omap_hwspinlock_relax(struct hwspinlock *lock) +{ + ndelay(50); +} + +static const struct hwspinlock_ops omap_hwspinlock_ops = { + .trylock = omap_hwspinlock_trylock, + .unlock = omap_hwspinlock_unlock, + .relax = omap_hwspinlock_relax, +}; + +static int __devinit omap_hwspinlock_probe(struct platform_device *pdev) +{ + struct omap_hwspinlock *omap_lock; + struct omap_hwspinlock_state *state; + struct hwspinlock *lock; + struct resource *res; + void __iomem *io_base; + int i, ret; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -ENODEV; + + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (!state) + return -ENOMEM; + + io_base = ioremap(res->start, resource_size(res)); + if (!io_base) { + ret = -ENOMEM; + goto free_state; + } + + /* Determine number of locks */ + i = readl(io_base + SYSSTATUS_OFFSET); + i >>= SPINLOCK_NUMLOCKS_BIT_OFFSET; + + /* one of the four lsbs must be set, and nothing else */ + if (hweight_long(i & 0xf) != 1 || i > 8) { + ret = -EINVAL; + goto iounmap_base; + } + + state->num_locks = i * 32; + state->io_base = io_base; + + platform_set_drvdata(pdev, state); + + /* + * runtime PM will make sure the clock of this module is + * enabled iff at least one lock is requested + */ + pm_runtime_enable(&pdev->dev); + + for (i = 0; i < state->num_locks; i++) { + omap_lock = kzalloc(sizeof(*omap_lock), GFP_KERNEL); + if (!omap_lock) { + ret = -ENOMEM; + goto free_locks; + } + + omap_lock->lock.dev = &pdev->dev; + omap_lock->lock.owner = THIS_MODULE; + omap_lock->lock.id = i; + omap_lock->lock.ops = &omap_hwspinlock_ops; + omap_lock->addr = io_base + LOCK_BASE_OFFSET + sizeof(u32) * i; + + ret = hwspin_lock_register(&omap_lock->lock); + if (ret) { + kfree(omap_lock); + goto free_locks; + } + } + + return 0; + +free_locks: + while (--i >= 0) { + lock = hwspin_lock_unregister(i); + /* this shouldn't happen, but let's give our best effort */ + if (!lock) { + dev_err(&pdev->dev, "%s: cleanups failed\n", __func__); + continue; + } + omap_lock = to_omap_hwspinlock(lock); + kfree(omap_lock); + } + pm_runtime_disable(&pdev->dev); +iounmap_base: + iounmap(io_base); +free_state: + kfree(state); + return ret; +} + +static int omap_hwspinlock_remove(struct platform_device *pdev) +{ + struct omap_hwspinlock_state *state = platform_get_drvdata(pdev); + struct hwspinlock *lock; + struct omap_hwspinlock *omap_lock; + int i; + + for (i = 0; i < state->num_locks; i++) { + lock = hwspin_lock_unregister(i); + /* this shouldn't happen at this point.
if it does, at least + * don't continue with the remove */ + if (!lock) { + dev_err(&pdev->dev, "%s: failed on %d\n", __func__, i); + return -EBUSY; + } + + omap_lock = to_omap_hwspinlock(lock); + kfree(omap_lock); + } + + pm_runtime_disable(&pdev->dev); + iounmap(state->io_base); + kfree(state); + + return 0; +} + +static struct platform_driver omap_hwspinlock_driver = { + .probe = omap_hwspinlock_probe, + .remove = omap_hwspinlock_remove, + .driver = { + .name = "omap_hwspinlock", + }, +}; + +static int __init omap_hwspinlock_init(void) +{ + return platform_driver_register(&omap_hwspinlock_driver); +} +/* board init code might need to reserve hwspinlocks for predefined purposes */ +postcore_initcall(omap_hwspinlock_init); + +static void __exit omap_hwspinlock_exit(void) +{ + platform_driver_unregister(&omap_hwspinlock_driver); +} +module_exit(omap_hwspinlock_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Hardware spinlock driver for OMAP"); +MODULE_AUTHOR("Simon Que <sque@ti.com>"); +MODULE_AUTHOR("Hari Kanigeri <h-kanigeri2@ti.com>"); +MODULE_AUTHOR("Ohad Ben-Cohen <ohad@wizery.com>"); diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index cbfcf6fb4a61..230601e8853f 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -433,7 +433,7 @@ config I2C_IXP2000 config I2C_MPC tristate "MPC107/824x/85xx/512x/52xx/83xx/86xx" - depends on PPC32 + depends on PPC help If you say yes to this option, support will be included for the built-in I2C interface on the MPC107, Tsi107, MPC512x, MPC52xx, @@ -452,6 +452,16 @@ config I2C_MV64XXX This driver can also be built as a module. If so, the module will be called i2c-mv64xxx. +config I2C_MXS + tristate "Freescale i.MX28 I2C interface" + depends on SOC_IMX28 + help + Say Y here if you want to use the I2C bus controller on + the Freescale i.MX28 processors. + + This driver can also be built as a module. If so, the module + will be called i2c-mxs. + config I2C_NOMADIK tristate "ST-Ericsson Nomadik/Ux500 I2C Controller" depends on PLAT_NOMADIK @@ -618,6 +628,13 @@ config I2C_STU300 This driver can also be built as a module. If so, the module will be called i2c-stu300. 
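The hunks that follow add Kconfig entries, Makefile rules and the two new bus drivers themselves (i2c-mxs, i2c-tegra). For context, board code would bind such an adapter roughly as sketched here: the "tegra-i2c" device name and the bus_clk_rate platform field are visible in the Tegra driver below, while the base address, IRQ number and 400 kHz rate are placeholders.

#include <linux/platform_device.h>
#include <linux/i2c-tegra.h>

static struct tegra_i2c_platform_data board_i2c1_pdata = {
	.bus_clk_rate	= 400000,	/* the driver falls back to 100000 without pdata */
};

static struct resource board_i2c1_resources[] = {
	{
		.start	= 0x7000c000,	/* placeholder controller base */
		.end	= 0x7000c0ff,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= 38,		/* placeholder interrupt */
		.end	= 38,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device board_i2c1_device = {
	.name		= "tegra-i2c",
	.id		= 0,		/* also becomes the adapter number via pdev->id */
	.resource	= board_i2c1_resources,
	.num_resources	= ARRAY_SIZE(board_i2c1_resources),
	.dev = {
		.platform_data	= &board_i2c1_pdata,
	},
};

Registering it from board init is then a single platform_device_register(&board_i2c1_device) call.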
+config I2C_TEGRA + tristate "NVIDIA Tegra internal I2C controller" + depends on ARCH_TEGRA + help + If you say yes to this option, support will be included for the + I2C controller embedded in NVIDIA Tegra SoCs. + +config I2C_VERSATILE tristate "ARM Versatile/Realview I2C bus support" depends on ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile index a83966acc5ab..3878c959d4fa 100644 --- a/drivers/i2c/busses/Makefile +++ b/drivers/i2c/busses/Makefile @@ -43,6 +43,7 @@ obj-$(CONFIG_I2C_IOP3XX) += i2c-iop3xx.o obj-$(CONFIG_I2C_IXP2000) += i2c-ixp2000.o obj-$(CONFIG_I2C_MPC) += i2c-mpc.o obj-$(CONFIG_I2C_MV64XXX) += i2c-mv64xxx.o +obj-$(CONFIG_I2C_MXS) += i2c-mxs.o obj-$(CONFIG_I2C_NOMADIK) += i2c-nomadik.o obj-$(CONFIG_I2C_NUC900) += i2c-nuc900.o obj-$(CONFIG_I2C_OCORES) += i2c-ocores.o @@ -59,6 +60,7 @@ obj-$(CONFIG_I2C_SH7760) += i2c-sh7760.o obj-$(CONFIG_I2C_SH_MOBILE) += i2c-sh_mobile.o obj-$(CONFIG_I2C_SIMTEC) += i2c-simtec.o obj-$(CONFIG_I2C_STU300) += i2c-stu300.o +obj-$(CONFIG_I2C_TEGRA) += i2c-tegra.o obj-$(CONFIG_I2C_VERSATILE) += i2c-versatile.o obj-$(CONFIG_I2C_OCTEON) += i2c-octeon.o obj-$(CONFIG_I2C_XILINX) += i2c-xiic.o diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c new file mode 100644 index 000000000000..8022e2390a5a --- /dev/null +++ b/drivers/i2c/busses/i2c-mxs.c @@ -0,0 +1,412 @@ +/* + * Freescale MXS I2C bus driver + * + * Copyright (C) 2011 Wolfram Sang, Pengutronix e.K. + * + * based on a (non-working) driver which was: + * + * Copyright (C) 2009-2010 Freescale Semiconductor, Inc. All Rights Reserved. + * + * TODO: add dma-support if platform-support for it is available + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version.
+ * + */ + +#include <linux/slab.h> +#include <linux/device.h> +#include <linux/module.h> +#include <linux/i2c.h> +#include <linux/err.h> +#include <linux/interrupt.h> +#include <linux/completion.h> +#include <linux/platform_device.h> +#include <linux/jiffies.h> +#include <linux/io.h> + +#include <mach/common.h> + +#define DRIVER_NAME "mxs-i2c" + +#define MXS_I2C_CTRL0 (0x00) +#define MXS_I2C_CTRL0_SET (0x04) + +#define MXS_I2C_CTRL0_SFTRST 0x80000000 +#define MXS_I2C_CTRL0_SEND_NAK_ON_LAST 0x02000000 +#define MXS_I2C_CTRL0_RETAIN_CLOCK 0x00200000 +#define MXS_I2C_CTRL0_POST_SEND_STOP 0x00100000 +#define MXS_I2C_CTRL0_PRE_SEND_START 0x00080000 +#define MXS_I2C_CTRL0_MASTER_MODE 0x00020000 +#define MXS_I2C_CTRL0_DIRECTION 0x00010000 +#define MXS_I2C_CTRL0_XFER_COUNT(v) ((v) & 0x0000FFFF) + +#define MXS_I2C_CTRL1 (0x40) +#define MXS_I2C_CTRL1_SET (0x44) +#define MXS_I2C_CTRL1_CLR (0x48) + +#define MXS_I2C_CTRL1_BUS_FREE_IRQ 0x80 +#define MXS_I2C_CTRL1_DATA_ENGINE_CMPLT_IRQ 0x40 +#define MXS_I2C_CTRL1_NO_SLAVE_ACK_IRQ 0x20 +#define MXS_I2C_CTRL1_OVERSIZE_XFER_TERM_IRQ 0x10 +#define MXS_I2C_CTRL1_EARLY_TERM_IRQ 0x08 +#define MXS_I2C_CTRL1_MASTER_LOSS_IRQ 0x04 +#define MXS_I2C_CTRL1_SLAVE_STOP_IRQ 0x02 +#define MXS_I2C_CTRL1_SLAVE_IRQ 0x01 + +#define MXS_I2C_IRQ_MASK (MXS_I2C_CTRL1_DATA_ENGINE_CMPLT_IRQ | \ + MXS_I2C_CTRL1_NO_SLAVE_ACK_IRQ | \ + MXS_I2C_CTRL1_EARLY_TERM_IRQ | \ + MXS_I2C_CTRL1_MASTER_LOSS_IRQ | \ + MXS_I2C_CTRL1_SLAVE_STOP_IRQ | \ + MXS_I2C_CTRL1_SLAVE_IRQ) + +#define MXS_I2C_QUEUECTRL (0x60) +#define MXS_I2C_QUEUECTRL_SET (0x64) +#define MXS_I2C_QUEUECTRL_CLR (0x68) + +#define MXS_I2C_QUEUECTRL_QUEUE_RUN 0x20 +#define MXS_I2C_QUEUECTRL_PIO_QUEUE_MODE 0x04 + +#define MXS_I2C_QUEUESTAT (0x70) +#define MXS_I2C_QUEUESTAT_RD_QUEUE_EMPTY 0x00002000 + +#define MXS_I2C_QUEUECMD (0x80) + +#define MXS_I2C_QUEUEDATA (0x90) + +#define MXS_I2C_DATA (0xa0) + + +#define MXS_CMD_I2C_SELECT (MXS_I2C_CTRL0_RETAIN_CLOCK | \ + MXS_I2C_CTRL0_PRE_SEND_START | \ + MXS_I2C_CTRL0_MASTER_MODE | \ + MXS_I2C_CTRL0_DIRECTION | \ + MXS_I2C_CTRL0_XFER_COUNT(1)) + +#define MXS_CMD_I2C_WRITE (MXS_I2C_CTRL0_PRE_SEND_START | \ + MXS_I2C_CTRL0_MASTER_MODE | \ + MXS_I2C_CTRL0_DIRECTION) + +#define MXS_CMD_I2C_READ (MXS_I2C_CTRL0_SEND_NAK_ON_LAST | \ + MXS_I2C_CTRL0_MASTER_MODE) + +/** + * struct mxs_i2c_dev - per device, private MXS-I2C data + * + * @dev: driver model device node + * @regs: IO registers pointer + * @cmd_complete: completion object for transaction wait + * @cmd_err: error code for last transaction + * @adapter: i2c subsystem adapter node + */ +struct mxs_i2c_dev { + struct device *dev; + void __iomem *regs; + struct completion cmd_complete; + u32 cmd_err; + struct i2c_adapter adapter; +}; + +/* + * TODO: check if calls to here are really needed. If not, we could get rid of + * mxs_reset_block and the mach-dependency. Needs an I2C analyzer, probably. 
+ */ +static void mxs_i2c_reset(struct mxs_i2c_dev *i2c) +{ + mxs_reset_block(i2c->regs); + writel(MXS_I2C_IRQ_MASK << 8, i2c->regs + MXS_I2C_CTRL1_SET); +} + +static void mxs_i2c_pioq_setup_read(struct mxs_i2c_dev *i2c, u8 addr, int len, + int flags) +{ + u32 data; + + writel(MXS_CMD_I2C_SELECT, i2c->regs + MXS_I2C_QUEUECMD); + + data = (addr << 1) | I2C_SMBUS_READ; + writel(data, i2c->regs + MXS_I2C_DATA); + + data = MXS_CMD_I2C_READ | MXS_I2C_CTRL0_XFER_COUNT(len) | flags; + writel(data, i2c->regs + MXS_I2C_QUEUECMD); +} + +static void mxs_i2c_pioq_setup_write(struct mxs_i2c_dev *i2c, + u8 addr, u8 *buf, int len, int flags) +{ + u32 data; + int i, shifts_left; + + data = MXS_CMD_I2C_WRITE | MXS_I2C_CTRL0_XFER_COUNT(len + 1) | flags; + writel(data, i2c->regs + MXS_I2C_QUEUECMD); + + /* + * We have to copy the slave address (u8) and buffer (arbitrary number + * of u8) into the data register (u32). To achieve that, the u8 are put + * into the MSBs of 'data' which is then shifted for the next u8. When + * appropriate, 'data' is written to MXS_I2C_DATA. So, the first u32 + * looks like this: + * + * 3 2 1 0 + * 10987654|32109876|54321098|76543210 + * --------+--------+--------+-------- + * buffer+2|buffer+1|buffer+0|slave_addr + */ + + data = ((addr << 1) | I2C_SMBUS_WRITE) << 24; + + for (i = 0; i < len; i++) { + data >>= 8; + data |= buf[i] << 24; + if ((i & 3) == 2) + writel(data, i2c->regs + MXS_I2C_DATA); + } + + /* Write out the remaining bytes if any */ + shifts_left = 24 - (i & 3) * 8; + if (shifts_left) + writel(data >> shifts_left, i2c->regs + MXS_I2C_DATA); +} + +/* + * TODO: should be replaceable with a waitqueue and RD_QUEUE_IRQ (setting the + * rd_threshold to 1). Couldn't get this to work, though. + */ +static int mxs_i2c_wait_for_data(struct mxs_i2c_dev *i2c) +{ + unsigned long timeout = jiffies + msecs_to_jiffies(1000); + + while (readl(i2c->regs + MXS_I2C_QUEUESTAT) + & MXS_I2C_QUEUESTAT_RD_QUEUE_EMPTY) { + if (time_after(jiffies, timeout)) + return -ETIMEDOUT; + cond_resched(); + } + + return 0; +} + +static int mxs_i2c_finish_read(struct mxs_i2c_dev *i2c, u8 *buf, int len) +{ + u32 data; + int i; + + for (i = 0; i < len; i++) { + if ((i & 3) == 0) { + if (mxs_i2c_wait_for_data(i2c)) + return -ETIMEDOUT; + data = readl(i2c->regs + MXS_I2C_QUEUEDATA); + } + buf[i] = data & 0xff; + data >>= 8; + } + + return 0; +} + +/* + * Low level master read/write transaction. + */ +static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, + int stop) +{ + struct mxs_i2c_dev *i2c = i2c_get_adapdata(adap); + int ret; + int flags; + + init_completion(&i2c->cmd_complete); + + dev_dbg(i2c->dev, "addr: 0x%04x, len: %d, flags: 0x%x, stop: %d\n", + msg->addr, msg->len, msg->flags, stop); + + if (msg->len == 0) + return -EINVAL; + + flags = stop ?
MXS_I2C_CTRL0_POST_SEND_STOP : 0; + + if (msg->flags & I2C_M_RD) + mxs_i2c_pioq_setup_read(i2c, msg->addr, msg->len, flags); + else + mxs_i2c_pioq_setup_write(i2c, msg->addr, msg->buf, msg->len, + flags); + + writel(MXS_I2C_QUEUECTRL_QUEUE_RUN, + i2c->regs + MXS_I2C_QUEUECTRL_SET); + + ret = wait_for_completion_timeout(&i2c->cmd_complete, + msecs_to_jiffies(1000)); + if (ret == 0) + goto timeout; + + if ((!i2c->cmd_err) && (msg->flags & I2C_M_RD)) { + ret = mxs_i2c_finish_read(i2c, msg->buf, msg->len); + if (ret) + goto timeout; + } + + if (i2c->cmd_err == -ENXIO) + mxs_i2c_reset(i2c); + + dev_dbg(i2c->dev, "Done with err=%d\n", i2c->cmd_err); + + return i2c->cmd_err; + +timeout: + dev_dbg(i2c->dev, "Timeout!\n"); + mxs_i2c_reset(i2c); + return -ETIMEDOUT; +} + +static int mxs_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], + int num) +{ + int i; + int err; + + for (i = 0; i < num; i++) { + err = mxs_i2c_xfer_msg(adap, &msgs[i], i == (num - 1)); + if (err) + return err; + } + + return num; +} + +static u32 mxs_i2c_func(struct i2c_adapter *adap) +{ + return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK); +} + +static irqreturn_t mxs_i2c_isr(int this_irq, void *dev_id) +{ + struct mxs_i2c_dev *i2c = dev_id; + u32 stat = readl(i2c->regs + MXS_I2C_CTRL1) & MXS_I2C_IRQ_MASK; + + if (!stat) + return IRQ_NONE; + + if (stat & MXS_I2C_CTRL1_NO_SLAVE_ACK_IRQ) + i2c->cmd_err = -ENXIO; + else if (stat & (MXS_I2C_CTRL1_EARLY_TERM_IRQ | + MXS_I2C_CTRL1_MASTER_LOSS_IRQ | + MXS_I2C_CTRL1_SLAVE_STOP_IRQ | MXS_I2C_CTRL1_SLAVE_IRQ)) + /* MXS_I2C_CTRL1_OVERSIZE_XFER_TERM_IRQ is only for slaves */ + i2c->cmd_err = -EIO; + else + i2c->cmd_err = 0; + + complete(&i2c->cmd_complete); + + writel(stat, i2c->regs + MXS_I2C_CTRL1_CLR); + return IRQ_HANDLED; +} + +static const struct i2c_algorithm mxs_i2c_algo = { + .master_xfer = mxs_i2c_xfer, + .functionality = mxs_i2c_func, +}; + +static int __devinit mxs_i2c_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct mxs_i2c_dev *i2c; + struct i2c_adapter *adap; + struct resource *res; + resource_size_t res_size; + int err, irq; + + i2c = devm_kzalloc(dev, sizeof(struct mxs_i2c_dev), GFP_KERNEL); + if (!i2c) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -ENOENT; + + res_size = resource_size(res); + if (!devm_request_mem_region(dev, res->start, res_size, res->name)) + return -EBUSY; + + i2c->regs = devm_ioremap_nocache(dev, res->start, res_size); + if (!i2c->regs) + return -EBUSY; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return irq; + + err = devm_request_irq(dev, irq, mxs_i2c_isr, 0, dev_name(dev), i2c); + if (err) + return err; + + i2c->dev = dev; + platform_set_drvdata(pdev, i2c); + + /* Do reset to enforce correct startup after pinmuxing */ + mxs_i2c_reset(i2c); + writel(MXS_I2C_QUEUECTRL_PIO_QUEUE_MODE, + i2c->regs + MXS_I2C_QUEUECTRL_SET); + + adap = &i2c->adapter; + strlcpy(adap->name, "MXS I2C adapter", sizeof(adap->name)); + adap->owner = THIS_MODULE; + adap->algo = &mxs_i2c_algo; + adap->dev.parent = dev; + adap->nr = pdev->id; + i2c_set_adapdata(adap, i2c); + err = i2c_add_numbered_adapter(adap); + if (err) { + dev_err(dev, "Failed to add adapter (%d)\n", err); + writel(MXS_I2C_CTRL0_SFTRST, + i2c->regs + MXS_I2C_CTRL0_SET); + return err; + } + + return 0; +} + +static int __devexit mxs_i2c_remove(struct platform_device *pdev) +{ + struct mxs_i2c_dev *i2c = platform_get_drvdata(pdev); + int ret; + + ret = i2c_del_adapter(&i2c->adapter); + if 
(ret) + return -EBUSY; + + writel(MXS_I2C_QUEUECTRL_QUEUE_RUN, + i2c->regs + MXS_I2C_QUEUECTRL_CLR); + writel(MXS_I2C_CTRL0_SFTRST, i2c->regs + MXS_I2C_CTRL0_SET); + + platform_set_drvdata(pdev, NULL); + + return 0; +} + +static struct platform_driver mxs_i2c_driver = { + .driver = { + .name = DRIVER_NAME, + .owner = THIS_MODULE, + }, + .remove = __devexit_p(mxs_i2c_remove), +}; + +static int __init mxs_i2c_init(void) +{ + return platform_driver_probe(&mxs_i2c_driver, mxs_i2c_probe); +} +subsys_initcall(mxs_i2c_init); + +static void __exit mxs_i2c_exit(void) +{ + platform_driver_unregister(&mxs_i2c_driver); +} +module_exit(mxs_i2c_exit); + +MODULE_AUTHOR("Wolfram Sang <w.sang@pengutronix.de>"); +MODULE_DESCRIPTION("MXS I2C Bus Driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:" DRIVER_NAME); diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c new file mode 100644 index 000000000000..3921f664c9c3 --- /dev/null +++ b/drivers/i2c/busses/i2c-tegra.c @@ -0,0 +1,700 @@ +/* + * drivers/i2c/busses/i2c-tegra.c + * + * Copyright (C) 2010 Google, Inc. + * Author: Colin Cross <ccross@android.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/platform_device.h> +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/i2c.h> +#include <linux/io.h> +#include <linux/interrupt.h> +#include <linux/delay.h> +#include <linux/slab.h> +#include <linux/i2c-tegra.h> + +#include <asm/unaligned.h> + +#include <mach/clk.h> + +#define TEGRA_I2C_TIMEOUT (msecs_to_jiffies(1000)) +#define BYTES_PER_FIFO_WORD 4 + +#define I2C_CNFG 0x000 +#define I2C_CNFG_PACKET_MODE_EN (1<<10) +#define I2C_CNFG_NEW_MASTER_FSM (1<<11) +#define I2C_SL_CNFG 0x020 +#define I2C_SL_CNFG_NEWSL (1<<2) +#define I2C_SL_ADDR1 0x02c +#define I2C_TX_FIFO 0x050 +#define I2C_RX_FIFO 0x054 +#define I2C_PACKET_TRANSFER_STATUS 0x058 +#define I2C_FIFO_CONTROL 0x05c +#define I2C_FIFO_CONTROL_TX_FLUSH (1<<1) +#define I2C_FIFO_CONTROL_RX_FLUSH (1<<0) +#define I2C_FIFO_CONTROL_TX_TRIG_SHIFT 5 +#define I2C_FIFO_CONTROL_RX_TRIG_SHIFT 2 +#define I2C_FIFO_STATUS 0x060 +#define I2C_FIFO_STATUS_TX_MASK 0xF0 +#define I2C_FIFO_STATUS_TX_SHIFT 4 +#define I2C_FIFO_STATUS_RX_MASK 0x0F +#define I2C_FIFO_STATUS_RX_SHIFT 0 +#define I2C_INT_MASK 0x064 +#define I2C_INT_STATUS 0x068 +#define I2C_INT_PACKET_XFER_COMPLETE (1<<7) +#define I2C_INT_ALL_PACKETS_XFER_COMPLETE (1<<6) +#define I2C_INT_TX_FIFO_OVERFLOW (1<<5) +#define I2C_INT_RX_FIFO_UNDERFLOW (1<<4) +#define I2C_INT_NO_ACK (1<<3) +#define I2C_INT_ARBITRATION_LOST (1<<2) +#define I2C_INT_TX_FIFO_DATA_REQ (1<<1) +#define I2C_INT_RX_FIFO_DATA_REQ (1<<0) +#define I2C_CLK_DIVISOR 0x06c + +#define DVC_CTRL_REG1 0x000 +#define DVC_CTRL_REG1_INTR_EN (1<<10) +#define DVC_CTRL_REG2 0x004 +#define DVC_CTRL_REG3 0x008 +#define DVC_CTRL_REG3_SW_PROG (1<<26) +#define DVC_CTRL_REG3_I2C_DONE_INTR_EN (1<<30) +#define DVC_STATUS 0x00c +#define DVC_STATUS_I2C_DONE_INTR (1<<30) + +#define I2C_ERR_NONE 0x00 +#define I2C_ERR_NO_ACK 0x01 +#define I2C_ERR_ARBITRATION_LOST 0x02 + +#define 
PACKET_HEADER0_HEADER_SIZE_SHIFT 28 +#define PACKET_HEADER0_PACKET_ID_SHIFT 16 +#define PACKET_HEADER0_CONT_ID_SHIFT 12 +#define PACKET_HEADER0_PROTOCOL_I2C (1<<4) + +#define I2C_HEADER_HIGHSPEED_MODE (1<<22) +#define I2C_HEADER_CONT_ON_NAK (1<<21) +#define I2C_HEADER_SEND_START_BYTE (1<<20) +#define I2C_HEADER_READ (1<<19) +#define I2C_HEADER_10BIT_ADDR (1<<18) +#define I2C_HEADER_IE_ENABLE (1<<17) +#define I2C_HEADER_REPEAT_START (1<<16) +#define I2C_HEADER_MASTER_ADDR_SHIFT 12 +#define I2C_HEADER_SLAVE_ADDR_SHIFT 1 + +/** + * struct tegra_i2c_dev - per device i2c context + * @dev: device reference for power management + * @adapter: core i2c layer adapter information + * @clk: clock reference for i2c controller + * @i2c_clk: clock reference for i2c bus + * @iomem: memory resource for registers + * @base: ioremapped registers cookie + * @cont_id: i2c controller id, used for the packet header + * @irq: irq number of transfer complete interrupt + * @is_dvc: identifies the DVC i2c controller, has a different register layout + * @msg_complete: transfer completion notifier + * @msg_err: error code for completed message + * @msg_buf: pointer to current message data + * @msg_buf_remaining: size of unsent data in the message buffer + * @msg_read: identifies read transfers + * @bus_clk_rate: current i2c bus clock rate + * @is_suspended: prevents i2c controller accesses after suspend is called + */ +struct tegra_i2c_dev { + struct device *dev; + struct i2c_adapter adapter; + struct clk *clk; + struct clk *i2c_clk; + struct resource *iomem; + void __iomem *base; + int cont_id; + int irq; + int is_dvc; + struct completion msg_complete; + int msg_err; + u8 *msg_buf; + size_t msg_buf_remaining; + int msg_read; + unsigned long bus_clk_rate; + bool is_suspended; +}; + +static void dvc_writel(struct tegra_i2c_dev *i2c_dev, u32 val, unsigned long reg) +{ + writel(val, i2c_dev->base + reg); +} + +static u32 dvc_readl(struct tegra_i2c_dev *i2c_dev, unsigned long reg) +{ + return readl(i2c_dev->base + reg); +} + +/* + * i2c_writel and i2c_readl will offset the register if necessary to talk + * to the I2C block inside the DVC block + */ +static unsigned long tegra_i2c_reg_addr(struct tegra_i2c_dev *i2c_dev, + unsigned long reg) +{ + if (i2c_dev->is_dvc) + reg += (reg >= I2C_TX_FIFO) ?
0x10 : 0x40; + return reg; +} + +static void i2c_writel(struct tegra_i2c_dev *i2c_dev, u32 val, + unsigned long reg) +{ + writel(val, i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg)); +} + +static u32 i2c_readl(struct tegra_i2c_dev *i2c_dev, unsigned long reg) +{ + return readl(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg)); +} + +static void i2c_writesl(struct tegra_i2c_dev *i2c_dev, void *data, + unsigned long reg, int len) +{ + writesl(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg), data, len); +} + +static void i2c_readsl(struct tegra_i2c_dev *i2c_dev, void *data, + unsigned long reg, int len) +{ + readsl(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg), data, len); +} + +static void tegra_i2c_mask_irq(struct tegra_i2c_dev *i2c_dev, u32 mask) +{ + u32 int_mask = i2c_readl(i2c_dev, I2C_INT_MASK); + int_mask &= ~mask; + i2c_writel(i2c_dev, int_mask, I2C_INT_MASK); +} + +static void tegra_i2c_unmask_irq(struct tegra_i2c_dev *i2c_dev, u32 mask) +{ + u32 int_mask = i2c_readl(i2c_dev, I2C_INT_MASK); + int_mask |= mask; + i2c_writel(i2c_dev, int_mask, I2C_INT_MASK); +} + +static int tegra_i2c_flush_fifos(struct tegra_i2c_dev *i2c_dev) +{ + unsigned long timeout = jiffies + HZ; + u32 val = i2c_readl(i2c_dev, I2C_FIFO_CONTROL); + val |= I2C_FIFO_CONTROL_TX_FLUSH | I2C_FIFO_CONTROL_RX_FLUSH; + i2c_writel(i2c_dev, val, I2C_FIFO_CONTROL); + + while (i2c_readl(i2c_dev, I2C_FIFO_CONTROL) & + (I2C_FIFO_CONTROL_TX_FLUSH | I2C_FIFO_CONTROL_RX_FLUSH)) { + if (time_after(jiffies, timeout)) { + dev_warn(i2c_dev->dev, "timeout waiting for fifo flush\n"); + return -ETIMEDOUT; + } + msleep(1); + } + return 0; +} + +static int tegra_i2c_empty_rx_fifo(struct tegra_i2c_dev *i2c_dev) +{ + u32 val; + int rx_fifo_avail; + u8 *buf = i2c_dev->msg_buf; + size_t buf_remaining = i2c_dev->msg_buf_remaining; + int words_to_transfer; + + val = i2c_readl(i2c_dev, I2C_FIFO_STATUS); + rx_fifo_avail = (val & I2C_FIFO_STATUS_RX_MASK) >> + I2C_FIFO_STATUS_RX_SHIFT; + + /* Rounds down to not include partial word at the end of buf */ + words_to_transfer = buf_remaining / BYTES_PER_FIFO_WORD; + if (words_to_transfer > rx_fifo_avail) + words_to_transfer = rx_fifo_avail; + + i2c_readsl(i2c_dev, buf, I2C_RX_FIFO, words_to_transfer); + + buf += words_to_transfer * BYTES_PER_FIFO_WORD; + buf_remaining -= words_to_transfer * BYTES_PER_FIFO_WORD; + rx_fifo_avail -= words_to_transfer; + + /* + * If there is a partial word at the end of buf, handle it manually to + * prevent overwriting past the end of buf + */ + if (rx_fifo_avail > 0 && buf_remaining > 0) { + BUG_ON(buf_remaining > 3); + val = i2c_readl(i2c_dev, I2C_RX_FIFO); + memcpy(buf, &val, buf_remaining); + buf_remaining = 0; + rx_fifo_avail--; + } + + BUG_ON(rx_fifo_avail > 0 && buf_remaining > 0); + i2c_dev->msg_buf_remaining = buf_remaining; + i2c_dev->msg_buf = buf; + return 0; +} + +static int tegra_i2c_fill_tx_fifo(struct tegra_i2c_dev *i2c_dev) +{ + u32 val; + int tx_fifo_avail; + u8 *buf = i2c_dev->msg_buf; + size_t buf_remaining = i2c_dev->msg_buf_remaining; + int words_to_transfer; + + val = i2c_readl(i2c_dev, I2C_FIFO_STATUS); + tx_fifo_avail = (val & I2C_FIFO_STATUS_TX_MASK) >> + I2C_FIFO_STATUS_TX_SHIFT; + + /* Rounds down to not include partial word at the end of buf */ + words_to_transfer = buf_remaining / BYTES_PER_FIFO_WORD; + if (words_to_transfer > tx_fifo_avail) + words_to_transfer = tx_fifo_avail; + + i2c_writesl(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer); + + buf += words_to_transfer * BYTES_PER_FIFO_WORD; + buf_remaining -= words_to_transfer * 
BYTES_PER_FIFO_WORD; + tx_fifo_avail -= words_to_transfer; + + /* + * If there is a partial word at the end of buf, handle it manually to + * prevent reading past the end of buf, which could cross a page + * boundary and fault. + */ + if (tx_fifo_avail > 0 && buf_remaining > 0) { + BUG_ON(buf_remaining > 3); + memcpy(&val, buf, buf_remaining); + i2c_writel(i2c_dev, val, I2C_TX_FIFO); + buf_remaining = 0; + tx_fifo_avail--; + } + + BUG_ON(tx_fifo_avail > 0 && buf_remaining > 0); + i2c_dev->msg_buf_remaining = buf_remaining; + i2c_dev->msg_buf = buf; + return 0; +} + +/* + * One of the Tegra I2C blocks is inside the DVC (Digital Voltage Controller) + * block. This block is identical to the rest of the I2C blocks, except that + * it only supports master mode, it has registers moved around, and it needs + * some extra init to get it into I2C mode. The register moves are handled + * by i2c_readl and i2c_writel. + */ +static void tegra_dvc_init(struct tegra_i2c_dev *i2c_dev) +{ + u32 val = 0; + val = dvc_readl(i2c_dev, DVC_CTRL_REG3); + val |= DVC_CTRL_REG3_SW_PROG; + val |= DVC_CTRL_REG3_I2C_DONE_INTR_EN; + dvc_writel(i2c_dev, val, DVC_CTRL_REG3); + + val = dvc_readl(i2c_dev, DVC_CTRL_REG1); + val |= DVC_CTRL_REG1_INTR_EN; + dvc_writel(i2c_dev, val, DVC_CTRL_REG1); +} + +static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev) +{ + u32 val; + int err = 0; + + clk_enable(i2c_dev->clk); + + tegra_periph_reset_assert(i2c_dev->clk); + udelay(2); + tegra_periph_reset_deassert(i2c_dev->clk); + + if (i2c_dev->is_dvc) + tegra_dvc_init(i2c_dev); + + val = I2C_CNFG_NEW_MASTER_FSM | I2C_CNFG_PACKET_MODE_EN; + i2c_writel(i2c_dev, val, I2C_CNFG); + i2c_writel(i2c_dev, 0, I2C_INT_MASK); + clk_set_rate(i2c_dev->clk, i2c_dev->bus_clk_rate * 8); + + val = 7 << I2C_FIFO_CONTROL_TX_TRIG_SHIFT | + 0 << I2C_FIFO_CONTROL_RX_TRIG_SHIFT; + i2c_writel(i2c_dev, val, I2C_FIFO_CONTROL); + + if (tegra_i2c_flush_fifos(i2c_dev)) + err = -ETIMEDOUT; + + clk_disable(i2c_dev->clk); + return err; +} + +static irqreturn_t tegra_i2c_isr(int irq, void *dev_id) +{ + u32 status; + const u32 status_err = I2C_INT_NO_ACK | I2C_INT_ARBITRATION_LOST; + struct tegra_i2c_dev *i2c_dev = dev_id; + + status = i2c_readl(i2c_dev, I2C_INT_STATUS); + + if (status == 0) { + dev_warn(i2c_dev->dev, "interrupt with no status\n"); + return IRQ_NONE; + } + + if (unlikely(status & status_err)) { + if (status & I2C_INT_NO_ACK) + i2c_dev->msg_err |= I2C_ERR_NO_ACK; + if (status & I2C_INT_ARBITRATION_LOST) + i2c_dev->msg_err |= I2C_ERR_ARBITRATION_LOST; + complete(&i2c_dev->msg_complete); + goto err; + } + + if (i2c_dev->msg_read && (status & I2C_INT_RX_FIFO_DATA_REQ)) { + if (i2c_dev->msg_buf_remaining) + tegra_i2c_empty_rx_fifo(i2c_dev); + else + BUG(); + } + + if (!i2c_dev->msg_read && (status & I2C_INT_TX_FIFO_DATA_REQ)) { + if (i2c_dev->msg_buf_remaining) + tegra_i2c_fill_tx_fifo(i2c_dev); + else + tegra_i2c_mask_irq(i2c_dev, I2C_INT_TX_FIFO_DATA_REQ); + } + + if ((status & I2C_INT_PACKET_XFER_COMPLETE) && + !i2c_dev->msg_buf_remaining) + complete(&i2c_dev->msg_complete); + + i2c_writel(i2c_dev, status, I2C_INT_STATUS); + if (i2c_dev->is_dvc) + dvc_writel(i2c_dev, DVC_STATUS_I2C_DONE_INTR, DVC_STATUS); + return IRQ_HANDLED; +err: + /* An error occurred, mask all interrupts */ + tegra_i2c_mask_irq(i2c_dev, I2C_INT_NO_ACK | I2C_INT_ARBITRATION_LOST | + I2C_INT_PACKET_XFER_COMPLETE | I2C_INT_TX_FIFO_DATA_REQ | + I2C_INT_RX_FIFO_DATA_REQ); + i2c_writel(i2c_dev, status, I2C_INT_STATUS); + return IRQ_HANDLED; +} + +static int tegra_i2c_xfer_msg(struct
tegra_i2c_dev *i2c_dev, + struct i2c_msg *msg, int stop) +{ + u32 packet_header; + u32 int_mask; + int ret; + + tegra_i2c_flush_fifos(i2c_dev); + i2c_writel(i2c_dev, 0xFF, I2C_INT_STATUS); + + if (msg->len == 0) + return -EINVAL; + + i2c_dev->msg_buf = msg->buf; + i2c_dev->msg_buf_remaining = msg->len; + i2c_dev->msg_err = I2C_ERR_NONE; + i2c_dev->msg_read = (msg->flags & I2C_M_RD); + INIT_COMPLETION(i2c_dev->msg_complete); + + packet_header = (0 << PACKET_HEADER0_HEADER_SIZE_SHIFT) | + PACKET_HEADER0_PROTOCOL_I2C | + (i2c_dev->cont_id << PACKET_HEADER0_CONT_ID_SHIFT) | + (1 << PACKET_HEADER0_PACKET_ID_SHIFT); + i2c_writel(i2c_dev, packet_header, I2C_TX_FIFO); + + packet_header = msg->len - 1; + i2c_writel(i2c_dev, packet_header, I2C_TX_FIFO); + + packet_header = msg->addr << I2C_HEADER_SLAVE_ADDR_SHIFT; + packet_header |= I2C_HEADER_IE_ENABLE; + if (msg->flags & I2C_M_TEN) + packet_header |= I2C_HEADER_10BIT_ADDR; + if (msg->flags & I2C_M_IGNORE_NAK) + packet_header |= I2C_HEADER_CONT_ON_NAK; + if (msg->flags & I2C_M_NOSTART) + packet_header |= I2C_HEADER_REPEAT_START; + if (msg->flags & I2C_M_RD) + packet_header |= I2C_HEADER_READ; + i2c_writel(i2c_dev, packet_header, I2C_TX_FIFO); + + if (!(msg->flags & I2C_M_RD)) + tegra_i2c_fill_tx_fifo(i2c_dev); + + int_mask = I2C_INT_NO_ACK | I2C_INT_ARBITRATION_LOST; + if (msg->flags & I2C_M_RD) + int_mask |= I2C_INT_RX_FIFO_DATA_REQ; + else if (i2c_dev->msg_buf_remaining) + int_mask |= I2C_INT_TX_FIFO_DATA_REQ; + tegra_i2c_unmask_irq(i2c_dev, int_mask); + dev_dbg(i2c_dev->dev, "unmasked irq: %02x\n", + i2c_readl(i2c_dev, I2C_INT_MASK)); + + ret = wait_for_completion_timeout(&i2c_dev->msg_complete, TEGRA_I2C_TIMEOUT); + tegra_i2c_mask_irq(i2c_dev, int_mask); + + if (WARN_ON(ret == 0)) { + dev_err(i2c_dev->dev, "i2c transfer timed out\n"); + + tegra_i2c_init(i2c_dev); + return -ETIMEDOUT; + } + + dev_dbg(i2c_dev->dev, "transfer complete: %d %d %d\n", + ret, completion_done(&i2c_dev->msg_complete), i2c_dev->msg_err); + + if (likely(i2c_dev->msg_err == I2C_ERR_NONE)) + return 0; + + tegra_i2c_init(i2c_dev); + if (i2c_dev->msg_err == I2C_ERR_NO_ACK) { + if (msg->flags & I2C_M_IGNORE_NAK) + return 0; + return -EREMOTEIO; + } + + return -EIO; +} + +static int tegra_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], + int num) +{ + struct tegra_i2c_dev *i2c_dev = i2c_get_adapdata(adap); + int i; + int ret = 0; + + if (i2c_dev->is_suspended) + return -EBUSY; + + clk_enable(i2c_dev->clk); + for (i = 0; i < num; i++) { + int stop = (i == (num - 1)) ? 
1 : 0; + ret = tegra_i2c_xfer_msg(i2c_dev, &msgs[i], stop); + if (ret) + break; + } + clk_disable(i2c_dev->clk); + return ret ?: i; +} + +static u32 tegra_i2c_func(struct i2c_adapter *adap) +{ + return I2C_FUNC_I2C; +} + +static const struct i2c_algorithm tegra_i2c_algo = { + .master_xfer = tegra_i2c_xfer, + .functionality = tegra_i2c_func, +}; + +static int tegra_i2c_probe(struct platform_device *pdev) +{ + struct tegra_i2c_dev *i2c_dev; + struct tegra_i2c_platform_data *pdata = pdev->dev.platform_data; + struct resource *res; + struct resource *iomem; + struct clk *clk; + struct clk *i2c_clk; + void *base; + int irq; + int ret = 0; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(&pdev->dev, "no mem resource\n"); + return -EINVAL; + } + iomem = request_mem_region(res->start, resource_size(res), pdev->name); + if (!iomem) { + dev_err(&pdev->dev, "I2C region already claimed\n"); + return -EBUSY; + } + + base = ioremap(iomem->start, resource_size(iomem)); + if (!base) { + dev_err(&pdev->dev, "Cannot ioremap I2C region\n"); + return -ENOMEM; + } + + res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!res) { + dev_err(&pdev->dev, "no irq resource\n"); + ret = -EINVAL; + goto err_iounmap; + } + irq = res->start; + + clk = clk_get(&pdev->dev, NULL); + if (IS_ERR(clk)) { + dev_err(&pdev->dev, "missing controller clock"); + ret = PTR_ERR(clk); + goto err_release_region; + } + + i2c_clk = clk_get(&pdev->dev, "i2c"); + if (IS_ERR(i2c_clk)) { + dev_err(&pdev->dev, "missing bus clock"); + ret = PTR_ERR(i2c_clk); + goto err_clk_put; + } + + i2c_dev = kzalloc(sizeof(struct tegra_i2c_dev), GFP_KERNEL); + if (!i2c_dev) { + ret = -ENOMEM; + goto err_i2c_clk_put; + } + + i2c_dev->base = base; + i2c_dev->clk = clk; + i2c_dev->i2c_clk = i2c_clk; + i2c_dev->iomem = iomem; + i2c_dev->adapter.algo = &tegra_i2c_algo; + i2c_dev->irq = irq; + i2c_dev->cont_id = pdev->id; + i2c_dev->dev = &pdev->dev; + i2c_dev->bus_clk_rate = pdata ? 
pdata->bus_clk_rate : 100000; + + if (pdev->id == 3) + i2c_dev->is_dvc = 1; + init_completion(&i2c_dev->msg_complete); + + platform_set_drvdata(pdev, i2c_dev); + + ret = tegra_i2c_init(i2c_dev); + if (ret) { + dev_err(&pdev->dev, "Failed to initialize i2c controller"); + goto err_free; + } + + ret = request_irq(i2c_dev->irq, tegra_i2c_isr, 0, pdev->name, i2c_dev); + if (ret) { + dev_err(&pdev->dev, "Failed to request irq %i\n", i2c_dev->irq); + goto err_free; + } + + clk_enable(i2c_dev->i2c_clk); + + i2c_set_adapdata(&i2c_dev->adapter, i2c_dev); + i2c_dev->adapter.owner = THIS_MODULE; + i2c_dev->adapter.class = I2C_CLASS_HWMON; + strlcpy(i2c_dev->adapter.name, "Tegra I2C adapter", + sizeof(i2c_dev->adapter.name)); + i2c_dev->adapter.algo = &tegra_i2c_algo; + i2c_dev->adapter.dev.parent = &pdev->dev; + i2c_dev->adapter.nr = pdev->id; + + ret = i2c_add_numbered_adapter(&i2c_dev->adapter); + if (ret) { + dev_err(&pdev->dev, "Failed to add I2C adapter\n"); + goto err_free_irq; + } + + return 0; +err_free_irq: + free_irq(i2c_dev->irq, i2c_dev); +err_free: + kfree(i2c_dev); +err_i2c_clk_put: + clk_put(i2c_clk); +err_clk_put: + clk_put(clk); +err_release_region: + release_mem_region(iomem->start, resource_size(iomem)); +err_iounmap: + iounmap(base); + return ret; +} + +static int tegra_i2c_remove(struct platform_device *pdev) +{ + struct tegra_i2c_dev *i2c_dev = platform_get_drvdata(pdev); + i2c_del_adapter(&i2c_dev->adapter); + free_irq(i2c_dev->irq, i2c_dev); + clk_put(i2c_dev->i2c_clk); + clk_put(i2c_dev->clk); + release_mem_region(i2c_dev->iomem->start, + resource_size(i2c_dev->iomem)); + iounmap(i2c_dev->base); + kfree(i2c_dev); + return 0; +} + +#ifdef CONFIG_PM +static int tegra_i2c_suspend(struct platform_device *pdev, pm_message_t state) +{ + struct tegra_i2c_dev *i2c_dev = platform_get_drvdata(pdev); + + i2c_lock_adapter(&i2c_dev->adapter); + i2c_dev->is_suspended = true; + i2c_unlock_adapter(&i2c_dev->adapter); + + return 0; +} + +static int tegra_i2c_resume(struct platform_device *pdev) +{ + struct tegra_i2c_dev *i2c_dev = platform_get_drvdata(pdev); + int ret; + + i2c_lock_adapter(&i2c_dev->adapter); + + ret = tegra_i2c_init(i2c_dev); + + if (ret) { + i2c_unlock_adapter(&i2c_dev->adapter); + return ret; + } + + i2c_dev->is_suspended = false; + + i2c_unlock_adapter(&i2c_dev->adapter); + + return 0; +} +#endif + +static struct platform_driver tegra_i2c_driver = { + .probe = tegra_i2c_probe, + .remove = tegra_i2c_remove, +#ifdef CONFIG_PM + .suspend = tegra_i2c_suspend, + .resume = tegra_i2c_resume, +#endif + .driver = { + .name = "tegra-i2c", + .owner = THIS_MODULE, + }, +}; + +static int __init tegra_i2c_init_driver(void) +{ + return platform_driver_register(&tegra_i2c_driver); +} + +static void __exit tegra_i2c_exit_driver(void) +{ + platform_driver_unregister(&tegra_i2c_driver); +} + +subsys_initcall(tegra_i2c_init_driver); +module_exit(tegra_i2c_exit_driver); + +MODULE_DESCRIPTION("nVidia Tegra2 I2C Bus Controller driver"); +MODULE_AUTHOR("Colin Cross"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/infiniband/hw/ipath/ipath_user_pages.c b/drivers/infiniband/hw/ipath/ipath_user_pages.c index bab9f74c0665..cfed5399f074 100644 --- a/drivers/infiniband/hw/ipath/ipath_user_pages.c +++ b/drivers/infiniband/hw/ipath/ipath_user_pages.c @@ -53,8 +53,8 @@ static void __ipath_release_user_pages(struct page **p, size_t num_pages, } /* call with current->mm->mmap_sem held */ -static int __get_user_pages(unsigned long start_page, size_t num_pages, - struct page **p, struct vm_area_struct 
**vma) +static int __ipath_get_user_pages(unsigned long start_page, size_t num_pages, + struct page **p, struct vm_area_struct **vma) { unsigned long lock_limit; size_t got; @@ -165,7 +165,7 @@ int ipath_get_user_pages(unsigned long start_page, size_t num_pages, down_write(&current->mm->mmap_sem); - ret = __get_user_pages(start_page, num_pages, p, NULL); + ret = __ipath_get_user_pages(start_page, num_pages, p, NULL); up_write(&current->mm->mmap_sem); diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c index d7a26c1d4f37..7689e49c13c9 100644 --- a/drivers/infiniband/hw/qib/qib_user_pages.c +++ b/drivers/infiniband/hw/qib/qib_user_pages.c @@ -51,8 +51,8 @@ static void __qib_release_user_pages(struct page **p, size_t num_pages, /* * Call with current->mm->mmap_sem held. */ -static int __get_user_pages(unsigned long start_page, size_t num_pages, - struct page **p, struct vm_area_struct **vma) +static int __qib_get_user_pages(unsigned long start_page, size_t num_pages, + struct page **p, struct vm_area_struct **vma) { unsigned long lock_limit; size_t got; @@ -136,7 +136,7 @@ int qib_get_user_pages(unsigned long start_page, size_t num_pages, down_write(&current->mm->mmap_sem); - ret = __get_user_pages(start_page, num_pages, p, NULL); + ret = __qib_get_user_pages(start_page, num_pages, p, NULL); up_write(&current->mm->mmap_sem); diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c index 7b2fc98e2f2b..8db008de5392 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.c +++ b/drivers/infiniband/ulp/iser/iscsi_iser.c @@ -532,6 +532,25 @@ iscsi_iser_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *s stats->custom[3].value = conn->fmr_unalign_cnt; } +static int iscsi_iser_get_ep_param(struct iscsi_endpoint *ep, + enum iscsi_param param, char *buf) +{ + struct iser_conn *ib_conn = ep->dd_data; + + switch (param) { + case ISCSI_PARAM_CONN_PORT: + case ISCSI_PARAM_CONN_ADDRESS: + if (!ib_conn || !ib_conn->cma_id) + return -ENOTCONN; + + return iscsi_conn_get_addr_param((struct sockaddr_storage *) + &ib_conn->cma_id->route.addr.dst_addr, + param, buf); + default: + return -ENOSYS; + } +} + static struct iscsi_endpoint * iscsi_iser_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr, int non_blocking) @@ -637,6 +660,8 @@ static struct iscsi_transport iscsi_iser_transport = { ISCSI_MAX_BURST | ISCSI_PDU_INORDER_EN | ISCSI_DATASEQ_INORDER_EN | + ISCSI_CONN_PORT | + ISCSI_CONN_ADDRESS | ISCSI_EXP_STATSN | ISCSI_PERSISTENT_PORT | ISCSI_PERSISTENT_ADDRESS | @@ -659,6 +684,7 @@ static struct iscsi_transport iscsi_iser_transport = { .destroy_conn = iscsi_iser_conn_destroy, .set_param = iscsi_iser_set_param, .get_conn_param = iscsi_conn_get_param, + .get_ep_param = iscsi_iser_get_ep_param, .get_session_param = iscsi_session_get_param, .start_conn = iscsi_iser_conn_start, .stop_conn = iscsi_iser_conn_stop, diff --git a/drivers/input/serio/ambakmi.c b/drivers/input/serio/ambakmi.c index 92563a681d65..12abc50508e5 100644 --- a/drivers/input/serio/ambakmi.c +++ b/drivers/input/serio/ambakmi.c @@ -107,7 +107,8 @@ static void amba_kmi_close(struct serio *io) clk_disable(kmi->clk); } -static int __devinit amba_kmi_probe(struct amba_device *dev, struct amba_id *id) +static int __devinit amba_kmi_probe(struct amba_device *dev, + const struct amba_id *id) { struct amba_kmi_port *kmi; struct serio *io; diff --git a/drivers/isdn/mISDN/hwchannel.c b/drivers/isdn/mISDN/hwchannel.c index
diff --git a/drivers/isdn/mISDN/hwchannel.c b/drivers/isdn/mISDN/hwchannel.c index 199f374cf9da..f6e108d0125f 100644 --- a/drivers/isdn/mISDN/hwchannel.c +++ b/drivers/isdn/mISDN/hwchannel.c @@ -206,7 +206,7 @@ recv_Bchannel(struct bchannel *bch, unsigned int id) hh->id = id; if (bch->rcount >= 64) { printk(KERN_WARNING "B-channel %p receive queue overflow, " - "fushing!\n", bch); + "flushing!\n", bch); skb_queue_purge(&bch->rqueue); bch->rcount = 0; return; @@ -231,7 +231,7 @@ recv_Bchannel_skb(struct bchannel *bch, struct sk_buff *skb) { if (bch->rcount >= 64) { printk(KERN_WARNING "B-channel %p receive queue overflow, " - "fushing!\n", bch); + "flushing!\n", bch); skb_queue_purge(&bch->rqueue); bch->rcount = 0; } @@ -279,7 +279,7 @@ confirm_Bsend(struct bchannel *bch) if (bch->rcount >= 64) { printk(KERN_WARNING "B-channel %p receive queue overflow, " - "fushing!\n", bch); + "flushing!\n", bch); skb_queue_purge(&bch->rqueue); bch->rcount = 0; } diff --git a/drivers/macintosh/rack-meter.c b/drivers/macintosh/rack-meter.c index 39f660b2a60d..2637c139777b 100644 --- a/drivers/macintosh/rack-meter.c +++ b/drivers/macintosh/rack-meter.c @@ -283,7 +283,7 @@ static void __devinit rackmeter_init_cpu_sniffer(struct rackmeter *rm) } } -static void __devexit rackmeter_stop_cpu_sniffer(struct rackmeter *rm) +static void rackmeter_stop_cpu_sniffer(struct rackmeter *rm) { cancel_delayed_work_sync(&rm->cpu[0].sniffer); cancel_delayed_work_sync(&rm->cpu[1].sniffer); diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index b82d28819e2a..4b0b63c290a6 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -1283,24 +1283,22 @@ static int do_end_io(struct multipath *m, struct request *clone, if (!error && !clone->errors) return 0; /* I/O complete */ - if (error == -EOPNOTSUPP) - return error; - - if (clone->cmd_flags & REQ_DISCARD) - /* - * Pass all discard request failures up. - * FIXME: only fail_path if the discard failed due to a - * transport problem. This requires precise understanding - * of the underlying failure (e.g. the SCSI sense).
- */ + if (error == -EOPNOTSUPP || error == -EREMOTEIO) return error; if (mpio->pgpath) fail_path(mpio->pgpath); spin_lock_irqsave(&m->lock, flags); - if (!m->nr_valid_paths && !m->queue_if_no_path && !__must_push_back(m)) - r = -EIO; + if (!m->nr_valid_paths) { + if (!m->queue_if_no_path) { + if (!__must_push_back(m)) + r = -EIO; + } else { + if (error == -EBADE) + r = error; + } + } spin_unlock_irqrestore(&m->lock, flags); return r; diff --git a/drivers/message/fusion/lsi/mpi_cnfg.h b/drivers/message/fusion/lsi/mpi_cnfg.h index 013c7d881948..22027e7946f7 100644 --- a/drivers/message/fusion/lsi/mpi_cnfg.h +++ b/drivers/message/fusion/lsi/mpi_cnfg.h @@ -2593,6 +2593,7 @@ typedef struct _CONFIG_PAGE_SAS_IO_UNIT_0 #define MPI_SAS_IOUNIT0_RATE_SATA_OOB_COMPLETE (0x03) #define MPI_SAS_IOUNIT0_RATE_1_5 (0x08) #define MPI_SAS_IOUNIT0_RATE_3_0 (0x09) +#define MPI_SAS_IOUNIT0_RATE_6_0 (0x0A) /* see mpi_sas.h for values for SAS IO Unit Page 0 ControllerPhyDeviceInfo values */ diff --git a/drivers/message/fusion/lsi/mpi_ioc.h b/drivers/message/fusion/lsi/mpi_ioc.h index 8faa4fab7b89..fd6222882a0e 100644 --- a/drivers/message/fusion/lsi/mpi_ioc.h +++ b/drivers/message/fusion/lsi/mpi_ioc.h @@ -841,6 +841,7 @@ typedef struct _EVENT_DATA_SAS_PHY_LINK_STATUS #define MPI_EVENT_SAS_PLS_LR_RATE_SATA_OOB_COMPLETE (0x03) #define MPI_EVENT_SAS_PLS_LR_RATE_1_5 (0x08) #define MPI_EVENT_SAS_PLS_LR_RATE_3_0 (0x09) +#define MPI_EVENT_SAS_PLS_LR_RATE_6_0 (0x0A) /* SAS Discovery Event data */ diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c index 3358c0af3466..ec8080c98081 100644 --- a/drivers/message/fusion/mptbase.c +++ b/drivers/message/fusion/mptbase.c @@ -7418,7 +7418,12 @@ mpt_display_event_info(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply) case MPI_EVENT_SAS_PLS_LR_RATE_3_0: snprintf(evStr, EVENT_DESCR_STR_SZ, "SAS PHY Link Status: Phy=%d:" - " Rate 3.0 Gpbs",PhyNumber); + " Rate 3.0 Gbps", PhyNumber); + break; + case MPI_EVENT_SAS_PLS_LR_RATE_6_0: + snprintf(evStr, EVENT_DESCR_STR_SZ, + "SAS PHY Link Status: Phy=%d:" + " Rate 6.0 Gbps", PhyNumber); break; default: snprintf(evStr, EVENT_DESCR_STR_SZ, diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c index e8deb8ed0499..878bda0cce70 100644 --- a/drivers/message/fusion/mptctl.c +++ b/drivers/message/fusion/mptctl.c @@ -1314,8 +1314,10 @@ mptctl_getiocinfo (unsigned long arg, unsigned int data_size) else karg->adapterType = MPT_IOCTL_INTERFACE_SCSI; - if (karg->hdr.port > 1) + if (karg->hdr.port > 1) { + kfree(karg); return -EINVAL; + } port = karg->hdr.port; karg->port = port; diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c index 8aefb1829fcd..f5a14afad2cd 100644 --- a/drivers/message/fusion/mptsas.c +++ b/drivers/message/fusion/mptsas.c @@ -1973,7 +1973,6 @@ static struct scsi_host_template mptsas_driver_template = { .change_queue_depth = mptscsih_change_queue_depth, .eh_abort_handler = mptscsih_abort, .eh_device_reset_handler = mptscsih_dev_reset, - .eh_bus_reset_handler = mptscsih_bus_reset, .eh_host_reset_handler = mptscsih_host_reset, .bios_param = mptscsih_bios_param, .can_queue = MPT_SAS_CAN_QUEUE, @@ -3063,6 +3062,9 @@ static int mptsas_probe_one_phy(struct device *dev, case MPI_SAS_IOUNIT0_RATE_3_0: phy->negotiated_linkrate = SAS_LINK_RATE_3_0_GBPS; break; + case MPI_SAS_IOUNIT0_RATE_6_0: + phy->negotiated_linkrate = SAS_LINK_RATE_6_0_GBPS; + break; case MPI_SAS_IOUNIT0_RATE_SATA_OOB_COMPLETE: case MPI_SAS_IOUNIT0_RATE_UNKNOWN: default: @@ -3691,7 
+3693,8 @@ mptsas_send_link_status_event(struct fw_event_work *fw_event) } if (link_rate == MPI_SAS_IOUNIT0_RATE_1_5 || - link_rate == MPI_SAS_IOUNIT0_RATE_3_0) { + link_rate == MPI_SAS_IOUNIT0_RATE_3_0 || + link_rate == MPI_SAS_IOUNIT0_RATE_6_0) { if (!port_info) { if (ioc->old_sas_discovery_protocal) { diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c index 7d3cc575c361..098de2b35784 100644 --- a/drivers/message/i2o/i2o_config.c +++ b/drivers/message/i2o/i2o_config.c @@ -1044,8 +1044,7 @@ static long i2o_cfg_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) static int cfg_open(struct inode *inode, struct file *file) { - struct i2o_cfg_info *tmp = - (struct i2o_cfg_info *)kmalloc(sizeof(struct i2o_cfg_info), + struct i2o_cfg_info *tmp = kmalloc(sizeof(struct i2o_cfg_info), GFP_KERNEL); unsigned long flags; diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index a6dfa37a674d..fdca643249e1 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -81,6 +81,17 @@ config MFD_DM355EVM_MSP boards. MSP430 firmware manages resets and power sequencing, inputs from buttons and the IR remote, LEDs, an RTC, and more. +config MFD_TI_SSP + tristate "TI Sequencer Serial Port support" + depends on ARCH_DAVINCI_TNETV107X + select MFD_CORE + ---help--- + Say Y here if you want support for the Sequencer Serial Port + in a Texas Instruments TNETV107X SoC. + + To compile this driver as a module, choose M here: the + module will be called ti-ssp. + config HTC_EGPIO bool "HTC EGPIO support" depends on GENERIC_HARDIRQS && GPIOLIB && ARM diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile index 91fe384459ab..f0e25cad762e 100644 --- a/drivers/mfd/Makefile +++ b/drivers/mfd/Makefile @@ -14,6 +14,7 @@ obj-$(CONFIG_HTC_I2CPLD) += htc-i2cpld.o obj-$(CONFIG_MFD_DAVINCI_VOICECODEC) += davinci_voicecodec.o obj-$(CONFIG_MFD_DM355EVM_MSP) += dm355evm_msp.o +obj-$(CONFIG_MFD_TI_SSP) += ti-ssp.o obj-$(CONFIG_MFD_STMPE) += stmpe.o obj-$(CONFIG_MFD_TC3589X) += tc3589x.o diff --git a/drivers/mfd/ti-ssp.c b/drivers/mfd/ti-ssp.c new file mode 100644 index 000000000000..af9ab0e5ca64 --- /dev/null +++ b/drivers/mfd/ti-ssp.c @@ -0,0 +1,476 @@ +/* + * Sequencer Serial Port (SSP) driver for Texas Instruments' SoCs + * + * Copyright (C) 2010 Texas Instruments Inc + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/err.h> +#include <linux/init.h> +#include <linux/wait.h> +#include <linux/clk.h> +#include <linux/interrupt.h> +#include <linux/device.h> +#include <linux/spinlock.h> +#include <linux/platform_device.h> +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/mfd/core.h> +#include <linux/mfd/ti_ssp.h> + +/* Register Offsets */ +#define REG_REV 0x00 +#define REG_IOSEL_1 0x04 +#define REG_IOSEL_2 0x08 +#define REG_PREDIV 0x0c +#define REG_INTR_ST 0x10 +#define REG_INTR_EN 0x14 +#define REG_TEST_CTRL 0x18 + +/* Per port registers */ +#define PORT_CFG_2 0x00 +#define PORT_ADDR 0x04 +#define PORT_DATA 0x08 +#define PORT_CFG_1 0x0c +#define PORT_STATE 0x10 + +#define SSP_PORT_CONFIG_MASK (SSP_EARLY_DIN | SSP_DELAY_DOUT) +#define SSP_PORT_CLKRATE_MASK 0x0f + +#define SSP_SEQRAM_WR_EN BIT(4) +#define SSP_SEQRAM_RD_EN BIT(5) +#define SSP_START BIT(15) +#define SSP_BUSY BIT(10) +#define SSP_PORT_ASL BIT(7) +#define SSP_PORT_CFO1 BIT(6) + +#define SSP_PORT_SEQRAM_SIZE 32 + +static const int ssp_port_base[] = {0x040, 0x080}; +static const int ssp_port_seqram[] = {0x100, 0x180}; + +struct ti_ssp { + struct resource *res; + struct device *dev; + void __iomem *regs; + spinlock_t lock; + struct clk *clk; + int irq; + wait_queue_head_t wqh; + + /* + * Some of the iosel2 register bits always read-back as 0, we need to + * remember these values so that we don't clobber previously set + * values. + */ + u32 iosel2; +}; + +static inline struct ti_ssp *dev_to_ssp(struct device *dev) +{ + return dev_get_drvdata(dev->parent); +} + +static inline int dev_to_port(struct device *dev) +{ + return to_platform_device(dev)->id; +} + +/* Register Access Helpers, rmw() functions need to run locked */ +static inline u32 ssp_read(struct ti_ssp *ssp, int reg) +{ + return __raw_readl(ssp->regs + reg); +} + +static inline void ssp_write(struct ti_ssp *ssp, int reg, u32 val) +{ + __raw_writel(val, ssp->regs + reg); +} + +static inline void ssp_rmw(struct ti_ssp *ssp, int reg, u32 mask, u32 bits) +{ + ssp_write(ssp, reg, (ssp_read(ssp, reg) & ~mask) | bits); +} + +static inline u32 ssp_port_read(struct ti_ssp *ssp, int port, int reg) +{ + return ssp_read(ssp, ssp_port_base[port] + reg); +} + +static inline void ssp_port_write(struct ti_ssp *ssp, int port, int reg, + u32 val) +{ + ssp_write(ssp, ssp_port_base[port] + reg, val); +} + +static inline void ssp_port_rmw(struct ti_ssp *ssp, int port, int reg, + u32 mask, u32 bits) +{ + ssp_rmw(ssp, ssp_port_base[port] + reg, mask, bits); +} + +static inline void ssp_port_clr_bits(struct ti_ssp *ssp, int port, int reg, + u32 bits) +{ + ssp_port_rmw(ssp, port, reg, bits, 0); +} + +static inline void ssp_port_set_bits(struct ti_ssp *ssp, int port, int reg, + u32 bits) +{ + ssp_port_rmw(ssp, port, reg, 0, bits); +} + +/* Called to setup port clock mode, caller must hold ssp->lock */ +static int __set_mode(struct ti_ssp *ssp, int port, int mode) +{ + mode &= SSP_PORT_CONFIG_MASK; + ssp_port_rmw(ssp, port, PORT_CFG_1, SSP_PORT_CONFIG_MASK, mode); + + return 0; +} + +int ti_ssp_set_mode(struct device *dev, int mode) +{ + struct ti_ssp *ssp = dev_to_ssp(dev); + int port = dev_to_port(dev); + int ret; + + spin_lock(&ssp->lock); + ret = 
__set_mode(ssp, port, mode); + spin_unlock(&ssp->lock); + + return ret; +} +EXPORT_SYMBOL(ti_ssp_set_mode); + +/* Called to setup iosel2, caller must hold ssp->lock */ +static void __set_iosel2(struct ti_ssp *ssp, u32 mask, u32 val) +{ + ssp->iosel2 = (ssp->iosel2 & ~mask) | val; + ssp_write(ssp, REG_IOSEL_2, ssp->iosel2); +} + +/* Called to setup port iosel, caller must hold ssp->lock */ +static void __set_iosel(struct ti_ssp *ssp, int port, u32 iosel) +{ + unsigned val, shift = port ? 16 : 0; + + /* IOSEL1 gets the least significant 16 bits */ + val = ssp_read(ssp, REG_IOSEL_1); + val &= 0xffff << (port ? 0 : 16); + val |= (iosel & 0xffff) << (port ? 16 : 0); + ssp_write(ssp, REG_IOSEL_1, val); + + /* IOSEL2 gets the most significant 16 bits */ + val = (iosel >> 16) & 0x7; + __set_iosel2(ssp, 0x7 << shift, val << shift); +} + +int ti_ssp_set_iosel(struct device *dev, u32 iosel) +{ + struct ti_ssp *ssp = dev_to_ssp(dev); + int port = dev_to_port(dev); + + spin_lock(&ssp->lock); + __set_iosel(ssp, port, iosel); + spin_unlock(&ssp->lock); + + return 0; +} +EXPORT_SYMBOL(ti_ssp_set_iosel); + +int ti_ssp_load(struct device *dev, int offs, u32* prog, int len) +{ + struct ti_ssp *ssp = dev_to_ssp(dev); + int port = dev_to_port(dev); + int i; + + if (len > SSP_PORT_SEQRAM_SIZE) + return -ENOSPC; + + spin_lock(&ssp->lock); + + /* Enable SeqRAM access */ + ssp_port_set_bits(ssp, port, PORT_CFG_2, SSP_SEQRAM_WR_EN); + + /* Copy code */ + for (i = 0; i < len; i++) { + __raw_writel(prog[i], ssp->regs + offs + 4*i + + ssp_port_seqram[port]); + } + + /* Disable SeqRAM access */ + ssp_port_clr_bits(ssp, port, PORT_CFG_2, SSP_SEQRAM_WR_EN); + + spin_unlock(&ssp->lock); + + return 0; +} +EXPORT_SYMBOL(ti_ssp_load); + +int ti_ssp_raw_read(struct device *dev) +{ + struct ti_ssp *ssp = dev_to_ssp(dev); + int port = dev_to_port(dev); + int shift = port ? 27 : 11; + + return (ssp_read(ssp, REG_IOSEL_2) >> shift) & 0xf; +} +EXPORT_SYMBOL(ti_ssp_raw_read); + +int ti_ssp_raw_write(struct device *dev, u32 val) +{ + struct ti_ssp *ssp = dev_to_ssp(dev); + int port = dev_to_port(dev), shift; + + spin_lock(&ssp->lock); + + shift = port ? 
22 : 6; + val &= 0xf; + __set_iosel2(ssp, 0xf << shift, val << shift); + + spin_unlock(&ssp->lock); + + return 0; +} +EXPORT_SYMBOL(ti_ssp_raw_write); + +static inline int __xfer_done(struct ti_ssp *ssp, int port) +{ + return !(ssp_port_read(ssp, port, PORT_CFG_1) & SSP_BUSY); +} + +int ti_ssp_run(struct device *dev, u32 pc, u32 input, u32 *output) +{ + struct ti_ssp *ssp = dev_to_ssp(dev); + int port = dev_to_port(dev); + int ret; + + if (pc & ~(0x3f)) + return -EINVAL; + + /* Grab ssp->lock to serialize rmw on ssp registers */ + spin_lock(&ssp->lock); + + ssp_port_write(ssp, port, PORT_ADDR, input >> 16); + ssp_port_write(ssp, port, PORT_DATA, input & 0xffff); + ssp_port_rmw(ssp, port, PORT_CFG_1, 0x3f, pc); + + /* grab wait queue head lock to avoid race with the isr */ + spin_lock_irq(&ssp->wqh.lock); + + /* kick off sequence execution in hardware */ + ssp_port_set_bits(ssp, port, PORT_CFG_1, SSP_START); + + /* drop ssp lock; no register writes beyond this */ + spin_unlock(&ssp->lock); + + ret = wait_event_interruptible_locked_irq(ssp->wqh, + __xfer_done(ssp, port)); + spin_unlock_irq(&ssp->wqh.lock); + + if (ret < 0) + return ret; + + if (output) { + *output = (ssp_port_read(ssp, port, PORT_ADDR) << 16) | + (ssp_port_read(ssp, port, PORT_DATA) & 0xffff); + } + + ret = ssp_port_read(ssp, port, PORT_STATE) & 0x3f; /* stop address */ + + return ret; +} +EXPORT_SYMBOL(ti_ssp_run); + +static irqreturn_t ti_ssp_interrupt(int irq, void *dev_data) +{ + struct ti_ssp *ssp = dev_data; + + spin_lock(&ssp->wqh.lock); + + ssp_write(ssp, REG_INTR_ST, 0x3); + wake_up_locked(&ssp->wqh); + + spin_unlock(&ssp->wqh.lock); + + return IRQ_HANDLED; +} + +static int __devinit ti_ssp_probe(struct platform_device *pdev) +{ + static struct ti_ssp *ssp; + const struct ti_ssp_data *pdata = pdev->dev.platform_data; + int error = 0, prediv = 0xff, id; + unsigned long sysclk; + struct device *dev = &pdev->dev; + struct mfd_cell cells[2]; + + ssp = kzalloc(sizeof(*ssp), GFP_KERNEL); + if (!ssp) { + dev_err(dev, "cannot allocate device info\n"); + return -ENOMEM; + } + + ssp->dev = dev; + dev_set_drvdata(dev, ssp); + + ssp->res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!ssp->res) { + error = -ENODEV; + dev_err(dev, "cannot determine register area\n"); + goto error_res; + } + + if (!request_mem_region(ssp->res->start, resource_size(ssp->res), + pdev->name)) { + error = -ENOMEM; + dev_err(dev, "cannot claim register memory\n"); + goto error_res; + } + + ssp->regs = ioremap(ssp->res->start, resource_size(ssp->res)); + if (!ssp->regs) { + error = -ENOMEM; + dev_err(dev, "cannot map register memory\n"); + goto error_map; + } + + ssp->clk = clk_get(dev, NULL); + if (IS_ERR(ssp->clk)) { + error = PTR_ERR(ssp->clk); + dev_err(dev, "cannot claim device clock\n"); + goto error_clk; + } + + ssp->irq = platform_get_irq(pdev, 0); + if (ssp->irq < 0) { + error = -ENODEV; + dev_err(dev, "unknown irq\n"); + goto error_irq; + } + + error = request_threaded_irq(ssp->irq, NULL, ti_ssp_interrupt, 0, + dev_name(dev), ssp); + if (error < 0) { + dev_err(dev, "cannot acquire irq\n"); + goto error_irq; + } + + spin_lock_init(&ssp->lock); + init_waitqueue_head(&ssp->wqh); + + /* Power on and initialize SSP */ + error = clk_enable(ssp->clk); + if (error) { + dev_err(dev, "cannot enable device clock\n"); + goto error_enable; + } + + /* Reset registers to a sensible known state */ + ssp_write(ssp, REG_IOSEL_1, 0); + ssp_write(ssp, REG_IOSEL_2, 0); + ssp_write(ssp, REG_INTR_EN, 0x3); + ssp_write(ssp, REG_INTR_ST, 0x3); + 
ssp_write(ssp, REG_TEST_CTRL, 0); + ssp_port_write(ssp, 0, PORT_CFG_1, SSP_PORT_ASL); + ssp_port_write(ssp, 1, PORT_CFG_1, SSP_PORT_ASL); + ssp_port_write(ssp, 0, PORT_CFG_2, SSP_PORT_CFO1); + ssp_port_write(ssp, 1, PORT_CFG_2, SSP_PORT_CFO1); + + sysclk = clk_get_rate(ssp->clk); + if (pdata && pdata->out_clock) + prediv = (sysclk / pdata->out_clock) - 1; + prediv = clamp(prediv, 0, 0xff); + ssp_rmw(ssp, REG_PREDIV, 0xff, prediv); + + memset(cells, 0, sizeof(cells)); + for (id = 0; id < 2; id++) { + const struct ti_ssp_dev_data *data = &pdata->dev_data[id]; + + cells[id].id = id; + cells[id].name = data->dev_name; + cells[id].platform_data = data->pdata; + cells[id].data_size = data->pdata_size; + } + + error = mfd_add_devices(dev, 0, cells, 2, NULL, 0); + if (error < 0) { + dev_err(dev, "cannot add mfd cells\n"); + goto error_enable; + } + + return 0; + +error_enable: + free_irq(ssp->irq, ssp); +error_irq: + clk_put(ssp->clk); +error_clk: + iounmap(ssp->regs); +error_map: + release_mem_region(ssp->res->start, resource_size(ssp->res)); +error_res: + kfree(ssp); + return error; +} + +static int __devexit ti_ssp_remove(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct ti_ssp *ssp = dev_get_drvdata(dev); + + mfd_remove_devices(dev); + clk_disable(ssp->clk); + free_irq(ssp->irq, ssp); + clk_put(ssp->clk); + iounmap(ssp->regs); + release_mem_region(ssp->res->start, resource_size(ssp->res)); + kfree(ssp); + dev_set_drvdata(dev, NULL); + return 0; +} + +static struct platform_driver ti_ssp_driver = { + .probe = ti_ssp_probe, + .remove = __devexit_p(ti_ssp_remove), + .driver = { + .name = "ti-ssp", + .owner = THIS_MODULE, + } +}; + +static int __init ti_ssp_init(void) +{ + return platform_driver_register(&ti_ssp_driver); +} +module_init(ti_ssp_init); + +static void __exit ti_ssp_exit(void) +{ + platform_driver_unregister(&ti_ssp_driver); +} +module_exit(ti_ssp_exit); + +MODULE_DESCRIPTION("Sequencer Serial Port (SSP) Driver"); +MODULE_AUTHOR("Cyril Chemparathy"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:ti-ssp");
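ti_ssp_load(), ti_ssp_set_iosel(), ti_ssp_raw_read()/ti_ssp_raw_write() and ti_ssp_run() form the exported interface the two MFD children consume. A hedged usage sketch from a child's point of view (the program words, iosel value and the child function itself are invented placeholders; only the call sequence reflects the driver above):

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/mfd/ti_ssp.h>

static u32 example_prog[] = { 0x0000, 0x0000 };	/* placeholder SeqRAM words */

static int example_child_io(struct device *dev)	/* dev = SSP child device */
{
	u32 result;
	int err, stop;

	/* -ENOSPC if the program exceeds the SSP_PORT_SEQRAM_SIZE check */
	err = ti_ssp_load(dev, 0, example_prog, ARRAY_SIZE(example_prog));
	if (err)
		return err;

	ti_ssp_set_iosel(dev, 0);	/* placeholder pin-routing word */

	/* Start at pc 0, shift 32 bits in, get 32 bits back; a
	 * non-negative return value is the sequencer stop address. */
	stop = ti_ssp_run(dev, 0, 0xdeadbeef, &result);
	if (stop < 0)
		return stop;

	dev_info(dev, "ssp stopped at %d, output %08x\n", stop, result);
	return 0;
}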
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index afe8c6fa166a..54f91321749a 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -225,7 +225,7 @@ config MMC_OMAP config MMC_OMAP_HS tristate "TI OMAP High Speed Multimedia Card Interface support" - depends on ARCH_OMAP2430 || ARCH_OMAP3 || ARCH_OMAP4 + depends on SOC_OMAP2430 || ARCH_OMAP3 || ARCH_OMAP4 help This selects the TI OMAP High Speed Multimedia card Interface. If you have an OMAP2430 or OMAP3 board or OMAP4 board with a diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index 2d6de3e03e2d..5bbb87d10251 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -2,7 +2,7 @@ * linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver * * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved. - * Copyright (C) 2010 ST-Ericsson AB. + * Copyright (C) 2010 ST-Ericsson SA * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -25,8 +25,10 @@ #include <linux/clk.h> #include <linux/scatterlist.h> #include <linux/gpio.h> -#include <linux/amba/mmci.h> #include <linux/regulator/consumer.h> +#include <linux/dmaengine.h> +#include <linux/dma-mapping.h> +#include <linux/amba/mmci.h> #include <asm/div64.h> #include <asm/io.h> @@ -142,9 +144,6 @@ mmci_request_end(struct mmci_host *host, struct mmc_request *mrq) host->mrq = NULL; host->cmd = NULL; - if (mrq->data) - mrq->data->bytes_xfered = host->data_xfered; - /* * Need to drop the host lock here; mmc_request_done may call * back into the driver... @@ -189,6 +188,248 @@ static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data) sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags); } +/* + * All the DMA operation mode stuff goes inside this ifdef. + * This assumes that you have a generic DMA device interface, + * no custom DMA interfaces are supported. + */ +#ifdef CONFIG_DMA_ENGINE +static void __devinit mmci_dma_setup(struct mmci_host *host) +{ + struct mmci_platform_data *plat = host->plat; + const char *rxname, *txname; + dma_cap_mask_t mask; + + if (!plat || !plat->dma_filter) { + dev_info(mmc_dev(host->mmc), "no DMA platform data\n"); + return; + } + + /* Try to acquire a generic DMA engine slave channel */ + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + + /* + * If only an RX channel is specified, the driver will + * attempt to use it bidirectionally, however if it is + * specified but cannot be located, DMA will be disabled. + */ + if (plat->dma_rx_param) { + host->dma_rx_channel = dma_request_channel(mask, + plat->dma_filter, + plat->dma_rx_param); + /* E.g. if no DMA hardware is present */ + if (!host->dma_rx_channel) + dev_err(mmc_dev(host->mmc), "no RX DMA channel\n"); + } + + if (plat->dma_tx_param) { + host->dma_tx_channel = dma_request_channel(mask, + plat->dma_filter, + plat->dma_tx_param); + if (!host->dma_tx_channel) + dev_warn(mmc_dev(host->mmc), "no TX DMA channel\n"); + } else { + host->dma_tx_channel = host->dma_rx_channel; + } + + if (host->dma_rx_channel) + rxname = dma_chan_name(host->dma_rx_channel); + else + rxname = "none"; + + if (host->dma_tx_channel) + txname = dma_chan_name(host->dma_tx_channel); + else + txname = "none"; + + dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n", + rxname, txname); + + /* + * Limit the maximum segment size in any SG entry according to + * the parameters of the DMA engine device. + */ + if (host->dma_tx_channel) { + struct device *dev = host->dma_tx_channel->device->dev; + unsigned int max_seg_size = dma_get_max_seg_size(dev); + + if (max_seg_size < host->mmc->max_seg_size) + host->mmc->max_seg_size = max_seg_size; + } + if (host->dma_rx_channel) { + struct device *dev = host->dma_rx_channel->device->dev; + unsigned int max_seg_size = dma_get_max_seg_size(dev); + + if (max_seg_size < host->mmc->max_seg_size) + host->mmc->max_seg_size = max_seg_size; + } +} + +/* + * This is used in __devinit or __devexit so inline it + * so it can be discarded. + */
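mmci_dma_setup() above deliberately knows nothing about the DMA controller: the board supplies a filter callback plus opaque per-direction cookies, and dma_request_channel() walks the engine's channels until the filter accepts one. A sketch of the platform side (the filter body and the cookie values are illustrative assumptions, not from any real board file):

#include <linux/dmaengine.h>
#include <linux/mmc/host.h>
#include <linux/amba/mmci.h>

static bool board_mmci_dma_filter(struct dma_chan *chan, void *param)
{
	/* A real filter matches controller-specific data (e.g. a request
	 * line number) against the channel; this sketch just compares an
	 * opaque cookie stashed in chan->private. */
	return chan->private == param;
}

static struct mmci_platform_data board_mmci_plat = {
	.ocr_mask	= MMC_VDD_32_33 | MMC_VDD_33_34,
	.dma_filter	= board_mmci_dma_filter,
	.dma_rx_param	= (void *)1,	/* placeholder cookies */
	.dma_tx_param	= (void *)2,
};

Leaving dma_tx_param unset makes the setup code reuse the RX channel for both directions, as the comment in mmci_dma_setup() describes.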
+static inline void mmci_dma_release(struct mmci_host *host) +{ + struct mmci_platform_data *plat = host->plat; + + if (host->dma_rx_channel) + dma_release_channel(host->dma_rx_channel); + if (host->dma_tx_channel && plat->dma_tx_param) + dma_release_channel(host->dma_tx_channel); + host->dma_rx_channel = host->dma_tx_channel = NULL; +} + +static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data) +{ + struct dma_chan *chan = host->dma_current; + enum dma_data_direction dir; + u32 status; + int i; + + /* Wait up to 1ms for the DMA to complete */ + for (i = 0; ; i++) { + status = readl(host->base + MMCISTATUS); + if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100) + break; + udelay(10); + } + + /* + * Check to see whether we still have some data left in the FIFO - + * this catches DMA controllers which are unable to monitor the + * DMALBREQ and DMALSREQ signals while allowing us to DMA to non- + * contiguous buffers. On TX, we'll get a FIFO underrun error. + */ + if (status & MCI_RXDATAAVLBLMASK) { + dmaengine_terminate_all(chan); + if (!data->error) + data->error = -EIO; + } + + if (data->flags & MMC_DATA_WRITE) { + dir = DMA_TO_DEVICE; + } else { + dir = DMA_FROM_DEVICE; + } + + dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir); + + /* + * Use of DMA with scatter-gather is impossible. + * Give up with DMA and switch back to PIO mode. + */ + if (status & MCI_RXDATAAVLBLMASK) { + dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n"); + mmci_dma_release(host); + } +} + +static void mmci_dma_data_error(struct mmci_host *host) +{ + dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n"); + dmaengine_terminate_all(host->dma_current); +} + +static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl) +{ + struct variant_data *variant = host->variant; + struct dma_slave_config conf = { + .src_addr = host->phybase + MMCIFIFO, + .dst_addr = host->phybase + MMCIFIFO, + .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, + .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, + .src_maxburst = variant->fifohalfsize >> 2, /* # of words */ + .dst_maxburst = variant->fifohalfsize >> 2, /* # of words */ + }; + struct mmc_data *data = host->data; + struct dma_chan *chan; + struct dma_device *device; + struct dma_async_tx_descriptor *desc; + int nr_sg; + + host->dma_current = NULL; + + if (data->flags & MMC_DATA_READ) { + conf.direction = DMA_FROM_DEVICE; + chan = host->dma_rx_channel; + } else { + conf.direction = DMA_TO_DEVICE; + chan = host->dma_tx_channel; + } + + /* If there's no DMA channel, fall back to PIO */ + if (!chan) + return -EINVAL; + + /* If less than or equal to the fifo size, don't bother with DMA */ + if (host->size <= variant->fifosize) + return -EINVAL; + + device = chan->device; + nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, conf.direction); + if (nr_sg == 0) + return -EINVAL; + + dmaengine_slave_config(chan, &conf); + desc = device->device_prep_slave_sg(chan, data->sg, nr_sg, + conf.direction, DMA_CTRL_ACK); + if (!desc) + goto unmap_exit; + + /* Okay, go for it. */ + host->dma_current = chan; + + dev_vdbg(mmc_dev(host->mmc), + "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n", + data->sg_len, data->blksz, data->blocks, data->flags); + dmaengine_submit(desc); + dma_async_issue_pending(chan); + + datactrl |= MCI_DPSM_DMAENABLE; + + /* Trigger the DMA transfer */ + writel(datactrl, host->base + MMCIDATACTRL); + + /* + * Let the MMCI say when the data is ended and it's time + * to fire next DMA request.
When that happens, MMCI will + * call mmci_data_end() + */ + writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK, + host->base + MMCIMASK0); + return 0; + +unmap_exit: + dmaengine_terminate_all(chan); + dma_unmap_sg(device->dev, data->sg, data->sg_len, conf.direction); + return -ENOMEM; +} +#else +/* Blank functions if the DMA engine is not available */ +static inline void mmci_dma_setup(struct mmci_host *host) +{ +} + +static inline void mmci_dma_release(struct mmci_host *host) +{ +} + +static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data) +{ +} + +static inline void mmci_dma_data_error(struct mmci_host *host) +{ +} + +static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl) +{ + return -ENOSYS; +} +#endif + static void mmci_start_data(struct mmci_host *host, struct mmc_data *data) { struct variant_data *variant = host->variant; @@ -202,9 +443,7 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data) host->data = data; host->size = data->blksz * data->blocks; - host->data_xfered = 0; - - mmci_init_sg(host, data); + data->bytes_xfered = 0; clks = (unsigned long long)data->timeout_ns * host->cclk; do_div(clks, 1000000000UL); @@ -219,15 +458,29 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data) BUG_ON(1 << blksz_bits != data->blksz); datactrl = MCI_DPSM_ENABLE | blksz_bits << 4; - if (data->flags & MMC_DATA_READ) { + + if (data->flags & MMC_DATA_READ) datactrl |= MCI_DPSM_DIRECTION; + + /* + * Attempt to use DMA operation mode, if this + * should fail, fall back to PIO mode + */ + if (!mmci_dma_start_data(host, datactrl)) + return; + + /* IRQ mode, map the SG list for CPU reading/writing */ + mmci_init_sg(host, data); + + if (data->flags & MMC_DATA_READ) { irqmask = MCI_RXFIFOHALFFULLMASK; /* - * If we have less than a FIFOSIZE of bytes to transfer, - * trigger a PIO interrupt as soon as any data is available. + * If we have less than the fifo 'half-full' threshold to + * transfer, trigger a PIO interrupt as soon as any data + * is available. */ - if (host->size < variant->fifosize) + if (host->size < variant->fifohalfsize) irqmask |= MCI_RXDATAAVLBLMASK; } else { /* @@ -283,49 +536,51 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data, if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) { u32 remain, success; - /* Calculate how far we are into the transfer */ + /* Terminate the DMA transfer */ + if (dma_inprogress(host)) + mmci_dma_data_error(host); + + /* + * Calculate how far we are into the transfer. Note that + * the data counter gives the number of bytes transferred + * on the MMC bus, not on the host side. On reads, this + * can be as much as a FIFO-worth of data ahead. This + * matters for FIFO overruns only. 
+ */ remain = readl(host->base + MMCIDATACNT); success = data->blksz * data->blocks - remain; - dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ (status %08x)\n", status); + dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n", + status, success); if (status & MCI_DATACRCFAIL) { /* Last block was not successful */ - host->data_xfered = round_down(success - 1, data->blksz); + success -= 1; data->error = -EILSEQ; } else if (status & MCI_DATATIMEOUT) { - host->data_xfered = round_down(success, data->blksz); data->error = -ETIMEDOUT; - } else if (status & (MCI_TXUNDERRUN|MCI_RXOVERRUN)) { - host->data_xfered = round_down(success, data->blksz); + } else if (status & MCI_TXUNDERRUN) { + data->error = -EIO; + } else if (status & MCI_RXOVERRUN) { + if (success > host->variant->fifosize) + success -= host->variant->fifosize; + else + success = 0; data->error = -EIO; } - - /* - * We hit an error condition. Ensure that any data - * partially written to a page is properly coherent. - */ - if (data->flags & MMC_DATA_READ) { - struct sg_mapping_iter *sg_miter = &host->sg_miter; - unsigned long flags; - - local_irq_save(flags); - if (sg_miter_next(sg_miter)) { - flush_dcache_page(sg_miter->page); - sg_miter_stop(sg_miter); - } - local_irq_restore(flags); - } + data->bytes_xfered = round_down(success, data->blksz); } if (status & MCI_DATABLOCKEND) dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n"); if (status & MCI_DATAEND || data->error) { + if (dma_inprogress(host)) + mmci_dma_unmap(host, data); mmci_stop_data(host); if (!data->error) /* The error clause is handled above, success! */ - host->data_xfered += data->blksz * data->blocks; + data->bytes_xfered = data->blksz * data->blocks; if (!data->stop) { mmci_request_end(host, data->mrq); @@ -498,9 +753,6 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id) if (remain) break; - if (status & MCI_RXACTIVE) - flush_dcache_page(sg_miter->page); - status = readl(base + MMCISTATUS); } while (1); @@ -509,10 +761,10 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id) local_irq_restore(flags); /* - * If we're nearing the end of the read, switch to - * "any data available" mode. + * If we have less than the fifo 'half-full' threshold to transfer, + * trigger a PIO interrupt as soon as any data is available. 
*/ - if (status & MCI_RXACTIVE && host->size < variant->fifosize) + if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize) mmci_set_mask1(host, MCI_RXDATAAVLBLMASK); /* @@ -713,7 +965,8 @@ static const struct mmc_host_ops mmci_ops = { .get_cd = mmci_get_cd, }; -static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id) +static int __devinit mmci_probe(struct amba_device *dev, + const struct amba_id *id) { struct mmci_platform_data *plat = dev->dev.platform_data; struct variant_data *variant = id->data; @@ -776,6 +1029,7 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id) dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n", host->mclk); } + host->phybase = dev->res.start; host->base = ioremap(dev->res.start, resource_size(&dev->res)); if (!host->base) { ret = -ENOMEM; @@ -903,9 +1157,12 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id) amba_set_drvdata(dev, mmc); - dev_info(&dev->dev, "%s: PL%03x rev%u at 0x%08llx irq %d,%d\n", - mmc_hostname(mmc), amba_part(dev), amba_rev(dev), - (unsigned long long)dev->res.start, dev->irq[0], dev->irq[1]); + dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n", + mmc_hostname(mmc), amba_part(dev), amba_manf(dev), + amba_rev(dev), (unsigned long long)dev->res.start, + dev->irq[0], dev->irq[1]); + + mmci_dma_setup(host); mmc_add_host(mmc); @@ -952,6 +1209,7 @@ static int __devexit mmci_remove(struct amba_device *dev) writel(0, host->base + MMCICOMMAND); writel(0, host->base + MMCIDATACTRL); + mmci_dma_release(host); free_irq(dev->irq[0], host); if (!host->singleirq) free_irq(dev->irq[1], host); diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h index c1df7b82d36c..ec9a7bc6d0df 100644 --- a/drivers/mmc/host/mmci.h +++ b/drivers/mmc/host/mmci.h @@ -148,8 +148,10 @@ struct clk; struct variant_data; +struct dma_chan; struct mmci_host { + phys_addr_t phybase; void __iomem *base; struct mmc_request *mrq; struct mmc_command *cmd; @@ -161,8 +163,6 @@ struct mmci_host { int gpio_cd_irq; bool singleirq; - unsigned int data_xfered; - spinlock_t lock; unsigned int mclk; @@ -181,5 +181,16 @@ struct mmci_host { struct sg_mapping_iter sg_miter; unsigned int size; struct regulator *vcc; + +#ifdef CONFIG_DMA_ENGINE + /* DMA stuff */ + struct dma_chan *dma_current; + struct dma_chan *dma_rx_channel; + struct dma_chan *dma_tx_channel; + +#define dma_inprogress(host) ((host)->dma_current) +#else +#define dma_inprogress(host) (0) +#endif }; diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c index 153ab977a013..97c9b3638d57 100644 --- a/drivers/mmc/host/msm_sdcc.c +++ b/drivers/mmc/host/msm_sdcc.c @@ -36,6 +36,7 @@ #include <linux/io.h> #include <linux/memory.h> #include <linux/gfp.h> +#include <linux/gpio.h> #include <asm/cacheflush.h> #include <asm/div64.h> @@ -941,6 +942,38 @@ msmsdcc_request(struct mmc_host *mmc, struct mmc_request *mrq) spin_unlock_irqrestore(&host->lock, flags); } +static void msmsdcc_setup_gpio(struct msmsdcc_host *host, bool enable) +{ + struct msm_mmc_gpio_data *curr; + int i, rc = 0; + + if (!host->plat->gpio_data && host->gpio_config_status == enable) + return; + + curr = host->plat->gpio_data; + for (i = 0; i < curr->size; i++) { + if (enable) { + rc = gpio_request(curr->gpio[i].no, + curr->gpio[i].name); + if (rc) { + pr_err("%s: gpio_request(%d, %s) failed %d\n", + mmc_hostname(host->mmc), + curr->gpio[i].no, + curr->gpio[i].name, rc); + goto free_gpios; + } + } else { + gpio_free(curr->gpio[i].no); + } + } 
+ host->gpio_config_status = enable; + return; + +free_gpios: + for (; i >= 0; i--) + gpio_free(curr->gpio[i].no); +} + static void msmsdcc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) { @@ -953,6 +986,8 @@ msmsdcc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) msmsdcc_enable_clocks(host); + spin_unlock_irqrestore(&host->lock, flags); + if (ios->clock) { if (ios->clock != host->clk_rate) { rc = clk_set_rate(host->clk, ios->clock); @@ -979,9 +1014,11 @@ msmsdcc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) switch (ios->power_mode) { case MMC_POWER_OFF: + msmsdcc_setup_gpio(host, false); break; case MMC_POWER_UP: pwr |= MCI_PWR_UP; + msmsdcc_setup_gpio(host, true); break; case MMC_POWER_ON: pwr |= MCI_PWR_ON; @@ -998,9 +1035,10 @@ msmsdcc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) msmsdcc_writel(host, pwr, MMCIPOWER); } #if BUSCLK_PWRSAVE + spin_lock_irqsave(&host->lock, flags); msmsdcc_disable_clocks(host, 1); -#endif spin_unlock_irqrestore(&host->lock, flags); +#endif } static void msmsdcc_enable_sdio_irq(struct mmc_host *mmc, int enable) diff --git a/drivers/mmc/host/msm_sdcc.h b/drivers/mmc/host/msm_sdcc.h index 939557af266d..42d7bbc977c5 100644 --- a/drivers/mmc/host/msm_sdcc.h +++ b/drivers/mmc/host/msm_sdcc.h @@ -243,6 +243,7 @@ struct msmsdcc_host { unsigned int cmd_datactrl; struct mmc_command *cmd_cmd; u32 cmd_c; + bool gpio_config_status; bool prog_scan; bool prog_enable; diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c index 078fdf11af03..158c0ee53b2c 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c @@ -118,7 +118,7 @@ #define MMC_TIMEOUT_MS 20 #define OMAP_MMC_MASTER_CLOCK 96000000 -#define DRIVER_NAME "mmci-omap-hs" +#define DRIVER_NAME "omap_hsmmc" /* Timeouts for entering power saving states on inactivity, msec */ #define OMAP_MMC_DISABLED_TIMEOUT 100 @@ -260,7 +260,7 @@ static int omap_hsmmc_1_set_power(struct device *dev, int slot, int power_on, return ret; } -static int omap_hsmmc_23_set_power(struct device *dev, int slot, int power_on, +static int omap_hsmmc_235_set_power(struct device *dev, int slot, int power_on, int vdd) { struct omap_hsmmc_host *host = @@ -316,6 +316,12 @@ static int omap_hsmmc_23_set_power(struct device *dev, int slot, int power_on, return ret; } +static int omap_hsmmc_4_set_power(struct device *dev, int slot, int power_on, + int vdd) +{ + return 0; +} + static int omap_hsmmc_1_set_sleep(struct device *dev, int slot, int sleep, int vdd, int cardsleep) { @@ -326,7 +332,7 @@ static int omap_hsmmc_1_set_sleep(struct device *dev, int slot, int sleep, return regulator_set_mode(host->vcc, mode); } -static int omap_hsmmc_23_set_sleep(struct device *dev, int slot, int sleep, +static int omap_hsmmc_235_set_sleep(struct device *dev, int slot, int sleep, int vdd, int cardsleep) { struct omap_hsmmc_host *host = @@ -365,6 +371,12 @@ static int omap_hsmmc_23_set_sleep(struct device *dev, int slot, int sleep, return regulator_enable(host->vcc_aux); } +static int omap_hsmmc_4_set_sleep(struct device *dev, int slot, int sleep, + int vdd, int cardsleep) +{ + return 0; +} + static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host) { struct regulator *reg; @@ -379,10 +391,14 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host) break; case OMAP_MMC2_DEVID: case OMAP_MMC3_DEVID: + case OMAP_MMC5_DEVID: /* Off-chip level shifting, or none */ - mmc_slot(host).set_power = omap_hsmmc_23_set_power; - mmc_slot(host).set_sleep = omap_hsmmc_23_set_sleep; + mmc_slot(host).set_power = 
omap_hsmmc_235_set_power; + mmc_slot(host).set_sleep = omap_hsmmc_235_set_sleep; break; + case OMAP_MMC4_DEVID: + mmc_slot(host).set_power = omap_hsmmc_4_set_power; + mmc_slot(host).set_sleep = omap_hsmmc_4_set_sleep; + break; default: pr_err("MMC%d configuration not supported!\n", host->id); return -EINVAL; @@ -1555,7 +1571,7 @@ static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) break; } - if (host->id == OMAP_MMC1_DEVID) { + if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) { /* Only MMC1 can interface at 3V without some flavor * of external transceiver; but they all handle 1.8V. */ @@ -1647,7 +1663,7 @@ static void omap_hsmmc_conf_bus_power(struct omap_hsmmc_host *host) u32 hctl, capa, value; /* Only MMC1 supports 3.0V */ - if (host->id == OMAP_MMC1_DEVID) { + if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) { hctl = SDVS30; capa = VS30 | VS18; } else { @@ -2101,14 +2117,14 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev) /* we start off in DISABLED state */ host->dpm_state = DISABLED; - if (mmc_host_enable(host->mmc) != 0) { + if (clk_enable(host->iclk) != 0) { clk_put(host->iclk); clk_put(host->fclk); goto err1; } - if (clk_enable(host->iclk) != 0) { - mmc_host_disable(host->mmc); + if (mmc_host_enable(host->mmc) != 0) { + clk_disable(host->iclk); clk_put(host->iclk); clk_put(host->fclk); goto err1; diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig index c89592239bc7..4f6c06f16328 100644 --- a/drivers/mtd/nand/Kconfig +++ b/drivers/mtd/nand/Kconfig @@ -106,23 +106,6 @@ config MTD_NAND_OMAP2 help Support for NAND flash on Texas Instruments OMAP2 and OMAP3 platforms. -config MTD_NAND_OMAP_PREFETCH - bool "GPMC prefetch support for NAND Flash device" - depends on MTD_NAND_OMAP2 - default y - help - The NAND device can be accessed for Read/Write using GPMC PREFETCH engine - to improve the performance. - -config MTD_NAND_OMAP_PREFETCH_DMA - depends on MTD_NAND_OMAP_PREFETCH - bool "DMA mode" - default n - help - The GPMC PREFETCH engine can be configured eigther in MPU interrupt mode - or in DMA interrupt mode. - Say y for DMA mode or MPU mode will be used - config MTD_NAND_IDS tristate @@ -476,7 +459,7 @@ config MTD_NAND_MPC5121_NFC config MTD_NAND_MXC tristate "MXC NAND support" - depends on ARCH_MX2 || ARCH_MX25 || ARCH_MX3 || ARCH_MX51 + depends on IMX_HAVE_PLATFORM_MXC_NAND help This enables the driver for the NAND flash controller on the MXC processors. diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c index ef932ba55a0b..5ae1d9ee2cf1 100644 --- a/drivers/mtd/nand/mxc_nand.c +++ b/drivers/mtd/nand/mxc_nand.c @@ -722,9 +722,8 @@ static void mxc_do_addr_cycle(struct mtd_info *mtd, int column, int page_addr) /* * MXC NANDFC can only perform full page+spare or * spare-only read/write. When the upper layers - * layers perform a read/write buf operation, - * we will used the saved column address to index into - * the full page. + * perform a read/write buf operation, the saved column + * address is used to index into the full page.
*/ host->send_addr(host, 0, page_addr == -1); if (mtd->writesize > 512) diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c index 28af71c61834..7b8f1fffc528 100644 --- a/drivers/mtd/nand/omap2.c +++ b/drivers/mtd/nand/omap2.c @@ -11,6 +11,7 @@ #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <linux/delay.h> +#include <linux/interrupt.h> #include <linux/jiffies.h> #include <linux/sched.h> #include <linux/mtd/mtd.h> @@ -24,6 +25,7 @@ #include <plat/nand.h> #define DRIVER_NAME "omap2-nand" +#define OMAP_NAND_TIMEOUT_MS 5000 #define NAND_Ecc_P1e (1 << 0) #define NAND_Ecc_P2e (1 << 1) @@ -96,26 +98,19 @@ static const char *part_probes[] = { "cmdlinepart", NULL }; #endif -#ifdef CONFIG_MTD_NAND_OMAP_PREFETCH -static int use_prefetch = 1; - -/* "modprobe ... use_prefetch=0" etc */ -module_param(use_prefetch, bool, 0); -MODULE_PARM_DESC(use_prefetch, "enable/disable use of PREFETCH"); - -#ifdef CONFIG_MTD_NAND_OMAP_PREFETCH_DMA -static int use_dma = 1; +/* oob info generated runtime depending on ecc algorithm and layout selected */ +static struct nand_ecclayout omap_oobinfo; +/* Define some generic bad / good block scan pattern which are used + * while scanning a device for factory marked good / bad blocks + */ +static uint8_t scan_ff_pattern[] = { 0xff }; +static struct nand_bbt_descr bb_descrip_flashbased = { + .options = NAND_BBT_SCANEMPTY | NAND_BBT_SCANALLPAGES, + .offs = 0, + .len = 1, + .pattern = scan_ff_pattern, +}; -/* "modprobe ... use_dma=0" etc */ -module_param(use_dma, bool, 0); -MODULE_PARM_DESC(use_dma, "enable/disable use of DMA"); -#else -static const int use_dma; -#endif -#else -const int use_prefetch; -static const int use_dma; -#endif struct omap_nand_info { struct nand_hw_control controller; @@ -129,6 +124,13 @@ struct omap_nand_info { unsigned long phys_base; struct completion comp; int dma_ch; + int gpmc_irq; + enum { + OMAP_NAND_IO_READ = 0, /* read */ + OMAP_NAND_IO_WRITE, /* write */ + } iomode; + u_char *buf; + int buf_len; }; /** @@ -256,7 +258,8 @@ static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len) } /* configure and start prefetch transfer */ - ret = gpmc_prefetch_enable(info->gpmc_cs, 0x0, len, 0x0); + ret = gpmc_prefetch_enable(info->gpmc_cs, + PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x0); if (ret) { /* PFPW engine is busy, use cpu copy method */ if (info->nand.options & NAND_BUSWIDTH_16) @@ -288,9 +291,10 @@ static void omap_write_buf_pref(struct mtd_info *mtd, { struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, mtd); - uint32_t pref_count = 0, w_count = 0; + uint32_t w_count = 0; int i = 0, ret = 0; u16 *p; + unsigned long tim, limit; /* take care of subpage writes */ if (len % 2 != 0) { @@ -300,7 +304,8 @@ static void omap_write_buf_pref(struct mtd_info *mtd, } /* configure and start prefetch transfer */ - ret = gpmc_prefetch_enable(info->gpmc_cs, 0x0, len, 0x1); + ret = gpmc_prefetch_enable(info->gpmc_cs, + PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x1); if (ret) { /* PFPW engine is busy, use cpu copy method */ if (info->nand.options & NAND_BUSWIDTH_16) @@ -316,15 +321,17 @@ static void omap_write_buf_pref(struct mtd_info *mtd, iowrite16(*p++, info->nand.IO_ADDR_W); } /* wait for data to flushed-out before reset the prefetch */ - do { - pref_count = gpmc_read_status(GPMC_PREFETCH_COUNT); - } while (pref_count); + tim = 0; + limit = (loops_per_jiffy * + msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS)); + while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit)) + cpu_relax(); + /* 
disable and stop the PFPW engine */ gpmc_prefetch_reset(info->gpmc_cs); } } -#ifdef CONFIG_MTD_NAND_OMAP_PREFETCH_DMA /* * omap_nand_dma_cb: callback on the completion of dma transfer * @lch: logical channel @@ -348,14 +355,15 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr, { struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, mtd); - uint32_t prefetch_status = 0; enum dma_data_direction dir = is_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE; dma_addr_t dma_addr; int ret; + unsigned long tim, limit; - /* The fifo depth is 64 bytes. We have a sync at each frame and frame - * length is 64 bytes. + /* The fifo depth is 64 bytes max. + * But configure the FIFO-threshold to 32 to get a sync at each frame + * and frame length is 32 bytes. */ int buf_len = len >> 6; @@ -396,9 +404,10 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr, OMAP24XX_DMA_GPMC, OMAP_DMA_SRC_SYNC); } /* configure and start prefetch transfer */ - ret = gpmc_prefetch_enable(info->gpmc_cs, 0x1, len, is_write); + ret = gpmc_prefetch_enable(info->gpmc_cs, + PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write); if (ret) - /* PFPW engine is busy, use cpu copy methode */ + /* PFPW engine is busy, use cpu copy method */ goto out_copy; init_completion(&info->comp); @@ -407,10 +416,11 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr, /* setup and start DMA using dma_addr */ wait_for_completion(&info->comp); + tim = 0; + limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS)); + while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit)) + cpu_relax(); - do { - prefetch_status = gpmc_read_status(GPMC_PREFETCH_COUNT); - } while (prefetch_status); /* disable and stop the PFPW engine */ gpmc_prefetch_reset(info->gpmc_cs); @@ -426,14 +436,6 @@ out_copy: : omap_write_buf8(mtd, (u_char *) addr, len); return 0; } -#else -static void omap_nand_dma_cb(int lch, u16 ch_status, void *data) {} -static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr, - unsigned int len, int is_write) -{ - return 0; -} -#endif /** * omap_read_buf_dma_pref - read data from NAND controller into buffer @@ -466,6 +468,157 @@ static void omap_write_buf_dma_pref(struct mtd_info *mtd, omap_nand_dma_transfer(mtd, (u_char *) buf, len, 0x1); } +/* + * omap_nand_irq - GPMC irq handler + * @this_irq: gpmc irq number + * @dev: omap_nand_info structure pointer is passed here + */ +static irqreturn_t omap_nand_irq(int this_irq, void *dev) +{ + struct omap_nand_info *info = (struct omap_nand_info *) dev; + u32 bytes; + u32 irq_stat; + + irq_stat = gpmc_read_status(GPMC_GET_IRQ_STATUS); + bytes = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT); + bytes = bytes & 0xFFFC; /* io in multiple of 4 bytes */ + if (info->iomode == OMAP_NAND_IO_WRITE) { /* checks for write io */ + if (irq_stat & 0x2) + goto done; + + if (info->buf_len && (info->buf_len < bytes)) + bytes = info->buf_len; + else if (!info->buf_len) + bytes = 0; + iowrite32_rep(info->nand.IO_ADDR_W, + (u32 *)info->buf, bytes >> 2); + info->buf = info->buf + bytes; + info->buf_len -= bytes; + + } else { + ioread32_rep(info->nand.IO_ADDR_R, + (u32 *)info->buf, bytes >> 2); + info->buf = info->buf + bytes; + + if (irq_stat & 0x2) + goto done; + } + gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, irq_stat); + + return IRQ_HANDLED; + +done: + complete(&info->comp); + /* disable irq */ + gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ, 0); + + /* clear status */ + gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, irq_stat); + + return IRQ_HANDLED; +}
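omap_nand_irq() and the two transfer helpers that follow share one protocol: arm the prefetch engine, let the interrupt handler move data and fire the completion, then reset the engine. Condensed into a single hypothetical helper (this factoring is not in the patch, and the real write path additionally drains GPMC_PREFETCH_COUNT before the reset):

static int omap_nand_irq_xfer(struct omap_nand_info *info, u_char *buf,
			      int len, int fifo_thresh, int is_write)
{
	int ret;

	info->iomode = is_write ? OMAP_NAND_IO_WRITE : OMAP_NAND_IO_READ;
	info->buf = buf;
	info->buf_len = len;
	init_completion(&info->comp);

	ret = gpmc_prefetch_enable(info->gpmc_cs, fifo_thresh, 0x0, len,
				   is_write);
	if (ret)
		return ret;	/* engine busy: caller falls back to PIO */

	gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ,
			  GPMC_IRQ_FIFOEVENTENABLE | GPMC_IRQ_COUNT_EVENT);
	wait_for_completion(&info->comp);	/* omap_nand_irq() completes */
	gpmc_prefetch_reset(info->gpmc_cs);
	return 0;
}

The thresholds the real helpers pass differ per direction: PREFETCH_FIFOTHRESHOLD_MAX/2 for reads and (PREFETCH_FIFOTHRESHOLD_MAX * 3) / 8 for writes; the latter matches the "size=24" comment if the FIFO maximum is the 64 bytes mentioned above.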
+ +/* + * omap_read_buf_irq_pref - read data from NAND controller into buffer + * @mtd: MTD device structure + * @buf: buffer to store data + * @len: number of bytes to read + */ +static void omap_read_buf_irq_pref(struct mtd_info *mtd, u_char *buf, int len) +{ + struct omap_nand_info *info = container_of(mtd, + struct omap_nand_info, mtd); + int ret = 0; + + if (len <= mtd->oobsize) { + omap_read_buf_pref(mtd, buf, len); + return; + } + + info->iomode = OMAP_NAND_IO_READ; + info->buf = buf; + init_completion(&info->comp); + + /* configure and start prefetch transfer */ + ret = gpmc_prefetch_enable(info->gpmc_cs, + PREFETCH_FIFOTHRESHOLD_MAX/2, 0x0, len, 0x0); + if (ret) + /* PFPW engine is busy, use cpu copy method */ + goto out_copy; + + info->buf_len = len; + /* enable irq */ + gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ, + (GPMC_IRQ_FIFOEVENTENABLE | GPMC_IRQ_COUNT_EVENT)); + + /* waiting for read to complete */ + wait_for_completion(&info->comp); + + /* disable and stop the PFPW engine */ + gpmc_prefetch_reset(info->gpmc_cs); + return; + +out_copy: + if (info->nand.options & NAND_BUSWIDTH_16) + omap_read_buf16(mtd, buf, len); + else + omap_read_buf8(mtd, buf, len); +} + +/* + * omap_write_buf_irq_pref - write buffer to NAND controller + * @mtd: MTD device structure + * @buf: data buffer + * @len: number of bytes to write + */ +static void omap_write_buf_irq_pref(struct mtd_info *mtd, + const u_char *buf, int len) +{ + struct omap_nand_info *info = container_of(mtd, + struct omap_nand_info, mtd); + int ret = 0; + unsigned long tim, limit; + + if (len <= mtd->oobsize) { + omap_write_buf_pref(mtd, buf, len); + return; + } + + info->iomode = OMAP_NAND_IO_WRITE; + info->buf = (u_char *) buf; + init_completion(&info->comp); + + /* configure and start prefetch transfer : size=24 */ + ret = gpmc_prefetch_enable(info->gpmc_cs, + (PREFETCH_FIFOTHRESHOLD_MAX * 3) / 8, 0x0, len, 0x1); + if (ret) + /* PFPW engine is busy, use cpu copy method */ + goto out_copy; + + info->buf_len = len; + /* enable irq */ + gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ, + (GPMC_IRQ_FIFOEVENTENABLE | GPMC_IRQ_COUNT_EVENT)); + + /* waiting for write to complete */ + wait_for_completion(&info->comp); + /* wait for data to be flushed out before resetting the prefetch */ + tim = 0; + limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS)); + while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit)) + cpu_relax(); + + /* disable and stop the PFPW engine */ + gpmc_prefetch_reset(info->gpmc_cs); + return; + +out_copy: + if (info->nand.options & NAND_BUSWIDTH_16) + omap_write_buf16(mtd, buf, len); + else + omap_write_buf8(mtd, buf, len); +} + /** * omap_verify_buf - Verify chip data against buffer * @mtd: MTD device structure @@ -487,8 +640,6 @@ static int omap_verify_buf(struct mtd_info *mtd, const u_char * buf, int len) return 0; } -#ifdef CONFIG_MTD_NAND_OMAP_HWECC - /** * gen_true_ecc - This function will generate true ECC value * @ecc_buf: buffer to store ecc code @@ -708,8 +859,6 @@ static void omap_enable_hwecc(struct mtd_info *mtd, int mode) gpmc_enable_hwecc(info->gpmc_cs, mode, dev_width, info->nand.ecc.size); } -#endif - /** * omap_wait - wait until the command is done * @mtd: MTD device structure @@ -779,6 +928,7 @@ static int __devinit omap_nand_probe(struct platform_device *pdev) struct omap_nand_info *info; struct omap_nand_platform_data *pdata; int err; + int i, offset; pdata = pdev->dev.platform_data; if (pdata == NULL) { @@
-804,7 +954,7 @@ static int __devinit omap_nand_probe(struct platform_device *pdev) info->mtd.name = dev_name(&pdev->dev); info->mtd.owner = THIS_MODULE; - info->nand.options |= pdata->devsize ? NAND_BUSWIDTH_16 : 0; + info->nand.options = pdata->devsize; info->nand.options |= NAND_SKIP_BBTSCAN; /* NAND write protect off */ @@ -842,28 +992,13 @@ static int __devinit omap_nand_probe(struct platform_device *pdev) info->nand.chip_delay = 50; } - if (use_prefetch) { - + switch (pdata->xfer_type) { + case NAND_OMAP_PREFETCH_POLLED: info->nand.read_buf = omap_read_buf_pref; info->nand.write_buf = omap_write_buf_pref; - if (use_dma) { - err = omap_request_dma(OMAP24XX_DMA_GPMC, "NAND", - omap_nand_dma_cb, &info->comp, &info->dma_ch); - if (err < 0) { - info->dma_ch = -1; - printk(KERN_WARNING "DMA request failed." - " Non-dma data transfer mode\n"); - } else { - omap_set_dma_dest_burst_mode(info->dma_ch, - OMAP_DMA_DATA_BURST_16); - omap_set_dma_src_burst_mode(info->dma_ch, - OMAP_DMA_DATA_BURST_16); - - info->nand.read_buf = omap_read_buf_dma_pref; - info->nand.write_buf = omap_write_buf_dma_pref; - } - } - } else { + break; + + case NAND_OMAP_POLLED: if (info->nand.options & NAND_BUSWIDTH_16) { info->nand.read_buf = omap_read_buf16; info->nand.write_buf = omap_write_buf16; @@ -871,20 +1006,61 @@ static int __devinit omap_nand_probe(struct platform_device *pdev) info->nand.read_buf = omap_read_buf8; info->nand.write_buf = omap_write_buf8; } + break; + + case NAND_OMAP_PREFETCH_DMA: + err = omap_request_dma(OMAP24XX_DMA_GPMC, "NAND", + omap_nand_dma_cb, &info->comp, &info->dma_ch); + if (err < 0) { + info->dma_ch = -1; + dev_err(&pdev->dev, "DMA request failed!\n"); + goto out_release_mem_region; + } else { + omap_set_dma_dest_burst_mode(info->dma_ch, + OMAP_DMA_DATA_BURST_16); + omap_set_dma_src_burst_mode(info->dma_ch, + OMAP_DMA_DATA_BURST_16); + + info->nand.read_buf = omap_read_buf_dma_pref; + info->nand.write_buf = omap_write_buf_dma_pref; + } + break; + + case NAND_OMAP_PREFETCH_IRQ: + err = request_irq(pdata->gpmc_irq, + omap_nand_irq, IRQF_SHARED, "gpmc-nand", info); + if (err) { + dev_err(&pdev->dev, "requesting irq(%d) error:%d\n", + pdata->gpmc_irq, err); + goto out_release_mem_region; + } else { + info->gpmc_irq = pdata->gpmc_irq; + info->nand.read_buf = omap_read_buf_irq_pref; + info->nand.write_buf = omap_write_buf_irq_pref; + } + break; + + default: + dev_err(&pdev->dev, + "xfer_type(%d) not supported!\n", pdata->xfer_type); + err = -EINVAL; + goto out_release_mem_region; } - info->nand.verify_buf = omap_verify_buf; -#ifdef CONFIG_MTD_NAND_OMAP_HWECC - info->nand.ecc.bytes = 3; - info->nand.ecc.size = 512; - info->nand.ecc.calculate = omap_calculate_ecc; - info->nand.ecc.hwctl = omap_enable_hwecc; - info->nand.ecc.correct = omap_correct_data; - info->nand.ecc.mode = NAND_ECC_HW; + info->nand.verify_buf = omap_verify_buf; -#else - info->nand.ecc.mode = NAND_ECC_SOFT; -#endif + /* select the ecc type */ + if (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_DEFAULT) + info->nand.ecc.mode = NAND_ECC_SOFT; + else if ((pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW) || + (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW_ROMCODE)) { + info->nand.ecc.bytes = 3; + info->nand.ecc.size = 512; + info->nand.ecc.calculate = omap_calculate_ecc; + info->nand.ecc.hwctl = omap_enable_hwecc; + info->nand.ecc.correct = omap_correct_data; + info->nand.ecc.mode = NAND_ECC_HW; + } /* DIP switches on some boards change between 8 and 16 bit * bus widths for flash. Try the other width if the first try fails.
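Which of the four transfer paths probe() wires up is now a board decision rather than a Kconfig/module-parameter one. A hypothetical board-file fragment exercising the new fields (the field names follow the identifiers this patch consumes; other platform-data members and the actual values are assumptions):

static struct omap_nand_platform_data board_nand_data = {
	.cs		= 0,				/* GPMC chip-select */
	.devsize	= NAND_BUSWIDTH_16,		/* 0 would mean 8-bit */
	.xfer_type	= NAND_OMAP_PREFETCH_IRQ,	/* or NAND_OMAP_POLLED,
							 * _PREFETCH_POLLED,
							 * _PREFETCH_DMA */
	.gpmc_irq	= 20,				/* placeholder irq number */
	.ecc_opt	= OMAP_ECC_HAMMING_CODE_HW_ROMCODE,
};

OMAP_ECC_HAMMING_CODE_HW_ROMCODE additionally triggers the omap_oobinfo layout set up below, keeping the ECC bytes where the boot ROM expects them.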
@@ -897,6 +1073,26 @@ static int __devinit omap_nand_probe(struct platform_device *pdev) } } + /* rom code layout */ + if (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW_ROMCODE) { + + if (info->nand.options & NAND_BUSWIDTH_16) + offset = 2; + else { + offset = 1; + info->nand.badblock_pattern = &bb_descrip_flashbased; + } + omap_oobinfo.eccbytes = 3 * (info->mtd.oobsize/16); + for (i = 0; i < omap_oobinfo.eccbytes; i++) + omap_oobinfo.eccpos[i] = i+offset; + + omap_oobinfo.oobfree->offset = offset + omap_oobinfo.eccbytes; + omap_oobinfo.oobfree->length = info->mtd.oobsize - + (offset + omap_oobinfo.eccbytes); + + info->nand.ecc.layout = &omap_oobinfo; + } + #ifdef CONFIG_MTD_PARTITIONS err = parse_mtd_partitions(&info->mtd, part_probes, &info->parts, 0); if (err > 0) @@ -926,9 +1122,12 @@ static int omap_nand_remove(struct platform_device *pdev) mtd); platform_set_drvdata(pdev, NULL); - if (use_dma) + if (info->dma_ch != -1) omap_free_dma(info->dma_ch); + if (info->gpmc_irq) + free_irq(info->gpmc_irq, info); + /* Release NAND device, its internal structures and partitions */ nand_release(&info->mtd); iounmap(info->nand.IO_ADDR_R); @@ -947,16 +1146,8 @@ static struct platform_driver omap_nand_driver = { static int __init omap_nand_init(void) { - printk(KERN_INFO "%s driver initializing\n", DRIVER_NAME); + pr_info("%s driver initializing\n", DRIVER_NAME); - /* This check is required if driver is being - * loaded run time as a module - */ - if ((1 == use_dma) && (0 == use_prefetch)) { - printk(KERN_INFO"Wrong parameters: 'use_dma' can not be 1 " - "without use_prefetch'. Prefetch will not be" - " used in either mode (mpu or dma)\n"); - } return platform_driver_register(&omap_nand_driver); } diff --git a/drivers/mtd/onenand/Kconfig b/drivers/mtd/onenand/Kconfig index 4dbd0f58eebf..4f426195f8db 100644 --- a/drivers/mtd/onenand/Kconfig +++ b/drivers/mtd/onenand/Kconfig @@ -32,7 +32,7 @@ config MTD_ONENAND_OMAP2 config MTD_ONENAND_SAMSUNG tristate "OneNAND on Samsung SOC controller support" - depends on ARCH_S3C64XX || ARCH_S5PC100 || ARCH_S5PV210 || ARCH_S5PV310 + depends on ARCH_S3C64XX || ARCH_S5PC100 || ARCH_S5PV210 || ARCH_EXYNOS4 help Support for a OneNAND flash device connected to an Samsung SOC. S3C64XX/S5PC100 use command mapping method. diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c index c849cacf4b2f..14a49abe057e 100644 --- a/drivers/mtd/onenand/omap2.c +++ b/drivers/mtd/onenand/omap2.c @@ -63,7 +63,7 @@ struct omap2_onenand { struct completion dma_done; int dma_channel; int freq; - int (*setup)(void __iomem *base, int freq); + int (*setup)(void __iomem *base, int *freq_ptr); struct regulator *regulator; }; @@ -148,11 +148,9 @@ static int omap2_onenand_wait(struct mtd_info *mtd, int state) wait_err("controller error", state, ctrl, intr); return -EIO; } - if ((intr & intr_flags) != intr_flags) { - wait_err("timeout", state, ctrl, intr); - return -EIO; - } - return 0; + if ((intr & intr_flags) == intr_flags) + return 0; + /* Continue in wait for interrupt branch */ } if (state != FL_READING) { @@ -581,7 +579,7 @@ static int __adjust_timing(struct device *dev, void *data) /* DMA is not in use so this is all that is needed */ /* Revisit for OMAP3! 
*/ - ret = c->setup(c->onenand.base, c->freq); + ret = c->setup(c->onenand.base, &c->freq); return ret; } @@ -673,7 +671,7 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev) } if (pdata->onenand_setup != NULL) { - r = pdata->onenand_setup(c->onenand.base, c->freq); + r = pdata->onenand_setup(c->onenand.base, &c->freq); if (r < 0) { dev_err(&pdev->dev, "Onenand platform setup failed: " "%d\n", r); @@ -718,8 +716,8 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev) } dev_info(&pdev->dev, "initializing on CS%d, phys base 0x%08lx, virtual " - "base %p\n", c->gpmc_cs, c->phys_base, - c->onenand.base); + "base %p, freq %d MHz\n", c->gpmc_cs, c->phys_base, + c->onenand.base, c->freq); c->pdev = pdev; c->mtd.name = dev_name(&pdev->dev); @@ -754,24 +752,6 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev) if ((r = onenand_scan(&c->mtd, 1)) < 0) goto err_release_regulator; - switch ((c->onenand.version_id >> 4) & 0xf) { - case 0: - c->freq = 40; - break; - case 1: - c->freq = 54; - break; - case 2: - c->freq = 66; - break; - case 3: - c->freq = 83; - break; - case 4: - c->freq = 104; - break; - } - #ifdef CONFIG_MTD_PARTITIONS r = parse_mtd_partitions(&c->mtd, part_probes, &c->parts, 0); if (r > 0) diff --git a/drivers/mtd/ubi/Kconfig b/drivers/mtd/ubi/Kconfig index 3cf193fb5e00..6abeb4f13403 100644 --- a/drivers/mtd/ubi/Kconfig +++ b/drivers/mtd/ubi/Kconfig @@ -52,6 +52,12 @@ config MTD_UBI_GLUEBI work on top of UBI. Do not enable this unless you use legacy software. -source "drivers/mtd/ubi/Kconfig.debug" +config MTD_UBI_DEBUG + bool "UBI debugging" + depends on SYSFS + select DEBUG_FS + select KALLSYMS_ALL if KALLSYMS && DEBUG_KERNEL + help + This option enables UBI debugging. endif # MTD_UBI diff --git a/drivers/mtd/ubi/Kconfig.debug b/drivers/mtd/ubi/Kconfig.debug deleted file mode 100644 index fad4adc0fe2c..000000000000 --- a/drivers/mtd/ubi/Kconfig.debug +++ /dev/null @@ -1,73 +0,0 @@ -comment "UBI debugging options" - -config MTD_UBI_DEBUG - bool "UBI debugging" - depends on SYSFS - select DEBUG_FS - select KALLSYMS_ALL if KALLSYMS && DEBUG_KERNEL - help - This option enables UBI debugging. - -if MTD_UBI_DEBUG - -config MTD_UBI_DEBUG_MSG - bool "UBI debugging messages" - help - This option enables UBI debugging messages. - -config MTD_UBI_DEBUG_PARANOID - bool "Extra self-checks" - help - This option enables extra checks in UBI code. Note this slows UBI down - significantly. - -config MTD_UBI_DEBUG_DISABLE_BGT - bool "Do not enable the UBI background thread" - help - This option switches the background thread off by default. The thread - may be also be enabled/disabled via UBI sysfs. - -config MTD_UBI_DEBUG_EMULATE_BITFLIPS - bool "Emulate flash bit-flips" - help - This option emulates bit-flips with probability 1/50, which in turn - causes scrubbing. Useful for debugging and stressing UBI. - -config MTD_UBI_DEBUG_EMULATE_WRITE_FAILURES - bool "Emulate flash write failures" - help - This option emulates write failures with probability 1/100. Useful for - debugging and testing how UBI handlines errors. - -config MTD_UBI_DEBUG_EMULATE_ERASE_FAILURES - bool "Emulate flash erase failures" - help - This option emulates erase failures with probability 1/100. Useful for - debugging and testing how UBI handlines errors. 
- -comment "Additional UBI debugging messages" - -config MTD_UBI_DEBUG_MSG_BLD - bool "Additional UBI initialization and build messages" - help - This option enables detailed UBI initialization and device build - debugging messages. - -config MTD_UBI_DEBUG_MSG_EBA - bool "Eraseblock association unit messages" - help - This option enables debugging messages from the UBI eraseblock - association unit. - -config MTD_UBI_DEBUG_MSG_WL - bool "Wear-leveling unit messages" - help - This option enables debugging messages from the UBI wear-leveling - unit. - -config MTD_UBI_DEBUG_MSG_IO - bool "Input/output unit messages" - help - This option enables debugging messages from the UBI input/output unit. - -endif # MTD_UBI_DEBUG diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c index 5ebe280225d6..65626c1c446d 100644 --- a/drivers/mtd/ubi/build.c +++ b/drivers/mtd/ubi/build.c @@ -690,11 +690,25 @@ static int io_init(struct ubi_device *ubi) ubi_assert(ubi->hdrs_min_io_size <= ubi->min_io_size); ubi_assert(ubi->min_io_size % ubi->hdrs_min_io_size == 0); + ubi->max_write_size = ubi->mtd->writebufsize; + /* + * The maximum write size must be greater than or equal to the min. + * I/O size, a multiple of it, and a power of 2. + */ + if (ubi->max_write_size < ubi->min_io_size || + ubi->max_write_size % ubi->min_io_size || + !is_power_of_2(ubi->max_write_size)) { + ubi_err("bad write buffer size %d for %d min. I/O unit", + ubi->max_write_size, ubi->min_io_size); + return -EINVAL; + } + /* Calculate default aligned sizes of EC and VID headers */ ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size); ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size); dbg_msg("min_io_size %d", ubi->min_io_size); + dbg_msg("max_write_size %d", ubi->max_write_size); dbg_msg("hdrs_min_io_size %d", ubi->hdrs_min_io_size); dbg_msg("ec_hdr_alsize %d", ubi->ec_hdr_alsize); dbg_msg("vid_hdr_alsize %d", ubi->vid_hdr_alsize); @@ -711,7 +725,7 @@ static int io_init(struct ubi_device *ubi) } /* Similar for the data offset */ - ubi->leb_start = ubi->vid_hdr_offset + UBI_EC_HDR_SIZE; + ubi->leb_start = ubi->vid_hdr_offset + UBI_VID_HDR_SIZE; ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size); dbg_msg("vid_hdr_offset %d", ubi->vid_hdr_offset); @@ -923,6 +937,8 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset) spin_lock_init(&ubi->volumes_lock); ubi_msg("attaching mtd%d to ubi%d", mtd->index, ubi_num); + dbg_msg("sizeof(struct ubi_scan_leb) %zu", sizeof(struct ubi_scan_leb)); + dbg_msg("sizeof(struct ubi_wl_entry) %zu", sizeof(struct ubi_wl_entry)); err = io_init(ubi); if (err) @@ -937,13 +953,6 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset) if (!ubi->peb_buf2) goto out_free; -#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID - mutex_init(&ubi->dbg_buf_mutex); - ubi->dbg_peb_buf = vmalloc(ubi->peb_size); - if (!ubi->dbg_peb_buf) - goto out_free; -#endif - err = attach_by_scanning(ubi); if (err) { dbg_err("failed to attach by scanning, error %d", err); @@ -991,8 +1000,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset) * checks @ubi->thread_enabled. Otherwise we may fail to wake it up.
*/ spin_lock(&ubi->wl_lock); - if (!DBG_DISABLE_BGT) - ubi->thread_enabled = 1; + ubi->thread_enabled = 1; wake_up_process(ubi->bgt_thread); spin_unlock(&ubi->wl_lock); @@ -1009,9 +1017,6 @@ out_detach: out_free: vfree(ubi->peb_buf1); vfree(ubi->peb_buf2); -#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID - vfree(ubi->dbg_peb_buf); -#endif if (ref) put_device(&ubi->dev); else @@ -1082,9 +1087,6 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway) put_mtd_device(ubi->mtd); vfree(ubi->peb_buf1); vfree(ubi->peb_buf2); -#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID - vfree(ubi->dbg_peb_buf); -#endif ubi_msg("mtd%d is detached from ubi%d", ubi->mtd->index, ubi->ubi_num); put_device(&ubi->dev); return 0; diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c index 4876977e52cb..d4d07e5f138f 100644 --- a/drivers/mtd/ubi/debug.c +++ b/drivers/mtd/ubi/debug.c @@ -27,6 +27,20 @@ #ifdef CONFIG_MTD_UBI_DEBUG #include "ubi.h" +#include <linux/module.h> +#include <linux/moduleparam.h> + +unsigned int ubi_msg_flags; +unsigned int ubi_chk_flags; +unsigned int ubi_tst_flags; + +module_param_named(debug_msgs, ubi_msg_flags, uint, S_IRUGO | S_IWUSR); +module_param_named(debug_chks, ubi_chk_flags, uint, S_IRUGO | S_IWUSR); +module_param_named(debug_tsts, ubi_tst_flags, uint, S_IRUGO | S_IWUSR); + +MODULE_PARM_DESC(debug_msgs, "Debug message type flags"); +MODULE_PARM_DESC(debug_chks, "Debug check flags"); +MODULE_PARM_DESC(debug_tsts, "Debug special test flags"); /** * ubi_dbg_dump_ec_hdr - dump an erase counter header. diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h index 9eca95074bc2..0b0c2888c656 100644 --- a/drivers/mtd/ubi/debug.h +++ b/drivers/mtd/ubi/debug.h @@ -38,6 +38,11 @@ printk(KERN_DEBUG "UBI DBG (pid %d): %s: " fmt "\n", \ current->pid, __func__, ##__VA_ARGS__) +#define dbg_do_msg(typ, fmt, ...) do { \ + if (ubi_msg_flags & typ) \ + dbg_msg(fmt, ##__VA_ARGS__); \ +} while (0) + #define ubi_dbg_dump_stack() dump_stack() struct ubi_ec_hdr; @@ -57,62 +62,88 @@ void ubi_dbg_dump_seb(const struct ubi_scan_leb *seb, int type); void ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req); void ubi_dbg_dump_flash(struct ubi_device *ubi, int pnum, int offset, int len); +extern unsigned int ubi_msg_flags; + +/* + * Debugging message type flags. + * + * UBI_MSG_GEN: general messages + * UBI_MSG_EBA: eraseblock association messages + * UBI_MSG_WL: wear-leveling messages + * UBI_MSG_IO: input/output messages + * UBI_MSG_BLD: initialization and build messages + */ +enum { + UBI_MSG_GEN = 0x1, + UBI_MSG_EBA = 0x2, + UBI_MSG_WL = 0x4, + UBI_MSG_IO = 0x8, + UBI_MSG_BLD = 0x10, +}; + #define ubi_dbg_print_hex_dump(l, ps, pt, r, g, b, len, a) \ print_hex_dump(l, ps, pt, r, g, b, len, a) -#ifdef CONFIG_MTD_UBI_DEBUG_MSG /* General debugging messages */ -#define dbg_gen(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__) -#else -#define dbg_gen(fmt, ...) ({}) -#endif +#define dbg_gen(fmt, ...) dbg_do_msg(UBI_MSG_GEN, fmt, ##__VA_ARGS__) -#ifdef CONFIG_MTD_UBI_DEBUG_MSG_EBA /* Messages from the eraseblock association sub-system */ -#define dbg_eba(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__) -#else -#define dbg_eba(fmt, ...) ({}) -#endif +#define dbg_eba(fmt, ...) dbg_do_msg(UBI_MSG_EBA, fmt, ##__VA_ARGS__) -#ifdef CONFIG_MTD_UBI_DEBUG_MSG_WL /* Messages from the wear-leveling sub-system */ -#define dbg_wl(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__) -#else -#define dbg_wl(fmt, ...) ({}) -#endif +#define dbg_wl(fmt, ...)
dbg_do_msg(UBI_MSG_WL, fmt, ##__VA_ARGS__) -#ifdef CONFIG_MTD_UBI_DEBUG_MSG_IO /* Messages from the input/output sub-system */ -#define dbg_io(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__) -#else -#define dbg_io(fmt, ...) ({}) -#endif +#define dbg_io(fmt, ...) dbg_do_msg(UBI_MSG_IO, fmt, ##__VA_ARGS__) -#ifdef CONFIG_MTD_UBI_DEBUG_MSG_BLD /* Initialization and build messages */ -#define dbg_bld(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__) -#define UBI_IO_DEBUG 1 -#else -#define dbg_bld(fmt, ...) ({}) -#define UBI_IO_DEBUG 0 -#endif +#define dbg_bld(fmt, ...) dbg_do_msg(UBI_MSG_BLD, fmt, ##__VA_ARGS__) + +extern unsigned int ubi_chk_flags; + +/* + * Debugging check flags. + * + * UBI_CHK_GEN: general checks + * UBI_CHK_IO: check writes and erases + */ +enum { + UBI_CHK_GEN = 0x1, + UBI_CHK_IO = 0x2, +}; -#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID int ubi_dbg_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len); int ubi_dbg_check_write(struct ubi_device *ubi, const void *buf, int pnum, int offset, int len); -#else -#define ubi_dbg_check_all_ff(ubi, pnum, offset, len) 0 -#define ubi_dbg_check_write(ubi, buf, pnum, offset, len) 0 -#endif -#ifdef CONFIG_MTD_UBI_DEBUG_DISABLE_BGT -#define DBG_DISABLE_BGT 1 -#else -#define DBG_DISABLE_BGT 0 -#endif +extern unsigned int ubi_tst_flags; + +/* + * Special testing flags. + * + * UBI_TST_DISABLE_BGT: disable the background thread + * UBI_TST_EMULATE_BITFLIPS: emulate bit-flips + * UBI_TST_EMULATE_WRITE_FAILURES: emulate write failures + * UBI_TST_EMULATE_ERASE_FAILURES: emulate erase failures + */ +enum { + UBI_TST_DISABLE_BGT = 0x1, + UBI_TST_EMULATE_BITFLIPS = 0x2, + UBI_TST_EMULATE_WRITE_FAILURES = 0x4, + UBI_TST_EMULATE_ERASE_FAILURES = 0x8, +}; + +/** + * ubi_dbg_is_bgt_disabled - if the background thread is disabled. + * + * Returns non-zero if the UBI background thread is disabled for testing + * purposes. + */ +static inline int ubi_dbg_is_bgt_disabled(void) +{ + return ubi_tst_flags & UBI_TST_DISABLE_BGT; } -#ifdef CONFIG_MTD_UBI_DEBUG_EMULATE_BITFLIPS /** * ubi_dbg_is_bitflip - if it is time to emulate a bit-flip. * @@ -120,13 +151,11 @@ int ubi_dbg_check_write(struct ubi_device *ubi, const void *buf, int pnum, */ static inline int ubi_dbg_is_bitflip(void) { - return !(random32() % 200); + if (ubi_tst_flags & UBI_TST_EMULATE_BITFLIPS) + return !(random32() % 200); + return 0; } -#else -#define ubi_dbg_is_bitflip() 0 -#endif -#ifdef CONFIG_MTD_UBI_DEBUG_EMULATE_WRITE_FAILURES /** * ubi_dbg_is_write_failure - if it is time to emulate a write failure. * @@ -135,13 +164,11 @@ static inline int ubi_dbg_is_bitflip(void) */ static inline int ubi_dbg_is_write_failure(void) { - return !(random32() % 500); + if (ubi_tst_flags & UBI_TST_EMULATE_WRITE_FAILURES) + return !(random32() % 500); + return 0; } -#else -#define ubi_dbg_is_write_failure() 0 -#endif -#ifdef CONFIG_MTD_UBI_DEBUG_EMULATE_ERASE_FAILURES /** * ubi_dbg_is_erase_failure - if its time to emulate an erase failure.
* @@ -150,11 +177,10 @@ static inline int ubi_dbg_is_write_failure(void) */ static inline int ubi_dbg_is_erase_failure(void) { + if (ubi_tst_flags & UBI_TST_EMULATE_ERASE_FAILURES) return !(random32() % 400); + return 0; } -#else -#define ubi_dbg_is_erase_failure() 0 -#endif #else @@ -177,8 +203,7 @@ static inline int ubi_dbg_is_erase_failure(void) #define ubi_dbg_dump_flash(ubi, pnum, offset, len) ({}) #define ubi_dbg_print_hex_dump(l, ps, pt, r, g, b, len, a) ({}) -#define UBI_IO_DEBUG 0 -#define DBG_DISABLE_BGT 0 +#define ubi_dbg_is_bgt_disabled() 0 #define ubi_dbg_is_bitflip() 0 #define ubi_dbg_is_write_failure() 0 #define ubi_dbg_is_erase_failure() 0 diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c index 811775aa8ee8..aaa6e1e83b29 100644 --- a/drivers/mtd/ubi/io.c +++ b/drivers/mtd/ubi/io.c @@ -91,7 +91,7 @@ #include <linux/slab.h> #include "ubi.h" -#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID +#ifdef CONFIG_MTD_UBI_DEBUG static int paranoid_check_not_bad(const struct ubi_device *ubi, int pnum); static int paranoid_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum); static int paranoid_check_ec_hdr(const struct ubi_device *ubi, int pnum, @@ -146,6 +146,28 @@ int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset, if (err) return err; + /* + * Deliberately corrupt the buffer to improve robustness. Indeed, if we + * do not do this, the following may happen: + * 1. The buffer contains data from previous operation, e.g., read from + * another PEB previously. The data looks like expected, e.g., if we + * just do not read anything and return - the caller would not + * notice this. E.g., if we are reading a VID header, the buffer may + * contain a valid VID header from another PEB. + * 2. The driver is buggy and returns us success or -EBADMSG or + * -EUCLEAN, but it does not actually put any data to the buffer. + * + * This may confuse UBI or upper layers - they may think the buffer + * contains valid data while in fact it is just old data. This is + * especially possible because UBI (and UBIFS) relies on CRC, and + * treats data as correct even in case of ECC errors if the CRC is + * correct. + * + * Try to prevent this situation by changing the first byte of the + * buffer. + */ + *((uint8_t *)buf) ^= 0xFF; + addr = (loff_t)pnum * ubi->peb_size + offset; retry: err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf); @@ -166,7 +188,7 @@ retry: return UBI_IO_BITFLIPS; } - if (read != len && retries++ < UBI_IO_RETRIES) { + if (retries++ < UBI_IO_RETRIES) { dbg_io("error %d%s while reading %d bytes from PEB %d:%d," " read only %zd bytes, retry", err, errstr, len, pnum, offset, read); @@ -480,6 +502,13 @@ static int nor_erase_prepare(struct ubi_device *ubi, int pnum) size_t written; loff_t addr; uint32_t data = 0; + /* + * Note, we cannot generally define VID header buffers on stack, + * because of the way we deal with these buffers (see the header + * comment in this file). But we know this is a NOR-specific piece of + * code, so we can do this. But yes, this is error-prone and we should + * (pre-)allocate VID header buffer instead. + */ struct ubi_vid_hdr vid_hdr; /* @@ -507,11 +536,13 @@ static int nor_erase_prepare(struct ubi_device *ubi, int pnum) * PEB. 
*/ err1 = ubi_io_read_vid_hdr(ubi, pnum, &vid_hdr, 0); - if (err1 == UBI_IO_BAD_HDR_EBADMSG || err1 == UBI_IO_BAD_HDR) { + if (err1 == UBI_IO_BAD_HDR_EBADMSG || err1 == UBI_IO_BAD_HDR || + err1 == UBI_IO_FF) { struct ubi_ec_hdr ec_hdr; err1 = ubi_io_read_ec_hdr(ubi, pnum, &ec_hdr, 0); - if (err1 == UBI_IO_BAD_HDR_EBADMSG || err1 == UBI_IO_BAD_HDR) + if (err1 == UBI_IO_BAD_HDR_EBADMSG || err1 == UBI_IO_BAD_HDR || + err1 == UBI_IO_FF) /* * Both VID and EC headers are corrupted, so we can * safely erase this PEB and not afraid that it will be @@ -752,9 +783,8 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum, if (verbose) ubi_warn("no EC header found at PEB %d, " "only 0xFF bytes", pnum); - else if (UBI_IO_DEBUG) - dbg_msg("no EC header found at PEB %d, " - "only 0xFF bytes", pnum); + dbg_bld("no EC header found at PEB %d, " + "only 0xFF bytes", pnum); if (!read_err) return UBI_IO_FF; else @@ -769,9 +799,9 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum, ubi_warn("bad magic number at PEB %d: %08x instead of " "%08x", pnum, magic, UBI_EC_HDR_MAGIC); ubi_dbg_dump_ec_hdr(ec_hdr); - } else if (UBI_IO_DEBUG) - dbg_msg("bad magic number at PEB %d: %08x instead of " - "%08x", pnum, magic, UBI_EC_HDR_MAGIC); + } + dbg_bld("bad magic number at PEB %d: %08x instead of " + "%08x", pnum, magic, UBI_EC_HDR_MAGIC); return UBI_IO_BAD_HDR; } @@ -783,9 +813,9 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum, ubi_warn("bad EC header CRC at PEB %d, calculated " "%#08x, read %#08x", pnum, crc, hdr_crc); ubi_dbg_dump_ec_hdr(ec_hdr); - } else if (UBI_IO_DEBUG) - dbg_msg("bad EC header CRC at PEB %d, calculated " - "%#08x, read %#08x", pnum, crc, hdr_crc); + } + dbg_bld("bad EC header CRC at PEB %d, calculated " + "%#08x, read %#08x", pnum, crc, hdr_crc); if (!read_err) return UBI_IO_BAD_HDR; @@ -1008,9 +1038,8 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum, if (verbose) ubi_warn("no VID header found at PEB %d, " "only 0xFF bytes", pnum); - else if (UBI_IO_DEBUG) - dbg_msg("no VID header found at PEB %d, " - "only 0xFF bytes", pnum); + dbg_bld("no VID header found at PEB %d, " + "only 0xFF bytes", pnum); if (!read_err) return UBI_IO_FF; else @@ -1021,9 +1050,9 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum, ubi_warn("bad magic number at PEB %d: %08x instead of " "%08x", pnum, magic, UBI_VID_HDR_MAGIC); ubi_dbg_dump_vid_hdr(vid_hdr); - } else if (UBI_IO_DEBUG) - dbg_msg("bad magic number at PEB %d: %08x instead of " - "%08x", pnum, magic, UBI_VID_HDR_MAGIC); + } + dbg_bld("bad magic number at PEB %d: %08x instead of " + "%08x", pnum, magic, UBI_VID_HDR_MAGIC); return UBI_IO_BAD_HDR; } @@ -1035,9 +1064,9 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum, ubi_warn("bad CRC at PEB %d, calculated %#08x, " "read %#08x", pnum, crc, hdr_crc); ubi_dbg_dump_vid_hdr(vid_hdr); - } else if (UBI_IO_DEBUG) - dbg_msg("bad CRC at PEB %d, calculated %#08x, " - "read %#08x", pnum, crc, hdr_crc); + } + dbg_bld("bad CRC at PEB %d, calculated %#08x, " + "read %#08x", pnum, crc, hdr_crc); if (!read_err) return UBI_IO_BAD_HDR; else @@ -1097,7 +1126,7 @@ int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum, return err; } -#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID +#ifdef CONFIG_MTD_UBI_DEBUG /** * paranoid_check_not_bad - ensure that a physical eraseblock is not bad. 
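Review note: the long comment added to ubi_io_read() above describes a generally useful defensive pattern, not just a UBI detail. A self-contained userspace sketch of the same trick (poisoned_pread() is an illustrative name, not part of this patch):

#include <stdint.h>
#include <unistd.h>

/*
 * Flip the first byte of the caller's buffer before issuing the read, so
 * data left over from a previous operation can never masquerade as the
 * result of this one.  If the lower layer claims success without actually
 * filling the buffer, the poisoned byte makes the problem detectable
 * (e.g., by a subsequent CRC check) rather than silently invisible.
 */
static ssize_t poisoned_pread(int fd, void *buf, size_t len, off_t off)
{
	if (len)
		*(uint8_t *)buf ^= 0xFF;
	return pread(fd, buf, len, off);
}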
@@ -1111,6 +1140,9 @@ static int paranoid_check_not_bad(const struct ubi_device *ubi, int pnum) { int err; + if (!(ubi_chk_flags & UBI_CHK_IO)) + return 0; + err = ubi_io_is_bad(ubi, pnum); if (!err) return err; @@ -1135,6 +1167,9 @@ static int paranoid_check_ec_hdr(const struct ubi_device *ubi, int pnum, int err; uint32_t magic; + if (!(ubi_chk_flags & UBI_CHK_IO)) + return 0; + magic = be32_to_cpu(ec_hdr->magic); if (magic != UBI_EC_HDR_MAGIC) { ubi_err("bad magic %#08x, must be %#08x", @@ -1170,6 +1205,9 @@ static int paranoid_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum) uint32_t crc, hdr_crc; struct ubi_ec_hdr *ec_hdr; + if (!(ubi_chk_flags & UBI_CHK_IO)) + return 0; + ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS); if (!ec_hdr) return -ENOMEM; @@ -1211,6 +1249,9 @@ static int paranoid_check_vid_hdr(const struct ubi_device *ubi, int pnum, int err; uint32_t magic; + if (!(ubi_chk_flags & UBI_CHK_IO)) + return 0; + magic = be32_to_cpu(vid_hdr->magic); if (magic != UBI_VID_HDR_MAGIC) { ubi_err("bad VID header magic %#08x at PEB %d, must be %#08x", @@ -1249,6 +1290,9 @@ static int paranoid_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum) struct ubi_vid_hdr *vid_hdr; void *p; + if (!(ubi_chk_flags & UBI_CHK_IO)) + return 0; + vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); if (!vid_hdr) return -ENOMEM; @@ -1294,15 +1338,26 @@ int ubi_dbg_check_write(struct ubi_device *ubi, const void *buf, int pnum, int offset, int len) { int err, i; + size_t read; + void *buf1; + loff_t addr = (loff_t)pnum * ubi->peb_size + offset; - mutex_lock(&ubi->dbg_buf_mutex); - err = ubi_io_read(ubi, ubi->dbg_peb_buf, pnum, offset, len); - if (err) - goto out_unlock; + if (!(ubi_chk_flags & UBI_CHK_IO)) + return 0; + + buf1 = __vmalloc(len, GFP_KERNEL | GFP_NOFS, PAGE_KERNEL); + if (!buf1) { + ubi_err("cannot allocate memory to check writes"); + return 0; + } + + err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf1); + if (err && err != -EUCLEAN) + goto out_free; for (i = 0; i < len; i++) { uint8_t c = ((uint8_t *)buf)[i]; - uint8_t c1 = ((uint8_t *)ubi->dbg_peb_buf)[i]; + uint8_t c1 = ((uint8_t *)buf1)[i]; int dump_len; if (c == c1) @@ -1319,17 +1374,17 @@ int ubi_dbg_check_write(struct ubi_device *ubi, const void *buf, int pnum, ubi_msg("hex dump of the read buffer from %d to %d", i, i + dump_len); print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, - ubi->dbg_peb_buf + i, dump_len, 1); + buf1 + i, dump_len, 1); ubi_dbg_dump_stack(); err = -EINVAL; - goto out_unlock; + goto out_free; } - mutex_unlock(&ubi->dbg_buf_mutex); + vfree(buf1); return 0; -out_unlock: - mutex_unlock(&ubi->dbg_buf_mutex); +out_free: + vfree(buf1); return err; } @@ -1348,36 +1403,44 @@ int ubi_dbg_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len) { size_t read; int err; + void *buf; loff_t addr = (loff_t)pnum * ubi->peb_size + offset; - mutex_lock(&ubi->dbg_buf_mutex); - err = ubi->mtd->read(ubi->mtd, addr, len, &read, ubi->dbg_peb_buf); + if (!(ubi_chk_flags & UBI_CHK_IO)) + return 0; + + buf = __vmalloc(len, GFP_KERNEL | GFP_NOFS, PAGE_KERNEL); + if (!buf) { + ubi_err("cannot allocate memory to check for 0xFFs"); + return 0; + } + + err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf); if (err && err != -EUCLEAN) { ubi_err("error %d while reading %d bytes from PEB %d:%d, " "read %zd bytes", err, len, pnum, offset, read); goto error; } - err = ubi_check_pattern(ubi->dbg_peb_buf, 0xFF, len); + err = ubi_check_pattern(buf, 0xFF, len); if (err == 0) { ubi_err("flash region at PEB %d:%d, length %d 
does not " "contain all 0xFF bytes", pnum, offset, len); goto fail; } - mutex_unlock(&ubi->dbg_buf_mutex); + vfree(buf); return 0; fail: ubi_err("paranoid check failed for PEB %d", pnum); ubi_msg("hex dump of the %d-%d region", offset, offset + len); - print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, - ubi->dbg_peb_buf, len, 1); + print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, buf, len, 1); err = -EINVAL; error: ubi_dbg_dump_stack(); - mutex_unlock(&ubi->dbg_buf_mutex); + vfree(buf); return err; } -#endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */ +#endif /* CONFIG_MTD_UBI_DEBUG */ diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c index 69fa4ef03c53..d39716e5b204 100644 --- a/drivers/mtd/ubi/kapi.c +++ b/drivers/mtd/ubi/kapi.c @@ -40,7 +40,9 @@ void ubi_do_get_device_info(struct ubi_device *ubi, struct ubi_device_info *di) { di->ubi_num = ubi->ubi_num; di->leb_size = ubi->leb_size; + di->leb_start = ubi->leb_start; di->min_io_size = ubi->min_io_size; + di->max_write_size = ubi->max_write_size; di->ro_mode = ubi->ro_mode; di->cdev = ubi->cdev.dev; } diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c index 79ca304fc4db..11eb8ef12485 100644 --- a/drivers/mtd/ubi/scan.c +++ b/drivers/mtd/ubi/scan.c @@ -39,32 +39,46 @@ * eraseblocks are put to the @free list and the physical eraseblock to be * erased are put to the @erase list. * + * About corruptions + * ~~~~~~~~~~~~~~~~~ + * + * UBI protects EC and VID headers with CRC-32 checksums, so it can detect + * whether the headers are corrupted or not. Sometimes UBI also protects the + * data with CRC-32, e.g., when it executes the atomic LEB change operation, or + * when it moves the contents of a PEB for wear-leveling purposes. + * * UBI tries to distinguish between 2 types of corruptions. - * 1. Corruptions caused by power cuts. These are harmless and expected - * corruptions and UBI tries to handle them gracefully, without printing too - * many warnings and error messages. The idea is that we do not lose - * important data in these case - we may lose only the data which was being - * written to the media just before the power cut happened, and the upper - * layers (e.g., UBIFS) are supposed to handle these situations. UBI puts - * these PEBs to the head of the @erase list and they are scheduled for - * erasure. + * + * 1. Corruptions caused by power cuts. These are expected corruptions and UBI + * tries to handle them gracefully, without printing too many warnings and + * error messages. The idea is that we do not lose important data in this case + * - we may lose only the data which was being written to the media just before + * the power cut happened, and the upper layers (e.g., UBIFS) are supposed to + * handle such data losses (e.g., by using the FS journal). + * + * When UBI detects a corruption (CRC-32 mismatch) in a PEB, and it looks like + * the reason is a power cut, UBI puts this PEB to the @erase list, and all + * PEBs in the @erase list are scheduled for erasure later. + * * 2. Unexpected corruptions which are not caused by power cuts. During - * scanning, such PEBs are put to the @corr list and UBI preserves them. + * scanning, such PEBs are put to the @corr list and UBI preserves them.
+ * Obviously, this lessens the amount of available PEBs, and if at some point + * UBI runs out of free PEBs, it switches to R/O mode. UBI also loudly informs + * about such PEBs every time the MTD device is attached. * * However, it is difficult to reliably distinguish between these types of - * corruptions and UBI's strategy is as follows. UBI assumes (2.) if the VID - * header is corrupted and the data area does not contain all 0xFFs, and there - * were not bit-flips or integrity errors while reading the data area. Otherwise - * UBI assumes (1.). The assumptions are: - * o if the data area contains only 0xFFs, there is no data, and it is safe - * to just erase this PEB. - * o if the data area has bit-flips and data integrity errors (ECC errors on + * corruptions and UBI's strategy is as follows. UBI assumes corruption type 2 + * if the VID header is corrupted and the data area does not contain all 0xFFs, + * and there were no bit-flips or integrity errors while reading the data area. + * Otherwise UBI assumes corruption type 1. So the decision criteria are as + * follows. + * o If the data area contains only 0xFFs, there is no data, and it is safe + * to just erase this PEB - this is corruption type 1. + * o If the data area has bit-flips or data integrity errors (ECC errors on + * NAND), it is probably a PEB which was being erased when power cut + * happened, so this is corruption type 1. However, this is just a guess, + * which might be wrong. + * o Otherwise this is corruption type 2. */ #include <linux/err.h> @@ -74,7 +88,7 @@ #include <linux/random.h> #include "ubi.h" -#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID +#ifdef CONFIG_MTD_UBI_DEBUG static int paranoid_check_si(struct ubi_device *ubi, struct ubi_scan_info *si); #else #define paranoid_check_si(ubi, si) 0 @@ -115,7 +129,7 @@ static int add_to_list(struct ubi_scan_info *si, int pnum, int ec, int to_head, } else BUG(); - seb = kmalloc(sizeof(struct ubi_scan_leb), GFP_KERNEL); + seb = kmem_cache_alloc(si->scan_leb_slab, GFP_KERNEL); if (!seb) return -ENOMEM; @@ -144,7 +158,7 @@ static int add_corrupted(struct ubi_scan_info *si, int pnum, int ec) dbg_bld("add to corrupted: PEB %d, EC %d", pnum, ec); - seb = kmalloc(sizeof(struct ubi_scan_leb), GFP_KERNEL); + seb = kmem_cache_alloc(si->scan_leb_slab, GFP_KERNEL); if (!seb) return -ENOMEM; @@ -553,7 +567,7 @@ int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si, if (err) return err; - seb = kmalloc(sizeof(struct ubi_scan_leb), GFP_KERNEL); + seb = kmem_cache_alloc(si->scan_leb_slab, GFP_KERNEL); if (!seb) return -ENOMEM; @@ -1152,9 +1166,15 @@ struct ubi_scan_info *ubi_scan(struct ubi_device *ubi) si->volumes = RB_ROOT; err = -ENOMEM; + si->scan_leb_slab = kmem_cache_create("ubi_scan_leb_slab", + sizeof(struct ubi_scan_leb), + 0, 0, NULL); + if (!si->scan_leb_slab) + goto out_si; + ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL); if (!ech) - goto out_si; + goto out_slab; vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL); if (!vidh) @@ -1215,6 +1235,8 @@ out_vidh: ubi_free_vid_hdr(ubi, vidh); out_ech: kfree(ech); +out_slab: + kmem_cache_destroy(si->scan_leb_slab); out_si: ubi_scan_destroy_si(si); return ERR_PTR(err); @@ -1223,11 +1245,12 @@ out_si: /** * destroy_sv - free the scanning volume information * @sv: scanning volume information + * @si: scanning information * * This function destroys the volume RB-tree (@sv->root) and the scanning * volume information.
*/ -static void destroy_sv(struct ubi_scan_volume *sv) +static void destroy_sv(struct ubi_scan_info *si, struct ubi_scan_volume *sv) { struct ubi_scan_leb *seb; struct rb_node *this = sv->root.rb_node; @@ -1247,7 +1270,7 @@ static void destroy_sv(struct ubi_scan_volume *sv) this->rb_right = NULL; } - kfree(seb); + kmem_cache_free(si->scan_leb_slab, seb); } } kfree(sv); @@ -1265,19 +1288,19 @@ void ubi_scan_destroy_si(struct ubi_scan_info *si) list_for_each_entry_safe(seb, seb_tmp, &si->alien, u.list) { list_del(&seb->u.list); - kfree(seb); + kmem_cache_free(si->scan_leb_slab, seb); } list_for_each_entry_safe(seb, seb_tmp, &si->erase, u.list) { list_del(&seb->u.list); - kfree(seb); + kmem_cache_free(si->scan_leb_slab, seb); } list_for_each_entry_safe(seb, seb_tmp, &si->corr, u.list) { list_del(&seb->u.list); - kfree(seb); + kmem_cache_free(si->scan_leb_slab, seb); } list_for_each_entry_safe(seb, seb_tmp, &si->free, u.list) { list_del(&seb->u.list); - kfree(seb); + kmem_cache_free(si->scan_leb_slab, seb); } /* Destroy the volume RB-tree */ @@ -1298,14 +1321,15 @@ void ubi_scan_destroy_si(struct ubi_scan_info *si) rb->rb_right = NULL; } - destroy_sv(sv); + destroy_sv(si, sv); } } + kmem_cache_destroy(si->scan_leb_slab); kfree(si); } -#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID +#ifdef CONFIG_MTD_UBI_DEBUG /** * paranoid_check_si - check the scanning information. @@ -1323,6 +1347,9 @@ static int paranoid_check_si(struct ubi_device *ubi, struct ubi_scan_info *si) struct ubi_scan_leb *seb, *last_seb; uint8_t *buf; + if (!(ubi_chk_flags & UBI_CHK_GEN)) + return 0; + /* * At first, check that scanning information is OK. */ @@ -1575,4 +1602,4 @@ out: return -EINVAL; } -#endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */ +#endif /* CONFIG_MTD_UBI_DEBUG */ diff --git a/drivers/mtd/ubi/scan.h b/drivers/mtd/ubi/scan.h index a3264f0bef2b..d48aef15ab5d 100644 --- a/drivers/mtd/ubi/scan.h +++ b/drivers/mtd/ubi/scan.h @@ -109,6 +109,7 @@ struct ubi_scan_volume { * @mean_ec: mean erase counter value * @ec_sum: a temporary variable used when calculating @mean_ec * @ec_count: a temporary variable used when calculating @mean_ec + * @scan_leb_slab: slab cache for &struct ubi_scan_leb objects * * This data structure contains the result of scanning and may be used by other * UBI sub-systems to build final UBI data structures, further error-recovery @@ -134,6 +135,7 @@ struct ubi_scan_info { int mean_ec; uint64_t ec_sum; int ec_count; + struct kmem_cache *scan_leb_slab; }; struct ubi_device; diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h index 0b0149c41fe3..f1be8b79663c 100644 --- a/drivers/mtd/ubi/ubi.h +++ b/drivers/mtd/ubi/ubi.h @@ -40,6 +40,7 @@ #include <linux/notifier.h> #include <linux/mtd/mtd.h> #include <linux/mtd/ubi.h> +#include <asm/pgtable.h> #include "ubi-media.h" #include "scan.h" @@ -381,14 +382,14 @@ struct ubi_wl_entry; * @bad_allowed: whether the MTD device admits of bad physical eraseblocks or * not * @nor_flash: non-zero if working on top of NOR flash + * @max_write_size: maximum amount of bytes the underlying flash can write at a + * time (MTD write buffer size) * @mtd: MTD device descriptor * * @peb_buf1: a buffer of PEB size used for different purposes * @peb_buf2: another buffer of PEB size used for different purposes * @buf_mutex: protects @peb_buf1 and @peb_buf2 * @ckvol_mutex: serializes static volume checking when opening - * @dbg_peb_buf: buffer of PEB size used for debugging - * @dbg_buf_mutex: protects @dbg_peb_buf */ struct ubi_device { struct cdev cdev; @@ -464,16 +465,13 @@ struct 
ubi_device { int vid_hdr_shift; unsigned int bad_allowed:1; unsigned int nor_flash:1; + int max_write_size; struct mtd_info *mtd; void *peb_buf1; void *peb_buf2; struct mutex buf_mutex; struct mutex ckvol_mutex; -#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID - void *dbg_peb_buf; - struct mutex dbg_buf_mutex; -#endif }; extern struct kmem_cache *ubi_wl_entry_slab; diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c index c47620dfc722..b79e0dea3632 100644 --- a/drivers/mtd/ubi/vmt.c +++ b/drivers/mtd/ubi/vmt.c @@ -28,7 +28,7 @@ #include <linux/slab.h> #include "ubi.h" -#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID +#ifdef CONFIG_MTD_UBI_DEBUG static int paranoid_check_volumes(struct ubi_device *ubi); #else #define paranoid_check_volumes(ubi) 0 @@ -711,7 +711,7 @@ void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol) volume_sysfs_close(vol); } -#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID +#ifdef CONFIG_MTD_UBI_DEBUG /** * paranoid_check_volume - check volume information. @@ -876,6 +876,9 @@ static int paranoid_check_volumes(struct ubi_device *ubi) { int i, err = 0; + if (!(ubi_chk_flags & UBI_CHK_GEN)) + return 0; + for (i = 0; i < ubi->vtbl_slots; i++) { err = paranoid_check_volume(ubi, i); if (err) diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c index 0b8141fc5c26..fd3bf770f518 100644 --- a/drivers/mtd/ubi/vtbl.c +++ b/drivers/mtd/ubi/vtbl.c @@ -62,7 +62,7 @@ #include <asm/div64.h> #include "ubi.h" -#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID +#ifdef CONFIG_MTD_UBI_DEBUG static void paranoid_vtbl_check(const struct ubi_device *ubi); #else #define paranoid_vtbl_check(ubi) @@ -868,7 +868,7 @@ out_free: return err; } -#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID +#ifdef CONFIG_MTD_UBI_DEBUG /** * paranoid_vtbl_check - check volume table. @@ -876,10 +876,13 @@ out_free: */ static void paranoid_vtbl_check(const struct ubi_device *ubi) { + if (!(ubi_chk_flags & UBI_CHK_GEN)) + return; + if (vtbl_check(ubi, ubi->vtbl)) { ubi_err("paranoid check failed"); BUG(); } } -#endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */ +#endif /* CONFIG_MTD_UBI_DEBUG */ diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c index 655bbbe415d9..b4cf57db2556 100644 --- a/drivers/mtd/ubi/wl.c +++ b/drivers/mtd/ubi/wl.c @@ -161,7 +161,7 @@ struct ubi_work { int torture; }; -#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID +#ifdef CONFIG_MTD_UBI_DEBUG static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec); static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root); @@ -613,7 +613,7 @@ static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk) list_add_tail(&wrk->list, &ubi->works); ubi_assert(ubi->works_count >= 0); ubi->works_count += 1; - if (ubi->thread_enabled) + if (ubi->thread_enabled && !ubi_dbg_is_bgt_disabled()) wake_up_process(ubi->bgt_thread); spin_unlock(&ubi->wl_lock); } @@ -1364,7 +1364,7 @@ int ubi_thread(void *u) spin_lock(&ubi->wl_lock); if (list_empty(&ubi->works) || ubi->ro_mode || - !ubi->thread_enabled) { + !ubi->thread_enabled || ubi_dbg_is_bgt_disabled()) { set_current_state(TASK_INTERRUPTIBLE); spin_unlock(&ubi->wl_lock); schedule(); @@ -1561,7 +1561,7 @@ void ubi_wl_close(struct ubi_device *ubi) kfree(ubi->lookuptbl); } -#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID +#ifdef CONFIG_MTD_UBI_DEBUG /** * paranoid_check_ec - make sure that the erase counter of a PEB is correct. 
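Review note: every paranoid_check_*() helper touched by this patch now opens with the same guard, so the self-checks still compile in under CONFIG_MTD_UBI_DEBUG but cost a single test-and-branch unless the corresponding ubi_chk_flags bit is set. Distilled into a sketch (the function name is illustrative; enabling the checks via "modprobe ubi debug_chks=0x1", or at run time through /sys/module/ubi/parameters/debug_chks, is an assumption based on standard module_param_named() semantics and the S_IWUSR mode used in debug.c):

static int paranoid_check_example(const struct ubi_device *ubi)
{
	/* Self-checks disabled: report success without doing any work. */
	if (!(ubi_chk_flags & UBI_CHK_GEN))
		return 0;

	/* ... expensive consistency checking would go here ... */
	return 0;
}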
@@ -1578,6 +1578,9 @@ static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec) long long read_ec; struct ubi_ec_hdr *ec_hdr; + if (!(ubi_chk_flags & UBI_CHK_GEN)) + return 0; + ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS); if (!ec_hdr) return -ENOMEM; @@ -1614,6 +1617,9 @@ out_free: static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root) { + if (!(ubi_chk_flags & UBI_CHK_GEN)) + return 0; + if (in_wl_tree(e, root)) return 0; @@ -1636,6 +1642,9 @@ static int paranoid_check_in_pq(struct ubi_device *ubi, struct ubi_wl_entry *e) struct ubi_wl_entry *p; int i; + if (!(ubi_chk_flags & UBI_CHK_GEN)) + return 0; + for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) list_for_each_entry(p, &ubi->pq[i], u.list) if (p == e) @@ -1646,4 +1655,5 @@ static int paranoid_check_in_pq(struct ubi_device *ubi, struct ubi_wl_entry *e) ubi_dbg_dump_stack(); return -EINVAL; } -#endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */ + +#endif /* CONFIG_MTD_UBI_DEBUG */ diff --git a/drivers/net/atl1c/atl1c.h b/drivers/net/atl1c/atl1c.h index 9ab58097fa2e..7cb375e0e29c 100644 --- a/drivers/net/atl1c/atl1c.h +++ b/drivers/net/atl1c/atl1c.h @@ -265,7 +265,7 @@ struct atl1c_recv_ret_status { __le32 word3; }; -/* RFD desciptor */ +/* RFD descriptor */ struct atl1c_rx_free_desc { __le64 buffer_addr; }; @@ -531,7 +531,7 @@ struct atl1c_rfd_ring { struct atl1c_buffer *buffer_info; }; -/* receive return desciptor (rrd) ring */ +/* receive return descriptor (rrd) ring */ struct atl1c_rrd_ring { void *desc; /* descriptor ring virtual address */ dma_addr_t dma; /* descriptor ring physical address */ diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c index 9d48659e3b28..32e64cc85d2c 100644 --- a/drivers/net/bnx2x/bnx2x_main.c +++ b/drivers/net/bnx2x/bnx2x_main.c @@ -145,13 +145,6 @@ static struct { { "Broadcom NetXtreme II BCM57712E XGb" } }; -#ifndef PCI_DEVICE_ID_NX2_57712 -#define PCI_DEVICE_ID_NX2_57712 0x1662 -#endif -#ifndef PCI_DEVICE_ID_NX2_57712E -#define PCI_DEVICE_ID_NX2_57712E 0x1663 -#endif - static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = { { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 }, { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 }, diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c index 2d21c60085bc..348b4f1367c9 100644 --- a/drivers/net/qla3xxx.c +++ b/drivers/net/qla3xxx.c @@ -2460,7 +2460,7 @@ map_error: * The 3032 supports sglists by using the 3 addr/len pairs (ALP) * in the IOCB plus a chain of outbound address lists (OAL) that * each contain 5 ALPs. The last ALP of the IOCB (3rd) or OAL (5th) - * will used to point to an OAL when more ALP entries are required. + * will be used to point to an OAL when more ALP entries are required. * The IOCB is always the top of the chain followed by one or more * OALs (when necessary). */ diff --git a/drivers/net/sungem.h b/drivers/net/sungem.h index ede017872367..d225077964e2 100644 --- a/drivers/net/sungem.h +++ b/drivers/net/sungem.h @@ -843,7 +843,7 @@ struct gem_txd { /* GEM requires that RX descriptors are provided four at a time, * aligned. Also, the RX ring may not wrap around. This means that - * there will be at least 4 unused desciptor entries in the middle + * there will be at least 4 unused descriptor entries in the middle * of the RX ring at all times. 
* * Similar to HME, GEM assumes that it can write garbage bytes before diff --git a/drivers/net/tile/tilepro.c b/drivers/net/tile/tilepro.c index 7cb301da7474..0825db6d883f 100644 --- a/drivers/net/tile/tilepro.c +++ b/drivers/net/tile/tilepro.c @@ -1,5 +1,5 @@ /* - * Copyright 2010 Tilera Corporation. All Rights Reserved. + * Copyright 2011 Tilera Corporation. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -44,10 +44,6 @@ #include <linux/tcp.h> -/* There is no singlethread_cpu, so schedule work on the current cpu. */ -#define singlethread_cpu -1 - - /* * First, "tile_net_init_module()" initializes all four "devices" which * can be used by linux. @@ -73,15 +69,16 @@ * return, knowing we will be called again later. Otherwise, we * reenable the ingress interrupt, and call "napi_complete()". * + * HACK: Since disabling the ingress interrupt is not reliable, we + * ignore the interrupt if the global "active" flag is false. + * * * NOTE: The use of "native_driver" ensures that EPP exists, and that - * "epp_sendv" is legal, and that "LIPP" is being used. + * we are using "LIPP" and "LEPP". * * NOTE: Failing to free completions for an arbitrarily long time * (which is defined to be illegal) does in fact cause bizarre * problems. The "egress_timer" helps prevent this from happening. - * - * NOTE: The egress code can be interrupted by the interrupt handler. */ @@ -142,6 +139,7 @@ MODULE_AUTHOR("Tilera"); MODULE_LICENSE("GPL"); + /* * Queue of incoming packets for a specific cpu and device. * @@ -177,7 +175,7 @@ struct tile_net_cpu { struct tile_netio_queue queue; /* Statistics. */ struct tile_net_stats_t stats; - /* ISSUE: Is this needed? */ + /* True iff NAPI is enabled. */ bool napi_enabled; /* True if this tile has succcessfully registered with the IPP. */ bool registered; @@ -200,20 +198,20 @@ struct tile_net_cpu { struct tile_net_priv { /* Our network device. */ struct net_device *dev; - /* The actual egress queue. */ - lepp_queue_t *epp_queue; - /* Protects "epp_queue->cmd_tail" and "epp_queue->comp_tail" */ - spinlock_t cmd_lock; - /* Protects "epp_queue->comp_head". */ - spinlock_t comp_lock; + /* Pages making up the egress queue. */ + struct page *eq_pages; + /* Address of the actual egress queue. */ + lepp_queue_t *eq; + /* Protects "eq". */ + spinlock_t eq_lock; /* The hypervisor handle for this interface. */ int hv_devhdl; /* The intr bit mask that IDs this device. */ u32 intr_id; /* True iff "tile_net_open_aux()" has succeeded. */ - int partly_opened; - /* True iff "tile_net_open_inner()" has succeeded. */ - int fully_opened; + bool partly_opened; + /* True iff the device is "active". */ + bool active; /* Effective network cpus. */ struct cpumask network_cpus_map; /* Number of network cpus. */ @@ -228,6 +226,10 @@ struct tile_net_priv { struct tile_net_cpu *cpu[NR_CPUS]; }; +/* Log2 of the number of small pages needed for the egress queue. */ +#define EQ_ORDER get_order(sizeof(lepp_queue_t)) +/* Size of the egress queue's pages. */ +#define EQ_SIZE (1 << (PAGE_SHIFT + EQ_ORDER)) /* * The actual devices (xgbe0, xgbe1, gbe0, gbe1). @@ -284,7 +286,11 @@ static void net_printk(char *fmt, ...) 
*/ static void dump_packet(unsigned char *data, unsigned long length, char *s) { + int my_cpu = smp_processor_id(); + unsigned long i; + char buf[128]; + static unsigned int count; pr_info("dump_packet(data %p, length 0x%lx s %s count 0x%x)\n", @@ -294,10 +300,12 @@ static void dump_packet(unsigned char *data, unsigned long length, char *s) for (i = 0; i < length; i++) { if ((i & 0xf) == 0) - sprintf(buf, "%8.8lx:", i); + sprintf(buf, "[%02d] %8.8lx:", my_cpu, i); sprintf(buf + strlen(buf), " %2.2x", data[i]); - if ((i & 0xf) == 0xf || i == length - 1) - pr_info("%s\n", buf); + if ((i & 0xf) == 0xf || i == length - 1) { + strcat(buf, "\n"); + pr_info("%s", buf); + } } } #endif @@ -351,60 +359,109 @@ static void tile_net_provide_linux_buffer(struct tile_net_cpu *info, /* * Provide a linux buffer for LIPP. + * + * Note that the ACTUAL allocation for each buffer is a "struct sk_buff", + * plus a chunk of memory that includes not only the requested bytes, but + * also NET_SKB_PAD bytes of initial padding, and a "struct skb_shared_info". + * + * Note that "struct skb_shared_info" is 88 bytes with 64K pages and + * 268 bytes with 4K pages (since the frags[] array needs 18 entries). + * + * Without jumbo packets, the maximum packet size will be 1536 bytes, + * and we use 2 bytes (NET_IP_ALIGN) of padding. ISSUE: If we told + * the hardware to clip at 1518 bytes instead of 1536 bytes, then we + * could save an entire cache line, but in practice, we don't need it. + * + * Since CPAs are 38 bits, and we can only encode the high 31 bits in + * a "linux_buffer_t", the low 7 bits must be zero, and thus, we must + * align the actual "va" mod 128. + * + * We assume that the underlying "head" will be aligned mod 64. Note + * that in practice, we have seen "head" NOT aligned mod 128 even when + * using 2048 byte allocations, which is surprising. + * + * If "head" WAS always aligned mod 128, we could change LIPP to + * assume that the low SIX bits are zero, and the 7th bit is one, that + * is, align the actual "va" mod 128 plus 64, which would be "free". + * + * For now, the actual "head" pointer points at NET_SKB_PAD bytes of + * padding, plus 28 or 92 bytes of extra padding, plus the sk_buff + * pointer, plus the NET_IP_ALIGN padding, plus 126 or 1536 bytes for + * the actual packet, plus 62 bytes of empty padding, plus some + * padding and the "struct skb_shared_info". + * + * With 64K pages, a large buffer thus needs 32+92+4+2+1536+62+88 + * bytes, or 1816 bytes, which fits comfortably into 2048 bytes. + * + * With 64K pages, a small buffer thus needs 32+92+4+2+126+88 + * bytes, or 344 bytes, which means we are wasting 64+ bytes, and + * could presumably increase the size of small buffers. + * + * With 4K pages, a large buffer thus needs 32+92+4+2+1536+62+268 + * bytes, or 1996 bytes, which fits comfortably into 2048 bytes. + * + * With 4K pages, a small buffer thus needs 32+92+4+2+126+268 + * bytes, or 524 bytes, which is annoyingly wasteful. + * + * Maybe we should increase LIPP_SMALL_PACKET_SIZE to 192? + * + * ISSUE: Maybe we should increase "NET_SKB_PAD" to 64? */ static bool tile_net_provide_needed_buffer(struct tile_net_cpu *info, bool small) { - /* ISSUE: What should we use here? */ +#if TILE_NET_MTU <= 1536 + /* Without "jumbo", 2 + 1536 should be sufficient. */ + unsigned int large_size = NET_IP_ALIGN + 1536; +#else + /* ISSUE: This has not been tested. */ unsigned int large_size = NET_IP_ALIGN + TILE_NET_MTU + 100; +#endif - /* Round up to ensure to avoid "false sharing" with last cache line. 
*/ - unsigned int buffer_size = + /* Avoid "false sharing" with last cache line. */ + /* ISSUE: This is already done by "dev_alloc_skb()". */ + unsigned int len = (((small ? LIPP_SMALL_PACKET_SIZE : large_size) + CHIP_L2_LINE_SIZE() - 1) & -CHIP_L2_LINE_SIZE()); - /* - * ISSUE: Since CPAs are 38 bits, and we can only encode the - * high 31 bits in a "linux_buffer_t", the low 7 bits must be - * zero, and thus, we must align the actual "va" mod 128. - */ - const unsigned long align = 128; + unsigned int padding = 128 - NET_SKB_PAD; + unsigned int align; struct sk_buff *skb; void *va; struct sk_buff **skb_ptr; - /* Note that "dev_alloc_skb()" adds NET_SKB_PAD more bytes, */ - /* and also "reserves" that many bytes. */ - /* ISSUE: Can we "share" the NET_SKB_PAD bytes with "skb_ptr"? */ - int len = sizeof(*skb_ptr) + align + buffer_size; - - while (1) { - - /* Allocate (or fail). */ - skb = dev_alloc_skb(len); - if (skb == NULL) - return false; - - /* Make room for a back-pointer to 'skb'. */ - skb_reserve(skb, sizeof(*skb_ptr)); + /* Request 96 extra bytes for alignment purposes. */ + skb = dev_alloc_skb(len + padding); + if (skb == NULL) + return false; - /* Make sure we are aligned. */ - skb_reserve(skb, -(long)skb->data & (align - 1)); + /* Skip 32 or 96 bytes to align "data" mod 128. */ + align = -(long)skb->data & (128 - 1); + BUG_ON(align > padding); + skb_reserve(skb, align); - /* This address is given to IPP. */ - va = skb->data; + /* This address is given to IPP. */ + va = skb->data; - if (small) - break; + /* Buffers must not span a huge page. */ + BUG_ON(((((long)va & ~HPAGE_MASK) + len) & HPAGE_MASK) != 0); - /* ISSUE: This has never been observed! */ - /* Large buffers must not span a huge page. */ - if (((((long)va & ~HPAGE_MASK) + 1535) & HPAGE_MASK) == 0) - break; - pr_err("Leaking unaligned linux buffer at %p.\n", va); +#ifdef TILE_NET_PARANOIA +#if CHIP_HAS_CBOX_HOME_MAP() + if (hash_default) { + HV_PTE pte = *virt_to_pte(current->mm, (unsigned long)va); + if (hv_pte_get_mode(pte) != HV_PTE_MODE_CACHE_HASH_L3) + panic("Non-HFH ingress buffer! VA=%p Mode=%d PTE=%llx", + va, hv_pte_get_mode(pte), hv_pte_val(pte)); } +#endif +#endif + + /* Invalidate the packet buffer. */ + if (!hash_default) + __inv_buffer(va, len); /* Skip two bytes to satisfy LIPP assumptions. */ /* Note that this aligns IP on a 16 byte boundary. */ @@ -415,23 +472,9 @@ static bool tile_net_provide_needed_buffer(struct tile_net_cpu *info, skb_ptr = va - sizeof(*skb_ptr); *skb_ptr = skb; - /* Invalidate the packet buffer. */ - if (!hash_default) - __inv_buffer(skb->data, buffer_size); - /* Make sure "skb_ptr" has been flushed. */ __insn_mf(); -#ifdef TILE_NET_PARANOIA -#if CHIP_HAS_CBOX_HOME_MAP() - if (hash_default) { - HV_PTE pte = *virt_to_pte(current->mm, (unsigned long)va); - if (hv_pte_get_mode(pte) != HV_PTE_MODE_CACHE_HASH_L3) - panic("Non-coherent ingress buffer!"); - } -#endif -#endif - /* Provide the new buffer. */ tile_net_provide_linux_buffer(info, va, small); @@ -469,48 +512,64 @@ oops: * Grab some LEPP completions, and store them in "comps", of size * "comps_size", and return the number of completions which were * stored, so the caller can free them. - * - * If "pending" is not NULL, it will be set to true if there might - * still be some pending completions caused by this tile, else false. 
*/ -static unsigned int tile_net_lepp_grab_comps(struct net_device *dev, +static unsigned int tile_net_lepp_grab_comps(lepp_queue_t *eq, struct sk_buff *comps[], unsigned int comps_size, - bool *pending) + unsigned int min_size) { - struct tile_net_priv *priv = netdev_priv(dev); - - lepp_queue_t *eq = priv->epp_queue; - unsigned int n = 0; - unsigned int comp_head; - unsigned int comp_busy; - unsigned int comp_tail; - - spin_lock(&priv->comp_lock); - - comp_head = eq->comp_head; - comp_busy = eq->comp_busy; - comp_tail = eq->comp_tail; + unsigned int comp_head = eq->comp_head; + unsigned int comp_busy = eq->comp_busy; while (comp_head != comp_busy && n < comps_size) { comps[n++] = eq->comps[comp_head]; LEPP_QINC(comp_head); } - if (pending != NULL) - *pending = (comp_head != comp_tail); + if (n < min_size) + return 0; eq->comp_head = comp_head; - spin_unlock(&priv->comp_lock); - return n; } /* + * Free some comps, and return true iff there are still some pending. + */ +static bool tile_net_lepp_free_comps(struct net_device *dev, bool all) +{ + struct tile_net_priv *priv = netdev_priv(dev); + + lepp_queue_t *eq = priv->eq; + + struct sk_buff *olds[64]; + unsigned int wanted = 64; + unsigned int i, n; + bool pending; + + spin_lock(&priv->eq_lock); + + if (all) + eq->comp_busy = eq->comp_tail; + + n = tile_net_lepp_grab_comps(eq, olds, wanted, 0); + + pending = (eq->comp_head != eq->comp_tail); + + spin_unlock(&priv->eq_lock); + + for (i = 0; i < n; i++) + kfree_skb(olds[i]); + + return pending; +} + + +/* * Make sure the egress timer is scheduled. * * Note that we use "schedule if not scheduled" logic instead of the more @@ -544,21 +603,11 @@ static void tile_net_handle_egress_timer(unsigned long arg) struct tile_net_cpu *info = (struct tile_net_cpu *)arg; struct net_device *dev = info->napi.dev; - struct sk_buff *olds[32]; - unsigned int wanted = 32; - unsigned int i, nolds = 0; - bool pending; - /* The timer is no longer scheduled. */ info->egress_timer_scheduled = false; - nolds = tile_net_lepp_grab_comps(dev, olds, wanted, &pending); - - for (i = 0; i < nolds; i++) - kfree_skb(olds[i]); - - /* Reschedule timer if needed. */ - if (pending) + /* Free comps, and reschedule timer if more are pending. */ + if (tile_net_lepp_free_comps(dev, false)) tile_net_schedule_egress_timer(info); } @@ -636,8 +685,39 @@ static bool is_dup_ack(char *s1, char *s2, unsigned int len) +static void tile_net_discard_aux(struct tile_net_cpu *info, int index) +{ + struct tile_netio_queue *queue = &info->queue; + netio_queue_impl_t *qsp = queue->__system_part; + netio_queue_user_impl_t *qup = &queue->__user_part; + + int index2_aux = index + sizeof(netio_pkt_t); + int index2 = + ((index2_aux == + qsp->__packet_receive_queue.__last_packet_plus_one) ? + 0 : index2_aux); + + netio_pkt_t *pkt = (netio_pkt_t *)((unsigned long) &qsp[1] + index); + + /* Extract the "linux_buffer_t". */ + unsigned int buffer = pkt->__packet.word; + + /* Convert "linux_buffer_t" to "va". */ + void *va = __va((phys_addr_t)(buffer >> 1) << 7); + + /* Acquire the associated "skb". */ + struct sk_buff **skb_ptr = va - sizeof(*skb_ptr); + struct sk_buff *skb = *skb_ptr; + + kfree_skb(skb); + + /* Consume this packet. */ + qup->__packet_receive_read = index2; +} + + /* - * Like "tile_net_handle_packets()", but just discard packets. + * Like "tile_net_poll()", but just discard packets. 
*/ static void tile_net_discard_packets(struct net_device *dev) { @@ -650,32 +730,8 @@ static void tile_net_discard_packets(struct net_device *dev) while (qup->__packet_receive_read != qsp->__packet_receive_queue.__packet_write) { - int index = qup->__packet_receive_read; - - int index2_aux = index + sizeof(netio_pkt_t); - int index2 = - ((index2_aux == - qsp->__packet_receive_queue.__last_packet_plus_one) ? - 0 : index2_aux); - - netio_pkt_t *pkt = (netio_pkt_t *) - ((unsigned long) &qsp[1] + index); - - /* Extract the "linux_buffer_t". */ - unsigned int buffer = pkt->__packet.word; - - /* Convert "linux_buffer_t" to "va". */ - void *va = __va((phys_addr_t)(buffer >> 1) << 7); - - /* Acquire the associated "skb". */ - struct sk_buff **skb_ptr = va - sizeof(*skb_ptr); - struct sk_buff *skb = *skb_ptr; - - kfree_skb(skb); - - /* Consume this packet. */ - qup->__packet_receive_read = index2; + tile_net_discard_aux(info, index); } } @@ -704,7 +760,8 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index) netio_pkt_metadata_t *metadata = NETIO_PKT_METADATA(pkt); - /* Extract the packet size. */ + /* Extract the packet size. FIXME: Shouldn't the second line */ + /* get subtracted? Mostly moot, since it should be "zero". */ unsigned long len = (NETIO_PKT_CUSTOM_LENGTH(pkt) + NET_IP_ALIGN - NETIO_PACKET_PADDING); @@ -722,15 +779,6 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index) /* Compare to "NETIO_PKT_CUSTOM_DATA(pkt)". */ unsigned char *buf = va + NET_IP_ALIGN; -#ifdef IGNORE_DUP_ACKS - - static int other; - static int final; - static int keep; - static int skip; - -#endif - /* Invalidate the packet buffer. */ if (!hash_default) __inv_buffer(buf, len); @@ -745,16 +793,8 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index) #ifdef TILE_NET_VERIFY_INGRESS if (!NETIO_PKT_L4_CSUM_CORRECT_M(metadata, pkt) && NETIO_PKT_L4_CSUM_CALCULATED_M(metadata, pkt)) { - /* - * FIXME: This complains about UDP packets - * with a "zero" checksum (bug 6624). - */ -#ifdef TILE_NET_PANIC_ON_BAD - dump_packet(buf, len, "rx"); - panic("Bad L4 checksum."); -#else + /* Bug 6624: Includes UDP packets with a "zero" checksum. */ pr_warning("Bad L4 checksum on %d byte packet.\n", len); -#endif } if (!NETIO_PKT_L3_CSUM_CORRECT_M(metadata, pkt) && NETIO_PKT_L3_CSUM_CALCULATED_M(metadata, pkt)) { @@ -769,90 +809,29 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index) } break; case NETIO_PKT_STATUS_BAD: -#ifdef TILE_NET_PANIC_ON_BAD - dump_packet(buf, len, "rx"); - panic("Unexpected BAD packet."); -#else - pr_warning("Unexpected BAD %d byte packet.\n", len); -#endif + pr_warning("Unexpected BAD %ld byte packet.\n", len); } #endif filter = 0; + /* ISSUE: Filter TCP packets with "bad" checksums? */ + if (!(dev->flags & IFF_UP)) { /* Filter packets received before we're up. */ filter = 1; + } else if (NETIO_PKT_STATUS_M(metadata, pkt) == NETIO_PKT_STATUS_BAD) { + /* Filter "truncated" packets. */ + filter = 1; } else if (!(dev->flags & IFF_PROMISC)) { - /* - * FIXME: Implement HW multicast filter. - */ - if (is_unicast_ether_addr(buf)) { + /* FIXME: Implement HW multicast filter. */ + if (!is_multicast_ether_addr(buf)) { /* Filter packets not for our address. */ const u8 *mine = dev->dev_addr; filter = compare_ether_addr(mine, buf); } } -#ifdef IGNORE_DUP_ACKS - - if (len != 66) { - /* FIXME: Must check "is_tcp_ack(buf, len)" somehow. 
*/ - - other++; - - } else if (index2 == - qsp->__packet_receive_queue.__packet_write) { - - final++; - - } else { - - netio_pkt_t *pkt2 = (netio_pkt_t *) - ((unsigned long) &qsp[1] + index2); - - netio_pkt_metadata_t *metadata2 = - NETIO_PKT_METADATA(pkt2); - - /* Extract the packet size. */ - unsigned long len2 = - (NETIO_PKT_CUSTOM_LENGTH(pkt2) + - NET_IP_ALIGN - NETIO_PACKET_PADDING); - - if (len2 == 66 && - NETIO_PKT_FLOW_HASH_M(metadata, pkt) == - NETIO_PKT_FLOW_HASH_M(metadata2, pkt2)) { - - /* Extract the "linux_buffer_t". */ - unsigned int buffer2 = pkt2->__packet.word; - - /* Convert "linux_buffer_t" to "va". */ - void *va2 = - __va((phys_addr_t)(buffer2 >> 1) << 7); - - /* Extract the packet data pointer. */ - /* Compare to "NETIO_PKT_CUSTOM_DATA(pkt)". */ - unsigned char *buf2 = va2 + NET_IP_ALIGN; - - /* Invalidate the packet buffer. */ - if (!hash_default) - __inv_buffer(buf2, len2); - - if (is_dup_ack(buf, buf2, len)) { - skip++; - filter = 1; - } else { - keep++; - } - } - } - - if (net_ratelimit()) - pr_info("Other %d Final %d Keep %d Skip %d.\n", - other, final, keep, skip); - -#endif - if (filter) { /* ISSUE: Update "drop" statistics? */ @@ -877,10 +856,7 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index) /* NOTE: This call also sets "skb->dev = dev". */ skb->protocol = eth_type_trans(skb, dev); - /* ISSUE: Discard corrupt packets? */ - /* ISSUE: Discard packets with bad checksums? */ - - /* Avoid recomputing TCP/UDP checksums. */ + /* Avoid recomputing "good" TCP/UDP checksums. */ if (NETIO_PKT_L4_CSUM_CORRECT_M(metadata, pkt)) skb->ip_summed = CHECKSUM_UNNECESSARY; @@ -912,9 +888,14 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index) /* * Handle some packets for the given device on the current CPU. * - * ISSUE: The "rotting packet" race condition occurs if a packet - * arrives after the queue appears to be empty, and before the - * hypervisor interrupt is re-enabled. + * If "tile_net_stop()" is called on some other tile while this + * function is running, we will return, hopefully before that + * other tile asks us to call "napi_disable()". + * + * The "rotting packet" race condition occurs if a packet arrives + * during the extremely narrow window between the queue appearing to + * be empty, and the ingress interrupt being re-enabled. This happens + * a LOT under heavy network load. */ static int tile_net_poll(struct napi_struct *napi, int budget) { @@ -928,7 +909,7 @@ static int tile_net_poll(struct napi_struct *napi, int budget) unsigned int work = 0; - while (1) { + while (priv->active) { int index = qup->__packet_receive_read; if (index == qsp->__packet_receive_queue.__packet_write) break; @@ -941,19 +922,24 @@ static int tile_net_poll(struct napi_struct *napi, int budget) napi_complete(&info->napi); - /* Re-enable hypervisor interrupts. */ + if (!priv->active) + goto done; + + /* Re-enable the ingress interrupt. */ enable_percpu_irq(priv->intr_id); - /* HACK: Avoid the "rotting packet" problem. */ + /* HACK: Avoid the "rotting packet" problem (see above). */ if (qup->__packet_receive_read != - qsp->__packet_receive_queue.__packet_write) - napi_schedule(&info->napi); - - /* ISSUE: Handle completions? */ + qsp->__packet_receive_queue.__packet_write) { + /* ISSUE: Sometimes this returns zero, presumably */ + /* because an interrupt was handled for this tile. 
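
/*
 * Editor's sketch of the "rotting packet" workaround shape used by
 * tile_net_poll() above: complete NAPI, re-enable the interrupt, then
 * re-check the queue, because a packet may have arrived in the narrow
 * window in between. The three extern helpers are hypothetical
 * stand-ins for driver specifics, not real kernel APIs.
 */
#include <linux/netdevice.h>

extern int drain_rx(struct napi_struct *napi, int budget);
extern void reenable_rx_irq(void);
extern bool rx_queue_nonempty(void);

static int poll_sketch(struct napi_struct *napi, int budget)
{
	int work = drain_rx(napi, budget);

	if (work < budget) {
		napi_complete(napi);
		reenable_rx_irq();
		/* Close the race window just opened. */
		if (rx_queue_nonempty())
			napi_reschedule(napi);
	}
	return work;
}
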
*/ + (void)napi_reschedule(&info->napi); + } done: - tile_net_provide_needed_buffers(info); + if (priv->active) + tile_net_provide_needed_buffers(info); return work; } @@ -961,6 +947,12 @@ done: /* * Handle an ingress interrupt for the given device on the current cpu. + * + * ISSUE: Sometimes this gets called after "disable_percpu_irq()" has + * been called! This is probably due to "pending hypervisor downcalls". + * + * ISSUE: Is there any race condition between the "napi_schedule()" here + * and the "napi_complete()" call above? */ static irqreturn_t tile_net_handle_ingress_interrupt(int irq, void *dev_ptr) { @@ -969,9 +961,15 @@ static irqreturn_t tile_net_handle_ingress_interrupt(int irq, void *dev_ptr) int my_cpu = smp_processor_id(); struct tile_net_cpu *info = priv->cpu[my_cpu]; - /* Disable hypervisor interrupt. */ + /* Disable the ingress interrupt. */ disable_percpu_irq(priv->intr_id); + /* Ignore unwanted interrupts. */ + if (!priv->active) + return IRQ_HANDLED; + + /* ISSUE: Sometimes "info->napi_enabled" is false here. */ + napi_schedule(&info->napi); return IRQ_HANDLED; @@ -1005,8 +1003,7 @@ static int tile_net_open_aux(struct net_device *dev) */ { int epp_home = hv_lotar_to_cpu(epp_lotar); - struct page *page = virt_to_page(priv->epp_queue); - homecache_change_page_home(page, 0, epp_home); + homecache_change_page_home(priv->eq_pages, EQ_ORDER, epp_home); } /* @@ -1015,9 +1012,9 @@ static int tile_net_open_aux(struct net_device *dev) { netio_ipp_address_t ea = { .va = 0, - .pa = __pa(priv->epp_queue), + .pa = __pa(priv->eq), .pte = hv_pte(0), - .size = PAGE_SIZE, + .size = EQ_SIZE, }; ea.pte = hv_pte_set_lotar(ea.pte, epp_lotar); ea.pte = hv_pte_set_mode(ea.pte, HV_PTE_MODE_CACHE_TILE_L3); @@ -1043,7 +1040,7 @@ static int tile_net_open_aux(struct net_device *dev) /* - * Register with hypervisor on each CPU. + * Register with hypervisor on the current CPU. * * Strangely, this function does important things even if it "fails", * which is especially common if the link is not up yet. Hopefully @@ -1092,7 +1089,8 @@ static void tile_net_register(void *dev_ptr) priv->cpu[my_cpu] = info; /* - * Register ourselves with the IPP. + * Register ourselves with LIPP. This does a lot of stuff, + * including invoking the LIPP registration code. */ ret = hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&config, @@ -1101,8 +1099,11 @@ static void tile_net_register(void *dev_ptr) PDEBUG("hv_dev_pwrite(NETIO_IPP_INPUT_REGISTER_OFF) returned %d\n", ret); if (ret < 0) { - printk(KERN_DEBUG "hv_dev_pwrite NETIO_IPP_INPUT_REGISTER_OFF" - " failure %d\n", ret); + if (ret != NETIO_LINK_DOWN) { + printk(KERN_DEBUG "hv_dev_pwrite " + "NETIO_IPP_INPUT_REGISTER_OFF failure %d\n", + ret); + } info->link_down = (ret == NETIO_LINK_DOWN); return; } @@ -1145,15 +1146,47 @@ static void tile_net_register(void *dev_ptr) NETIO_IPP_GET_FASTIO_OFF); PDEBUG("hv_dev_pread(NETIO_IPP_GET_FASTIO_OFF) returned %d\n", ret); - netif_napi_add(dev, &info->napi, tile_net_poll, 64); - /* Now we are registered. */ info->registered = true; } /* - * Unregister with hypervisor on each CPU. + * Deregister with hypervisor on the current CPU. + * + * This simply discards all our credits, so no more packets will be + * delivered to this tile. There may still be packets in our queue. + * + * Also, disable the ingress interrupt. 
+ */ +static void tile_net_deregister(void *dev_ptr) +{ + struct net_device *dev = (struct net_device *)dev_ptr; + struct tile_net_priv *priv = netdev_priv(dev); + int my_cpu = smp_processor_id(); + struct tile_net_cpu *info = priv->cpu[my_cpu]; + + /* Disable the ingress interrupt. */ + disable_percpu_irq(priv->intr_id); + + /* Do nothing else if not registered. */ + if (info == NULL || !info->registered) + return; + + { + struct tile_netio_queue *queue = &info->queue; + netio_queue_user_impl_t *qup = &queue->__user_part; + + /* Discard all our credits. */ + __netio_fastio_return_credits(qup->__fastio_index, -1); + } +} + + +/* + * Unregister with hypervisor on the current CPU. + * + * Also, disable the ingress interrupt. */ static void tile_net_unregister(void *dev_ptr) { @@ -1162,35 +1195,23 @@ static void tile_net_unregister(void *dev_ptr) int my_cpu = smp_processor_id(); struct tile_net_cpu *info = priv->cpu[my_cpu]; - int ret = 0; + int ret; int dummy = 0; - /* Do nothing if never registered. */ - if (info == NULL) - return; + /* Disable the ingress interrupt. */ + disable_percpu_irq(priv->intr_id); - /* Do nothing if already unregistered. */ - if (!info->registered) + /* Do nothing else if not registered. */ + if (info == NULL || !info->registered) return; - /* - * Unregister ourselves with LIPP. - */ + /* Unregister ourselves with LIPP/LEPP. */ ret = hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy, sizeof(dummy), NETIO_IPP_INPUT_UNREGISTER_OFF); - PDEBUG("hv_dev_pwrite(NETIO_IPP_INPUT_UNREGISTER_OFF) returned %d\n", - ret); - if (ret < 0) { - /* FIXME: Just panic? */ - pr_err("hv_dev_pwrite NETIO_IPP_INPUT_UNREGISTER_OFF" - " failure %d\n", ret); - } + if (ret < 0) + panic("Failed to unregister with LIPP/LEPP!\n"); - /* - * Discard all packets still in our NetIO queue. Hopefully, - * once the unregister call is complete, there will be no - * packets still in flight on the IDN. - */ + /* Discard all packets still in our NetIO queue. */ tile_net_discard_packets(dev); /* Reset state. */ @@ -1200,11 +1221,6 @@ static void tile_net_unregister(void *dev_ptr) /* Cancel egress timer. */ del_timer(&info->egress_timer); info->egress_timer_scheduled = false; - - netif_napi_del(&info->napi); - - /* Now we are unregistered. */ - info->registered = false; } @@ -1212,18 +1228,28 @@ static void tile_net_unregister(void *dev_ptr) * Helper function for "tile_net_stop()". * * Also used to handle registration failure in "tile_net_open_inner()", - * when "fully_opened" is known to be false, and the various extra - * steps in "tile_net_stop()" are not necessary. ISSUE: It might be - * simpler if we could just call "tile_net_stop()" anyway. + * when the various extra steps in "tile_net_stop()" are not necessary. */ static void tile_net_stop_aux(struct net_device *dev) { struct tile_net_priv *priv = netdev_priv(dev); + int i; int dummy = 0; - /* Unregister all tiles, so LIPP will stop delivering packets. */ + /* + * Unregister all tiles, so LIPP will stop delivering packets. + * Also, delete all the "napi" objects (sequentially, to protect + * "dev->napi_list"). + */ on_each_cpu(tile_net_unregister, (void *)dev, 1); + for_each_online_cpu(i) { + struct tile_net_cpu *info = priv->cpu[i]; + if (info != NULL && info->registered) { + netif_napi_del(&info->napi); + info->registered = false; + } + } /* Stop LIPP/LEPP. 
*/ if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy, @@ -1235,18 +1261,15 @@ static void tile_net_stop_aux(struct net_device *dev) /* - * Disable ingress interrupts for the given device on the current cpu. + * Disable NAPI for the given device on the current cpu. */ -static void tile_net_disable_intr(void *dev_ptr) +static void tile_net_stop_disable(void *dev_ptr) { struct net_device *dev = (struct net_device *)dev_ptr; struct tile_net_priv *priv = netdev_priv(dev); int my_cpu = smp_processor_id(); struct tile_net_cpu *info = priv->cpu[my_cpu]; - /* Disable hypervisor interrupt. */ - disable_percpu_irq(priv->intr_id); - /* Disable NAPI if needed. */ if (info != NULL && info->napi_enabled) { napi_disable(&info->napi); @@ -1256,21 +1279,24 @@ static void tile_net_disable_intr(void *dev_ptr) /* - * Enable ingress interrupts for the given device on the current cpu. + * Enable NAPI and the ingress interrupt for the given device + * on the current cpu. + * + * ISSUE: Only do this for "network cpus"? */ -static void tile_net_enable_intr(void *dev_ptr) +static void tile_net_open_enable(void *dev_ptr) { struct net_device *dev = (struct net_device *)dev_ptr; struct tile_net_priv *priv = netdev_priv(dev); int my_cpu = smp_processor_id(); struct tile_net_cpu *info = priv->cpu[my_cpu]; - /* Enable hypervisor interrupt. */ - enable_percpu_irq(priv->intr_id); - /* Enable NAPI. */ napi_enable(&info->napi); info->napi_enabled = true; + + /* Enable the ingress interrupt. */ + enable_percpu_irq(priv->intr_id); } @@ -1288,8 +1314,9 @@ static int tile_net_open_inner(struct net_device *dev) int my_cpu = smp_processor_id(); struct tile_net_cpu *info; struct tile_netio_queue *queue; - unsigned int irq; + int result = 0; int i; + int dummy = 0; /* * First try to register just on the local CPU, and handle any @@ -1307,42 +1334,52 @@ static int tile_net_open_inner(struct net_device *dev) /* * Now register everywhere else. If any registration fails, * even for "link down" (which might not be possible), we - * clean up using "tile_net_stop_aux()". + * clean up using "tile_net_stop_aux()". Also, add all the + * "napi" objects (sequentially, to protect "dev->napi_list"). + * ISSUE: Only use "netif_napi_add()" for "network cpus"? */ smp_call_function(tile_net_register, (void *)dev, 1); for_each_online_cpu(i) { - if (!priv->cpu[i]->registered) { - tile_net_stop_aux(dev); - return -EAGAIN; - } + struct tile_net_cpu *info = priv->cpu[i]; + if (info->registered) + netif_napi_add(dev, &info->napi, tile_net_poll, 64); + else + result = -EAGAIN; + } + if (result != 0) { + tile_net_stop_aux(dev); + return result; } queue = &info->queue; - /* - * Set the device intr bit mask. - * The tile_net_register above sets per tile __intr_id. - */ - priv->intr_id = queue->__system_part->__intr_id; - BUG_ON(!priv->intr_id); - - /* - * Register the device interrupt handler. - * The __ffs() function returns the index into the interrupt handler - * table from the interrupt bit mask which should have one bit - * and one bit only set. - */ - irq = __ffs(priv->intr_id); - tile_irq_activate(irq, TILE_IRQ_PERCPU); - BUG_ON(request_irq(irq, tile_net_handle_ingress_interrupt, - 0, dev->name, (void *)dev) != 0); + if (priv->intr_id == 0) { + unsigned int irq; - /* ISSUE: How could "priv->fully_opened" ever be "true" here? */ - - if (!priv->fully_opened) { + /* + * Acquire the irq allocated by the hypervisor. Every + * queue gets the same irq. The "__intr_id" field is + * "1 << irq", so we use "__ffs()" to extract "irq". 
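
/*
 * Editor's sketch: the "__intr_id is 1 << irq" extraction described
 * above, as a stand-alone function. __builtin_ctzl() stands in for
 * the kernel's __ffs(); the assert encodes the one-bit precondition
 * that the driver's BUG_ON() checks.
 */
#include <assert.h>

static unsigned int irq_from_mask(unsigned long mask)
{
	/* Exactly one bit must be set: mask == 1UL << irq. */
	assert(mask != 0 && (mask & (mask - 1)) == 0);
	return (unsigned int)__builtin_ctzl(mask);
}

/* e.g. irq_from_mask(1UL << 9) == 9 */
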
+ */ + priv->intr_id = queue->__system_part->__intr_id; + BUG_ON(priv->intr_id == 0); + irq = __ffs(priv->intr_id); - int dummy = 0; + /* + * Register the ingress interrupt handler for this + * device, permanently. + * + * We used to call "free_irq()" in "tile_net_stop()", + * and then re-register the handler here every time, + * but that caused DNP errors in "handle_IRQ_event()" + * because "desc->action" was NULL. See bug 9143. + */ + tile_irq_activate(irq, TILE_IRQ_PERCPU); + BUG_ON(request_irq(irq, tile_net_handle_ingress_interrupt, + 0, dev->name, (void *)dev) != 0); + } + { /* Allocate initial buffers. */ int max_buffers = @@ -1359,18 +1396,21 @@ static int tile_net_open_inner(struct net_device *dev) if (info->num_needed_small_buffers != 0 || info->num_needed_large_buffers != 0) panic("Insufficient memory for buffer stack!"); + } - /* Start LIPP/LEPP and activate "ingress" at the shim. */ - if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy, - sizeof(dummy), NETIO_IPP_INPUT_INIT_OFF) < 0) - panic("Failed to activate the LIPP Shim!\n"); + /* We are about to be active. */ + priv->active = true; - priv->fully_opened = 1; - } + /* Make sure "active" is visible to all tiles. */ + mb(); - /* On each tile, enable the hypervisor to trigger interrupts. */ - /* ISSUE: Do this before starting LIPP/LEPP? */ - on_each_cpu(tile_net_enable_intr, (void *)dev, 1); + /* On each tile, enable NAPI and the ingress interrupt. */ + on_each_cpu(tile_net_open_enable, (void *)dev, 1); + + /* Start LIPP/LEPP and activate "ingress" at the shim. */ + if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy, + sizeof(dummy), NETIO_IPP_INPUT_INIT_OFF) < 0) + panic("Failed to activate the LIPP Shim!\n"); /* Start our transmit queue. */ netif_start_queue(dev); @@ -1396,9 +1436,9 @@ static void tile_net_open_retry(struct work_struct *w) * ourselves to try again later; otherwise, tell Linux we now have * a working link. ISSUE: What if the return value is negative? */ - if (tile_net_open_inner(priv->dev)) - schedule_delayed_work_on(singlethread_cpu, &priv->retry_work, - TILE_NET_RETRY_INTERVAL); + if (tile_net_open_inner(priv->dev) != 0) + schedule_delayed_work(&priv->retry_work, + TILE_NET_RETRY_INTERVAL); else netif_carrier_on(priv->dev); } @@ -1412,8 +1452,8 @@ static void tile_net_open_retry(struct work_struct *w) * The open entry point is called when a network interface is made * active by the system (IFF_UP). At this point all resources needed * for transmit and receive operations are allocated, the interrupt - * handler is registered with the OS, the watchdog timer is started, - * and the stack is notified that the interface is ready. + * handler is registered with the OS (if needed), the watchdog timer + * is started, and the stack is notified that the interface is ready. * * If the actual link is not available yet, then we tell Linux that * we have no carrier, and we keep checking until the link comes up. @@ -1468,6 +1508,10 @@ static int tile_net_open(struct net_device *dev) #endif priv->partly_opened = 1; + + } else { + /* FIXME: Is this possible? */ + /* printk("Already partly opened.\n"); */ } /* @@ -1487,57 +1531,17 @@ static int tile_net_open(struct net_device *dev) * and then remember to try again later. */ netif_carrier_off(dev); - schedule_delayed_work_on(singlethread_cpu, &priv->retry_work, - TILE_NET_RETRY_INTERVAL); + schedule_delayed_work(&priv->retry_work, TILE_NET_RETRY_INTERVAL); return 0; } -/* - * Disables a network interface. - * - * Returns 0, this is not allowed to fail. 
- * - * The close entry point is called when an interface is de-activated - * by the OS. The hardware is still under the drivers control, but - * needs to be disabled. A global MAC reset is issued to stop the - * hardware, and all transmit and receive resources are freed. - * - * ISSUE: Can this can be called while "tile_net_poll()" is running? - */ -static int tile_net_stop(struct net_device *dev) +static int tile_net_drain_lipp_buffers(struct tile_net_priv *priv) { - struct tile_net_priv *priv = netdev_priv(dev); - - bool pending = true; - - PDEBUG("tile_net_stop()\n"); - - /* ISSUE: Only needed if not yet fully open. */ - cancel_delayed_work_sync(&priv->retry_work); - - /* Can't transmit any more. */ - netif_stop_queue(dev); - - /* - * Disable hypervisor interrupts on each tile. - */ - on_each_cpu(tile_net_disable_intr, (void *)dev, 1); - - /* - * Unregister the interrupt handler. - * The __ffs() function returns the index into the interrupt handler - * table from the interrupt bit mask which should have one bit - * and one bit only set. - */ - if (priv->intr_id) - free_irq(__ffs(priv->intr_id), dev); - - /* - * Drain all the LIPP buffers. - */ + int n = 0; + /* Drain all the LIPP buffers. */ while (true) { int buffer; @@ -1560,43 +1564,105 @@ static int tile_net_stop(struct net_device *dev) kfree_skb(skb); } + + n++; } - /* Stop LIPP/LEPP. */ - tile_net_stop_aux(dev); + return n; +} - priv->fully_opened = 0; +/* + * Disables a network interface. + * + * Returns 0, this is not allowed to fail. + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. + * + * ISSUE: How closely does "netif_running(dev)" mirror "priv->active"? + * + * Before we are called by "__dev_close()", "netif_running()" will + * have been cleared, so no NEW calls to "tile_net_poll()" will be + * made by "netpoll_poll_dev()". + * + * Often, this can cause some tiles to still have packets in their + * queues, so we must call "tile_net_discard_packets()" later. + * + * Note that some other tile may still be INSIDE "tile_net_poll()", + * and in fact, many will be, if there is heavy network load. + * + * Calling "on_each_cpu(tile_net_stop_disable, (void *)dev, 1)" when + * any tile is still "napi_schedule()"'d will induce a horrible crash + * when "msleep()" is called. This includes tiles which are inside + * "tile_net_poll()" which have not yet called "napi_complete()". + * + * So, we must first try to wait long enough for other tiles to finish + * with any current "tile_net_poll()" call, and, hopefully, to clear + * the "scheduled" flag. ISSUE: It is unclear what happens to tiles + * which have called "napi_schedule()" but which had not yet tried to + * call "tile_net_poll()", or which exhausted their budget inside + * "tile_net_poll()" just before this function was called. + */ +static int tile_net_stop(struct net_device *dev) +{ + struct tile_net_priv *priv = netdev_priv(dev); + + PDEBUG("tile_net_stop()\n"); + /* Start discarding packets. */ + priv->active = false; + + /* Make sure "active" is visible to all tiles. */ + mb(); /* - * XXX: ISSUE: It appears that, in practice anyway, by the - * time we get here, there are no pending completions. + * On each tile, make sure no NEW packets get delivered, and + * disable the ingress interrupt. 
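
/*
 * Editor's sketch: the "priv->active" publication protocol used by
 * tile_net_open_inner() and tile_net_stop() above is "write the flag,
 * then mb()", so any tile that observes the flag also observes the
 * state written before it. The kernel code uses a full mb(); C11
 * release/acquire, shown here, is the analogous userspace idiom, not
 * the driver's actual mechanism.
 */
#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool active;
static int shared_state;

static void publish_up(void)
{
	shared_state = 42;			/* prepare state first */
	atomic_store_explicit(&active, true,
			      memory_order_release);	/* then publish */
}

static bool reader_sees_state(void)
{
	if (!atomic_load_explicit(&active, memory_order_acquire))
		return false;
	return shared_state == 42;	/* guaranteed by acquire/release */
}
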
+ * + * Note that the ingress interrupt can fire AFTER this, + * presumably due to packets which were recently delivered, + * but it will have no effect. */ - while (pending) { + on_each_cpu(tile_net_deregister, (void *)dev, 1); - struct sk_buff *olds[32]; - unsigned int wanted = 32; - unsigned int i, nolds = 0; + /* Optimistically drain LIPP buffers. */ + (void)tile_net_drain_lipp_buffers(priv); - nolds = tile_net_lepp_grab_comps(dev, olds, - wanted, &pending); + /* ISSUE: Only needed if not yet fully open. */ + cancel_delayed_work_sync(&priv->retry_work); - /* ISSUE: We have never actually seen this debug spew. */ - if (nolds != 0) - pr_info("During tile_net_stop(), grabbed %d comps.\n", - nolds); + /* Can't transmit any more. */ + netif_stop_queue(dev); - for (i = 0; i < nolds; i++) - kfree_skb(olds[i]); - } + /* Disable NAPI on each tile. */ + on_each_cpu(tile_net_stop_disable, (void *)dev, 1); + + /* + * Drain any remaining LIPP buffers. NOTE: This "printk()" + * has never been observed, but in theory it could happen. + */ + if (tile_net_drain_lipp_buffers(priv) != 0) + printk("Had to drain some extra LIPP buffers!\n"); + /* Stop LIPP/LEPP. */ + tile_net_stop_aux(dev); + + /* + * ISSUE: It appears that, in practice anyway, by the time we + * get here, there are no pending completions, but just in case, + * we free (all of) them anyway. + */ + while (tile_net_lepp_free_comps(dev, true)) + /* loop */; /* Wipe the EPP queue. */ - memset(priv->epp_queue, 0, sizeof(lepp_queue_t)); + memset(priv->eq, 0, sizeof(lepp_queue_t)); /* Evict the EPP queue. */ - finv_buffer(priv->epp_queue, PAGE_SIZE); + finv_buffer(priv->eq, EQ_SIZE); return 0; } @@ -1620,7 +1686,7 @@ static unsigned int tile_net_tx_frags(lepp_frag_t *frags, if (b_len != 0) { if (!hash_default) - finv_buffer_remote(b_data, b_len); + finv_buffer_remote(b_data, b_len, 0); cpa = __pa(b_data); frags[n].cpa_lo = cpa; @@ -1643,7 +1709,7 @@ static unsigned int tile_net_tx_frags(lepp_frag_t *frags, if (!hash_default) { void *va = pfn_to_kaddr(pfn) + f->page_offset; BUG_ON(PageHighMem(f->page)); - finv_buffer_remote(va, f->size); + finv_buffer_remote(va, f->size, 0); } cpa = ((phys_addr_t)pfn << PAGE_SHIFT) + f->page_offset; @@ -1742,17 +1808,15 @@ static int tile_net_tx_tso(struct sk_buff *skb, struct net_device *dev) unsigned long irqflags; - lepp_queue_t *eq = priv->epp_queue; + lepp_queue_t *eq = priv->eq; - struct sk_buff *olds[4]; - unsigned int wanted = 4; + struct sk_buff *olds[8]; + unsigned int wanted = 8; unsigned int i, nolds = 0; unsigned int cmd_head, cmd_tail, cmd_next; unsigned int comp_tail; - unsigned int free_slots; - /* Paranoia. */ BUG_ON(skb->protocol != htons(ETH_P_IP)); @@ -1780,34 +1844,32 @@ static int tile_net_tx_tso(struct sk_buff *skb, struct net_device *dev) /* Enqueue the command. */ - spin_lock_irqsave(&priv->cmd_lock, irqflags); + spin_lock_irqsave(&priv->eq_lock, irqflags); /* * Handle completions if needed to make room. * HACK: Spin until there is sufficient room. */ - free_slots = lepp_num_free_comp_slots(eq); - if (free_slots < 1) { -spin: - nolds += tile_net_lepp_grab_comps(dev, olds + nolds, - wanted - nolds, NULL); - if (lepp_num_free_comp_slots(eq) < 1) - goto spin; + if (lepp_num_free_comp_slots(eq) == 0) { + nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 0); + if (nolds == 0) { +busy: + spin_unlock_irqrestore(&priv->eq_lock, irqflags); + return NETDEV_TX_BUSY; + } } cmd_head = eq->cmd_head; cmd_tail = eq->cmd_tail; - /* NOTE: The "gotos" below are untested. 
*/ - /* Prepare to advance, detecting full queue. */ cmd_next = cmd_tail + cmd_size; if (cmd_tail < cmd_head && cmd_next >= cmd_head) - goto spin; + goto busy; if (cmd_next > LEPP_CMD_LIMIT) { cmd_next = 0; if (cmd_next == cmd_head) - goto spin; + goto busy; } /* Copy the command. */ @@ -1823,14 +1885,18 @@ spin: eq->comp_tail = comp_tail; /* Flush before allowing LEPP to handle the command. */ + /* ISSUE: Is this the optimal location for the flush? */ __insn_mf(); eq->cmd_tail = cmd_tail; - spin_unlock_irqrestore(&priv->cmd_lock, irqflags); - + /* NOTE: Using "4" here is more efficient than "0" or "2", */ + /* and, strangely, more efficient than pre-checking the number */ + /* of available completions, and comparing it to 4. */ if (nolds == 0) - nolds = tile_net_lepp_grab_comps(dev, olds, wanted, NULL); + nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 4); + + spin_unlock_irqrestore(&priv->eq_lock, irqflags); /* Handle completions. */ for (i = 0; i < nolds; i++) @@ -1870,10 +1936,10 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev) unsigned int num_frags; - lepp_queue_t *eq = priv->epp_queue; + lepp_queue_t *eq = priv->eq; - struct sk_buff *olds[4]; - unsigned int wanted = 4; + struct sk_buff *olds[8]; + unsigned int wanted = 8; unsigned int i, nolds = 0; unsigned int cmd_size = sizeof(lepp_cmd_t); @@ -1883,8 +1949,6 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev) lepp_cmd_t cmds[LEPP_MAX_FRAGS]; - unsigned int free_slots; - /* * This is paranoia, since we think that if the link doesn't come @@ -1905,7 +1969,8 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev) if (hash_default) { HV_PTE pte = *virt_to_pte(current->mm, (unsigned long)data); if (hv_pte_get_mode(pte) != HV_PTE_MODE_CACHE_HASH_L3) - panic("Non-coherent egress buffer!"); + panic("Non-HFH egress buffer! VA=%p Mode=%d PTE=%llx", + data, hv_pte_get_mode(pte), hv_pte_val(pte)); } #endif #endif @@ -1958,37 +2023,35 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev) /* Enqueue the commands. */ - spin_lock_irqsave(&priv->cmd_lock, irqflags); + spin_lock_irqsave(&priv->eq_lock, irqflags); /* * Handle completions if needed to make room. * HACK: Spin until there is sufficient room. */ - free_slots = lepp_num_free_comp_slots(eq); - if (free_slots < 1) { -spin: - nolds += tile_net_lepp_grab_comps(dev, olds + nolds, - wanted - nolds, NULL); - if (lepp_num_free_comp_slots(eq) < 1) - goto spin; + if (lepp_num_free_comp_slots(eq) == 0) { + nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 0); + if (nolds == 0) { +busy: + spin_unlock_irqrestore(&priv->eq_lock, irqflags); + return NETDEV_TX_BUSY; + } } cmd_head = eq->cmd_head; cmd_tail = eq->cmd_tail; - /* NOTE: The "gotos" below are untested. */ - /* Copy the commands, or fail. */ for (i = 0; i < num_frags; i++) { /* Prepare to advance, detecting full queue. */ cmd_next = cmd_tail + cmd_size; if (cmd_tail < cmd_head && cmd_next >= cmd_head) - goto spin; + goto busy; if (cmd_next > LEPP_CMD_LIMIT) { cmd_next = 0; if (cmd_next == cmd_head) - goto spin; + goto busy; } /* Copy the command. */ @@ -2005,14 +2068,18 @@ spin: eq->comp_tail = comp_tail; /* Flush before allowing LEPP to handle the command. */ + /* ISSUE: Is this the optimal location for the flush? 
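
/*
 * Editor's sketch: the full-ring test used by the egress paths above,
 * restated as a stand-alone predicate. "limit" plays the role of
 * LEPP_CMD_LIMIT: a command that would step past it wraps back to
 * offset 0, and the wrapped position may collide with the reader at
 * "head".
 */
#include <stdbool.h>

static bool cmd_ring_full(unsigned int head, unsigned int tail,
			  unsigned int size, unsigned int limit)
{
	unsigned int next = tail + size;

	if (tail < head && next >= head)
		return true;		/* would overrun the reader */
	if (next > limit && head == 0)
		return true;		/* wrap lands on the reader */
	return false;
}
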
*/ __insn_mf(); eq->cmd_tail = cmd_tail; - spin_unlock_irqrestore(&priv->cmd_lock, irqflags); - + /* NOTE: Using "4" here is more efficient than "0" or "2", */ + /* and, strangely, more efficient than pre-checking the number */ + /* of available completions, and comparing it to 4. */ if (nolds == 0) - nolds = tile_net_lepp_grab_comps(dev, olds, wanted, NULL); + nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 4); + + spin_unlock_irqrestore(&priv->eq_lock, irqflags); /* Handle completions. */ for (i = 0; i < nolds; i++) @@ -2261,7 +2328,6 @@ static struct net_device *tile_net_dev_init(const char *name) int ret; struct net_device *dev; struct tile_net_priv *priv; - struct page *page; /* * Allocate the device structure. This allocates "priv", calls @@ -2285,23 +2351,21 @@ static struct net_device *tile_net_dev_init(const char *name) INIT_DELAYED_WORK(&priv->retry_work, tile_net_open_retry); - spin_lock_init(&priv->cmd_lock); - spin_lock_init(&priv->comp_lock); + spin_lock_init(&priv->eq_lock); - /* Allocate "epp_queue". */ - BUG_ON(get_order(sizeof(lepp_queue_t)) != 0); - page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0); - if (!page) { + /* Allocate "eq". */ + priv->eq_pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, EQ_ORDER); + if (!priv->eq_pages) { free_netdev(dev); return NULL; } - priv->epp_queue = page_address(page); + priv->eq = page_address(priv->eq_pages); /* Register the network device. */ ret = register_netdev(dev); if (ret) { pr_err("register_netdev %s failed %d\n", dev->name, ret); - free_page((unsigned long)priv->epp_queue); + __free_pages(priv->eq_pages, EQ_ORDER); free_netdev(dev); return NULL; } @@ -2310,7 +2374,7 @@ static struct net_device *tile_net_dev_init(const char *name) ret = tile_net_get_mac(dev); if (ret < 0) { unregister_netdev(dev); - free_page((unsigned long)priv->epp_queue); + __free_pages(priv->eq_pages, EQ_ORDER); free_netdev(dev); return NULL; } @@ -2321,6 +2385,9 @@ static struct net_device *tile_net_dev_init(const char *name) /* * Module cleanup. + * + * FIXME: If compiled as a module, this module cannot be "unloaded", + * because the "ingress interrupt handler" is registered permanently. */ static void tile_net_cleanup(void) { @@ -2331,8 +2398,8 @@ static void tile_net_cleanup(void) struct net_device *dev = tile_net_devs[i]; struct tile_net_priv *priv = netdev_priv(dev); unregister_netdev(dev); - finv_buffer(priv->epp_queue, PAGE_SIZE); - free_page((unsigned long)priv->epp_queue); + finv_buffer(priv->eq, EQ_SIZE); + __free_pages(priv->eq_pages, EQ_ORDER); free_netdev(dev); } } @@ -2355,7 +2422,12 @@ static int tile_net_init_module(void) } +module_init(tile_net_init_module); +module_exit(tile_net_cleanup); + + #ifndef MODULE + /* * The "network_cpus" boot argument specifies the cpus that are dedicated * to handle ingress packets. @@ -2391,8 +2463,5 @@ static int __init network_cpus_setup(char *str) return 0; } __setup("network_cpus=", network_cpus_setup); -#endif - -module_init(tile_net_init_module); -module_exit(tile_net_cleanup); +#endif diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c index 59f55441e075..b8ef8ddcc292 100644 --- a/drivers/oprofile/cpu_buffer.c +++ b/drivers/oprofile/cpu_buffer.c @@ -258,8 +258,10 @@ op_add_sample(struct oprofile_cpu_buffer *cpu_buf, */ static int log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc, - unsigned long backtrace, int is_kernel, unsigned long event) + unsigned long backtrace, int is_kernel, unsigned long event, + struct task_struct *task) { + struct task_struct *tsk = task ? 
task : current; cpu_buf->sample_received++; if (pc == ESCAPE_CODE) { @@ -267,7 +269,7 @@ log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc, return 0; } - if (op_add_code(cpu_buf, backtrace, is_kernel, current)) + if (op_add_code(cpu_buf, backtrace, is_kernel, tsk)) goto fail; if (op_add_sample(cpu_buf, pc, event)) @@ -292,7 +294,8 @@ static inline void oprofile_end_trace(struct oprofile_cpu_buffer *cpu_buf) static inline void __oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs, - unsigned long event, int is_kernel) + unsigned long event, int is_kernel, + struct task_struct *task) { struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer); unsigned long backtrace = oprofile_backtrace_depth; @@ -301,7 +304,7 @@ __oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs, * if log_sample() fail we can't backtrace since we lost the * source of this event */ - if (!log_sample(cpu_buf, pc, backtrace, is_kernel, event)) + if (!log_sample(cpu_buf, pc, backtrace, is_kernel, event, task)) /* failed */ return; @@ -313,10 +316,17 @@ __oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs, oprofile_end_trace(cpu_buf); } +void oprofile_add_ext_hw_sample(unsigned long pc, struct pt_regs * const regs, + unsigned long event, int is_kernel, + struct task_struct *task) +{ + __oprofile_add_ext_sample(pc, regs, event, is_kernel, task); +} + void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs, unsigned long event, int is_kernel) { - __oprofile_add_ext_sample(pc, regs, event, is_kernel); + __oprofile_add_ext_sample(pc, regs, event, is_kernel, NULL); } void oprofile_add_sample(struct pt_regs * const regs, unsigned long event) @@ -332,7 +342,7 @@ void oprofile_add_sample(struct pt_regs * const regs, unsigned long event) pc = ESCAPE_CODE; /* as this causes an early return. 
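
/*
 * Editor's sketch: how a caller might use the
 * oprofile_add_ext_hw_sample() entry point introduced above. It
 * matches oprofile_add_ext_sample() but charges the sample to an
 * explicit task instead of "current"; the surrounding handler here is
 * hypothetical.
 */
#include <linux/oprofile.h>
#include <linux/ptrace.h>
#include <linux/sched.h>

static void hw_sampler_tick(struct pt_regs *regs,
			    struct task_struct *sampled)
{
	/* event 0, kernel-mode sample, attributed to "sampled" */
	oprofile_add_ext_hw_sample(instruction_pointer(regs), regs,
				   0, 1, sampled);
}
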
*/ } - __oprofile_add_ext_sample(pc, regs, event, is_kernel); + __oprofile_add_ext_sample(pc, regs, event, is_kernel, NULL); } /* @@ -403,7 +413,7 @@ int oprofile_write_commit(struct op_entry *entry) void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event) { struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer); - log_sample(cpu_buf, pc, 0, is_kernel, event); + log_sample(cpu_buf, pc, 0, is_kernel, event, NULL); } void oprofile_add_trace(unsigned long pc) diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c index 010725117dbb..3ef44624f510 100644 --- a/drivers/oprofile/timer_int.c +++ b/drivers/oprofile/timer_int.c @@ -97,7 +97,7 @@ static struct notifier_block __refdata oprofile_cpu_notifier = { .notifier_call = oprofile_cpu_notify, }; -int __init oprofile_timer_init(struct oprofile_operations *ops) +int oprofile_timer_init(struct oprofile_operations *ops) { int rc; @@ -113,7 +113,7 @@ int __init oprofile_timer_init(struct oprofile_operations *ops) return 0; } -void __exit oprofile_timer_exit(void) +void oprofile_timer_exit(void) { unregister_hotcpu_notifier(&oprofile_cpu_notifier); } diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c index 38b34a73866a..ad3d099bf5c1 100644 --- a/drivers/platform/x86/acer-wmi.c +++ b/drivers/platform/x86/acer-wmi.c @@ -39,7 +39,6 @@ #include <linux/slab.h> #include <linux/input.h> #include <linux/input/sparse-keymap.h> -#include <linux/dmi.h> #include <acpi/acpi_drivers.h> diff --git a/drivers/rtc/rtc-pl030.c b/drivers/rtc/rtc-pl030.c index d554368c9f57..1d28d4451dae 100644 --- a/drivers/rtc/rtc-pl030.c +++ b/drivers/rtc/rtc-pl030.c @@ -97,7 +97,7 @@ static const struct rtc_class_ops pl030_ops = { .set_alarm = pl030_set_alarm, }; -static int pl030_probe(struct amba_device *dev, struct amba_id *id) +static int pl030_probe(struct amba_device *dev, const struct amba_id *id) { struct pl030_rtc *rtc; int ret; diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c index d829ea63c4fb..ff1b84bd9bb5 100644 --- a/drivers/rtc/rtc-pl031.c +++ b/drivers/rtc/rtc-pl031.c @@ -307,7 +307,7 @@ static int pl031_remove(struct amba_device *adev) return 0; } -static int pl031_probe(struct amba_device *adev, struct amba_id *id) +static int pl031_probe(struct amba_device *adev, const struct amba_id *id) { int ret; struct pl031_local *ldata; diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index 51c666fb67a4..645b0fcbb370 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -122,36 +122,21 @@ static int __init zfcp_module_init(void) { int retval = -ENOMEM; - zfcp_data.gpn_ft_cache = zfcp_cache_hw_align("zfcp_gpn", - sizeof(struct zfcp_fc_gpn_ft_req)); - if (!zfcp_data.gpn_ft_cache) - goto out; - - zfcp_data.qtcb_cache = zfcp_cache_hw_align("zfcp_qtcb", - sizeof(struct fsf_qtcb)); - if (!zfcp_data.qtcb_cache) + zfcp_fsf_qtcb_cache = zfcp_cache_hw_align("zfcp_fsf_qtcb", + sizeof(struct fsf_qtcb)); + if (!zfcp_fsf_qtcb_cache) goto out_qtcb_cache; - zfcp_data.sr_buffer_cache = zfcp_cache_hw_align("zfcp_sr", - sizeof(struct fsf_status_read_buffer)); - if (!zfcp_data.sr_buffer_cache) - goto out_sr_cache; + zfcp_fc_req_cache = zfcp_cache_hw_align("zfcp_fc_req", + sizeof(struct zfcp_fc_req)); + if (!zfcp_fc_req_cache) + goto out_fc_cache; - zfcp_data.gid_pn_cache = zfcp_cache_hw_align("zfcp_gid", - sizeof(struct zfcp_fc_gid_pn)); - if (!zfcp_data.gid_pn_cache) - goto out_gid_cache; - - zfcp_data.adisc_cache = zfcp_cache_hw_align("zfcp_adisc", - 
sizeof(struct zfcp_fc_els_adisc)); - if (!zfcp_data.adisc_cache) - goto out_adisc_cache; - - zfcp_data.scsi_transport_template = + zfcp_scsi_transport_template = fc_attach_transport(&zfcp_transport_functions); - if (!zfcp_data.scsi_transport_template) + if (!zfcp_scsi_transport_template) goto out_transport; - scsi_transport_reserve_device(zfcp_data.scsi_transport_template, + scsi_transport_reserve_device(zfcp_scsi_transport_template, sizeof(struct zfcp_scsi_dev)); @@ -175,18 +160,12 @@ static int __init zfcp_module_init(void) out_ccw_register: misc_deregister(&zfcp_cfdc_misc); out_misc: - fc_release_transport(zfcp_data.scsi_transport_template); + fc_release_transport(zfcp_scsi_transport_template); out_transport: - kmem_cache_destroy(zfcp_data.adisc_cache); -out_adisc_cache: - kmem_cache_destroy(zfcp_data.gid_pn_cache); -out_gid_cache: - kmem_cache_destroy(zfcp_data.sr_buffer_cache); -out_sr_cache: - kmem_cache_destroy(zfcp_data.qtcb_cache); + kmem_cache_destroy(zfcp_fc_req_cache); +out_fc_cache: + kmem_cache_destroy(zfcp_fsf_qtcb_cache); out_qtcb_cache: - kmem_cache_destroy(zfcp_data.gpn_ft_cache); -out: return retval; } @@ -196,12 +175,9 @@ static void __exit zfcp_module_exit(void) { ccw_driver_unregister(&zfcp_ccw_driver); misc_deregister(&zfcp_cfdc_misc); - fc_release_transport(zfcp_data.scsi_transport_template); - kmem_cache_destroy(zfcp_data.adisc_cache); - kmem_cache_destroy(zfcp_data.gid_pn_cache); - kmem_cache_destroy(zfcp_data.sr_buffer_cache); - kmem_cache_destroy(zfcp_data.qtcb_cache); - kmem_cache_destroy(zfcp_data.gpn_ft_cache); + fc_release_transport(zfcp_scsi_transport_template); + kmem_cache_destroy(zfcp_fc_req_cache); + kmem_cache_destroy(zfcp_fsf_qtcb_cache); } module_exit(zfcp_module_exit); @@ -260,18 +236,18 @@ static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter) return -ENOMEM; adapter->pool.qtcb_pool = - mempool_create_slab_pool(4, zfcp_data.qtcb_cache); + mempool_create_slab_pool(4, zfcp_fsf_qtcb_cache); if (!adapter->pool.qtcb_pool) return -ENOMEM; - adapter->pool.status_read_data = - mempool_create_slab_pool(FSF_STATUS_READS_RECOM, - zfcp_data.sr_buffer_cache); - if (!adapter->pool.status_read_data) + BUILD_BUG_ON(sizeof(struct fsf_status_read_buffer) > PAGE_SIZE); + adapter->pool.sr_data = + mempool_create_page_pool(FSF_STATUS_READS_RECOM, 0); + if (!adapter->pool.sr_data) return -ENOMEM; adapter->pool.gid_pn = - mempool_create_slab_pool(1, zfcp_data.gid_pn_cache); + mempool_create_slab_pool(1, zfcp_fc_req_cache); if (!adapter->pool.gid_pn) return -ENOMEM; @@ -290,8 +266,8 @@ static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter) mempool_destroy(adapter->pool.qtcb_pool); if (adapter->pool.status_read_req) mempool_destroy(adapter->pool.status_read_req); - if (adapter->pool.status_read_data) - mempool_destroy(adapter->pool.status_read_data); + if (adapter->pool.sr_data) + mempool_destroy(adapter->pool.sr_data); if (adapter->pool.gid_pn) mempool_destroy(adapter->pool.gid_pn); } @@ -386,6 +362,7 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device) INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler); INIT_WORK(&adapter->scan_work, zfcp_fc_scan_ports); + INIT_WORK(&adapter->ns_up_work, zfcp_fc_sym_name_update); if (zfcp_qdio_setup(adapter)) goto failed; @@ -437,7 +414,7 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device) adapter->dma_parms.max_segment_size = ZFCP_QDIO_SBALE_LEN; adapter->ccw_device->dev.dma_parms = &adapter->dma_parms; - if (!zfcp_adapter_scsi_register(adapter)) + if 
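
/*
 * Editor's sketch: the slab-pool to page-pool switch made for
 * "sr_data" above. mempool_create_page_pool(min_nr, order) keeps
 * min_nr pages of order "order" in reserve, and the BUILD_BUG_ON
 * proves that a single page can hold the buffer. The struct and
 * numbers here are illustrative, not zfcp's.
 */
#include <linux/kernel.h>
#include <linux/mempool.h>
#include <linux/mm.h>

struct demo_buf {
	char data[512];
};

static mempool_t *demo_make_pool(void)
{
	BUILD_BUG_ON(sizeof(struct demo_buf) > PAGE_SIZE);
	return mempool_create_page_pool(16, 0);	/* 16 order-0 pages */
}
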
(!zfcp_scsi_adapter_register(adapter)) return adapter; failed: @@ -451,10 +428,11 @@ void zfcp_adapter_unregister(struct zfcp_adapter *adapter) cancel_work_sync(&adapter->scan_work); cancel_work_sync(&adapter->stat_work); + cancel_work_sync(&adapter->ns_up_work); zfcp_destroy_adapter_work_queue(adapter); zfcp_fc_wka_ports_force_offline(adapter->gs); - zfcp_adapter_scsi_unregister(adapter); + zfcp_scsi_adapter_unregister(adapter); sysfs_remove_group(&cdev->dev.kobj, &zfcp_sysfs_adapter_attrs); zfcp_erp_thread_kill(adapter); diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index 9ae1d0a6f627..527ba48eea57 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h @@ -89,7 +89,6 @@ struct zfcp_reqlist; #define ZFCP_STATUS_LUN_READONLY 0x00000008 /* FSF request status (this does not have a common part) */ -#define ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT 0x00000002 #define ZFCP_STATUS_FSFREQ_ERROR 0x00000008 #define ZFCP_STATUS_FSFREQ_CLEANUP 0x00000010 #define ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED 0x00000040 @@ -108,7 +107,7 @@ struct zfcp_adapter_mempool { mempool_t *scsi_req; mempool_t *scsi_abort; mempool_t *status_read_req; - mempool_t *status_read_data; + mempool_t *sr_data; mempool_t *gid_pn; mempool_t *qtcb_pool; }; @@ -190,6 +189,7 @@ struct zfcp_adapter { struct fsf_qtcb_bottom_port *stats_reset_data; unsigned long stats_reset; struct work_struct scan_work; + struct work_struct ns_up_work; struct service_level service_level; struct workqueue_struct *work_queue; struct device_dma_parameters dma_parms; @@ -314,15 +314,4 @@ struct zfcp_fsf_req { void (*handler)(struct zfcp_fsf_req *); }; -/* driver data */ -struct zfcp_data { - struct scsi_host_template scsi_host_template; - struct scsi_transport_template *scsi_transport_template; - struct kmem_cache *gpn_ft_cache; - struct kmem_cache *qtcb_cache; - struct kmem_cache *sr_buffer_cache; - struct kmem_cache *gid_pn_cache; - struct kmem_cache *adisc_cache; -}; - #endif /* ZFCP_DEF_H */ diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index e003e306f870..e1b4f800e226 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c @@ -732,7 +732,7 @@ static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *act) if (zfcp_erp_adapter_strategy_open_fsf_xport(act) == ZFCP_ERP_FAILED) return ZFCP_ERP_FAILED; - if (mempool_resize(act->adapter->pool.status_read_data, + if (mempool_resize(act->adapter->pool.sr_data, act->adapter->stat_read_buf_num, GFP_KERNEL)) return ZFCP_ERP_FAILED; @@ -1231,8 +1231,10 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result) if (result == ZFCP_ERP_SUCCEEDED) { register_service_level(&adapter->service_level); queue_work(adapter->work_queue, &adapter->scan_work); + queue_work(adapter->work_queue, &adapter->ns_up_work); } else unregister_service_level(&adapter->service_level); + kref_put(&adapter->ref, zfcp_adapter_release); break; } diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index 6e325284fbe7..03627cfd81cd 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h @@ -80,6 +80,7 @@ extern void zfcp_erp_notify(struct zfcp_erp_action *, unsigned long); extern void zfcp_erp_timeout_handler(unsigned long); /* zfcp_fc.c */ +extern struct kmem_cache *zfcp_fc_req_cache; extern void zfcp_fc_enqueue_event(struct zfcp_adapter *, enum fc_host_event_code event_code, u32); extern void zfcp_fc_post_event(struct work_struct *); @@ -95,8 +96,10 @@ extern int zfcp_fc_gs_setup(struct 
zfcp_adapter *); extern void zfcp_fc_gs_destroy(struct zfcp_adapter *); extern int zfcp_fc_exec_bsg_job(struct fc_bsg_job *); extern int zfcp_fc_timeout_bsg_job(struct fc_bsg_job *); +extern void zfcp_fc_sym_name_update(struct work_struct *); /* zfcp_fsf.c */ +extern struct kmem_cache *zfcp_fsf_qtcb_cache; extern int zfcp_fsf_open_port(struct zfcp_erp_action *); extern int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *); extern int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *); @@ -139,9 +142,9 @@ extern struct zfcp_fsf_req *zfcp_fsf_get_req(struct zfcp_qdio *, struct qdio_buffer *); /* zfcp_scsi.c */ -extern struct zfcp_data zfcp_data; -extern int zfcp_adapter_scsi_register(struct zfcp_adapter *); -extern void zfcp_adapter_scsi_unregister(struct zfcp_adapter *); +extern struct scsi_transport_template *zfcp_scsi_transport_template; +extern int zfcp_scsi_adapter_register(struct zfcp_adapter *); +extern void zfcp_scsi_adapter_unregister(struct zfcp_adapter *); extern struct fc_function_template zfcp_transport_functions; extern void zfcp_scsi_rport_work(struct work_struct *); extern void zfcp_scsi_schedule_rport_register(struct zfcp_port *); diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index 30cf91a787a3..297e6b71ce9c 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c @@ -11,11 +11,14 @@ #include <linux/types.h> #include <linux/slab.h> +#include <linux/utsname.h> #include <scsi/fc/fc_els.h> #include <scsi/libfc.h> #include "zfcp_ext.h" #include "zfcp_fc.h" +struct kmem_cache *zfcp_fc_req_cache; + static u32 zfcp_fc_rscn_range_mask[] = { [ELS_ADDR_FMT_PORT] = 0xFFFFFF, [ELS_ADDR_FMT_AREA] = 0xFFFF00, @@ -260,24 +263,18 @@ void zfcp_fc_incoming_els(struct zfcp_fsf_req *fsf_req) zfcp_fc_incoming_rscn(fsf_req); } -static void zfcp_fc_ns_gid_pn_eval(void *data) +static void zfcp_fc_ns_gid_pn_eval(struct zfcp_fc_req *fc_req) { - struct zfcp_fc_gid_pn *gid_pn = data; - struct zfcp_fsf_ct_els *ct = &gid_pn->ct; - struct zfcp_fc_gid_pn_req *gid_pn_req = sg_virt(ct->req); - struct zfcp_fc_gid_pn_resp *gid_pn_resp = sg_virt(ct->resp); - struct zfcp_port *port = gid_pn->port; + struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els; + struct zfcp_fc_gid_pn_rsp *gid_pn_rsp = &fc_req->u.gid_pn.rsp; - if (ct->status) + if (ct_els->status) return; - if (gid_pn_resp->ct_hdr.ct_cmd != FC_FS_ACC) + if (gid_pn_rsp->ct_hdr.ct_cmd != FC_FS_ACC) return; - /* paranoia */ - if (gid_pn_req->gid_pn.fn_wwpn != port->wwpn) - return; /* looks like a valid d_id */ - port->d_id = ntoh24(gid_pn_resp->gid_pn.fp_fid); + ct_els->port->d_id = ntoh24(gid_pn_rsp->gid_pn.fp_fid); } static void zfcp_fc_complete(void *data) @@ -285,69 +282,73 @@ static void zfcp_fc_complete(void *data) complete(data); } +static void zfcp_fc_ct_ns_init(struct fc_ct_hdr *ct_hdr, u16 cmd, u16 mr_size) +{ + ct_hdr->ct_rev = FC_CT_REV; + ct_hdr->ct_fs_type = FC_FST_DIR; + ct_hdr->ct_fs_subtype = FC_NS_SUBTYPE; + ct_hdr->ct_cmd = cmd; + ct_hdr->ct_mr_size = mr_size / 4; +} + static int zfcp_fc_ns_gid_pn_request(struct zfcp_port *port, - struct zfcp_fc_gid_pn *gid_pn) + struct zfcp_fc_req *fc_req) { struct zfcp_adapter *adapter = port->adapter; DECLARE_COMPLETION_ONSTACK(completion); + struct zfcp_fc_gid_pn_req *gid_pn_req = &fc_req->u.gid_pn.req; + struct zfcp_fc_gid_pn_rsp *gid_pn_rsp = &fc_req->u.gid_pn.rsp; int ret; /* setup parameters for send generic command */ - gid_pn->port = port; - gid_pn->ct.handler = zfcp_fc_complete; - gid_pn->ct.handler_data = &completion; - gid_pn->ct.req = &gid_pn->sg_req; - 
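
/*
 * Editor's sketch: the request/response plumbing the consolidated
 * struct zfcp_fc_req uses above is one zfcp_fc_ct_ns_init()-style
 * header fill plus two single-entry scatterlists. Note that the FC CT
 * ct_mr_size field is stored in 4-byte words, hence the "/ 4" in the
 * helper. Generic buffers and names shown; only sg_init_one() is the
 * real API.
 */
#include <linux/scatterlist.h>

struct demo_req { char payload[64]; };
struct demo_rsp { char payload[64]; };

static void demo_map_buffers(struct scatterlist *sg_req,
			     struct scatterlist *sg_rsp,
			     struct demo_req *req, struct demo_rsp *rsp)
{
	/* Each buffer becomes a one-entry scatter/gather table. */
	sg_init_one(sg_req, req, sizeof(*req));
	sg_init_one(sg_rsp, rsp, sizeof(*rsp));
}
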
gid_pn->ct.resp = &gid_pn->sg_resp; - sg_init_one(&gid_pn->sg_req, &gid_pn->gid_pn_req, - sizeof(struct zfcp_fc_gid_pn_req)); - sg_init_one(&gid_pn->sg_resp, &gid_pn->gid_pn_resp, - sizeof(struct zfcp_fc_gid_pn_resp)); - - /* setup nameserver request */ - gid_pn->gid_pn_req.ct_hdr.ct_rev = FC_CT_REV; - gid_pn->gid_pn_req.ct_hdr.ct_fs_type = FC_FST_DIR; - gid_pn->gid_pn_req.ct_hdr.ct_fs_subtype = FC_NS_SUBTYPE; - gid_pn->gid_pn_req.ct_hdr.ct_options = 0; - gid_pn->gid_pn_req.ct_hdr.ct_cmd = FC_NS_GID_PN; - gid_pn->gid_pn_req.ct_hdr.ct_mr_size = ZFCP_FC_CT_SIZE_PAGE / 4; - gid_pn->gid_pn_req.gid_pn.fn_wwpn = port->wwpn; - - ret = zfcp_fsf_send_ct(&adapter->gs->ds, &gid_pn->ct, + fc_req->ct_els.port = port; + fc_req->ct_els.handler = zfcp_fc_complete; + fc_req->ct_els.handler_data = &completion; + fc_req->ct_els.req = &fc_req->sg_req; + fc_req->ct_els.resp = &fc_req->sg_rsp; + sg_init_one(&fc_req->sg_req, gid_pn_req, sizeof(*gid_pn_req)); + sg_init_one(&fc_req->sg_rsp, gid_pn_rsp, sizeof(*gid_pn_rsp)); + + zfcp_fc_ct_ns_init(&gid_pn_req->ct_hdr, + FC_NS_GID_PN, ZFCP_FC_CT_SIZE_PAGE); + gid_pn_req->gid_pn.fn_wwpn = port->wwpn; + + ret = zfcp_fsf_send_ct(&adapter->gs->ds, &fc_req->ct_els, adapter->pool.gid_pn_req, ZFCP_FC_CTELS_TMO); if (!ret) { wait_for_completion(&completion); - zfcp_fc_ns_gid_pn_eval(gid_pn); + zfcp_fc_ns_gid_pn_eval(fc_req); } return ret; } /** - * zfcp_fc_ns_gid_pn_request - initiate GID_PN nameserver request + * zfcp_fc_ns_gid_pn - initiate GID_PN nameserver request * @port: port where GID_PN request is needed * return: -ENOMEM on error, 0 otherwise */ static int zfcp_fc_ns_gid_pn(struct zfcp_port *port) { int ret; - struct zfcp_fc_gid_pn *gid_pn; + struct zfcp_fc_req *fc_req; struct zfcp_adapter *adapter = port->adapter; - gid_pn = mempool_alloc(adapter->pool.gid_pn, GFP_ATOMIC); - if (!gid_pn) + fc_req = mempool_alloc(adapter->pool.gid_pn, GFP_ATOMIC); + if (!fc_req) return -ENOMEM; - memset(gid_pn, 0, sizeof(*gid_pn)); + memset(fc_req, 0, sizeof(*fc_req)); ret = zfcp_fc_wka_port_get(&adapter->gs->ds); if (ret) goto out; - ret = zfcp_fc_ns_gid_pn_request(port, gid_pn); + ret = zfcp_fc_ns_gid_pn_request(port, fc_req); zfcp_fc_wka_port_put(&adapter->gs->ds); out: - mempool_free(gid_pn, adapter->pool.gid_pn); + mempool_free(fc_req, adapter->pool.gid_pn); return ret; } @@ -419,11 +420,11 @@ void zfcp_fc_plogi_evaluate(struct zfcp_port *port, struct fc_els_flogi *plogi) static void zfcp_fc_adisc_handler(void *data) { - struct zfcp_fc_els_adisc *adisc = data; - struct zfcp_port *port = adisc->els.port; - struct fc_els_adisc *adisc_resp = &adisc->adisc_resp; + struct zfcp_fc_req *fc_req = data; + struct zfcp_port *port = fc_req->ct_els.port; + struct fc_els_adisc *adisc_resp = &fc_req->u.adisc.rsp; - if (adisc->els.status) { + if (fc_req->ct_els.status) { /* request rejected or timed out */ zfcp_erp_port_forced_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, "fcadh_1"); @@ -445,42 +446,42 @@ static void zfcp_fc_adisc_handler(void *data) out: atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status); put_device(&port->dev); - kmem_cache_free(zfcp_data.adisc_cache, adisc); + kmem_cache_free(zfcp_fc_req_cache, fc_req); } static int zfcp_fc_adisc(struct zfcp_port *port) { - struct zfcp_fc_els_adisc *adisc; + struct zfcp_fc_req *fc_req; struct zfcp_adapter *adapter = port->adapter; + struct Scsi_Host *shost = adapter->scsi_host; int ret; - adisc = kmem_cache_zalloc(zfcp_data.adisc_cache, GFP_ATOMIC); - if (!adisc) + fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_ATOMIC); + if 
(!fc_req) return -ENOMEM; - adisc->els.port = port; - adisc->els.req = &adisc->req; - adisc->els.resp = &adisc->resp; - sg_init_one(adisc->els.req, &adisc->adisc_req, + fc_req->ct_els.port = port; + fc_req->ct_els.req = &fc_req->sg_req; + fc_req->ct_els.resp = &fc_req->sg_rsp; + sg_init_one(&fc_req->sg_req, &fc_req->u.adisc.req, sizeof(struct fc_els_adisc)); - sg_init_one(adisc->els.resp, &adisc->adisc_resp, + sg_init_one(&fc_req->sg_rsp, &fc_req->u.adisc.rsp, sizeof(struct fc_els_adisc)); - adisc->els.handler = zfcp_fc_adisc_handler; - adisc->els.handler_data = adisc; + fc_req->ct_els.handler = zfcp_fc_adisc_handler; + fc_req->ct_els.handler_data = fc_req; /* acc. to FC-FS, hard_nport_id in ADISC should not be set for ports without FC-AL-2 capability, so we don't set it */ - adisc->adisc_req.adisc_wwpn = fc_host_port_name(adapter->scsi_host); - adisc->adisc_req.adisc_wwnn = fc_host_node_name(adapter->scsi_host); - adisc->adisc_req.adisc_cmd = ELS_ADISC; - hton24(adisc->adisc_req.adisc_port_id, - fc_host_port_id(adapter->scsi_host)); + fc_req->u.adisc.req.adisc_wwpn = fc_host_port_name(shost); + fc_req->u.adisc.req.adisc_wwnn = fc_host_node_name(shost); + fc_req->u.adisc.req.adisc_cmd = ELS_ADISC; + hton24(fc_req->u.adisc.req.adisc_port_id, fc_host_port_id(shost)); - ret = zfcp_fsf_send_els(adapter, port->d_id, &adisc->els, + ret = zfcp_fsf_send_els(adapter, port->d_id, &fc_req->ct_els, ZFCP_FC_CTELS_TMO); if (ret) - kmem_cache_free(zfcp_data.adisc_cache, adisc); + kmem_cache_free(zfcp_fc_req_cache, fc_req); return ret; } @@ -528,68 +529,42 @@ void zfcp_fc_test_link(struct zfcp_port *port) put_device(&port->dev); } -static void zfcp_free_sg_env(struct zfcp_fc_gpn_ft *gpn_ft, int buf_num) +static struct zfcp_fc_req *zfcp_alloc_sg_env(int buf_num) { - struct scatterlist *sg = &gpn_ft->sg_req; - - kmem_cache_free(zfcp_data.gpn_ft_cache, sg_virt(sg)); - zfcp_sg_free_table(gpn_ft->sg_resp, buf_num); - - kfree(gpn_ft); -} + struct zfcp_fc_req *fc_req; -static struct zfcp_fc_gpn_ft *zfcp_alloc_sg_env(int buf_num) -{ - struct zfcp_fc_gpn_ft *gpn_ft; - struct zfcp_fc_gpn_ft_req *req; - - gpn_ft = kzalloc(sizeof(*gpn_ft), GFP_KERNEL); - if (!gpn_ft) + fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_KERNEL); + if (!fc_req) return NULL; - req = kmem_cache_zalloc(zfcp_data.gpn_ft_cache, GFP_KERNEL); - if (!req) { - kfree(gpn_ft); - gpn_ft = NULL; - goto out; + if (zfcp_sg_setup_table(&fc_req->sg_rsp, buf_num)) { + kmem_cache_free(zfcp_fc_req_cache, fc_req); + return NULL; } - sg_init_one(&gpn_ft->sg_req, req, sizeof(*req)); - if (zfcp_sg_setup_table(gpn_ft->sg_resp, buf_num)) { - zfcp_free_sg_env(gpn_ft, buf_num); - gpn_ft = NULL; - } -out: - return gpn_ft; -} + sg_init_one(&fc_req->sg_req, &fc_req->u.gpn_ft.req, + sizeof(struct zfcp_fc_gpn_ft_req)); + return fc_req; +} -static int zfcp_fc_send_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft, +static int zfcp_fc_send_gpn_ft(struct zfcp_fc_req *fc_req, struct zfcp_adapter *adapter, int max_bytes) { - struct zfcp_fsf_ct_els *ct = &gpn_ft->ct; - struct zfcp_fc_gpn_ft_req *req = sg_virt(&gpn_ft->sg_req); + struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els; + struct zfcp_fc_gpn_ft_req *req = &fc_req->u.gpn_ft.req; DECLARE_COMPLETION_ONSTACK(completion); int ret; - /* prepare CT IU for GPN_FT */ - req->ct_hdr.ct_rev = FC_CT_REV; - req->ct_hdr.ct_fs_type = FC_FST_DIR; - req->ct_hdr.ct_fs_subtype = FC_NS_SUBTYPE; - req->ct_hdr.ct_options = 0; - req->ct_hdr.ct_cmd = FC_NS_GPN_FT; - req->ct_hdr.ct_mr_size = max_bytes / 4; - req->gpn_ft.fn_domain_id_scope = 0; - 
req->gpn_ft.fn_area_id_scope = 0; + zfcp_fc_ct_ns_init(&req->ct_hdr, FC_NS_GPN_FT, max_bytes); req->gpn_ft.fn_fc4_type = FC_TYPE_FCP; - /* prepare zfcp_send_ct */ - ct->handler = zfcp_fc_complete; - ct->handler_data = &completion; - ct->req = &gpn_ft->sg_req; - ct->resp = gpn_ft->sg_resp; + ct_els->handler = zfcp_fc_complete; + ct_els->handler_data = &completion; + ct_els->req = &fc_req->sg_req; + ct_els->resp = &fc_req->sg_rsp; - ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct, NULL, + ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct_els, NULL, ZFCP_FC_CTELS_TMO); if (!ret) wait_for_completion(&completion); @@ -610,11 +585,11 @@ static void zfcp_fc_validate_port(struct zfcp_port *port, struct list_head *lh) list_move_tail(&port->list, lh); } -static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft, +static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_req *fc_req, struct zfcp_adapter *adapter, int max_entries) { - struct zfcp_fsf_ct_els *ct = &gpn_ft->ct; - struct scatterlist *sg = gpn_ft->sg_resp; + struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els; + struct scatterlist *sg = &fc_req->sg_rsp; struct fc_ct_hdr *hdr = sg_virt(sg); struct fc_gpn_ft_resp *acc = sg_virt(sg); struct zfcp_port *port, *tmp; @@ -623,7 +598,7 @@ static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_gpn_ft *gpn_ft, u32 d_id; int ret = 0, x, last = 0; - if (ct->status) + if (ct_els->status) return -EIO; if (hdr->ct_cmd != FC_FS_ACC) { @@ -687,7 +662,7 @@ void zfcp_fc_scan_ports(struct work_struct *work) struct zfcp_adapter *adapter = container_of(work, struct zfcp_adapter, scan_work); int ret, i; - struct zfcp_fc_gpn_ft *gpn_ft; + struct zfcp_fc_req *fc_req; int chain, max_entries, buf_num, max_bytes; chain = adapter->adapter_features & FSF_FEATURE_ELS_CT_CHAINED_SBALS; @@ -702,25 +677,145 @@ void zfcp_fc_scan_ports(struct work_struct *work) if (zfcp_fc_wka_port_get(&adapter->gs->ds)) return; - gpn_ft = zfcp_alloc_sg_env(buf_num); - if (!gpn_ft) + fc_req = zfcp_alloc_sg_env(buf_num); + if (!fc_req) goto out; for (i = 0; i < 3; i++) { - ret = zfcp_fc_send_gpn_ft(gpn_ft, adapter, max_bytes); + ret = zfcp_fc_send_gpn_ft(fc_req, adapter, max_bytes); if (!ret) { - ret = zfcp_fc_eval_gpn_ft(gpn_ft, adapter, max_entries); + ret = zfcp_fc_eval_gpn_ft(fc_req, adapter, max_entries); if (ret == -EAGAIN) ssleep(1); else break; } } - zfcp_free_sg_env(gpn_ft, buf_num); + zfcp_sg_free_table(&fc_req->sg_rsp, buf_num); + kmem_cache_free(zfcp_fc_req_cache, fc_req); out: zfcp_fc_wka_port_put(&adapter->gs->ds); } +static int zfcp_fc_gspn(struct zfcp_adapter *adapter, + struct zfcp_fc_req *fc_req) +{ + DECLARE_COMPLETION_ONSTACK(completion); + char devno[] = "DEVNO:"; + struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els; + struct zfcp_fc_gspn_req *gspn_req = &fc_req->u.gspn.req; + struct zfcp_fc_gspn_rsp *gspn_rsp = &fc_req->u.gspn.rsp; + int ret; + + zfcp_fc_ct_ns_init(&gspn_req->ct_hdr, FC_NS_GSPN_ID, + FC_SYMBOLIC_NAME_SIZE); + hton24(gspn_req->gspn.fp_fid, fc_host_port_id(adapter->scsi_host)); + + sg_init_one(&fc_req->sg_req, gspn_req, sizeof(*gspn_req)); + sg_init_one(&fc_req->sg_rsp, gspn_rsp, sizeof(*gspn_rsp)); + + ct_els->handler = zfcp_fc_complete; + ct_els->handler_data = &completion; + ct_els->req = &fc_req->sg_req; + ct_els->resp = &fc_req->sg_rsp; + + ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct_els, NULL, + ZFCP_FC_CTELS_TMO); + if (ret) + return ret; + + wait_for_completion(&completion); + if (ct_els->status) + return ct_els->status; + + if (fc_host_port_type(adapter->scsi_host) == FC_PORTTYPE_NPIV && + !(strstr(gspn_rsp->gspn.fp_name, 
devno))) + snprintf(fc_host_symbolic_name(adapter->scsi_host), + FC_SYMBOLIC_NAME_SIZE, "%s%s %s NAME: %s", + gspn_rsp->gspn.fp_name, devno, + dev_name(&adapter->ccw_device->dev), + init_utsname()->nodename); + else + strlcpy(fc_host_symbolic_name(adapter->scsi_host), + gspn_rsp->gspn.fp_name, FC_SYMBOLIC_NAME_SIZE); + + return 0; +} + +static void zfcp_fc_rspn(struct zfcp_adapter *adapter, + struct zfcp_fc_req *fc_req) +{ + DECLARE_COMPLETION_ONSTACK(completion); + struct Scsi_Host *shost = adapter->scsi_host; + struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els; + struct zfcp_fc_rspn_req *rspn_req = &fc_req->u.rspn.req; + struct fc_ct_hdr *rspn_rsp = &fc_req->u.rspn.rsp; + int ret, len; + + zfcp_fc_ct_ns_init(&rspn_req->ct_hdr, FC_NS_RSPN_ID, + FC_SYMBOLIC_NAME_SIZE); + hton24(rspn_req->rspn.fr_fid.fp_fid, fc_host_port_id(shost)); + len = strlcpy(rspn_req->rspn.fr_name, fc_host_symbolic_name(shost), + FC_SYMBOLIC_NAME_SIZE); + rspn_req->rspn.fr_name_len = len; + + sg_init_one(&fc_req->sg_req, rspn_req, sizeof(*rspn_req)); + sg_init_one(&fc_req->sg_rsp, rspn_rsp, sizeof(*rspn_rsp)); + + ct_els->handler = zfcp_fc_complete; + ct_els->handler_data = &completion; + ct_els->req = &fc_req->sg_req; + ct_els->resp = &fc_req->sg_rsp; + + ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct_els, NULL, + ZFCP_FC_CTELS_TMO); + if (!ret) + wait_for_completion(&completion); +} + +/** + * zfcp_fc_sym_name_update - Retrieve and update the symbolic port name + * @work: ns_up_work of the adapter where to update the symbolic port name + * + * Retrieve the current symbolic port name that may have been set by + * the hardware using the GSPN request and update the fc_host + * symbolic_name sysfs attribute. When running in NPIV mode (and hence + * the port name is unique for this system), update the symbolic port + * name to add Linux specific information and update the FC nameserver + * using the RSPN request. 
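+ *
+ * For illustration, with hypothetical values (not taken from this patch):
+ * an NPIV port whose GSPN_ID response is "IBM FICON", a device bus-ID of
+ * 0.0.1234 and a nodename of "linux1" would end up with the symbolic name
+ * "IBM FICONDEVNO: 0.0.1234 NAME: linux1", while a plain N_Port copies the
+ * GSPN_ID response unchanged.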
+ */ +void zfcp_fc_sym_name_update(struct work_struct *work) +{ + struct zfcp_adapter *adapter = container_of(work, struct zfcp_adapter, + ns_up_work); + int ret; + struct zfcp_fc_req *fc_req; + + if (fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPORT && + fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV) + return; + + fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_KERNEL); + if (!fc_req) + return; + + ret = zfcp_fc_wka_port_get(&adapter->gs->ds); + if (ret) + goto out_free; + + ret = zfcp_fc_gspn(adapter, fc_req); + if (ret || fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV) + goto out_ds_put; + + memset(fc_req, 0, sizeof(*fc_req)); + zfcp_fc_rspn(adapter, fc_req); + +out_ds_put: + zfcp_fc_wka_port_put(&adapter->gs->ds); +out_free: + kmem_cache_free(zfcp_fc_req_cache, fc_req); +} + static void zfcp_fc_ct_els_job_handler(void *data) { struct fc_bsg_job *job = data; diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h index b464ae01086c..4561f3bf7300 100644 --- a/drivers/s390/scsi/zfcp_fc.h +++ b/drivers/s390/scsi/zfcp_fc.h @@ -64,33 +64,16 @@ struct zfcp_fc_gid_pn_req { } __packed; /** - * struct zfcp_fc_gid_pn_resp - container for ct header plus gid_pn response + * struct zfcp_fc_gid_pn_rsp - container for ct header plus gid_pn response * @ct_hdr: FC GS common transport header * @gid_pn: GID_PN response */ -struct zfcp_fc_gid_pn_resp { +struct zfcp_fc_gid_pn_rsp { struct fc_ct_hdr ct_hdr; struct fc_gid_pn_resp gid_pn; } __packed; /** - * struct zfcp_fc_gid_pn - everything required in zfcp for gid_pn request - * @ct: data passed to zfcp_fsf for issuing fsf request - * @sg_req: scatterlist entry for request data - * @sg_resp: scatterlist entry for response data - * @gid_pn_req: GID_PN request data - * @gid_pn_resp: GID_PN response data - */ -struct zfcp_fc_gid_pn { - struct zfcp_fsf_ct_els ct; - struct scatterlist sg_req; - struct scatterlist sg_resp; - struct zfcp_fc_gid_pn_req gid_pn_req; - struct zfcp_fc_gid_pn_resp gid_pn_resp; - struct zfcp_port *port; -}; - -/** * struct zfcp_fc_gpn_ft - container for ct header plus gpn_ft request * @ct_hdr: FC GS common transport header * @gpn_ft: GPN_FT request @@ -101,41 +84,72 @@ struct zfcp_fc_gpn_ft_req { } __packed; /** - * struct zfcp_fc_gpn_ft_resp - container for ct header plus gpn_ft response + * struct zfcp_fc_gspn_req - container for ct header plus GSPN_ID request * @ct_hdr: FC GS common transport header - * @gpn_ft: Array of gpn_ft response data to fill one memory page + * @gspn: GSPN_ID request */ -struct zfcp_fc_gpn_ft_resp { +struct zfcp_fc_gspn_req { struct fc_ct_hdr ct_hdr; - struct fc_gpn_ft_resp gpn_ft[ZFCP_FC_GPN_FT_ENT_PAGE]; + struct fc_gid_pn_resp gspn; } __packed; /** - * struct zfcp_fc_gpn_ft - zfcp data for gpn_ft request - * @ct: data passed to zfcp_fsf for issuing fsf request - * @sg_req: scatter list entry for gpn_ft request - * @sg_resp: scatter list entries for gpn_ft responses (per memory page) + * struct zfcp_fc_gspn_rsp - container for ct header plus GSPN_ID response + * @ct_hdr: FC GS common transport header + * @gspn: GSPN_ID response + * @name: The name string of the GSPN_ID response */ -struct zfcp_fc_gpn_ft { - struct zfcp_fsf_ct_els ct; - struct scatterlist sg_req; - struct scatterlist sg_resp[ZFCP_FC_GPN_FT_NUM_BUFS]; -}; +struct zfcp_fc_gspn_rsp { + struct fc_ct_hdr ct_hdr; + struct fc_gspn_resp gspn; + char name[FC_SYMBOLIC_NAME_SIZE]; +} __packed; /** - * struct zfcp_fc_els_adisc - everything required in zfcp for issuing ELS ADISC - * @els: data required for 
issuing els fsf command - * @req: scatterlist entry for ELS ADISC request - * @resp: scatterlist entry for ELS ADISC response - * @adisc_req: ELS ADISC request data - * @adisc_resp: ELS ADISC response data + * struct zfcp_fc_rspn_req - container for ct header plus RSPN_ID request + * @ct_hdr: FC GS common transport header + * @rspn: RSPN_ID request + * @name: The name string of the RSPN_ID request */ -struct zfcp_fc_els_adisc { - struct zfcp_fsf_ct_els els; - struct scatterlist req; - struct scatterlist resp; - struct fc_els_adisc adisc_req; - struct fc_els_adisc adisc_resp; +struct zfcp_fc_rspn_req { + struct fc_ct_hdr ct_hdr; + struct fc_ns_rspn rspn; + char name[FC_SYMBOLIC_NAME_SIZE]; +} __packed; + +/** + * struct zfcp_fc_req - Container for FC ELS and CT requests sent from zfcp + * @ct_els: data required for issuing fsf command + * @sg_req: scatterlist entry for request data + * @sg_rsp: scatterlist entry for response data + * @u: request specific data + */ +struct zfcp_fc_req { + struct zfcp_fsf_ct_els ct_els; + struct scatterlist sg_req; + struct scatterlist sg_rsp; + union { + struct { + struct fc_els_adisc req; + struct fc_els_adisc rsp; + } adisc; + struct { + struct zfcp_fc_gid_pn_req req; + struct zfcp_fc_gid_pn_rsp rsp; + } gid_pn; + struct { + struct scatterlist sg_rsp2[ZFCP_FC_GPN_FT_NUM_BUFS - 1]; + struct zfcp_fc_gpn_ft_req req; + } gpn_ft; + struct { + struct zfcp_fc_gspn_req req; + struct zfcp_fc_gspn_rsp rsp; + } gspn; + struct { + struct zfcp_fc_rspn_req req; + struct fc_ct_hdr rsp; + } rspn; + } u; }; /** @@ -192,14 +206,21 @@ struct zfcp_fc_wka_ports { * zfcp_fc_scsi_to_fcp - setup FCP command with data from scsi_cmnd * @fcp: fcp_cmnd to setup * @scsi: scsi_cmnd where to get LUN, task attributes/flags and CDB + * @tm_flags: task management flags to setup task management command */ static inline -void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi) +void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi, + u8 tm_flags) { char tag[2]; int_to_scsilun(scsi->device->lun, (struct scsi_lun *) &fcp->fc_lun); + if (unlikely(tm_flags)) { + fcp->fc_tm_flags = tm_flags; + return; + } + if (scsi_populate_tag_msg(scsi, tag)) { switch (tag[0]) { case MSG_ORDERED_TAG: @@ -226,19 +247,6 @@ void zfcp_fc_scsi_to_fcp(struct fcp_cmnd *fcp, struct scsi_cmnd *scsi) } /** - * zfcp_fc_fcp_tm - setup FCP command as task management command - * @fcp: fcp_cmnd to setup - * @dev: scsi_device where to send the task management command - * @tm: task management flags to setup tm command - */ -static inline -void zfcp_fc_fcp_tm(struct fcp_cmnd *fcp, struct scsi_device *dev, u8 tm_flags) -{ - int_to_scsilun(dev->lun, (struct scsi_lun *) &fcp->fc_lun); - fcp->fc_tm_flags |= tm_flags; -} - -/** * zfcp_fc_eval_fcp_rsp - evaluate FCP RSP IU and update scsi_cmnd accordingly * @fcp_rsp: FCP RSP IU to evaluate * @scsi: SCSI command where to update status and sense buffer diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 60ff9d172c79..a0e05ef65924 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -18,6 +18,8 @@ #include "zfcp_qdio.h" #include "zfcp_reqlist.h" +struct kmem_cache *zfcp_fsf_qtcb_cache; + static void zfcp_fsf_request_timeout_handler(unsigned long data) { struct zfcp_adapter *adapter = (struct zfcp_adapter *) data; @@ -83,7 +85,7 @@ void zfcp_fsf_req_free(struct zfcp_fsf_req *req) } if (likely(req->qtcb)) - kmem_cache_free(zfcp_data.qtcb_cache, req->qtcb); + kmem_cache_free(zfcp_fsf_qtcb_cache, req->qtcb);
kfree(req); } @@ -212,7 +214,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req) if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) { zfcp_dbf_hba_fsf_uss("fssrh_1", req); - mempool_free(sr_buf, adapter->pool.status_read_data); + mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data); zfcp_fsf_req_free(req); return; } @@ -265,7 +267,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req) break; } - mempool_free(sr_buf, adapter->pool.status_read_data); + mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data); zfcp_fsf_req_free(req); atomic_inc(&adapter->stat_miss); @@ -628,7 +630,7 @@ static struct fsf_qtcb *zfcp_qtcb_alloc(mempool_t *pool) if (likely(pool)) qtcb = mempool_alloc(pool, GFP_ATOMIC); else - qtcb = kmem_cache_alloc(zfcp_data.qtcb_cache, GFP_ATOMIC); + qtcb = kmem_cache_alloc(zfcp_fsf_qtcb_cache, GFP_ATOMIC); if (unlikely(!qtcb)) return NULL; @@ -723,6 +725,7 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio) struct zfcp_adapter *adapter = qdio->adapter; struct zfcp_fsf_req *req; struct fsf_status_read_buffer *sr_buf; + struct page *page; int retval = -EIO; spin_lock_irq(&qdio->req_q_lock); @@ -736,11 +739,12 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio) goto out; } - sr_buf = mempool_alloc(adapter->pool.status_read_data, GFP_ATOMIC); - if (!sr_buf) { + page = mempool_alloc(adapter->pool.sr_data, GFP_ATOMIC); + if (!page) { retval = -ENOMEM; goto failed_buf; } + sr_buf = page_address(page); memset(sr_buf, 0, sizeof(*sr_buf)); req->data = sr_buf; @@ -755,7 +759,7 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio) failed_req_send: req->data = NULL; - mempool_free(sr_buf, adapter->pool.status_read_data); + mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data); failed_buf: zfcp_dbf_hba_fsf_uss("fssr__1", req); zfcp_fsf_req_free(req); @@ -1552,7 +1556,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port) SBAL_FLAGS0_TYPE_READ, qdio->adapter->pool.erp_req); - if (unlikely(IS_ERR(req))) { + if (IS_ERR(req)) { retval = PTR_ERR(req); goto out; } @@ -1605,7 +1609,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port) SBAL_FLAGS0_TYPE_READ, qdio->adapter->pool.erp_req); - if (unlikely(IS_ERR(req))) { + if (IS_ERR(req)) { retval = PTR_ERR(req); goto out; } @@ -2206,7 +2210,7 @@ int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd) zfcp_fsf_set_data_dir(scsi_cmnd, &io->data_direction); fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd; - zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd); + zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd, 0); if (scsi_prot_sg_count(scsi_cmnd)) { zfcp_qdio_set_data_div(qdio, &req->qdio_req, @@ -2284,7 +2288,6 @@ struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *scmnd, goto out; } - req->status |= ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT; req->data = scmnd; req->handler = zfcp_fsf_fcp_task_mgmt_handler; req->qtcb->header.lun_handle = zfcp_sdev->lun_handle; @@ -2296,7 +2299,7 @@ struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *scmnd, zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd; - zfcp_fc_fcp_tm(fcp_cmnd, scmnd->device, tm_flags); + zfcp_fc_scsi_to_fcp(fcp_cmnd, scmnd, tm_flags); zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT); if (!zfcp_fsf_req_send(req)) diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index ddb5800823a9..2a4991d6d4d5 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c @@ -292,7 +292,37 @@ static int zfcp_scsi_eh_host_reset_handler(struct 
scsi_cmnd *scpnt) return SUCCESS; } -int zfcp_adapter_scsi_register(struct zfcp_adapter *adapter) +struct scsi_transport_template *zfcp_scsi_transport_template; + +static struct scsi_host_template zfcp_scsi_host_template = { + .module = THIS_MODULE, + .name = "zfcp", + .queuecommand = zfcp_scsi_queuecommand, + .eh_abort_handler = zfcp_scsi_eh_abort_handler, + .eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler, + .eh_target_reset_handler = zfcp_scsi_eh_target_reset_handler, + .eh_host_reset_handler = zfcp_scsi_eh_host_reset_handler, + .slave_alloc = zfcp_scsi_slave_alloc, + .slave_configure = zfcp_scsi_slave_configure, + .slave_destroy = zfcp_scsi_slave_destroy, + .change_queue_depth = zfcp_scsi_change_queue_depth, + .proc_name = "zfcp", + .can_queue = 4096, + .this_id = -1, + .sg_tablesize = ZFCP_QDIO_MAX_SBALES_PER_REQ, + .max_sectors = (ZFCP_QDIO_MAX_SBALES_PER_REQ * 8), + .dma_boundary = ZFCP_QDIO_SBALE_LEN - 1, + .cmd_per_lun = 1, + .use_clustering = 1, + .shost_attrs = zfcp_sysfs_shost_attrs, + .sdev_attrs = zfcp_sysfs_sdev_attrs, +}; + +/** + * zfcp_scsi_adapter_register - Register SCSI and FC host with SCSI midlayer + * @adapter: The zfcp adapter to register with the SCSI midlayer + */ +int zfcp_scsi_adapter_register(struct zfcp_adapter *adapter) { struct ccw_dev_id dev_id; @@ -301,7 +331,7 @@ int zfcp_adapter_scsi_register(struct zfcp_adapter *adapter) ccw_device_get_id(adapter->ccw_device, &dev_id); /* register adapter as SCSI host with mid layer of SCSI stack */ - adapter->scsi_host = scsi_host_alloc(&zfcp_data.scsi_host_template, + adapter->scsi_host = scsi_host_alloc(&zfcp_scsi_host_template, sizeof (struct zfcp_adapter *)); if (!adapter->scsi_host) { dev_err(&adapter->ccw_device->dev, @@ -316,7 +346,7 @@ int zfcp_adapter_scsi_register(struct zfcp_adapter *adapter) adapter->scsi_host->max_channel = 0; adapter->scsi_host->unique_id = dev_id.devno; adapter->scsi_host->max_cmd_len = 16; /* in struct fcp_cmnd */ - adapter->scsi_host->transportt = zfcp_data.scsi_transport_template; + adapter->scsi_host->transportt = zfcp_scsi_transport_template; adapter->scsi_host->hostdata[0] = (unsigned long) adapter; @@ -328,7 +358,11 @@ int zfcp_adapter_scsi_register(struct zfcp_adapter *adapter) return 0; } -void zfcp_adapter_scsi_unregister(struct zfcp_adapter *adapter) +/** + * zfcp_scsi_adapter_unregister - Unregister SCSI and FC host from SCSI midlayer + * @adapter: The zfcp adapter to unregister. 
+ */ +void zfcp_scsi_adapter_unregister(struct zfcp_adapter *adapter) { struct Scsi_Host *shost; struct zfcp_port *port; @@ -346,8 +380,6 @@ void zfcp_adapter_scsi_unregister(struct zfcp_adapter *adapter) scsi_remove_host(shost); scsi_host_put(shost); adapter->scsi_host = NULL; - - return; } static struct fc_host_statistics* @@ -688,33 +720,8 @@ struct fc_function_template zfcp_transport_functions = { /* no functions registered for following dynamic attributes but directly set by LLDD */ .show_host_port_type = 1, + .show_host_symbolic_name = 1, .show_host_speed = 1, .show_host_port_id = 1, .dd_bsg_size = sizeof(struct zfcp_fsf_ct_els), }; - -struct zfcp_data zfcp_data = { - .scsi_host_template = { - .name = "zfcp", - .module = THIS_MODULE, - .proc_name = "zfcp", - .change_queue_depth = zfcp_scsi_change_queue_depth, - .slave_alloc = zfcp_scsi_slave_alloc, - .slave_configure = zfcp_scsi_slave_configure, - .slave_destroy = zfcp_scsi_slave_destroy, - .queuecommand = zfcp_scsi_queuecommand, - .eh_abort_handler = zfcp_scsi_eh_abort_handler, - .eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler, - .eh_target_reset_handler = zfcp_scsi_eh_target_reset_handler, - .eh_host_reset_handler = zfcp_scsi_eh_host_reset_handler, - .can_queue = 4096, - .this_id = -1, - .sg_tablesize = ZFCP_QDIO_MAX_SBALES_PER_REQ, - .cmd_per_lun = 1, - .use_clustering = 1, - .sdev_attrs = zfcp_sysfs_sdev_attrs, - .max_sectors = (ZFCP_QDIO_MAX_SBALES_PER_REQ * 8), - .dma_boundary = ZFCP_QDIO_SBALE_LEN - 1, - .shost_attrs = zfcp_sysfs_shost_attrs, - }, -}; diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 8616496ffc02..4a1f029c4fe9 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -381,6 +381,7 @@ config ISCSI_BOOT_SYSFS source "drivers/scsi/cxgbi/Kconfig" source "drivers/scsi/bnx2i/Kconfig" +source "drivers/scsi/bnx2fc/Kconfig" source "drivers/scsi/be2iscsi/Kconfig" config SGIWD93_SCSI diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile index ef6de669424b..7ad0b8a79ae8 100644 --- a/drivers/scsi/Makefile +++ b/drivers/scsi/Makefile @@ -40,6 +40,7 @@ obj-$(CONFIG_LIBFC) += libfc/ obj-$(CONFIG_LIBFCOE) += fcoe/ obj-$(CONFIG_FCOE) += fcoe/ obj-$(CONFIG_FCOE_FNIC) += fnic/ +obj-$(CONFIG_SCSI_BNX2X_FCOE) += libfc/ fcoe/ bnx2fc/ obj-$(CONFIG_ISCSI_TCP) += libiscsi.o libiscsi_tcp.o iscsi_tcp.o obj-$(CONFIG_INFINIBAND_ISER) += libiscsi.o obj-$(CONFIG_ISCSI_BOOT_SYSFS) += iscsi_boot_sysfs.o diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c index 9a5629f94f95..e7cd2fcbe036 100644 --- a/drivers/scsi/NCR5380.c +++ b/drivers/scsi/NCR5380.c @@ -936,8 +936,7 @@ static void NCR5380_exit(struct Scsi_Host *instance) { struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *) instance->hostdata; - cancel_delayed_work(&hostdata->coroutine); - flush_scheduled_work(); + cancel_delayed_work_sync(&hostdata->coroutine); } /** diff --git a/drivers/scsi/aic7xxx/aic79xx.h b/drivers/scsi/aic7xxx/aic79xx.h index be5558ab84ea..95ee50385188 100644 --- a/drivers/scsi/aic7xxx/aic79xx.h +++ b/drivers/scsi/aic7xxx/aic79xx.h @@ -672,7 +672,7 @@ struct scb_data { /************************ Target Mode Definitions *****************************/ /* - * Connection desciptor for select-in requests in target mode. + * Connection descriptor for select-in requests in target mode. 
*/ struct target_cmd { uint8_t scsiid; /* Our ID and the initiator's ID */ diff --git a/drivers/scsi/aic7xxx/aic7xxx.h b/drivers/scsi/aic7xxx/aic7xxx.h index e4e651cca3e4..17444bc18bca 100644 --- a/drivers/scsi/aic7xxx/aic7xxx.h +++ b/drivers/scsi/aic7xxx/aic7xxx.h @@ -618,7 +618,7 @@ struct scb_data { /************************ Target Mode Definitions *****************************/ /* - * Connection desciptor for select-in requests in target mode. + * Connection descriptor for select-in requests in target mode. */ struct target_cmd { uint8_t scsiid; /* Our ID and the initiator's ID */ diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c index 3f5a542a7793..e021b4812d58 100644 --- a/drivers/scsi/aic7xxx/aic7xxx_core.c +++ b/drivers/scsi/aic7xxx/aic7xxx_core.c @@ -4780,7 +4780,7 @@ ahc_init_scbdata(struct ahc_softc *ahc) SLIST_INIT(&scb_data->sg_maps); /* Allocate SCB resources */ - scb_data->scbarray = (struct scb *)kmalloc(sizeof(struct scb) * AHC_SCB_MAX_ALLOC, GFP_ATOMIC); + scb_data->scbarray = kmalloc(sizeof(struct scb) * AHC_SCB_MAX_ALLOC, GFP_ATOMIC); if (scb_data->scbarray == NULL) return (ENOMEM); memset(scb_data->scbarray, 0, sizeof(struct scb) * AHC_SCB_MAX_ALLOC); diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c index 984bd527c6c9..da7b9887ec48 100644 --- a/drivers/scsi/arcmsr/arcmsr_hba.c +++ b/drivers/scsi/arcmsr/arcmsr_hba.c @@ -1020,7 +1020,7 @@ static void arcmsr_remove(struct pci_dev *pdev) int poll_count = 0; arcmsr_free_sysfs_attr(acb); scsi_remove_host(host); - flush_scheduled_work(); + flush_work_sync(&acb->arcmsr_do_message_isr_bh); del_timer_sync(&acb->eternal_timer); arcmsr_disable_outbound_ints(acb); arcmsr_stop_adapter_bgrb(acb); @@ -1066,7 +1066,7 @@ static void arcmsr_shutdown(struct pci_dev *pdev) (struct AdapterControlBlock *)host->hostdata; del_timer_sync(&acb->eternal_timer); arcmsr_disable_outbound_ints(acb); - flush_scheduled_work(); + flush_work_sync(&acb->arcmsr_do_message_isr_bh); arcmsr_stop_adapter_bgrb(acb); arcmsr_flush_adapter_cache(acb); } diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c index eaaa8813067d..868cc5590145 100644 --- a/drivers/scsi/be2iscsi/be_iscsi.c +++ b/drivers/scsi/be2iscsi/be_iscsi.c @@ -210,28 +210,20 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session, } /** - * beiscsi_conn_get_param - get the iscsi parameter - * @cls_conn: pointer to iscsi cls conn + * beiscsi_ep_get_param - get the iscsi parameter + * @ep: pointer to iscsi ep * @param: parameter type identifier * @buf: buffer pointer * * returns iscsi parameter */ -int beiscsi_conn_get_param(struct iscsi_cls_conn *cls_conn, +int beiscsi_ep_get_param(struct iscsi_endpoint *ep, enum iscsi_param param, char *buf) { - struct beiscsi_endpoint *beiscsi_ep; - struct iscsi_conn *conn = cls_conn->dd_data; - struct beiscsi_conn *beiscsi_conn = conn->dd_data; + struct beiscsi_endpoint *beiscsi_ep = ep->dd_data; int len = 0; SE_DEBUG(DBG_LVL_8, "In beiscsi_conn_get_param, param= %d\n", param); - beiscsi_ep = beiscsi_conn->ep; - if (!beiscsi_ep) { - SE_DEBUG(DBG_LVL_1, - "In beiscsi_conn_get_param , no beiscsi_ep\n"); - return -ENODEV; - } switch (param) { case ISCSI_PARAM_CONN_PORT: @@ -244,7 +236,7 @@ int beiscsi_conn_get_param(struct iscsi_cls_conn *cls_conn, len = sprintf(buf, "%pI6\n", &beiscsi_ep->dst6_addr); break; default: - return iscsi_conn_get_param(cls_conn, param, buf); + return -ENOSYS; } return len; } diff --git a/drivers/scsi/be2iscsi/be_iscsi.h 
b/drivers/scsi/be2iscsi/be_iscsi.h index 8950a702b9f4..9c532797c29e 100644 --- a/drivers/scsi/be2iscsi/be_iscsi.h +++ b/drivers/scsi/be2iscsi/be_iscsi.h @@ -48,8 +48,8 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session, struct iscsi_cls_conn *cls_conn, uint64_t transport_fd, int is_leading); -int beiscsi_conn_get_param(struct iscsi_cls_conn *cls_conn, - enum iscsi_param param, char *buf); +int beiscsi_ep_get_param(struct iscsi_endpoint *ep, enum iscsi_param param, + char *buf); int beiscsi_get_host_param(struct Scsi_Host *shost, enum iscsi_host_param param, char *buf); diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index 638c72b7f94a..24e20ba9633c 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c @@ -4384,7 +4384,7 @@ struct iscsi_transport beiscsi_iscsi_transport = { .bind_conn = beiscsi_conn_bind, .destroy_conn = iscsi_conn_teardown, .set_param = beiscsi_set_param, - .get_conn_param = beiscsi_conn_get_param, + .get_conn_param = iscsi_conn_get_param, .get_session_param = iscsi_session_get_param, .get_host_param = beiscsi_get_host_param, .start_conn = beiscsi_conn_start, @@ -4395,6 +4395,7 @@ struct iscsi_transport beiscsi_iscsi_transport = { .alloc_pdu = beiscsi_alloc_pdu, .parse_pdu_itt = beiscsi_parse_pdu, .get_stats = beiscsi_conn_get_stats, + .get_ep_param = beiscsi_ep_get_param, .ep_connect = beiscsi_ep_connect, .ep_poll = beiscsi_ep_poll, .ep_disconnect = beiscsi_ep_disconnect, diff --git a/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h b/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h new file mode 100644 index 000000000000..69d031d98469 --- /dev/null +++ b/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h @@ -0,0 +1,1080 @@ +#ifndef __57XX_FCOE_HSI_LINUX_LE__ +#define __57XX_FCOE_HSI_LINUX_LE__ + +/* + * common data for all protocols + */ +struct b577xx_doorbell_hdr { + u8 header; +#define B577XX_DOORBELL_HDR_RX (0x1<<0) +#define B577XX_DOORBELL_HDR_RX_SHIFT 0 +#define B577XX_DOORBELL_HDR_DB_TYPE (0x1<<1) +#define B577XX_DOORBELL_HDR_DB_TYPE_SHIFT 1 +#define B577XX_DOORBELL_HDR_DPM_SIZE (0x3<<2) +#define B577XX_DOORBELL_HDR_DPM_SIZE_SHIFT 2 +#define B577XX_DOORBELL_HDR_CONN_TYPE (0xF<<4) +#define B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT 4 +}; + +/* + * doorbell message sent to the chip + */ +struct b577xx_doorbell_set_prod { +#if defined(__BIG_ENDIAN) + u16 prod; + u8 zero_fill1; + struct b577xx_doorbell_hdr header; +#elif defined(__LITTLE_ENDIAN) + struct b577xx_doorbell_hdr header; + u8 zero_fill1; + u16 prod; +#endif +}; + + +struct regpair { + __le32 lo; + __le32 hi; +}; + + +/* + * Fixed size structure in order to plant it in Union structure + */ +struct fcoe_abts_rsp_union { + u32 r_ctl; + u32 abts_rsp_payload[7]; +}; + + +/* + * 4 regs size + */ +struct fcoe_bd_ctx { + u32 buf_addr_hi; + u32 buf_addr_lo; +#if defined(__BIG_ENDIAN) + u16 rsrv0; + u16 buf_len; +#elif defined(__LITTLE_ENDIAN) + u16 buf_len; + u16 rsrv0; +#endif +#if defined(__BIG_ENDIAN) + u16 rsrv1; + u16 flags; +#elif defined(__LITTLE_ENDIAN) + u16 flags; + u16 rsrv1; +#endif +}; + + +struct fcoe_cleanup_flow_info { +#if defined(__BIG_ENDIAN) + u16 reserved1; + u16 task_id; +#elif defined(__LITTLE_ENDIAN) + u16 task_id; + u16 reserved1; +#endif + u32 reserved2[7]; +}; + + +struct fcoe_fcp_cmd_payload { + u32 opaque[8]; +}; + +struct fcoe_fc_hdr { +#if defined(__BIG_ENDIAN) + u8 cs_ctl; + u8 s_id[3]; +#elif defined(__LITTLE_ENDIAN) + u8 s_id[3]; + u8 cs_ctl; +#endif +#if defined(__BIG_ENDIAN) + u8 r_ctl; + u8 d_id[3]; +#elif defined(__LITTLE_ENDIAN) + u8 d_id[3]; + u8 
r_ctl; +#endif +#if defined(__BIG_ENDIAN) + u8 seq_id; + u8 df_ctl; + u16 seq_cnt; +#elif defined(__LITTLE_ENDIAN) + u16 seq_cnt; + u8 df_ctl; + u8 seq_id; +#endif +#if defined(__BIG_ENDIAN) + u8 type; + u8 f_ctl[3]; +#elif defined(__LITTLE_ENDIAN) + u8 f_ctl[3]; + u8 type; +#endif + u32 parameters; +#if defined(__BIG_ENDIAN) + u16 ox_id; + u16 rx_id; +#elif defined(__LITTLE_ENDIAN) + u16 rx_id; + u16 ox_id; +#endif +}; + +struct fcoe_fc_frame { + struct fcoe_fc_hdr fc_hdr; + u32 reserved0[2]; +}; + +union fcoe_cmd_flow_info { + struct fcoe_fcp_cmd_payload fcp_cmd_payload; + struct fcoe_fc_frame mp_fc_frame; +}; + + + +struct fcoe_fcp_rsp_flags { + u8 flags; +#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID (0x1<<0) +#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID_SHIFT 0 +#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID (0x1<<1) +#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID_SHIFT 1 +#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER (0x1<<2) +#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER_SHIFT 2 +#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER (0x1<<3) +#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER_SHIFT 3 +#define FCOE_FCP_RSP_FLAGS_FCP_CONF_REQ (0x1<<4) +#define FCOE_FCP_RSP_FLAGS_FCP_CONF_REQ_SHIFT 4 +#define FCOE_FCP_RSP_FLAGS_FCP_BIDI_FLAGS (0x7<<5) +#define FCOE_FCP_RSP_FLAGS_FCP_BIDI_FLAGS_SHIFT 5 +}; + + +struct fcoe_fcp_rsp_payload { + struct regpair reserved0; + u32 fcp_resid; +#if defined(__BIG_ENDIAN) + u16 retry_delay_timer; + struct fcoe_fcp_rsp_flags fcp_flags; + u8 scsi_status_code; +#elif defined(__LITTLE_ENDIAN) + u8 scsi_status_code; + struct fcoe_fcp_rsp_flags fcp_flags; + u16 retry_delay_timer; +#endif + u32 fcp_rsp_len; + u32 fcp_sns_len; +}; + + +/* + * Fixed size structure in order to plant it in Union structure + */ +struct fcoe_fcp_rsp_union { + struct fcoe_fcp_rsp_payload payload; + struct regpair reserved0; +}; + + +struct fcoe_fcp_xfr_rdy_payload { + u32 burst_len; + u32 data_ro; +}; + +struct fcoe_read_flow_info { + struct fcoe_fc_hdr fc_data_in_hdr; + u32 reserved[2]; +}; + +struct fcoe_write_flow_info { + struct fcoe_fc_hdr fc_data_out_hdr; + struct fcoe_fcp_xfr_rdy_payload fcp_xfr_payload; +}; + +union fcoe_rsp_flow_info { + struct fcoe_fcp_rsp_union fcp_rsp; + struct fcoe_abts_rsp_union abts_rsp; +}; + +/* + * 32 bytes used for general purposes + */ +union fcoe_general_task_ctx { + union fcoe_cmd_flow_info cmd_info; + struct fcoe_read_flow_info read_info; + struct fcoe_write_flow_info write_info; + union fcoe_rsp_flow_info rsp_info; + struct fcoe_cleanup_flow_info cleanup_info; + u32 comp_info[8]; +}; + + +/* + * FCoE KCQ CQE parameters + */ +union fcoe_kcqe_params { + u32 reserved0[4]; +}; + +/* + * FCoE KCQ CQE + */ +struct fcoe_kcqe { + u32 fcoe_conn_id; + u32 completion_status; + u32 fcoe_conn_context_id; + union fcoe_kcqe_params params; +#if defined(__BIG_ENDIAN) + u8 flags; +#define FCOE_KCQE_RESERVED0 (0x7<<0) +#define FCOE_KCQE_RESERVED0_SHIFT 0 +#define FCOE_KCQE_RAMROD_COMPLETION (0x1<<3) +#define FCOE_KCQE_RAMROD_COMPLETION_SHIFT 3 +#define FCOE_KCQE_LAYER_CODE (0x7<<4) +#define FCOE_KCQE_LAYER_CODE_SHIFT 4 +#define FCOE_KCQE_LINKED_WITH_NEXT (0x1<<7) +#define FCOE_KCQE_LINKED_WITH_NEXT_SHIFT 7 + u8 op_code; + u16 qe_self_seq; +#elif defined(__LITTLE_ENDIAN) + u16 qe_self_seq; + u8 op_code; + u8 flags; +#define FCOE_KCQE_RESERVED0 (0x7<<0) +#define FCOE_KCQE_RESERVED0_SHIFT 0 +#define FCOE_KCQE_RAMROD_COMPLETION (0x1<<3) +#define FCOE_KCQE_RAMROD_COMPLETION_SHIFT 3 +#define FCOE_KCQE_LAYER_CODE (0x7<<4) +#define FCOE_KCQE_LAYER_CODE_SHIFT 4 +#define FCOE_KCQE_LINKED_WITH_NEXT 
(0x1<<7) +#define FCOE_KCQE_LINKED_WITH_NEXT_SHIFT 7 +#endif +}; + +/* + * FCoE KWQE header + */ +struct fcoe_kwqe_header { +#if defined(__BIG_ENDIAN) + u8 flags; +#define FCOE_KWQE_HEADER_RESERVED0 (0xF<<0) +#define FCOE_KWQE_HEADER_RESERVED0_SHIFT 0 +#define FCOE_KWQE_HEADER_LAYER_CODE (0x7<<4) +#define FCOE_KWQE_HEADER_LAYER_CODE_SHIFT 4 +#define FCOE_KWQE_HEADER_RESERVED1 (0x1<<7) +#define FCOE_KWQE_HEADER_RESERVED1_SHIFT 7 + u8 op_code; +#elif defined(__LITTLE_ENDIAN) + u8 op_code; + u8 flags; +#define FCOE_KWQE_HEADER_RESERVED0 (0xF<<0) +#define FCOE_KWQE_HEADER_RESERVED0_SHIFT 0 +#define FCOE_KWQE_HEADER_LAYER_CODE (0x7<<4) +#define FCOE_KWQE_HEADER_LAYER_CODE_SHIFT 4 +#define FCOE_KWQE_HEADER_RESERVED1 (0x1<<7) +#define FCOE_KWQE_HEADER_RESERVED1_SHIFT 7 +#endif +}; + +/* + * FCoE firmware init request 1 + */ +struct fcoe_kwqe_init1 { +#if defined(__BIG_ENDIAN) + struct fcoe_kwqe_header hdr; + u16 num_tasks; +#elif defined(__LITTLE_ENDIAN) + u16 num_tasks; + struct fcoe_kwqe_header hdr; +#endif + u32 task_list_pbl_addr_lo; + u32 task_list_pbl_addr_hi; + u32 dummy_buffer_addr_lo; + u32 dummy_buffer_addr_hi; +#if defined(__BIG_ENDIAN) + u16 rq_num_wqes; + u16 sq_num_wqes; +#elif defined(__LITTLE_ENDIAN) + u16 sq_num_wqes; + u16 rq_num_wqes; +#endif +#if defined(__BIG_ENDIAN) + u16 cq_num_wqes; + u16 rq_buffer_log_size; +#elif defined(__LITTLE_ENDIAN) + u16 rq_buffer_log_size; + u16 cq_num_wqes; +#endif +#if defined(__BIG_ENDIAN) + u8 flags; +#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE (0xF<<0) +#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT 0 +#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC (0x7<<4) +#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC_SHIFT 4 +#define FCOE_KWQE_INIT1_RESERVED1 (0x1<<7) +#define FCOE_KWQE_INIT1_RESERVED1_SHIFT 7 + u8 num_sessions_log; + u16 mtu; +#elif defined(__LITTLE_ENDIAN) + u16 mtu; + u8 num_sessions_log; + u8 flags; +#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE (0xF<<0) +#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT 0 +#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC (0x7<<4) +#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC_SHIFT 4 +#define FCOE_KWQE_INIT1_RESERVED1 (0x1<<7) +#define FCOE_KWQE_INIT1_RESERVED1_SHIFT 7 +#endif +}; + +/* + * FCoE firmware init request 2 + */ +struct fcoe_kwqe_init2 { +#if defined(__BIG_ENDIAN) + struct fcoe_kwqe_header hdr; + u16 reserved0; +#elif defined(__LITTLE_ENDIAN) + u16 reserved0; + struct fcoe_kwqe_header hdr; +#endif + u32 hash_tbl_pbl_addr_lo; + u32 hash_tbl_pbl_addr_hi; + u32 t2_hash_tbl_addr_lo; + u32 t2_hash_tbl_addr_hi; + u32 t2_ptr_hash_tbl_addr_lo; + u32 t2_ptr_hash_tbl_addr_hi; + u32 free_list_count; +}; + +/* + * FCoE firmware init request 3 + */ +struct fcoe_kwqe_init3 { +#if defined(__BIG_ENDIAN) + struct fcoe_kwqe_header hdr; + u16 reserved0; +#elif defined(__LITTLE_ENDIAN) + u16 reserved0; + struct fcoe_kwqe_header hdr; +#endif + u32 error_bit_map_lo; + u32 error_bit_map_hi; +#if defined(__BIG_ENDIAN) + u8 reserved21[3]; + u8 cached_session_enable; +#elif defined(__LITTLE_ENDIAN) + u8 cached_session_enable; + u8 reserved21[3]; +#endif + u32 reserved2[4]; +}; + +/* + * FCoE connection offload request 1 + */ +struct fcoe_kwqe_conn_offload1 { +#if defined(__BIG_ENDIAN) + struct fcoe_kwqe_header hdr; + u16 fcoe_conn_id; +#elif defined(__LITTLE_ENDIAN) + u16 fcoe_conn_id; + struct fcoe_kwqe_header hdr; +#endif + u32 sq_addr_lo; + u32 sq_addr_hi; + u32 rq_pbl_addr_lo; + u32 rq_pbl_addr_hi; + u32 rq_first_pbe_addr_lo; + u32 rq_first_pbe_addr_hi; +#if defined(__BIG_ENDIAN) + u16 reserved0; + u16 rq_prod; +#elif 
defined(__LITTLE_ENDIAN) + u16 rq_prod; + u16 reserved0; +#endif +}; + +/* + * FCoE connection offload request 2 + */ +struct fcoe_kwqe_conn_offload2 { +#if defined(__BIG_ENDIAN) + struct fcoe_kwqe_header hdr; + u16 tx_max_fc_pay_len; +#elif defined(__LITTLE_ENDIAN) + u16 tx_max_fc_pay_len; + struct fcoe_kwqe_header hdr; +#endif + u32 cq_addr_lo; + u32 cq_addr_hi; + u32 xferq_addr_lo; + u32 xferq_addr_hi; + u32 conn_db_addr_lo; + u32 conn_db_addr_hi; + u32 reserved1; +}; + +/* + * FCoE connection offload request 3 + */ +struct fcoe_kwqe_conn_offload3 { +#if defined(__BIG_ENDIAN) + struct fcoe_kwqe_header hdr; + u16 vlan_tag; +#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID (0xFFF<<0) +#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT 0 +#define FCOE_KWQE_CONN_OFFLOAD3_CFI (0x1<<12) +#define FCOE_KWQE_CONN_OFFLOAD3_CFI_SHIFT 12 +#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY (0x7<<13) +#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT 13 +#elif defined(__LITTLE_ENDIAN) + u16 vlan_tag; +#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID (0xFFF<<0) +#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT 0 +#define FCOE_KWQE_CONN_OFFLOAD3_CFI (0x1<<12) +#define FCOE_KWQE_CONN_OFFLOAD3_CFI_SHIFT 12 +#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY (0x7<<13) +#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT 13 + struct fcoe_kwqe_header hdr; +#endif +#if defined(__BIG_ENDIAN) + u8 tx_max_conc_seqs_c3; + u8 s_id[3]; +#elif defined(__LITTLE_ENDIAN) + u8 s_id[3]; + u8 tx_max_conc_seqs_c3; +#endif +#if defined(__BIG_ENDIAN) + u8 flags; +#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS (0x1<<0) +#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT 0 +#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES (0x1<<1) +#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT 1 +#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT (0x1<<2) +#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT 2 +#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ (0x1<<3) +#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ_SHIFT 3 +#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID (0x1<<4) +#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID_SHIFT 4 +#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID (0x1<<5) +#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID_SHIFT 5 +#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0 (0x1<<6) +#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0_SHIFT 6 +#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG (0x1<<7) +#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT 7 + u8 d_id[3]; +#elif defined(__LITTLE_ENDIAN) + u8 d_id[3]; + u8 flags; +#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS (0x1<<0) +#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT 0 +#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES (0x1<<1) +#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT 1 +#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT (0x1<<2) +#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT 2 +#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ (0x1<<3) +#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ_SHIFT 3 +#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID (0x1<<4) +#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID_SHIFT 4 +#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID (0x1<<5) +#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID_SHIFT 5 +#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0 (0x1<<6) +#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0_SHIFT 6 +#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG (0x1<<7) +#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT 7 +#endif + u32 reserved; + u32 confq_first_pbe_addr_lo; + u32 confq_first_pbe_addr_hi; +#if defined(__BIG_ENDIAN) + u16 rx_max_fc_pay_len; + u16 tx_total_conc_seqs; +#elif defined(__LITTLE_ENDIAN) + u16 tx_total_conc_seqs; + u16 
rx_max_fc_pay_len; +#endif +#if defined(__BIG_ENDIAN) + u8 rx_open_seqs_exch_c3; + u8 rx_max_conc_seqs_c3; + u16 rx_total_conc_seqs; +#elif defined(__LITTLE_ENDIAN) + u16 rx_total_conc_seqs; + u8 rx_max_conc_seqs_c3; + u8 rx_open_seqs_exch_c3; +#endif +}; + +/* + * FCoE connection offload request 4 + */ +struct fcoe_kwqe_conn_offload4 { +#if defined(__BIG_ENDIAN) + struct fcoe_kwqe_header hdr; + u8 reserved2; + u8 e_d_tov_timer_val; +#elif defined(__LITTLE_ENDIAN) + u8 e_d_tov_timer_val; + u8 reserved2; + struct fcoe_kwqe_header hdr; +#endif + u8 src_mac_addr_lo32[4]; +#if defined(__BIG_ENDIAN) + u8 dst_mac_addr_hi16[2]; + u8 src_mac_addr_hi16[2]; +#elif defined(__LITTLE_ENDIAN) + u8 src_mac_addr_hi16[2]; + u8 dst_mac_addr_hi16[2]; +#endif + u8 dst_mac_addr_lo32[4]; + u32 lcq_addr_lo; + u32 lcq_addr_hi; + u32 confq_pbl_base_addr_lo; + u32 confq_pbl_base_addr_hi; +}; + +/* + * FCoE connection enable request + */ +struct fcoe_kwqe_conn_enable_disable { +#if defined(__BIG_ENDIAN) + struct fcoe_kwqe_header hdr; + u16 reserved0; +#elif defined(__LITTLE_ENDIAN) + u16 reserved0; + struct fcoe_kwqe_header hdr; +#endif + u8 src_mac_addr_lo32[4]; +#if defined(__BIG_ENDIAN) + u16 vlan_tag; +#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID (0xFFF<<0) +#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT 0 +#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI (0x1<<12) +#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI_SHIFT 12 +#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY (0x7<<13) +#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT 13 + u8 src_mac_addr_hi16[2]; +#elif defined(__LITTLE_ENDIAN) + u8 src_mac_addr_hi16[2]; + u16 vlan_tag; +#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID (0xFFF<<0) +#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT 0 +#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI (0x1<<12) +#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI_SHIFT 12 +#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY (0x7<<13) +#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT 13 +#endif + u8 dst_mac_addr_lo32[4]; +#if defined(__BIG_ENDIAN) + u16 reserved1; + u8 dst_mac_addr_hi16[2]; +#elif defined(__LITTLE_ENDIAN) + u8 dst_mac_addr_hi16[2]; + u16 reserved1; +#endif +#if defined(__BIG_ENDIAN) + u8 vlan_flag; + u8 s_id[3]; +#elif defined(__LITTLE_ENDIAN) + u8 s_id[3]; + u8 vlan_flag; +#endif +#if defined(__BIG_ENDIAN) + u8 reserved3; + u8 d_id[3]; +#elif defined(__LITTLE_ENDIAN) + u8 d_id[3]; + u8 reserved3; +#endif + u32 context_id; + u32 conn_id; + u32 reserved4; +}; + +/* + * FCoE connection destroy request + */ +struct fcoe_kwqe_conn_destroy { +#if defined(__BIG_ENDIAN) + struct fcoe_kwqe_header hdr; + u16 reserved0; +#elif defined(__LITTLE_ENDIAN) + u16 reserved0; + struct fcoe_kwqe_header hdr; +#endif + u32 context_id; + u32 conn_id; + u32 reserved1[5]; +}; + +/* + * FCoE destroy request + */ +struct fcoe_kwqe_destroy { +#if defined(__BIG_ENDIAN) + struct fcoe_kwqe_header hdr; + u16 reserved0; +#elif defined(__LITTLE_ENDIAN) + u16 reserved0; + struct fcoe_kwqe_header hdr; +#endif + u32 reserved1[7]; +}; + +/* + * FCoE statistics request + */ +struct fcoe_kwqe_stat { +#if defined(__BIG_ENDIAN) + struct fcoe_kwqe_header hdr; + u16 reserved0; +#elif defined(__LITTLE_ENDIAN) + u16 reserved0; + struct fcoe_kwqe_header hdr; +#endif + u32 stat_params_addr_lo; + u32 stat_params_addr_hi; + u32 reserved1[5]; +}; + +/* + * FCoE KWQ WQE + */ +union fcoe_kwqe { + struct fcoe_kwqe_init1 init1; + struct fcoe_kwqe_init2 init2; + struct fcoe_kwqe_init3 init3; + struct fcoe_kwqe_conn_offload1 conn_offload1; + struct fcoe_kwqe_conn_offload2
conn_offload2; + struct fcoe_kwqe_conn_offload3 conn_offload3; + struct fcoe_kwqe_conn_offload4 conn_offload4; + struct fcoe_kwqe_conn_enable_disable conn_enable_disable; + struct fcoe_kwqe_conn_destroy conn_destroy; + struct fcoe_kwqe_destroy destroy; + struct fcoe_kwqe_stat statistics; +}; + +struct fcoe_mul_sges_ctx { + struct regpair cur_sge_addr; +#if defined(__BIG_ENDIAN) + u8 sgl_size; + u8 cur_sge_idx; + u16 cur_sge_off; +#elif defined(__LITTLE_ENDIAN) + u16 cur_sge_off; + u8 cur_sge_idx; + u8 sgl_size; +#endif +}; + +struct fcoe_s_stat_ctx { + u8 flags; +#define FCOE_S_STAT_CTX_ACTIVE (0x1<<0) +#define FCOE_S_STAT_CTX_ACTIVE_SHIFT 0 +#define FCOE_S_STAT_CTX_ACK_ABORT_SEQ_COND (0x1<<1) +#define FCOE_S_STAT_CTX_ACK_ABORT_SEQ_COND_SHIFT 1 +#define FCOE_S_STAT_CTX_ABTS_PERFORMED (0x1<<2) +#define FCOE_S_STAT_CTX_ABTS_PERFORMED_SHIFT 2 +#define FCOE_S_STAT_CTX_SEQ_TIMEOUT (0x1<<3) +#define FCOE_S_STAT_CTX_SEQ_TIMEOUT_SHIFT 3 +#define FCOE_S_STAT_CTX_P_RJT (0x1<<4) +#define FCOE_S_STAT_CTX_P_RJT_SHIFT 4 +#define FCOE_S_STAT_CTX_ACK_EOFT (0x1<<5) +#define FCOE_S_STAT_CTX_ACK_EOFT_SHIFT 5 +#define FCOE_S_STAT_CTX_RSRV1 (0x3<<6) +#define FCOE_S_STAT_CTX_RSRV1_SHIFT 6 +}; + +struct fcoe_seq_ctx { +#if defined(__BIG_ENDIAN) + u16 low_seq_cnt; + struct fcoe_s_stat_ctx s_stat; + u8 seq_id; +#elif defined(__LITTLE_ENDIAN) + u8 seq_id; + struct fcoe_s_stat_ctx s_stat; + u16 low_seq_cnt; +#endif +#if defined(__BIG_ENDIAN) + u16 err_seq_cnt; + u16 high_seq_cnt; +#elif defined(__LITTLE_ENDIAN) + u16 high_seq_cnt; + u16 err_seq_cnt; +#endif + u32 low_exp_ro; + u32 high_exp_ro; +}; + + +struct fcoe_single_sge_ctx { + struct regpair cur_buf_addr; +#if defined(__BIG_ENDIAN) + u16 reserved0; + u16 cur_buf_rem; +#elif defined(__LITTLE_ENDIAN) + u16 cur_buf_rem; + u16 reserved0; +#endif +}; + +union fcoe_sgl_ctx { + struct fcoe_single_sge_ctx single_sge; + struct fcoe_mul_sges_ctx mul_sges; +}; + + + +/* + * FCoE SQ element + */ +struct fcoe_sqe { + u16 wqe; +#define FCOE_SQE_TASK_ID (0x7FFF<<0) +#define FCOE_SQE_TASK_ID_SHIFT 0 +#define FCOE_SQE_TOGGLE_BIT (0x1<<15) +#define FCOE_SQE_TOGGLE_BIT_SHIFT 15 +}; + + + +struct fcoe_task_ctx_entry_tx_only { + union fcoe_sgl_ctx sgl_ctx; +}; + +struct fcoe_task_ctx_entry_txwr_rxrd { +#if defined(__BIG_ENDIAN) + u16 verify_tx_seq; + u8 init_flags; +#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE (0x7<<0) +#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT 0 +#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE (0x1<<3) +#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT 3 +#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE (0x1<<4) +#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT 4 +#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE (0x1<<5) +#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE_SHIFT 5 +#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV5 (0x3<<6) +#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV5_SHIFT 6 + u8 tx_flags; +#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE (0xF<<0) +#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT 0 +#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV4 (0xF<<4) +#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV4_SHIFT 4 +#elif defined(__LITTLE_ENDIAN) + u8 tx_flags; +#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE (0xF<<0) +#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT 0 +#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV4 (0xF<<4) +#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV4_SHIFT 4 + u8 init_flags; +#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE (0x7<<0) +#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT 0 +#define 
FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE (0x1<<3) +#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT 3 +#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE (0x1<<4) +#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT 4 +#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE (0x1<<5) +#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE_SHIFT 5 +#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV5 (0x3<<6) +#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV5_SHIFT 6 + u16 verify_tx_seq; +#endif +}; + +/* + * Common section. Both TX and RX processing might write and read from it in + * different flows + */ +struct fcoe_task_ctx_entry_tx_rx_cmn { + u32 data_2_trns; + union fcoe_general_task_ctx general; +#if defined(__BIG_ENDIAN) + u16 tx_low_seq_cnt; + struct fcoe_s_stat_ctx tx_s_stat; + u8 tx_seq_id; +#elif defined(__LITTLE_ENDIAN) + u8 tx_seq_id; + struct fcoe_s_stat_ctx tx_s_stat; + u16 tx_low_seq_cnt; +#endif + u32 common_flags; +#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID (0xFFFFFF<<0) +#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT 0 +#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID (0x1<<24) +#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID_SHIFT 24 +#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_SEQ_INIT (0x1<<25) +#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_SEQ_INIT_SHIFT 25 +#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_PEND_XFER (0x1<<26) +#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_PEND_XFER_SHIFT 26 +#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_PEND_CONF (0x1<<27) +#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_PEND_CONF_SHIFT 27 +#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME (0x1<<28) +#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME_SHIFT 28 +#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_RSRV (0x7<<29) +#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_RSRV_SHIFT 29 +}; + +struct fcoe_task_ctx_entry_rxwr_txrd { +#if defined(__BIG_ENDIAN) + u16 rx_id; + u16 rx_flags; +#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE (0xF<<0) +#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE_SHIFT 0 +#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE (0x7<<4) +#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE_SHIFT 4 +#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_CONF_REQ (0x1<<7) +#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_CONF_REQ_SHIFT 7 +#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_MISS_FRAME (0x1<<8) +#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_MISS_FRAME_SHIFT 8 +#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RESERVED0 (0x7F<<9) +#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RESERVED0_SHIFT 9 +#elif defined(__LITTLE_ENDIAN) + u16 rx_flags; +#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE (0xF<<0) +#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE_SHIFT 0 +#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE (0x7<<4) +#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE_SHIFT 4 +#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_CONF_REQ (0x1<<7) +#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_CONF_REQ_SHIFT 7 +#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_MISS_FRAME (0x1<<8) +#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_MISS_FRAME_SHIFT 8 +#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RESERVED0 (0x7F<<9) +#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RESERVED0_SHIFT 9 + u16 rx_id; +#endif +}; + +struct fcoe_task_ctx_entry_rx_only { + struct fcoe_seq_ctx seq_ctx; + struct fcoe_seq_ctx ooo_seq_ctx; + u32 rsrv3; + union fcoe_sgl_ctx sgl_ctx; +}; + +struct fcoe_task_ctx_entry { + struct fcoe_task_ctx_entry_tx_only tx_wr_only; + struct fcoe_task_ctx_entry_txwr_rxrd tx_wr_rx_rd; + struct fcoe_task_ctx_entry_tx_rx_cmn cmn; + struct fcoe_task_ctx_entry_rxwr_txrd rx_wr_tx_rd; + struct fcoe_task_ctx_entry_rx_only rx_wr_only; + u32 reserved[4]; +}; + + +/* + * FCoE XFRQ element + */ +struct 
fcoe_xfrqe { + u16 wqe; +#define FCOE_XFRQE_TASK_ID (0x7FFF<<0) +#define FCOE_XFRQE_TASK_ID_SHIFT 0 +#define FCOE_XFRQE_TOGGLE_BIT (0x1<<15) +#define FCOE_XFRQE_TOGGLE_BIT_SHIFT 15 +}; + + +/* + * FCoE CONFQ element + */ +struct fcoe_confqe { +#if defined(__BIG_ENDIAN) + u16 rx_id; + u16 ox_id; +#elif defined(__LITTLE_ENDIAN) + u16 ox_id; + u16 rx_id; +#endif + u32 param; +}; + + +/* + * FCoE connection database + */ +struct fcoe_conn_db { +#if defined(__BIG_ENDIAN) + u16 rsrv0; + u16 rq_prod; +#elif defined(__LITTLE_ENDIAN) + u16 rq_prod; + u16 rsrv0; +#endif + u32 rsrv1; + struct regpair cq_arm; +}; + + +/* + * FCoE CQ element + */ +struct fcoe_cqe { + u16 wqe; +#define FCOE_CQE_CQE_INFO (0x3FFF<<0) +#define FCOE_CQE_CQE_INFO_SHIFT 0 +#define FCOE_CQE_CQE_TYPE (0x1<<14) +#define FCOE_CQE_CQE_TYPE_SHIFT 14 +#define FCOE_CQE_TOGGLE_BIT (0x1<<15) +#define FCOE_CQE_TOGGLE_BIT_SHIFT 15 +}; + + +/* + * FCoE error/warning reporting entry + */ +struct fcoe_err_report_entry { + u32 err_warn_bitmap_lo; + u32 err_warn_bitmap_hi; + u32 tx_buf_off; + u32 rx_buf_off; + struct fcoe_fc_hdr fc_hdr; +}; + + +/* + * FCoE hash table entry (32 bytes) + */ +struct fcoe_hash_table_entry { +#if defined(__BIG_ENDIAN) + u8 d_id_0; + u8 s_id_2; + u8 s_id_1; + u8 s_id_0; +#elif defined(__LITTLE_ENDIAN) + u8 s_id_0; + u8 s_id_1; + u8 s_id_2; + u8 d_id_0; +#endif +#if defined(__BIG_ENDIAN) + u16 dst_mac_addr_hi; + u8 d_id_2; + u8 d_id_1; +#elif defined(__LITTLE_ENDIAN) + u8 d_id_1; + u8 d_id_2; + u16 dst_mac_addr_hi; +#endif + u32 dst_mac_addr_lo; +#if defined(__BIG_ENDIAN) + u16 vlan_id; + u16 src_mac_addr_hi; +#elif defined(__LITTLE_ENDIAN) + u16 src_mac_addr_hi; + u16 vlan_id; +#endif + u32 src_mac_addr_lo; +#if defined(__BIG_ENDIAN) + u16 reserved1; + u8 reserved0; + u8 vlan_flag; +#elif defined(__LITTLE_ENDIAN) + u8 vlan_flag; + u8 reserved0; + u16 reserved1; +#endif + u32 reserved2; + u32 field_id; +#define FCOE_HASH_TABLE_ENTRY_CID (0xFFFFFF<<0) +#define FCOE_HASH_TABLE_ENTRY_CID_SHIFT 0 +#define FCOE_HASH_TABLE_ENTRY_RESERVED3 (0x7F<<24) +#define FCOE_HASH_TABLE_ENTRY_RESERVED3_SHIFT 24 +#define FCOE_HASH_TABLE_ENTRY_VALID (0x1<<31) +#define FCOE_HASH_TABLE_ENTRY_VALID_SHIFT 31 +}; + +/* + * FCoE pending work request CQE + */ +struct fcoe_pend_wq_cqe { + u16 wqe; +#define FCOE_PEND_WQ_CQE_TASK_ID (0x3FFF<<0) +#define FCOE_PEND_WQ_CQE_TASK_ID_SHIFT 0 +#define FCOE_PEND_WQ_CQE_CQE_TYPE (0x1<<14) +#define FCOE_PEND_WQ_CQE_CQE_TYPE_SHIFT 14 +#define FCOE_PEND_WQ_CQE_TOGGLE_BIT (0x1<<15) +#define FCOE_PEND_WQ_CQE_TOGGLE_BIT_SHIFT 15 +}; + + +/* + * FCoE RX statistics parameters section#0 + */ +struct fcoe_rx_stat_params_section0 { + u32 fcoe_ver_cnt; + u32 fcoe_rx_pkt_cnt; + u32 fcoe_rx_byte_cnt; + u32 fcoe_rx_drop_pkt_cnt; +}; + + +/* + * FCoE RX statistics parameters section#1 + */ +struct fcoe_rx_stat_params_section1 { + u32 fc_crc_cnt; + u32 eofa_del_cnt; + u32 miss_frame_cnt; + u32 seq_timeout_cnt; + u32 drop_seq_cnt; + u32 fcoe_rx_drop_pkt_cnt; + u32 fcp_rx_pkt_cnt; + u32 reserved0; +}; + + +/* + * FCoE TX statistics parameters + */ +struct fcoe_tx_stat_params { + u32 fcoe_tx_pkt_cnt; + u32 fcoe_tx_byte_cnt; + u32 fcp_tx_pkt_cnt; + u32 reserved0; +}; + +/* + * FCoE statistics parameters + */ +struct fcoe_statistics_params { + struct fcoe_tx_stat_params tx_stat; + struct fcoe_rx_stat_params_section0 rx_stat0; + struct fcoe_rx_stat_params_section1 rx_stat1; +}; + + +/* + * FCoE t2 hash table entry (64 bytes) + */ +struct fcoe_t2_hash_table_entry { + struct fcoe_hash_table_entry data; + struct regpair next; +
struct regpair reserved0[3]; +}; + +/* + * FCoE unsolicited CQE + */ +struct fcoe_unsolicited_cqe { + u16 wqe; +#define FCOE_UNSOLICITED_CQE_SUBTYPE (0x3<<0) +#define FCOE_UNSOLICITED_CQE_SUBTYPE_SHIFT 0 +#define FCOE_UNSOLICITED_CQE_PKT_LEN (0xFFF<<2) +#define FCOE_UNSOLICITED_CQE_PKT_LEN_SHIFT 2 +#define FCOE_UNSOLICITED_CQE_CQE_TYPE (0x1<<14) +#define FCOE_UNSOLICITED_CQE_CQE_TYPE_SHIFT 14 +#define FCOE_UNSOLICITED_CQE_TOGGLE_BIT (0x1<<15) +#define FCOE_UNSOLICITED_CQE_TOGGLE_BIT_SHIFT 15 +}; + + + +#endif /* __57XX_FCOE_HSI_LINUX_LE__ */ diff --git a/drivers/scsi/bnx2fc/Kconfig b/drivers/scsi/bnx2fc/Kconfig new file mode 100644 index 000000000000..6a38080e35ed --- /dev/null +++ b/drivers/scsi/bnx2fc/Kconfig @@ -0,0 +1,11 @@ +config SCSI_BNX2X_FCOE + tristate "Broadcom NetXtreme II FCoE support" + depends on PCI + select NETDEVICES + select NETDEV_1000 + select LIBFC + select LIBFCOE + select CNIC + ---help--- + This driver supports FCoE offload for the Broadcom NetXtreme II + devices. diff --git a/drivers/scsi/bnx2fc/Makefile b/drivers/scsi/bnx2fc/Makefile new file mode 100644 index 000000000000..a92695a25176 --- /dev/null +++ b/drivers/scsi/bnx2fc/Makefile @@ -0,0 +1,3 @@ +obj-$(CONFIG_SCSI_BNX2X_FCOE) += bnx2fc.o + +bnx2fc-y := bnx2fc_els.o bnx2fc_fcoe.o bnx2fc_hwi.o bnx2fc_io.o bnx2fc_tgt.o diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h new file mode 100644 index 000000000000..df2fc09ba479 --- /dev/null +++ b/drivers/scsi/bnx2fc/bnx2fc.h @@ -0,0 +1,511 @@ +#ifndef _BNX2FC_H_ +#define _BNX2FC_H_ +/* bnx2fc.h: Broadcom NetXtreme II Linux FCoE offload driver. + * + * Copyright (c) 2008 - 2010 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ * + * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com) + */ + +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/kernel.h> +#include <linux/skbuff.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/if_ether.h> +#include <linux/if_vlan.h> +#include <linux/kthread.h> +#include <linux/crc32.h> +#include <linux/cpu.h> +#include <linux/types.h> +#include <linux/list.h> +#include <linux/delay.h> +#include <linux/timer.h> +#include <linux/errno.h> +#include <linux/pci.h> +#include <linux/init.h> +#include <linux/dma-mapping.h> +#include <linux/workqueue.h> +#include <linux/mutex.h> +#include <linux/spinlock.h> +#include <linux/bitops.h> +#include <linux/log2.h> +#include <linux/interrupt.h> +#include <linux/sched.h> +#include <linux/io.h> + +#include <scsi/scsi.h> +#include <scsi/scsi_host.h> +#include <scsi/scsi_device.h> +#include <scsi/scsi_cmnd.h> +#include <scsi/scsi_eh.h> +#include <scsi/scsi_tcq.h> +#include <scsi/libfc.h> +#include <scsi/libfcoe.h> +#include <scsi/fc_encode.h> +#include <scsi/scsi_transport.h> +#include <scsi/scsi_transport_fc.h> +#include <scsi/fc/fc_fip.h> +#include <scsi/fc/fc_fc2.h> +#include <scsi/fc_frame.h> +#include <scsi/fc/fc_fcoe.h> +#include <scsi/fc/fc_fcp.h> + +#include "57xx_hsi_bnx2fc.h" +#include "bnx2fc_debug.h" +#include "../../net/cnic_if.h" +#include "bnx2fc_constants.h" + +#define BNX2FC_NAME "bnx2fc" +#define BNX2FC_VERSION "1.0.0" + +#define PFX "bnx2fc: " + +#define BNX2X_DOORBELL_PCI_BAR 2 + +#define BNX2FC_MAX_BD_LEN 0xffff +#define BNX2FC_BD_SPLIT_SZ 0x8000 +#define BNX2FC_MAX_BDS_PER_CMD 256 + +#define BNX2FC_SQ_WQES_MAX 256 + +#define BNX2FC_SCSI_MAX_SQES ((3 * BNX2FC_SQ_WQES_MAX) / 8) +#define BNX2FC_TM_MAX_SQES ((BNX2FC_SQ_WQES_MAX) / 2) +#define BNX2FC_ELS_MAX_SQES (BNX2FC_TM_MAX_SQES - 1) + +#define BNX2FC_RQ_WQES_MAX 16 +#define BNX2FC_CQ_WQES_MAX (BNX2FC_SQ_WQES_MAX + BNX2FC_RQ_WQES_MAX) + +#define BNX2FC_NUM_MAX_SESS 128 +#define BNX2FC_NUM_MAX_SESS_LOG (ilog2(BNX2FC_NUM_MAX_SESS)) + +#define BNX2FC_MAX_OUTSTANDING_CMNDS 4096 +#define BNX2FC_MIN_PAYLOAD 256 +#define BNX2FC_MAX_PAYLOAD 2048 + +#define BNX2FC_RQ_BUF_SZ 256 +#define BNX2FC_RQ_BUF_LOG_SZ (ilog2(BNX2FC_RQ_BUF_SZ)) + +#define BNX2FC_SQ_WQE_SIZE (sizeof(struct fcoe_sqe)) +#define BNX2FC_CQ_WQE_SIZE (sizeof(struct fcoe_cqe)) +#define BNX2FC_RQ_WQE_SIZE (BNX2FC_RQ_BUF_SZ) +#define BNX2FC_XFERQ_WQE_SIZE (sizeof(struct fcoe_xfrqe)) +#define BNX2FC_CONFQ_WQE_SIZE (sizeof(struct fcoe_confqe)) +#define BNX2FC_5771X_DB_PAGE_SIZE 128 + +#define BNX2FC_MAX_TASKS BNX2FC_MAX_OUTSTANDING_CMNDS +#define BNX2FC_TASK_SIZE 128 +#define BNX2FC_TASKS_PER_PAGE (PAGE_SIZE/BNX2FC_TASK_SIZE) +#define BNX2FC_TASK_CTX_ARR_SZ (BNX2FC_MAX_TASKS/BNX2FC_TASKS_PER_PAGE) + +#define BNX2FC_MAX_ROWS_IN_HASH_TBL 8 +#define BNX2FC_HASH_TBL_CHUNK_SIZE (16 * 1024) + +#define BNX2FC_MAX_SEQS 255 + +#define BNX2FC_READ (1 << 1) +#define BNX2FC_WRITE (1 << 0) + +#define BNX2FC_MIN_XID 0 +#define BNX2FC_MAX_XID (BNX2FC_MAX_OUTSTANDING_CMNDS - 1) +#define FCOE_MIN_XID (BNX2FC_MAX_OUTSTANDING_CMNDS) +#define FCOE_MAX_XID \ + (BNX2FC_MAX_OUTSTANDING_CMNDS + (nr_cpu_ids * 256)) +#define BNX2FC_MAX_LUN 0xFFFF +#define BNX2FC_MAX_FCP_TGT 256 +#define BNX2FC_MAX_CMD_LEN 16 + +#define BNX2FC_TM_TIMEOUT 60 /* secs */ +#define BNX2FC_IO_TIMEOUT 20000UL /* msecs */ + +#define BNX2FC_WAIT_CNT 120 +#define BNX2FC_FW_TIMEOUT (3 * HZ) + +#define PORT_MAX 2 + +#define CMD_SCSI_STATUS(Cmnd) ((Cmnd)->SCp.Status) + +/* FC FCP Status */ +#define FC_GOOD 0 + +#define 
BNX2FC_RNID_HBA 0x7 + +/* bnx2fc driver uses only one instance of fcoe_percpu_s */ +extern struct fcoe_percpu_s bnx2fc_global; + +extern struct workqueue_struct *bnx2fc_wq; + +struct bnx2fc_percpu_s { + struct task_struct *iothread; + struct list_head work_list; + spinlock_t fp_work_lock; +}; + + +struct bnx2fc_hba { + struct list_head link; + struct cnic_dev *cnic; + struct pci_dev *pcidev; + struct net_device *netdev; + struct net_device *phys_dev; + unsigned long reg_with_cnic; + #define BNX2FC_CNIC_REGISTERED 1 + struct packet_type fcoe_packet_type; + struct packet_type fip_packet_type; + struct bnx2fc_cmd_mgr *cmd_mgr; + struct workqueue_struct *timer_work_queue; + struct kref kref; + spinlock_t hba_lock; + struct mutex hba_mutex; + unsigned long adapter_state; + #define ADAPTER_STATE_UP 0 + #define ADAPTER_STATE_GOING_DOWN 1 + #define ADAPTER_STATE_LINK_DOWN 2 + #define ADAPTER_STATE_READY 3 + u32 flags; + unsigned long init_done; + #define BNX2FC_FW_INIT_DONE 0 + #define BNX2FC_CTLR_INIT_DONE 1 + #define BNX2FC_CREATE_DONE 2 + struct fcoe_ctlr ctlr; + u8 vlan_enabled; + int vlan_id; + u32 next_conn_id; + struct fcoe_task_ctx_entry **task_ctx; + dma_addr_t *task_ctx_dma; + struct regpair *task_ctx_bd_tbl; + dma_addr_t task_ctx_bd_dma; + + int hash_tbl_segment_count; + void **hash_tbl_segments; + void *hash_tbl_pbl; + dma_addr_t hash_tbl_pbl_dma; + struct fcoe_t2_hash_table_entry *t2_hash_tbl; + dma_addr_t t2_hash_tbl_dma; + char *t2_hash_tbl_ptr; + dma_addr_t t2_hash_tbl_ptr_dma; + + char *dummy_buffer; + dma_addr_t dummy_buf_dma; + + struct fcoe_statistics_params *stats_buffer; + dma_addr_t stats_buf_dma; + + /* + * PCI related info. + */ + u16 pci_did; + u16 pci_vid; + u16 pci_sdid; + u16 pci_svid; + u16 pci_func; + u16 pci_devno; + + struct task_struct *l2_thread; + + /* linkdown handling */ + wait_queue_head_t shutdown_wait; + int wait_for_link_down; + + /*destroy handling */ + struct timer_list destroy_timer; + wait_queue_head_t destroy_wait; + + /* Active list of offloaded sessions */ + struct bnx2fc_rport *tgt_ofld_list[BNX2FC_NUM_MAX_SESS]; + int num_ofld_sess; + + /* statistics */ + struct completion stat_req_done; +}; + +#define bnx2fc_from_ctlr(fip) container_of(fip, struct bnx2fc_hba, ctlr) + +struct bnx2fc_cmd_mgr { + struct bnx2fc_hba *hba; + u16 next_idx; + struct list_head *free_list; + spinlock_t *free_list_lock; + struct io_bdt **io_bdt_pool; + struct bnx2fc_cmd **cmds; +}; + +struct bnx2fc_rport { + struct fcoe_port *port; + struct fc_rport *rport; + struct fc_rport_priv *rdata; + void __iomem *ctx_base; +#define DPM_TRIGER_TYPE 0x40 + u32 fcoe_conn_id; + u32 context_id; + u32 sid; + + unsigned long flags; +#define BNX2FC_FLAG_SESSION_READY 0x1 +#define BNX2FC_FLAG_OFFLOADED 0x2 +#define BNX2FC_FLAG_DISABLED 0x3 +#define BNX2FC_FLAG_DESTROYED 0x4 +#define BNX2FC_FLAG_OFLD_REQ_CMPL 0x5 +#define BNX2FC_FLAG_DESTROY_CMPL 0x6 +#define BNX2FC_FLAG_CTX_ALLOC_FAILURE 0x7 +#define BNX2FC_FLAG_UPLD_REQ_COMPL 0x8 +#define BNX2FC_FLAG_EXPL_LOGO 0x9 + + u32 max_sqes; + u32 max_rqes; + u32 max_cqes; + + struct fcoe_sqe *sq; + dma_addr_t sq_dma; + u16 sq_prod_idx; + u8 sq_curr_toggle_bit; + u32 sq_mem_size; + + struct fcoe_cqe *cq; + dma_addr_t cq_dma; + u32 cq_cons_idx; + u8 cq_curr_toggle_bit; + u32 cq_mem_size; + + void *rq; + dma_addr_t rq_dma; + u32 rq_prod_idx; + u32 rq_cons_idx; + u32 rq_mem_size; + + void *rq_pbl; + dma_addr_t rq_pbl_dma; + u32 rq_pbl_size; + + struct fcoe_xfrqe *xferq; + dma_addr_t xferq_dma; + u32 xferq_mem_size; + + struct fcoe_confqe *confq; + 
dma_addr_t confq_dma; + u32 confq_mem_size; + + void *confq_pbl; + dma_addr_t confq_pbl_dma; + u32 confq_pbl_size; + + struct fcoe_conn_db *conn_db; + dma_addr_t conn_db_dma; + u32 conn_db_mem_size; + + struct fcoe_sqe *lcq; + dma_addr_t lcq_dma; + u32 lcq_mem_size; + + void *ofld_req[4]; + dma_addr_t ofld_req_dma[4]; + void *enbl_req; + dma_addr_t enbl_req_dma; + + spinlock_t tgt_lock; + spinlock_t cq_lock; + atomic_t num_active_ios; + u32 flush_in_prog; + unsigned long work_time_slice; + unsigned long timestamp; + struct list_head free_task_list; + struct bnx2fc_cmd *pending_queue[BNX2FC_SQ_WQES_MAX+1]; + atomic_t pi; + atomic_t ci; + struct list_head active_cmd_queue; + struct list_head els_queue; + struct list_head io_retire_queue; + struct list_head active_tm_queue; + + struct timer_list ofld_timer; + wait_queue_head_t ofld_wait; + + struct timer_list upld_timer; + wait_queue_head_t upld_wait; +}; + +struct bnx2fc_mp_req { + u8 tm_flags; + + u32 req_len; + void *req_buf; + dma_addr_t req_buf_dma; + struct fcoe_bd_ctx *mp_req_bd; + dma_addr_t mp_req_bd_dma; + struct fc_frame_header req_fc_hdr; + + u32 resp_len; + void *resp_buf; + dma_addr_t resp_buf_dma; + struct fcoe_bd_ctx *mp_resp_bd; + dma_addr_t mp_resp_bd_dma; + struct fc_frame_header resp_fc_hdr; +}; + +struct bnx2fc_els_cb_arg { + struct bnx2fc_cmd *aborted_io_req; + struct bnx2fc_cmd *io_req; + u16 l2_oxid; +}; + +/* bnx2fc command structure */ +struct bnx2fc_cmd { + struct list_head link; + u8 on_active_queue; + u8 on_tmf_queue; + u8 cmd_type; +#define BNX2FC_SCSI_CMD 1 +#define BNX2FC_TASK_MGMT_CMD 2 +#define BNX2FC_ABTS 3 +#define BNX2FC_ELS 4 +#define BNX2FC_CLEANUP 5 + u8 io_req_flags; + struct kref refcount; + struct fcoe_port *port; + struct bnx2fc_rport *tgt; + struct scsi_cmnd *sc_cmd; + struct bnx2fc_cmd_mgr *cmd_mgr; + struct bnx2fc_mp_req mp_req; + void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg); + struct bnx2fc_els_cb_arg *cb_arg; + struct delayed_work timeout_work; /* timer for ULP timeouts */ + struct completion tm_done; + int wait_for_comp; + u16 xid; + struct fcoe_task_ctx_entry *task; + struct io_bdt *bd_tbl; + struct fcp_rsp *rsp; + size_t data_xfer_len; + unsigned long req_flags; +#define BNX2FC_FLAG_ISSUE_RRQ 0x1 +#define BNX2FC_FLAG_ISSUE_ABTS 0x2 +#define BNX2FC_FLAG_ABTS_DONE 0x3 +#define BNX2FC_FLAG_TM_COMPL 0x4 +#define BNX2FC_FLAG_TM_TIMEOUT 0x5 +#define BNX2FC_FLAG_IO_CLEANUP 0x6 +#define BNX2FC_FLAG_RETIRE_OXID 0x7 +#define BNX2FC_FLAG_EH_ABORT 0x8 +#define BNX2FC_FLAG_IO_COMPL 0x9 +#define BNX2FC_FLAG_ELS_DONE 0xa +#define BNX2FC_FLAG_ELS_TIMEOUT 0xb + u32 fcp_resid; + u32 fcp_rsp_len; + u32 fcp_sns_len; + u8 cdb_status; /* SCSI IO status */ + u8 fcp_status; /* FCP IO status */ + u8 fcp_rsp_code; + u8 scsi_comp_flags; +}; + +struct io_bdt { + struct bnx2fc_cmd *io_req; + struct fcoe_bd_ctx *bd_tbl; + dma_addr_t bd_tbl_dma; + u16 bd_valid; +}; + +struct bnx2fc_work { + struct list_head list; + struct bnx2fc_rport *tgt; + u16 wqe; +}; +struct bnx2fc_unsol_els { + struct fc_lport *lport; + struct fc_frame *fp; + struct work_struct unsol_els_work; +}; + + + +struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type); +void bnx2fc_cmd_release(struct kref *ref); +int bnx2fc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd); +int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba); +int bnx2fc_send_fw_fcoe_destroy_msg(struct bnx2fc_hba *hba); +int bnx2fc_send_session_ofld_req(struct fcoe_port *port, + struct bnx2fc_rport *tgt); +int bnx2fc_send_session_disable_req(struct 
fcoe_port *port, + struct bnx2fc_rport *tgt); +int bnx2fc_send_session_destroy_req(struct bnx2fc_hba *hba, + struct bnx2fc_rport *tgt); +int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt); +void bnx2fc_indicate_kcqe(void *context, struct kcqe *kcq[], + u32 num_cqe); +int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba); +void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba); +int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba); +void bnx2fc_free_fw_resc(struct bnx2fc_hba *hba); +struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba, + u16 min_xid, u16 max_xid); +void bnx2fc_cmd_mgr_free(struct bnx2fc_cmd_mgr *cmgr); +void bnx2fc_get_link_state(struct bnx2fc_hba *hba); +char *bnx2fc_get_next_rqe(struct bnx2fc_rport *tgt, u8 num_items); +void bnx2fc_return_rqe(struct bnx2fc_rport *tgt, u8 num_items); +int bnx2fc_get_paged_crc_eof(struct sk_buff *skb, int tlen); +int bnx2fc_send_rrq(struct bnx2fc_cmd *aborted_io_req); +int bnx2fc_send_adisc(struct bnx2fc_rport *tgt, struct fc_frame *fp); +int bnx2fc_send_logo(struct bnx2fc_rport *tgt, struct fc_frame *fp); +int bnx2fc_send_rls(struct bnx2fc_rport *tgt, struct fc_frame *fp); +int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req); +int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req); +void bnx2fc_cmd_timer_set(struct bnx2fc_cmd *io_req, + unsigned int timer_msec); +int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req); +void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req, + struct fcoe_task_ctx_entry *task, + u16 orig_xid); +void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req, + struct fcoe_task_ctx_entry *task); +void bnx2fc_init_task(struct bnx2fc_cmd *io_req, + struct fcoe_task_ctx_entry *task); +void bnx2fc_add_2_sq(struct bnx2fc_rport *tgt, u16 xid); +void bnx2fc_ring_doorbell(struct bnx2fc_rport *tgt); +int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd); +int bnx2fc_eh_host_reset(struct scsi_cmnd *sc_cmd); +int bnx2fc_eh_target_reset(struct scsi_cmnd *sc_cmd); +int bnx2fc_eh_device_reset(struct scsi_cmnd *sc_cmd); +void bnx2fc_rport_event_handler(struct fc_lport *lport, + struct fc_rport_priv *rport, + enum fc_rport_event event); +void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req, + struct fcoe_task_ctx_entry *task, + u8 num_rq); +void bnx2fc_process_cleanup_compl(struct bnx2fc_cmd *io_req, + struct fcoe_task_ctx_entry *task, + u8 num_rq); +void bnx2fc_process_abts_compl(struct bnx2fc_cmd *io_req, + struct fcoe_task_ctx_entry *task, + u8 num_rq); +void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req, + struct fcoe_task_ctx_entry *task, + u8 num_rq); +void bnx2fc_process_els_compl(struct bnx2fc_cmd *els_req, + struct fcoe_task_ctx_entry *task, + u8 num_rq); +void bnx2fc_build_fcp_cmnd(struct bnx2fc_cmd *io_req, + struct fcp_cmnd *fcp_cmnd); + + + +void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt); +struct fc_seq *bnx2fc_elsct_send(struct fc_lport *lport, u32 did, + struct fc_frame *fp, unsigned int op, + void (*resp)(struct fc_seq *, + struct fc_frame *, + void *), + void *arg, u32 timeout); +int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt); +void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe); +struct bnx2fc_rport *bnx2fc_tgt_lookup(struct fcoe_port *port, + u32 port_id); +void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt, + unsigned char *buf, + u32 frame_len, u16 l2_oxid); +int bnx2fc_send_stat_req(struct bnx2fc_hba *hba); + +#endif diff --git a/drivers/scsi/bnx2fc/bnx2fc_constants.h b/drivers/scsi/bnx2fc/bnx2fc_constants.h new file mode 100644 index 000000000000..fe7769173c43 --- /dev/null +++ 
b/drivers/scsi/bnx2fc/bnx2fc_constants.h @@ -0,0 +1,206 @@ +#ifndef __BNX2FC_CONSTANTS_H_ +#define __BNX2FC_CONSTANTS_H_ + +/** + * This file defines HSI constants for the FCoE flows + */ + +/* KWQ/KCQ FCoE layer code */ +#define FCOE_KWQE_LAYER_CODE (7) + +/* KWQ (kernel work queue) request op codes */ +#define FCOE_KWQE_OPCODE_INIT1 (0) +#define FCOE_KWQE_OPCODE_INIT2 (1) +#define FCOE_KWQE_OPCODE_INIT3 (2) +#define FCOE_KWQE_OPCODE_OFFLOAD_CONN1 (3) +#define FCOE_KWQE_OPCODE_OFFLOAD_CONN2 (4) +#define FCOE_KWQE_OPCODE_OFFLOAD_CONN3 (5) +#define FCOE_KWQE_OPCODE_OFFLOAD_CONN4 (6) +#define FCOE_KWQE_OPCODE_ENABLE_CONN (7) +#define FCOE_KWQE_OPCODE_DISABLE_CONN (8) +#define FCOE_KWQE_OPCODE_DESTROY_CONN (9) +#define FCOE_KWQE_OPCODE_DESTROY (10) +#define FCOE_KWQE_OPCODE_STAT (11) + +/* KCQ (kernel completion queue) response op codes */ +#define FCOE_KCQE_OPCODE_INIT_FUNC (0x10) +#define FCOE_KCQE_OPCODE_DESTROY_FUNC (0x11) +#define FCOE_KCQE_OPCODE_STAT_FUNC (0x12) +#define FCOE_KCQE_OPCODE_OFFLOAD_CONN (0x15) +#define FCOE_KCQE_OPCODE_ENABLE_CONN (0x16) +#define FCOE_KCQE_OPCODE_DISABLE_CONN (0x17) +#define FCOE_KCQE_OPCODE_DESTROY_CONN (0x18) +#define FCOE_KCQE_OPCODE_CQ_EVENT_NOTIFICATION (0x20) +#define FCOE_KCQE_OPCODE_FCOE_ERROR (0x21) + +/* KCQ (kernel completion queue) completion status */ +#define FCOE_KCQE_COMPLETION_STATUS_SUCCESS (0x0) +#define FCOE_KCQE_COMPLETION_STATUS_ERROR (0x1) +#define FCOE_KCQE_COMPLETION_STATUS_INVALID_OPCODE (0x2) +#define FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE (0x3) +#define FCOE_KCQE_COMPLETION_STATUS_CTX_FREE_FAILURE (0x4) +#define FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR (0x5) + +/* Unsolicited CQE type */ +#define FCOE_UNSOLICITED_FRAME_CQE_TYPE 0 +#define FCOE_ERROR_DETECTION_CQE_TYPE 1 +#define FCOE_WARNING_DETECTION_CQE_TYPE 2 + +/* Task context constants */ +/* After driver has initialize the task in case timer services required */ +#define FCOE_TASK_TX_STATE_INIT 0 +/* In case timer services are required then shall be updated by Xstorm after + * start processing the task. In case no timer facilities are required then the + * driver would initialize the state to this value */ +#define FCOE_TASK_TX_STATE_NORMAL 1 +/* Task is under abort procedure. Updated in order to stop processing of + * pending WQEs on this task */ +#define FCOE_TASK_TX_STATE_ABORT 2 +/* For E_D_T_TOV timer expiration in Xstorm (Class 2 only) */ +#define FCOE_TASK_TX_STATE_ERROR 3 +/* For REC_TOV timer expiration indication received from Xstorm */ +#define FCOE_TASK_TX_STATE_WARNING 4 +/* For completed unsolicited task */ +#define FCOE_TASK_TX_STATE_UNSOLICITED_COMPLETED 5 +/* For exchange cleanup request task */ +#define FCOE_TASK_TX_STATE_EXCHANGE_CLEANUP 6 +/* For sequence cleanup request task */ +#define FCOE_TASK_TX_STATE_SEQUENCE_CLEANUP 7 +/* Mark task as aborted and indicate that ABTS was not transmitted */ +#define FCOE_TASK_TX_STATE_BEFORE_ABTS_TX 8 +/* Mark task as aborted and indicate that ABTS was transmitted */ +#define FCOE_TASK_TX_STATE_AFTER_ABTS_TX 9 +/* For completion the ABTS task. 
*/
+#define FCOE_TASK_TX_STATE_ABTS_TX_COMPLETED 10
+/* Mark task as aborted and indicate that Exchange cleanup was not transmitted
+ */
+#define FCOE_TASK_TX_STATE_BEFORE_EXCHANGE_CLEANUP_TX 11
+/* Mark task as aborted and indicate that Exchange cleanup was transmitted */
+#define FCOE_TASK_TX_STATE_AFTER_EXCHANGE_CLEANUP_TX 12
+
+#define FCOE_TASK_RX_STATE_NORMAL 0
+#define FCOE_TASK_RX_STATE_COMPLETED 1
+/* Obsolete: Intermediate completion (middle path with local completion) */
+#define FCOE_TASK_RX_STATE_INTER_COMP 2
+/* For REC_TOV timer expiration indication received from Xstorm */
+#define FCOE_TASK_RX_STATE_WARNING 3
+/* For E_D_T_TOV timer expiration in Ustorm */
+#define FCOE_TASK_RX_STATE_ERROR 4
+/* ABTS ACC arrived; wait for local completion to finally complete the task. */
+#define FCOE_TASK_RX_STATE_ABTS_ACC_ARRIVED 5
+/* Local completion arrived; wait for ABTS ACC to finally complete the task. */
+#define FCOE_TASK_RX_STATE_ABTS_LOCAL_COMP_ARRIVED 6
+/* Special completion indication in case the task was aborted. */
+#define FCOE_TASK_RX_STATE_ABTS_COMPLETED 7
+/* Special completion indication in case the task was cleaned. */
+#define FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED 8
+/* Special completion indication (when the task requested the exchange
+ * cleanup) in case the cleaned task is non-valid. */
+#define FCOE_TASK_RX_STATE_ABORT_CLEANUP_COMPLETED 9
+/* Special completion indication (when the task requested the sequence
+ * cleanup) in case the cleaned task was already returned to normal. */
+#define FCOE_TASK_RX_STATE_IGNORED_SEQUENCE_CLEANUP 10
+/* Exchange cleanup arrived; wait until the xfer is handled to finally
+ * complete the task. */
+#define FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_ARRIVED 11
+/* Xfer handled; wait for exchange cleanup to finally complete the task.
*/ +#define FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_HANDLED_XFER 12 + +#define FCOE_TASK_TYPE_WRITE 0 +#define FCOE_TASK_TYPE_READ 1 +#define FCOE_TASK_TYPE_MIDPATH 2 +#define FCOE_TASK_TYPE_UNSOLICITED 3 +#define FCOE_TASK_TYPE_ABTS 4 +#define FCOE_TASK_TYPE_EXCHANGE_CLEANUP 5 +#define FCOE_TASK_TYPE_SEQUENCE_CLEANUP 6 + +#define FCOE_TASK_DEV_TYPE_DISK 0 +#define FCOE_TASK_DEV_TYPE_TAPE 1 + +#define FCOE_TASK_CLASS_TYPE_3 0 +#define FCOE_TASK_CLASS_TYPE_2 1 + +/* Everest FCoE connection type */ +#define B577XX_FCOE_CONNECTION_TYPE 4 + +/* Error codes for Error Reporting in fast path flows */ +/* XFER error codes */ +#define FCOE_ERROR_CODE_XFER_OOO_RO 0 +#define FCOE_ERROR_CODE_XFER_RO_NOT_ALIGNED 1 +#define FCOE_ERROR_CODE_XFER_NULL_BURST_LEN 2 +#define FCOE_ERROR_CODE_XFER_RO_GREATER_THAN_DATA2TRNS 3 +#define FCOE_ERROR_CODE_XFER_INVALID_PAYLOAD_SIZE 4 +#define FCOE_ERROR_CODE_XFER_TASK_TYPE_NOT_WRITE 5 +#define FCOE_ERROR_CODE_XFER_PEND_XFER_SET 6 +#define FCOE_ERROR_CODE_XFER_OPENED_SEQ 7 +#define FCOE_ERROR_CODE_XFER_FCTL 8 + +/* FCP RSP error codes */ +#define FCOE_ERROR_CODE_FCP_RSP_BIDI_FLAGS_SET 9 +#define FCOE_ERROR_CODE_FCP_RSP_UNDERFLOW 10 +#define FCOE_ERROR_CODE_FCP_RSP_OVERFLOW 11 +#define FCOE_ERROR_CODE_FCP_RSP_INVALID_LENGTH_FIELD 12 +#define FCOE_ERROR_CODE_FCP_RSP_INVALID_SNS_FIELD 13 +#define FCOE_ERROR_CODE_FCP_RSP_INVALID_PAYLOAD_SIZE 14 +#define FCOE_ERROR_CODE_FCP_RSP_PEND_XFER_SET 15 +#define FCOE_ERROR_CODE_FCP_RSP_OPENED_SEQ 16 +#define FCOE_ERROR_CODE_FCP_RSP_FCTL 17 +#define FCOE_ERROR_CODE_FCP_RSP_LAST_SEQ_RESET 18 +#define FCOE_ERROR_CODE_FCP_RSP_CONF_REQ_NOT_SUPPORTED_YET 19 + +/* FCP DATA error codes */ +#define FCOE_ERROR_CODE_DATA_OOO_RO 20 +#define FCOE_ERROR_CODE_DATA_EXCEEDS_DEFINED_MAX_FRAME_SIZE 21 +#define FCOE_ERROR_CODE_DATA_EXCEEDS_DATA2TRNS 22 +#define FCOE_ERROR_CODE_DATA_SOFI3_SEQ_ACTIVE_SET 23 +#define FCOE_ERROR_CODE_DATA_SOFN_SEQ_ACTIVE_RESET 24 +#define FCOE_ERROR_CODE_DATA_EOFN_END_SEQ_SET 25 +#define FCOE_ERROR_CODE_DATA_EOFT_END_SEQ_RESET 26 +#define FCOE_ERROR_CODE_DATA_TASK_TYPE_NOT_READ 27 +#define FCOE_ERROR_CODE_DATA_FCTL 28 + +/* Middle path error codes */ +#define FCOE_ERROR_CODE_MIDPATH_TYPE_NOT_ELS 29 +#define FCOE_ERROR_CODE_MIDPATH_SOFI3_SEQ_ACTIVE_SET 30 +#define FCOE_ERROR_CODE_MIDPATH_SOFN_SEQ_ACTIVE_RESET 31 +#define FCOE_ERROR_CODE_MIDPATH_EOFN_END_SEQ_SET 32 +#define FCOE_ERROR_CODE_MIDPATH_EOFT_END_SEQ_RESET 33 +#define FCOE_ERROR_CODE_MIDPATH_ELS_REPLY_FCTL 34 +#define FCOE_ERROR_CODE_MIDPATH_INVALID_REPLY 35 +#define FCOE_ERROR_CODE_MIDPATH_ELS_REPLY_RCTL 36 + +/* ABTS error codes */ +#define FCOE_ERROR_CODE_ABTS_REPLY_F_CTL 37 +#define FCOE_ERROR_CODE_ABTS_REPLY_DDF_RCTL_FIELD 38 +#define FCOE_ERROR_CODE_ABTS_REPLY_INVALID_BLS_RCTL 39 +#define FCOE_ERROR_CODE_ABTS_REPLY_INVALID_RCTL 40 +#define FCOE_ERROR_CODE_ABTS_REPLY_RCTL_GENERAL_MISMATCH 41 + +/* Common error codes */ +#define FCOE_ERROR_CODE_COMMON_MIDDLE_FRAME_WITH_PAD 42 +#define FCOE_ERROR_CODE_COMMON_SEQ_INIT_IN_TCE 43 +#define FCOE_ERROR_CODE_COMMON_FC_HDR_RX_ID_MISMATCH 44 +#define FCOE_ERROR_CODE_COMMON_INCORRECT_SEQ_CNT 45 +#define FCOE_ERROR_CODE_COMMON_DATA_FC_HDR_FCP_TYPE_MISMATCH 46 +#define FCOE_ERROR_CODE_COMMON_DATA_NO_MORE_SGES 47 +#define FCOE_ERROR_CODE_COMMON_OPTIONAL_FC_HDR 48 +#define FCOE_ERROR_CODE_COMMON_READ_TCE_OX_ID_TOO_BIG 49 +#define FCOE_ERROR_CODE_COMMON_DATA_WAS_NOT_TRANSMITTED 50 + +/* Unsolicited Rx error codes */ +#define FCOE_ERROR_CODE_UNSOLICITED_TYPE_NOT_ELS 51 +#define FCOE_ERROR_CODE_UNSOLICITED_TYPE_NOT_BLS 52 +#define 
FCOE_ERROR_CODE_UNSOLICITED_FCTL_ELS 53 +#define FCOE_ERROR_CODE_UNSOLICITED_FCTL_BLS 54 +#define FCOE_ERROR_CODE_UNSOLICITED_R_CTL 55 + +#define FCOE_ERROR_CODE_RW_TASK_DDF_RCTL_INFO_FIELD 56 +#define FCOE_ERROR_CODE_RW_TASK_INVALID_RCTL 57 +#define FCOE_ERROR_CODE_RW_TASK_RCTL_GENERAL_MISMATCH 58 + +/* Timer error codes */ +#define FCOE_ERROR_CODE_E_D_TOV_TIMER_EXPIRATION 60 +#define FCOE_ERROR_CODE_REC_TOV_TIMER_EXPIRATION 61 + + +#endif /* BNX2FC_CONSTANTS_H_ */ diff --git a/drivers/scsi/bnx2fc/bnx2fc_debug.h b/drivers/scsi/bnx2fc/bnx2fc_debug.h new file mode 100644 index 000000000000..7f6aff68cc53 --- /dev/null +++ b/drivers/scsi/bnx2fc/bnx2fc_debug.h @@ -0,0 +1,70 @@ +#ifndef __BNX2FC_DEBUG__ +#define __BNX2FC_DEBUG__ + +/* Log level bit mask */ +#define LOG_IO 0x01 /* scsi cmd error, cleanup */ +#define LOG_TGT 0x02 /* Session setup, cleanup, etc' */ +#define LOG_HBA 0x04 /* lport events, link, mtu, etc' */ +#define LOG_ELS 0x08 /* ELS logs */ +#define LOG_MISC 0x10 /* fcoe L2 frame related logs*/ +#define LOG_ALL 0xff /* LOG all messages */ + +extern unsigned int bnx2fc_debug_level; + +#define BNX2FC_CHK_LOGGING(LEVEL, CMD) \ + do { \ + if (unlikely(bnx2fc_debug_level & LEVEL)) \ + do { \ + CMD; \ + } while (0); \ + } while (0) + +#define BNX2FC_ELS_DBG(fmt, arg...) \ + BNX2FC_CHK_LOGGING(LOG_ELS, \ + printk(KERN_ALERT PFX fmt, ##arg)) + +#define BNX2FC_MISC_DBG(fmt, arg...) \ + BNX2FC_CHK_LOGGING(LOG_MISC, \ + printk(KERN_ALERT PFX fmt, ##arg)) + +#define BNX2FC_IO_DBG(io_req, fmt, arg...) \ + do { \ + if (!io_req || !io_req->port || !io_req->port->lport || \ + !io_req->port->lport->host) \ + BNX2FC_CHK_LOGGING(LOG_IO, \ + printk(KERN_ALERT PFX "NULL " fmt, ##arg)); \ + else \ + BNX2FC_CHK_LOGGING(LOG_IO, \ + shost_printk(KERN_ALERT, \ + (io_req)->port->lport->host, \ + PFX "xid:0x%x " fmt, \ + (io_req)->xid, ##arg)); \ + } while (0) + +#define BNX2FC_TGT_DBG(tgt, fmt, arg...) \ + do { \ + if (!tgt || !tgt->port || !tgt->port->lport || \ + !tgt->port->lport->host || !tgt->rport) \ + BNX2FC_CHK_LOGGING(LOG_TGT, \ + printk(KERN_ALERT PFX "NULL " fmt, ##arg)); \ + else \ + BNX2FC_CHK_LOGGING(LOG_TGT, \ + shost_printk(KERN_ALERT, \ + (tgt)->port->lport->host, \ + PFX "port:%x " fmt, \ + (tgt)->rport->port_id, ##arg)); \ + } while (0) + + +#define BNX2FC_HBA_DBG(lport, fmt, arg...) \ + do { \ + if (!lport || !lport->host) \ + BNX2FC_CHK_LOGGING(LOG_HBA, \ + printk(KERN_ALERT PFX "NULL " fmt, ##arg)); \ + else \ + BNX2FC_CHK_LOGGING(LOG_HBA, \ + shost_printk(KERN_ALERT, lport->host, \ + PFX fmt, ##arg)); \ + } while (0) + +#endif diff --git a/drivers/scsi/bnx2fc/bnx2fc_els.c b/drivers/scsi/bnx2fc/bnx2fc_els.c new file mode 100644 index 000000000000..7a11a255157f --- /dev/null +++ b/drivers/scsi/bnx2fc/bnx2fc_els.c @@ -0,0 +1,515 @@ +/* + * bnx2fc_els.c: Broadcom NetXtreme II Linux FCoE offload driver. + * This file contains helper routines that handle ELS requests + * and responses. + * + * Copyright (c) 2008 - 2010 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ * + * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com) + */ + +#include "bnx2fc.h" + +static void bnx2fc_logo_resp(struct fc_seq *seq, struct fc_frame *fp, + void *arg); +static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp, + void *arg); +static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op, + void *data, u32 data_len, + void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg), + struct bnx2fc_els_cb_arg *cb_arg, u32 timer_msec); + +static void bnx2fc_rrq_compl(struct bnx2fc_els_cb_arg *cb_arg) +{ + struct bnx2fc_cmd *orig_io_req; + struct bnx2fc_cmd *rrq_req; + int rc = 0; + + BUG_ON(!cb_arg); + rrq_req = cb_arg->io_req; + orig_io_req = cb_arg->aborted_io_req; + BUG_ON(!orig_io_req); + BNX2FC_ELS_DBG("rrq_compl: orig xid = 0x%x, rrq_xid = 0x%x\n", + orig_io_req->xid, rrq_req->xid); + + kref_put(&orig_io_req->refcount, bnx2fc_cmd_release); + + if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &rrq_req->req_flags)) { + /* + * els req is timed out. cleanup the IO with FW and + * drop the completion. Remove from active_cmd_queue. + */ + BNX2FC_ELS_DBG("rrq xid - 0x%x timed out, clean it up\n", + rrq_req->xid); + + if (rrq_req->on_active_queue) { + list_del_init(&rrq_req->link); + rrq_req->on_active_queue = 0; + rc = bnx2fc_initiate_cleanup(rrq_req); + BUG_ON(rc); + } + } + kfree(cb_arg); +} +int bnx2fc_send_rrq(struct bnx2fc_cmd *aborted_io_req) +{ + + struct fc_els_rrq rrq; + struct bnx2fc_rport *tgt = aborted_io_req->tgt; + struct fc_lport *lport = tgt->rdata->local_port; + struct bnx2fc_els_cb_arg *cb_arg = NULL; + u32 sid = tgt->sid; + u32 r_a_tov = lport->r_a_tov; + unsigned long start = jiffies; + int rc; + + BNX2FC_ELS_DBG("Sending RRQ orig_xid = 0x%x\n", + aborted_io_req->xid); + memset(&rrq, 0, sizeof(rrq)); + + cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_NOIO); + if (!cb_arg) { + printk(KERN_ERR PFX "Unable to allocate cb_arg for RRQ\n"); + rc = -ENOMEM; + goto rrq_err; + } + + cb_arg->aborted_io_req = aborted_io_req; + + rrq.rrq_cmd = ELS_RRQ; + hton24(rrq.rrq_s_id, sid); + rrq.rrq_ox_id = htons(aborted_io_req->xid); + rrq.rrq_rx_id = htons(aborted_io_req->task->rx_wr_tx_rd.rx_id); + +retry_rrq: + rc = bnx2fc_initiate_els(tgt, ELS_RRQ, &rrq, sizeof(rrq), + bnx2fc_rrq_compl, cb_arg, + r_a_tov); + if (rc == -ENOMEM) { + if (time_after(jiffies, start + (10 * HZ))) { + BNX2FC_ELS_DBG("rrq Failed\n"); + rc = FAILED; + goto rrq_err; + } + msleep(20); + goto retry_rrq; + } +rrq_err: + if (rc) { + BNX2FC_ELS_DBG("RRQ failed - release orig io req 0x%x\n", + aborted_io_req->xid); + kfree(cb_arg); + spin_lock_bh(&tgt->tgt_lock); + kref_put(&aborted_io_req->refcount, bnx2fc_cmd_release); + spin_unlock_bh(&tgt->tgt_lock); + } + return rc; +} + +static void bnx2fc_l2_els_compl(struct bnx2fc_els_cb_arg *cb_arg) +{ + struct bnx2fc_cmd *els_req; + struct bnx2fc_rport *tgt; + struct bnx2fc_mp_req *mp_req; + struct fc_frame_header *fc_hdr; + unsigned char *buf; + void *resp_buf; + u32 resp_len, hdr_len; + u16 l2_oxid; + int frame_len; + int rc = 0; + + l2_oxid = cb_arg->l2_oxid; + BNX2FC_ELS_DBG("ELS COMPL - l2_oxid = 0x%x\n", l2_oxid); + + els_req = cb_arg->io_req; + if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &els_req->req_flags)) { + /* + * els req is timed out. cleanup the IO with FW and + * drop the completion. 
libfc will handle the els timeout + */ + if (els_req->on_active_queue) { + list_del_init(&els_req->link); + els_req->on_active_queue = 0; + rc = bnx2fc_initiate_cleanup(els_req); + BUG_ON(rc); + } + goto free_arg; + } + + tgt = els_req->tgt; + mp_req = &(els_req->mp_req); + fc_hdr = &(mp_req->resp_fc_hdr); + resp_len = mp_req->resp_len; + resp_buf = mp_req->resp_buf; + + buf = kzalloc(PAGE_SIZE, GFP_ATOMIC); + if (!buf) { + printk(KERN_ERR PFX "Unable to alloc mp buf\n"); + goto free_arg; + } + hdr_len = sizeof(*fc_hdr); + if (hdr_len + resp_len > PAGE_SIZE) { + printk(KERN_ERR PFX "l2_els_compl: resp len is " + "beyond page size\n"); + goto free_buf; + } + memcpy(buf, fc_hdr, hdr_len); + memcpy(buf + hdr_len, resp_buf, resp_len); + frame_len = hdr_len + resp_len; + + bnx2fc_process_l2_frame_compl(tgt, buf, frame_len, l2_oxid); + +free_buf: + kfree(buf); +free_arg: + kfree(cb_arg); +} + +int bnx2fc_send_adisc(struct bnx2fc_rport *tgt, struct fc_frame *fp) +{ + struct fc_els_adisc *adisc; + struct fc_frame_header *fh; + struct bnx2fc_els_cb_arg *cb_arg; + struct fc_lport *lport = tgt->rdata->local_port; + u32 r_a_tov = lport->r_a_tov; + int rc; + + fh = fc_frame_header_get(fp); + cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC); + if (!cb_arg) { + printk(KERN_ERR PFX "Unable to allocate cb_arg for ADISC\n"); + return -ENOMEM; + } + + cb_arg->l2_oxid = ntohs(fh->fh_ox_id); + + BNX2FC_ELS_DBG("send ADISC: l2_oxid = 0x%x\n", cb_arg->l2_oxid); + adisc = fc_frame_payload_get(fp, sizeof(*adisc)); + /* adisc is initialized by libfc */ + rc = bnx2fc_initiate_els(tgt, ELS_ADISC, adisc, sizeof(*adisc), + bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov); + if (rc) + kfree(cb_arg); + return rc; +} + +int bnx2fc_send_logo(struct bnx2fc_rport *tgt, struct fc_frame *fp) +{ + struct fc_els_logo *logo; + struct fc_frame_header *fh; + struct bnx2fc_els_cb_arg *cb_arg; + struct fc_lport *lport = tgt->rdata->local_port; + u32 r_a_tov = lport->r_a_tov; + int rc; + + fh = fc_frame_header_get(fp); + cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC); + if (!cb_arg) { + printk(KERN_ERR PFX "Unable to allocate cb_arg for LOGO\n"); + return -ENOMEM; + } + + cb_arg->l2_oxid = ntohs(fh->fh_ox_id); + + BNX2FC_ELS_DBG("Send LOGO: l2_oxid = 0x%x\n", cb_arg->l2_oxid); + logo = fc_frame_payload_get(fp, sizeof(*logo)); + /* logo is initialized by libfc */ + rc = bnx2fc_initiate_els(tgt, ELS_LOGO, logo, sizeof(*logo), + bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov); + if (rc) + kfree(cb_arg); + return rc; +} + +int bnx2fc_send_rls(struct bnx2fc_rport *tgt, struct fc_frame *fp) +{ + struct fc_els_rls *rls; + struct fc_frame_header *fh; + struct bnx2fc_els_cb_arg *cb_arg; + struct fc_lport *lport = tgt->rdata->local_port; + u32 r_a_tov = lport->r_a_tov; + int rc; + + fh = fc_frame_header_get(fp); + cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC); + if (!cb_arg) { + printk(KERN_ERR PFX "Unable to allocate cb_arg for LOGO\n"); + return -ENOMEM; + } + + cb_arg->l2_oxid = ntohs(fh->fh_ox_id); + + rls = fc_frame_payload_get(fp, sizeof(*rls)); + /* rls is initialized by libfc */ + rc = bnx2fc_initiate_els(tgt, ELS_RLS, rls, sizeof(*rls), + bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov); + if (rc) + kfree(cb_arg); + return rc; +} + +static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op, + void *data, u32 data_len, + void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg), + struct bnx2fc_els_cb_arg *cb_arg, u32 timer_msec) +{ + struct fcoe_port *port = tgt->port; + struct bnx2fc_hba *hba = 
port->priv; + struct fc_rport *rport = tgt->rport; + struct fc_lport *lport = port->lport; + struct bnx2fc_cmd *els_req; + struct bnx2fc_mp_req *mp_req; + struct fc_frame_header *fc_hdr; + struct fcoe_task_ctx_entry *task; + struct fcoe_task_ctx_entry *task_page; + int rc = 0; + int task_idx, index; + u32 did, sid; + u16 xid; + + rc = fc_remote_port_chkready(rport); + if (rc) { + printk(KERN_ALERT PFX "els 0x%x: rport not ready\n", op); + rc = -EINVAL; + goto els_err; + } + if (lport->state != LPORT_ST_READY || !(lport->link_up)) { + printk(KERN_ALERT PFX "els 0x%x: link is not ready\n", op); + rc = -EINVAL; + goto els_err; + } + if (!(test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) || + (test_bit(BNX2FC_FLAG_EXPL_LOGO, &tgt->flags))) { + printk(KERN_ERR PFX "els 0x%x: tgt not ready\n", op); + rc = -EINVAL; + goto els_err; + } + els_req = bnx2fc_elstm_alloc(tgt, BNX2FC_ELS); + if (!els_req) { + rc = -ENOMEM; + goto els_err; + } + + els_req->sc_cmd = NULL; + els_req->port = port; + els_req->tgt = tgt; + els_req->cb_func = cb_func; + cb_arg->io_req = els_req; + els_req->cb_arg = cb_arg; + + mp_req = (struct bnx2fc_mp_req *)&(els_req->mp_req); + rc = bnx2fc_init_mp_req(els_req); + if (rc == FAILED) { + printk(KERN_ALERT PFX "ELS MP request init failed\n"); + spin_lock_bh(&tgt->tgt_lock); + kref_put(&els_req->refcount, bnx2fc_cmd_release); + spin_unlock_bh(&tgt->tgt_lock); + rc = -ENOMEM; + goto els_err; + } else { + /* rc SUCCESS */ + rc = 0; + } + + /* Set the data_xfer_len to the size of ELS payload */ + mp_req->req_len = data_len; + els_req->data_xfer_len = mp_req->req_len; + + /* Fill ELS Payload */ + if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) { + memcpy(mp_req->req_buf, data, data_len); + } else { + printk(KERN_ALERT PFX "Invalid ELS op 0x%x\n", op); + els_req->cb_func = NULL; + els_req->cb_arg = NULL; + spin_lock_bh(&tgt->tgt_lock); + kref_put(&els_req->refcount, bnx2fc_cmd_release); + spin_unlock_bh(&tgt->tgt_lock); + rc = -EINVAL; + } + + if (rc) + goto els_err; + + /* Fill FC header */ + fc_hdr = &(mp_req->req_fc_hdr); + + did = tgt->rport->port_id; + sid = tgt->sid; + + __fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, did, sid, + FC_TYPE_ELS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ | + FC_FC_SEQ_INIT, 0); + + /* Obtain exchange id */ + xid = els_req->xid; + task_idx = xid/BNX2FC_TASKS_PER_PAGE; + index = xid % BNX2FC_TASKS_PER_PAGE; + + /* Initialize task context for this IO request */ + task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx]; + task = &(task_page[index]); + bnx2fc_init_mp_task(els_req, task); + + spin_lock_bh(&tgt->tgt_lock); + + if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) { + printk(KERN_ERR PFX "initiate_els.. 
session not ready\n"); + els_req->cb_func = NULL; + els_req->cb_arg = NULL; + kref_put(&els_req->refcount, bnx2fc_cmd_release); + spin_unlock_bh(&tgt->tgt_lock); + return -EINVAL; + } + + if (timer_msec) + bnx2fc_cmd_timer_set(els_req, timer_msec); + bnx2fc_add_2_sq(tgt, xid); + + els_req->on_active_queue = 1; + list_add_tail(&els_req->link, &tgt->els_queue); + + /* Ring doorbell */ + bnx2fc_ring_doorbell(tgt); + spin_unlock_bh(&tgt->tgt_lock); + +els_err: + return rc; +} + +void bnx2fc_process_els_compl(struct bnx2fc_cmd *els_req, + struct fcoe_task_ctx_entry *task, u8 num_rq) +{ + struct bnx2fc_mp_req *mp_req; + struct fc_frame_header *fc_hdr; + u64 *hdr; + u64 *temp_hdr; + + BNX2FC_ELS_DBG("Entered process_els_compl xid = 0x%x" + "cmd_type = %d\n", els_req->xid, els_req->cmd_type); + + if (test_and_set_bit(BNX2FC_FLAG_ELS_DONE, + &els_req->req_flags)) { + BNX2FC_ELS_DBG("Timer context finished processing this " + "els - 0x%x\n", els_req->xid); + /* This IO doesnt receive cleanup completion */ + kref_put(&els_req->refcount, bnx2fc_cmd_release); + return; + } + + /* Cancel the timeout_work, as we received the response */ + if (cancel_delayed_work(&els_req->timeout_work)) + kref_put(&els_req->refcount, + bnx2fc_cmd_release); /* drop timer hold */ + + if (els_req->on_active_queue) { + list_del_init(&els_req->link); + els_req->on_active_queue = 0; + } + + mp_req = &(els_req->mp_req); + fc_hdr = &(mp_req->resp_fc_hdr); + + hdr = (u64 *)fc_hdr; + temp_hdr = (u64 *) + &task->cmn.general.cmd_info.mp_fc_frame.fc_hdr; + hdr[0] = cpu_to_be64(temp_hdr[0]); + hdr[1] = cpu_to_be64(temp_hdr[1]); + hdr[2] = cpu_to_be64(temp_hdr[2]); + + mp_req->resp_len = task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_off; + + /* Parse ELS response */ + if ((els_req->cb_func) && (els_req->cb_arg)) { + els_req->cb_func(els_req->cb_arg); + els_req->cb_arg = NULL; + } + + kref_put(&els_req->refcount, bnx2fc_cmd_release); +} + +static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp, + void *arg) +{ + struct fcoe_ctlr *fip = arg; + struct fc_exch *exch = fc_seq_exch(seq); + struct fc_lport *lport = exch->lp; + u8 *mac; + struct fc_frame_header *fh; + u8 op; + + if (IS_ERR(fp)) + goto done; + + mac = fr_cb(fp)->granted_mac; + if (is_zero_ether_addr(mac)) { + fh = fc_frame_header_get(fp); + if (fh->fh_type != FC_TYPE_ELS) { + printk(KERN_ERR PFX "bnx2fc_flogi_resp:" + "fh_type != FC_TYPE_ELS\n"); + fc_frame_free(fp); + return; + } + op = fc_frame_payload_op(fp); + if (lport->vport) { + if (op == ELS_LS_RJT) { + printk(KERN_ERR PFX "bnx2fc_flogi_resp is LS_RJT\n"); + fc_vport_terminate(lport->vport); + fc_frame_free(fp); + return; + } + } + if (fcoe_ctlr_recv_flogi(fip, lport, fp)) { + fc_frame_free(fp); + return; + } + } + fip->update_mac(lport, mac); +done: + fc_lport_flogi_resp(seq, fp, lport); +} + +static void bnx2fc_logo_resp(struct fc_seq *seq, struct fc_frame *fp, + void *arg) +{ + struct fcoe_ctlr *fip = arg; + struct fc_exch *exch = fc_seq_exch(seq); + struct fc_lport *lport = exch->lp; + static u8 zero_mac[ETH_ALEN] = { 0 }; + + if (!IS_ERR(fp)) + fip->update_mac(lport, zero_mac); + fc_lport_logo_resp(seq, fp, lport); +} + +struct fc_seq *bnx2fc_elsct_send(struct fc_lport *lport, u32 did, + struct fc_frame *fp, unsigned int op, + void (*resp)(struct fc_seq *, + struct fc_frame *, + void *), + void *arg, u32 timeout) +{ + struct fcoe_port *port = lport_priv(lport); + struct bnx2fc_hba *hba = port->priv; + struct fcoe_ctlr *fip = &hba->ctlr; + struct fc_frame_header *fh = fc_frame_header_get(fp); + + switch 
(op) { + case ELS_FLOGI: + case ELS_FDISC: + return fc_elsct_send(lport, did, fp, op, bnx2fc_flogi_resp, + fip, timeout); + case ELS_LOGO: + /* only hook onto fabric logouts, not port logouts */ + if (ntoh24(fh->fh_d_id) != FC_FID_FLOGI) + break; + return fc_elsct_send(lport, did, fp, op, bnx2fc_logo_resp, + fip, timeout); + } + return fc_elsct_send(lport, did, fp, op, resp, arg, timeout); +} diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c new file mode 100644 index 000000000000..e476e8753079 --- /dev/null +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c @@ -0,0 +1,2535 @@ +/* bnx2fc_fcoe.c: Broadcom NetXtreme II Linux FCoE offload driver. + * This file contains the code that interacts with libfc, libfcoe, + * cnic modules to create FCoE instances, send/receive non-offloaded + * FIP/FCoE packets, listen to link events etc. + * + * Copyright (c) 2008 - 2010 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com) + */ + +#include "bnx2fc.h" + +static struct list_head adapter_list; +static u32 adapter_count; +static DEFINE_MUTEX(bnx2fc_dev_lock); +DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu); + +#define DRV_MODULE_NAME "bnx2fc" +#define DRV_MODULE_VERSION BNX2FC_VERSION +#define DRV_MODULE_RELDATE "Jan 25, 2011" + + +static char version[] __devinitdata = + "Broadcom NetXtreme II FCoE Driver " DRV_MODULE_NAME \ + " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; + + +MODULE_AUTHOR("Bhanu Prakash Gollapudi <bprakash@broadcom.com>"); +MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 FCoE Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_MODULE_VERSION); + +#define BNX2FC_MAX_QUEUE_DEPTH 256 +#define BNX2FC_MIN_QUEUE_DEPTH 32 +#define FCOE_WORD_TO_BYTE 4 + +static struct scsi_transport_template *bnx2fc_transport_template; +static struct scsi_transport_template *bnx2fc_vport_xport_template; + +struct workqueue_struct *bnx2fc_wq; + +/* bnx2fc structure needs only one instance of the fcoe_percpu_s structure. 
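+ * (that instance is bnx2fc_global, defined just below and guarded by
+ * bnx2fc_global_lock, while offloaded-I/O completions are farmed out
+ * to the per-CPU bnx2fc_percpu_s threads declared above).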
+ * Here the io threads are per cpu but the l2 thread is just one + */ +struct fcoe_percpu_s bnx2fc_global; +DEFINE_SPINLOCK(bnx2fc_global_lock); + +static struct cnic_ulp_ops bnx2fc_cnic_cb; +static struct libfc_function_template bnx2fc_libfc_fcn_templ; +static struct scsi_host_template bnx2fc_shost_template; +static struct fc_function_template bnx2fc_transport_function; +static struct fc_function_template bnx2fc_vport_xport_function; +static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode); +static int bnx2fc_destroy(struct net_device *net_device); +static int bnx2fc_enable(struct net_device *netdev); +static int bnx2fc_disable(struct net_device *netdev); + +static void bnx2fc_recv_frame(struct sk_buff *skb); + +static void bnx2fc_start_disc(struct bnx2fc_hba *hba); +static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev); +static int bnx2fc_net_config(struct fc_lport *lp); +static int bnx2fc_lport_config(struct fc_lport *lport); +static int bnx2fc_em_config(struct fc_lport *lport); +static int bnx2fc_bind_adapter_devices(struct bnx2fc_hba *hba); +static void bnx2fc_unbind_adapter_devices(struct bnx2fc_hba *hba); +static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba); +static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba); +static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba, + struct device *parent, int npiv); +static void bnx2fc_destroy_work(struct work_struct *work); + +static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev); +static struct bnx2fc_hba *bnx2fc_find_hba_for_cnic(struct cnic_dev *cnic); + +static int bnx2fc_fw_init(struct bnx2fc_hba *hba); +static void bnx2fc_fw_destroy(struct bnx2fc_hba *hba); + +static void bnx2fc_port_shutdown(struct fc_lport *lport); +static void bnx2fc_stop(struct bnx2fc_hba *hba); +static int __init bnx2fc_mod_init(void); +static void __exit bnx2fc_mod_exit(void); + +unsigned int bnx2fc_debug_level; +module_param_named(debug_logging, bnx2fc_debug_level, int, S_IRUGO|S_IWUSR); + +static int bnx2fc_cpu_callback(struct notifier_block *nfb, + unsigned long action, void *hcpu); +/* notification function for CPU hotplug events */ +static struct notifier_block bnx2fc_cpu_notifier = { + .notifier_call = bnx2fc_cpu_callback, +}; + +static void bnx2fc_clean_rx_queue(struct fc_lport *lp) +{ + struct fcoe_percpu_s *bg; + struct fcoe_rcv_info *fr; + struct sk_buff_head *list; + struct sk_buff *skb, *next; + struct sk_buff *head; + + bg = &bnx2fc_global; + spin_lock_bh(&bg->fcoe_rx_list.lock); + list = &bg->fcoe_rx_list; + head = list->next; + for (skb = head; skb != (struct sk_buff *)list; + skb = next) { + next = skb->next; + fr = fcoe_dev_from_skb(skb); + if (fr->fr_dev == lp) { + __skb_unlink(skb, list); + kfree_skb(skb); + } + } + spin_unlock_bh(&bg->fcoe_rx_list.lock); +} + +int bnx2fc_get_paged_crc_eof(struct sk_buff *skb, int tlen) +{ + int rc; + spin_lock(&bnx2fc_global_lock); + rc = fcoe_get_paged_crc_eof(skb, tlen, &bnx2fc_global); + spin_unlock(&bnx2fc_global_lock); + + return rc; +} + +static void bnx2fc_abort_io(struct fc_lport *lport) +{ + /* + * This function is no-op for bnx2fc, but we do + * not want to leave it as NULL either, as libfc + * can call the default function which is + * fc_fcp_abort_io. 
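+	 * Per-command aborts are instead presumably driven through the
+	 * offload path (bnx2fc_eh_abort() -> bnx2fc_initiate_abts(),
+	 * both prototyped in bnx2fc.h), so an empty hook is sufficient.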
+ */ +} + +static void bnx2fc_cleanup(struct fc_lport *lport) +{ + struct fcoe_port *port = lport_priv(lport); + struct bnx2fc_hba *hba = port->priv; + struct bnx2fc_rport *tgt; + int i; + + BNX2FC_MISC_DBG("Entered %s\n", __func__); + mutex_lock(&hba->hba_mutex); + spin_lock_bh(&hba->hba_lock); + for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) { + tgt = hba->tgt_ofld_list[i]; + if (tgt) { + /* Cleanup IOs belonging to requested vport */ + if (tgt->port == port) { + spin_unlock_bh(&hba->hba_lock); + BNX2FC_TGT_DBG(tgt, "flush/cleanup\n"); + bnx2fc_flush_active_ios(tgt); + spin_lock_bh(&hba->hba_lock); + } + } + } + spin_unlock_bh(&hba->hba_lock); + mutex_unlock(&hba->hba_mutex); +} + +static int bnx2fc_xmit_l2_frame(struct bnx2fc_rport *tgt, + struct fc_frame *fp) +{ + struct fc_rport_priv *rdata = tgt->rdata; + struct fc_frame_header *fh; + int rc = 0; + + fh = fc_frame_header_get(fp); + BNX2FC_TGT_DBG(tgt, "Xmit L2 frame rport = 0x%x, oxid = 0x%x, " + "r_ctl = 0x%x\n", rdata->ids.port_id, + ntohs(fh->fh_ox_id), fh->fh_r_ctl); + if ((fh->fh_type == FC_TYPE_ELS) && + (fh->fh_r_ctl == FC_RCTL_ELS_REQ)) { + + switch (fc_frame_payload_op(fp)) { + case ELS_ADISC: + rc = bnx2fc_send_adisc(tgt, fp); + break; + case ELS_LOGO: + rc = bnx2fc_send_logo(tgt, fp); + break; + case ELS_RLS: + rc = bnx2fc_send_rls(tgt, fp); + break; + default: + break; + } + } else if ((fh->fh_type == FC_TYPE_BLS) && + (fh->fh_r_ctl == FC_RCTL_BA_ABTS)) + BNX2FC_TGT_DBG(tgt, "ABTS frame\n"); + else { + BNX2FC_TGT_DBG(tgt, "Send L2 frame type 0x%x " + "rctl 0x%x thru non-offload path\n", + fh->fh_type, fh->fh_r_ctl); + return -ENODEV; + } + if (rc) + return -ENOMEM; + else + return 0; +} + +/** + * bnx2fc_xmit - bnx2fc's FCoE frame transmit function + * + * @lport: the associated local port + * @fp: the fc_frame to be transmitted + */ +static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp) +{ + struct ethhdr *eh; + struct fcoe_crc_eof *cp; + struct sk_buff *skb; + struct fc_frame_header *fh; + struct bnx2fc_hba *hba; + struct fcoe_port *port; + struct fcoe_hdr *hp; + struct bnx2fc_rport *tgt; + struct fcoe_dev_stats *stats; + u8 sof, eof; + u32 crc; + unsigned int hlen, tlen, elen; + int wlen, rc = 0; + + port = (struct fcoe_port *)lport_priv(lport); + hba = port->priv; + + fh = fc_frame_header_get(fp); + + skb = fp_skb(fp); + if (!lport->link_up) { + BNX2FC_HBA_DBG(lport, "bnx2fc_xmit link down\n"); + kfree_skb(skb); + return 0; + } + + if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) { + if (!hba->ctlr.sel_fcf) { + BNX2FC_HBA_DBG(lport, "FCF not selected yet!\n"); + kfree_skb(skb); + return -EINVAL; + } + if (fcoe_ctlr_els_send(&hba->ctlr, lport, skb)) + return 0; + } + + sof = fr_sof(fp); + eof = fr_eof(fp); + + /* + * Snoop the frame header to check if the frame is for + * an offloaded session + */ + /* + * tgt_ofld_list access is synchronized using + * both hba mutex and hba lock. Atleast hba mutex or + * hba lock needs to be held for read access. 
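+	 * A sketch of the rule this implies (illustrative only):
+	 *
+	 *	spin_lock_bh(&hba->hba_lock);
+	 *	tgt = bnx2fc_tgt_lookup(port, ntoh24(fh->fh_d_id));
+	 *	... use tgt under the lock ...
+	 *	spin_unlock_bh(&hba->hba_lock);
+	 *
+	 * which is exactly what the transmit path below does.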
+ */ + + spin_lock_bh(&hba->hba_lock); + tgt = bnx2fc_tgt_lookup(port, ntoh24(fh->fh_d_id)); + if (tgt && (test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags))) { + /* This frame is for offloaded session */ + BNX2FC_HBA_DBG(lport, "xmit: Frame is for offloaded session " + "port_id = 0x%x\n", ntoh24(fh->fh_d_id)); + spin_unlock_bh(&hba->hba_lock); + rc = bnx2fc_xmit_l2_frame(tgt, fp); + if (rc != -ENODEV) { + kfree_skb(skb); + return rc; + } + } else { + spin_unlock_bh(&hba->hba_lock); + } + + elen = sizeof(struct ethhdr); + hlen = sizeof(struct fcoe_hdr); + tlen = sizeof(struct fcoe_crc_eof); + wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE; + + skb->ip_summed = CHECKSUM_NONE; + crc = fcoe_fc_crc(fp); + + /* copy port crc and eof to the skb buff */ + if (skb_is_nonlinear(skb)) { + skb_frag_t *frag; + if (bnx2fc_get_paged_crc_eof(skb, tlen)) { + kfree_skb(skb); + return -ENOMEM; + } + frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1]; + cp = kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ) + + frag->page_offset; + } else { + cp = (struct fcoe_crc_eof *)skb_put(skb, tlen); + } + + memset(cp, 0, sizeof(*cp)); + cp->fcoe_eof = eof; + cp->fcoe_crc32 = cpu_to_le32(~crc); + if (skb_is_nonlinear(skb)) { + kunmap_atomic(cp, KM_SKB_DATA_SOFTIRQ); + cp = NULL; + } + + /* adjust skb network/transport offsets to match mac/fcoe/port */ + skb_push(skb, elen + hlen); + skb_reset_mac_header(skb); + skb_reset_network_header(skb); + skb->mac_len = elen; + skb->protocol = htons(ETH_P_FCOE); + skb->dev = hba->netdev; + + /* fill up mac and fcoe headers */ + eh = eth_hdr(skb); + eh->h_proto = htons(ETH_P_FCOE); + if (hba->ctlr.map_dest) + fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id); + else + /* insert GW address */ + memcpy(eh->h_dest, hba->ctlr.dest_addr, ETH_ALEN); + + if (unlikely(hba->ctlr.flogi_oxid != FC_XID_UNKNOWN)) + memcpy(eh->h_source, hba->ctlr.ctl_src_addr, ETH_ALEN); + else + memcpy(eh->h_source, port->data_src_addr, ETH_ALEN); + + hp = (struct fcoe_hdr *)(eh + 1); + memset(hp, 0, sizeof(*hp)); + if (FC_FCOE_VER) + FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER); + hp->fcoe_sof = sof; + + /* fcoe lso, mss is in max_payload which is non-zero for FCP data */ + if (lport->seq_offload && fr_max_payload(fp)) { + skb_shinfo(skb)->gso_type = SKB_GSO_FCOE; + skb_shinfo(skb)->gso_size = fr_max_payload(fp); + } else { + skb_shinfo(skb)->gso_type = 0; + skb_shinfo(skb)->gso_size = 0; + } + + /*update tx stats */ + stats = per_cpu_ptr(lport->dev_stats, get_cpu()); + stats->TxFrames++; + stats->TxWords += wlen; + put_cpu(); + + /* send down to lld */ + fr_dev(fp) = lport; + if (port->fcoe_pending_queue.qlen) + fcoe_check_wait_queue(lport, skb); + else if (fcoe_start_io(skb)) + fcoe_check_wait_queue(lport, skb); + + return 0; +} + +/** + * bnx2fc_rcv - This is bnx2fc's receive function called by NET_RX_SOFTIRQ + * + * @skb: the receive socket buffer + * @dev: associated net device + * @ptype: context + * @olddev: last device + * + * This function receives the packet and builds FC frame and passes it up + */ +static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev, + struct packet_type *ptype, struct net_device *olddev) +{ + struct fc_lport *lport; + struct bnx2fc_hba *hba; + struct fc_frame_header *fh; + struct fcoe_rcv_info *fr; + struct fcoe_percpu_s *bg; + unsigned short oxid; + + hba = container_of(ptype, struct bnx2fc_hba, fcoe_packet_type); + lport = hba->ctlr.lp; + + if (unlikely(lport == NULL)) { + printk(KERN_ALERT PFX "bnx2fc_rcv: lport is NULL\n"); + goto err; + } + + if 
(unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) { + printk(KERN_ALERT PFX "bnx2fc_rcv: Wrong FC type frame\n"); + goto err; + } + + /* + * Check for minimum frame length, and make sure required FCoE + * and FC headers are pulled into the linear data area. + */ + if (unlikely((skb->len < FCOE_MIN_FRAME) || + !pskb_may_pull(skb, FCOE_HEADER_LEN))) + goto err; + + skb_set_transport_header(skb, sizeof(struct fcoe_hdr)); + fh = (struct fc_frame_header *) skb_transport_header(skb); + + oxid = ntohs(fh->fh_ox_id); + + fr = fcoe_dev_from_skb(skb); + fr->fr_dev = lport; + fr->ptype = ptype; + + bg = &bnx2fc_global; + spin_lock_bh(&bg->fcoe_rx_list.lock); + + __skb_queue_tail(&bg->fcoe_rx_list, skb); + if (bg->fcoe_rx_list.qlen == 1) + wake_up_process(bg->thread); + + spin_unlock_bh(&bg->fcoe_rx_list.lock); + + return 0; +err: + kfree_skb(skb); + return -1; +} + +static int bnx2fc_l2_rcv_thread(void *arg) +{ + struct fcoe_percpu_s *bg = arg; + struct sk_buff *skb; + + set_user_nice(current, -20); + set_current_state(TASK_INTERRUPTIBLE); + while (!kthread_should_stop()) { + schedule(); + set_current_state(TASK_RUNNING); + spin_lock_bh(&bg->fcoe_rx_list.lock); + while ((skb = __skb_dequeue(&bg->fcoe_rx_list)) != NULL) { + spin_unlock_bh(&bg->fcoe_rx_list.lock); + bnx2fc_recv_frame(skb); + spin_lock_bh(&bg->fcoe_rx_list.lock); + } + spin_unlock_bh(&bg->fcoe_rx_list.lock); + set_current_state(TASK_INTERRUPTIBLE); + } + set_current_state(TASK_RUNNING); + return 0; +} + + +static void bnx2fc_recv_frame(struct sk_buff *skb) +{ + u32 fr_len; + struct fc_lport *lport; + struct fcoe_rcv_info *fr; + struct fcoe_dev_stats *stats; + struct fc_frame_header *fh; + struct fcoe_crc_eof crc_eof; + struct fc_frame *fp; + struct fc_lport *vn_port; + struct fcoe_port *port; + u8 *mac = NULL; + u8 *dest_mac = NULL; + struct fcoe_hdr *hp; + + fr = fcoe_dev_from_skb(skb); + lport = fr->fr_dev; + if (unlikely(lport == NULL)) { + printk(KERN_ALERT PFX "Invalid lport struct\n"); + kfree_skb(skb); + return; + } + + if (skb_is_nonlinear(skb)) + skb_linearize(skb); + mac = eth_hdr(skb)->h_source; + dest_mac = eth_hdr(skb)->h_dest; + + /* Pull the header */ + hp = (struct fcoe_hdr *) skb_network_header(skb); + fh = (struct fc_frame_header *) skb_transport_header(skb); + skb_pull(skb, sizeof(struct fcoe_hdr)); + fr_len = skb->len - sizeof(struct fcoe_crc_eof); + + stats = per_cpu_ptr(lport->dev_stats, get_cpu()); + stats->RxFrames++; + stats->RxWords += fr_len / FCOE_WORD_TO_BYTE; + + fp = (struct fc_frame *)skb; + fc_frame_init(fp); + fr_dev(fp) = lport; + fr_sof(fp) = hp->fcoe_sof; + if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) { + put_cpu(); + kfree_skb(skb); + return; + } + fr_eof(fp) = crc_eof.fcoe_eof; + fr_crc(fp) = crc_eof.fcoe_crc32; + if (pskb_trim(skb, fr_len)) { + put_cpu(); + kfree_skb(skb); + return; + } + + fh = fc_frame_header_get(fp); + + vn_port = fc_vport_id_lookup(lport, ntoh24(fh->fh_d_id)); + if (vn_port) { + port = lport_priv(vn_port); + if (compare_ether_addr(port->data_src_addr, dest_mac) + != 0) { + BNX2FC_HBA_DBG(lport, "fpma mismatch\n"); + put_cpu(); + kfree_skb(skb); + return; + } + } + if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA && + fh->fh_type == FC_TYPE_FCP) { + /* Drop FCP data. 
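+		 * (solicited data for offloaded sessions is delivered
+		 * through the per-connection RQ/CQ instead, so FCP data
+		 * seen on the L2 receive path can only be stray.)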
We dont this in L2 path */ + put_cpu(); + kfree_skb(skb); + return; + } + if (fh->fh_r_ctl == FC_RCTL_ELS_REQ && + fh->fh_type == FC_TYPE_ELS) { + switch (fc_frame_payload_op(fp)) { + case ELS_LOGO: + if (ntoh24(fh->fh_s_id) == FC_FID_FLOGI) { + /* drop non-FIP LOGO */ + put_cpu(); + kfree_skb(skb); + return; + } + break; + } + } + if (le32_to_cpu(fr_crc(fp)) != + ~crc32(~0, skb->data, fr_len)) { + if (stats->InvalidCRCCount < 5) + printk(KERN_WARNING PFX "dropping frame with " + "CRC error\n"); + stats->InvalidCRCCount++; + put_cpu(); + kfree_skb(skb); + return; + } + put_cpu(); + fc_exch_recv(lport, fp); +} + +/** + * bnx2fc_percpu_io_thread - thread per cpu for ios + * + * @arg: ptr to bnx2fc_percpu_info structure + */ +int bnx2fc_percpu_io_thread(void *arg) +{ + struct bnx2fc_percpu_s *p = arg; + struct bnx2fc_work *work, *tmp; + LIST_HEAD(work_list); + + set_user_nice(current, -20); + set_current_state(TASK_INTERRUPTIBLE); + while (!kthread_should_stop()) { + schedule(); + set_current_state(TASK_RUNNING); + spin_lock_bh(&p->fp_work_lock); + while (!list_empty(&p->work_list)) { + list_splice_init(&p->work_list, &work_list); + spin_unlock_bh(&p->fp_work_lock); + + list_for_each_entry_safe(work, tmp, &work_list, list) { + list_del_init(&work->list); + bnx2fc_process_cq_compl(work->tgt, work->wqe); + kfree(work); + } + + spin_lock_bh(&p->fp_work_lock); + } + spin_unlock_bh(&p->fp_work_lock); + set_current_state(TASK_INTERRUPTIBLE); + } + set_current_state(TASK_RUNNING); + + return 0; +} + +static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost) +{ + struct fc_host_statistics *bnx2fc_stats; + struct fc_lport *lport = shost_priv(shost); + struct fcoe_port *port = lport_priv(lport); + struct bnx2fc_hba *hba = port->priv; + struct fcoe_statistics_params *fw_stats; + int rc = 0; + + fw_stats = (struct fcoe_statistics_params *)hba->stats_buffer; + if (!fw_stats) + return NULL; + + bnx2fc_stats = fc_get_host_stats(shost); + + init_completion(&hba->stat_req_done); + if (bnx2fc_send_stat_req(hba)) + return bnx2fc_stats; + rc = wait_for_completion_timeout(&hba->stat_req_done, (2 * HZ)); + if (!rc) { + BNX2FC_HBA_DBG(lport, "FW stat req timed out\n"); + return bnx2fc_stats; + } + bnx2fc_stats->invalid_crc_count += fw_stats->rx_stat1.fc_crc_cnt; + bnx2fc_stats->tx_frames += fw_stats->tx_stat.fcoe_tx_pkt_cnt; + bnx2fc_stats->tx_words += (fw_stats->tx_stat.fcoe_tx_byte_cnt) / 4; + bnx2fc_stats->rx_frames += fw_stats->rx_stat0.fcoe_rx_pkt_cnt; + bnx2fc_stats->rx_words += (fw_stats->rx_stat0.fcoe_rx_byte_cnt) / 4; + + bnx2fc_stats->dumped_frames = 0; + bnx2fc_stats->lip_count = 0; + bnx2fc_stats->nos_count = 0; + bnx2fc_stats->loss_of_sync_count = 0; + bnx2fc_stats->loss_of_signal_count = 0; + bnx2fc_stats->prim_seq_protocol_err_count = 0; + + return bnx2fc_stats; +} + +static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev) +{ + struct fcoe_port *port = lport_priv(lport); + struct bnx2fc_hba *hba = port->priv; + struct Scsi_Host *shost = lport->host; + int rc = 0; + + shost->max_cmd_len = BNX2FC_MAX_CMD_LEN; + shost->max_lun = BNX2FC_MAX_LUN; + shost->max_id = BNX2FC_MAX_FCP_TGT; + shost->max_channel = 0; + if (lport->vport) + shost->transportt = bnx2fc_vport_xport_template; + else + shost->transportt = bnx2fc_transport_template; + + /* Add the new host to SCSI-ml */ + rc = scsi_add_host(lport->host, dev); + if (rc) { + printk(KERN_ERR PFX "Error on scsi_add_host\n"); + return rc; + } + if (!lport->vport) + fc_host_max_npiv_vports(lport->host) = USHRT_MAX; + 
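+	/*
+	 * Reviewer note: fc_host_symbolic_name() is a fixed-size buffer
+	 * (FC_SYMBOLIC_NAME_SIZE bytes), so a bounded print would be
+	 * safer than the sprintf() below, e.g.:
+	 *
+	 *	snprintf(fc_host_symbolic_name(lport->host),
+	 *		 FC_SYMBOLIC_NAME_SIZE, "%s v%s over %s",
+	 *		 BNX2FC_NAME, BNX2FC_VERSION, hba->netdev->name);
+	 */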
sprintf(fc_host_symbolic_name(lport->host), "%s v%s over %s", + BNX2FC_NAME, BNX2FC_VERSION, + hba->netdev->name); + + return 0; +} + +static int bnx2fc_mfs_update(struct fc_lport *lport) +{ + struct fcoe_port *port = lport_priv(lport); + struct bnx2fc_hba *hba = port->priv; + struct net_device *netdev = hba->netdev; + u32 mfs; + u32 max_mfs; + + mfs = netdev->mtu - (sizeof(struct fcoe_hdr) + + sizeof(struct fcoe_crc_eof)); + max_mfs = BNX2FC_MAX_PAYLOAD + sizeof(struct fc_frame_header); + BNX2FC_HBA_DBG(lport, "mfs = %d, max_mfs = %d\n", mfs, max_mfs); + if (mfs > max_mfs) + mfs = max_mfs; + + /* Adjust mfs to be a multiple of 256 bytes */ + mfs = (((mfs - sizeof(struct fc_frame_header)) / BNX2FC_MIN_PAYLOAD) * + BNX2FC_MIN_PAYLOAD); + mfs = mfs + sizeof(struct fc_frame_header); + + BNX2FC_HBA_DBG(lport, "Set MFS = %d\n", mfs); + if (fc_set_mfs(lport, mfs)) + return -EINVAL; + return 0; +} +static void bnx2fc_link_speed_update(struct fc_lport *lport) +{ + struct fcoe_port *port = lport_priv(lport); + struct bnx2fc_hba *hba = port->priv; + struct net_device *netdev = hba->netdev; + struct ethtool_cmd ecmd = { ETHTOOL_GSET }; + + if (!dev_ethtool_get_settings(netdev, &ecmd)) { + lport->link_supported_speeds &= + ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT); + if (ecmd.supported & (SUPPORTED_1000baseT_Half | + SUPPORTED_1000baseT_Full)) + lport->link_supported_speeds |= FC_PORTSPEED_1GBIT; + if (ecmd.supported & SUPPORTED_10000baseT_Full) + lport->link_supported_speeds |= FC_PORTSPEED_10GBIT; + + if (ecmd.speed == SPEED_1000) + lport->link_speed = FC_PORTSPEED_1GBIT; + if (ecmd.speed == SPEED_10000) + lport->link_speed = FC_PORTSPEED_10GBIT; + } + return; +} +static int bnx2fc_link_ok(struct fc_lport *lport) +{ + struct fcoe_port *port = lport_priv(lport); + struct bnx2fc_hba *hba = port->priv; + struct net_device *dev = hba->phys_dev; + int rc = 0; + + if ((dev->flags & IFF_UP) && netif_carrier_ok(dev)) + clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state); + else { + set_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state); + rc = -1; + } + return rc; +} + +/** + * bnx2fc_get_link_state - get network link state + * + * @hba: adapter instance pointer + * + * updates adapter structure flag based on netdev state + */ +void bnx2fc_get_link_state(struct bnx2fc_hba *hba) +{ + if (test_bit(__LINK_STATE_NOCARRIER, &hba->netdev->state)) + set_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state); + else + clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state); +} + +static int bnx2fc_net_config(struct fc_lport *lport) +{ + struct bnx2fc_hba *hba; + struct fcoe_port *port; + u64 wwnn, wwpn; + + port = lport_priv(lport); + hba = port->priv; + + /* require support for get_pauseparam ethtool op. 
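+	 * FCoE assumes a lossless link, so the underlying netdev must at
+	 * least expose pause/flow-control configuration.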
*/ + if (!hba->phys_dev->ethtool_ops || + !hba->phys_dev->ethtool_ops->get_pauseparam) + return -EOPNOTSUPP; + + if (bnx2fc_mfs_update(lport)) + return -EINVAL; + + skb_queue_head_init(&port->fcoe_pending_queue); + port->fcoe_pending_queue_active = 0; + setup_timer(&port->timer, fcoe_queue_timer, (unsigned long) lport); + + bnx2fc_link_speed_update(lport); + + if (!lport->vport) { + wwnn = fcoe_wwn_from_mac(hba->ctlr.ctl_src_addr, 1, 0); + BNX2FC_HBA_DBG(lport, "WWNN = 0x%llx\n", wwnn); + fc_set_wwnn(lport, wwnn); + + wwpn = fcoe_wwn_from_mac(hba->ctlr.ctl_src_addr, 2, 0); + BNX2FC_HBA_DBG(lport, "WWPN = 0x%llx\n", wwpn); + fc_set_wwpn(lport, wwpn); + } + + return 0; +} + +static void bnx2fc_destroy_timer(unsigned long data) +{ + struct bnx2fc_hba *hba = (struct bnx2fc_hba *)data; + + BNX2FC_HBA_DBG(hba->ctlr.lp, "ERROR:bnx2fc_destroy_timer - " + "Destroy compl not received!!\n"); + hba->flags |= BNX2FC_FLAG_DESTROY_CMPL; + wake_up_interruptible(&hba->destroy_wait); +} + +/** + * bnx2fc_indicate_netevent - Generic netdev event handler + * + * @context: adapter structure pointer + * @event: event type + * + * Handles NETDEV_UP, NETDEV_DOWN, NETDEV_GOING_DOWN,NETDEV_CHANGE and + * NETDEV_CHANGE_MTU events + */ +static void bnx2fc_indicate_netevent(void *context, unsigned long event) +{ + struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context; + struct fc_lport *lport = hba->ctlr.lp; + struct fc_lport *vport; + u32 link_possible = 1; + + if (!test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) { + BNX2FC_MISC_DBG("driver not ready. event=%s %ld\n", + hba->netdev->name, event); + return; + } + + /* + * ASSUMPTION: + * indicate_netevent cannot be called from cnic unless bnx2fc + * does register_device + */ + BUG_ON(!lport); + + BNX2FC_HBA_DBG(lport, "enter netevent handler - event=%s %ld\n", + hba->netdev->name, event); + + switch (event) { + case NETDEV_UP: + BNX2FC_HBA_DBG(lport, "Port up, adapter_state = %ld\n", + hba->adapter_state); + if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state)) + printk(KERN_ERR "indicate_netevent: "\ + "adapter is not UP!!\n"); + /* fall thru to update mfs if MTU has changed */ + case NETDEV_CHANGEMTU: + BNX2FC_HBA_DBG(lport, "NETDEV_CHANGEMTU event\n"); + bnx2fc_mfs_update(lport); + mutex_lock(&lport->lp_mutex); + list_for_each_entry(vport, &lport->vports, list) + bnx2fc_mfs_update(vport); + mutex_unlock(&lport->lp_mutex); + break; + + case NETDEV_DOWN: + BNX2FC_HBA_DBG(lport, "Port down\n"); + clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state); + clear_bit(ADAPTER_STATE_UP, &hba->adapter_state); + link_possible = 0; + break; + + case NETDEV_GOING_DOWN: + BNX2FC_HBA_DBG(lport, "Port going down\n"); + set_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state); + link_possible = 0; + break; + + case NETDEV_CHANGE: + BNX2FC_HBA_DBG(lport, "NETDEV_CHANGE\n"); + break; + + default: + printk(KERN_ERR PFX "Unkonwn netevent %ld", event); + return; + } + + bnx2fc_link_speed_update(lport); + + if (link_possible && !bnx2fc_link_ok(lport)) { + printk(KERN_ERR "indicate_netevent: call ctlr_link_up\n"); + fcoe_ctlr_link_up(&hba->ctlr); + } else { + printk(KERN_ERR "indicate_netevent: call ctlr_link_down\n"); + if (fcoe_ctlr_link_down(&hba->ctlr)) { + clear_bit(ADAPTER_STATE_READY, &hba->adapter_state); + mutex_lock(&lport->lp_mutex); + list_for_each_entry(vport, &lport->vports, list) + fc_host_port_type(vport->host) = + FC_PORTTYPE_UNKNOWN; + mutex_unlock(&lport->lp_mutex); + fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN; + per_cpu_ptr(lport->dev_stats, + 
get_cpu())->LinkFailureCount++; + put_cpu(); + fcoe_clean_pending_queue(lport); + + init_waitqueue_head(&hba->shutdown_wait); + BNX2FC_HBA_DBG(lport, "indicate_netevent " + "num_ofld_sess = %d\n", + hba->num_ofld_sess); + hba->wait_for_link_down = 1; + BNX2FC_HBA_DBG(lport, "waiting for uploads to " + "compl proc = %s\n", + current->comm); + wait_event_interruptible(hba->shutdown_wait, + (hba->num_ofld_sess == 0)); + BNX2FC_HBA_DBG(lport, "wakeup - num_ofld_sess = %d\n", + hba->num_ofld_sess); + hba->wait_for_link_down = 0; + + if (signal_pending(current)) + flush_signals(current); + } + } +} + +static int bnx2fc_libfc_config(struct fc_lport *lport) +{ + + /* Set the function pointers set by bnx2fc driver */ + memcpy(&lport->tt, &bnx2fc_libfc_fcn_templ, + sizeof(struct libfc_function_template)); + fc_elsct_init(lport); + fc_exch_init(lport); + fc_rport_init(lport); + fc_disc_init(lport); + return 0; +} + +static int bnx2fc_em_config(struct fc_lport *lport) +{ + struct fcoe_port *port = lport_priv(lport); + struct bnx2fc_hba *hba = port->priv; + + if (!fc_exch_mgr_alloc(lport, FC_CLASS_3, FCOE_MIN_XID, + FCOE_MAX_XID, NULL)) { + printk(KERN_ERR PFX "em_config:fc_exch_mgr_alloc failed\n"); + return -ENOMEM; + } + + hba->cmd_mgr = bnx2fc_cmd_mgr_alloc(hba, BNX2FC_MIN_XID, + BNX2FC_MAX_XID); + + if (!hba->cmd_mgr) { + printk(KERN_ERR PFX "em_config:bnx2fc_cmd_mgr_alloc failed\n"); + fc_exch_mgr_free(lport); + return -ENOMEM; + } + return 0; +} + +static int bnx2fc_lport_config(struct fc_lport *lport) +{ + lport->link_up = 0; + lport->qfull = 0; + lport->max_retry_count = 3; + lport->max_rport_retry_count = 3; + lport->e_d_tov = 2 * 1000; + lport->r_a_tov = 10 * 1000; + + /* REVISIT: enable when supporting tape devices + lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS | + FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL); + */ + lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS); + lport->does_npiv = 1; + + memset(&lport->rnid_gen, 0, sizeof(struct fc_els_rnid_gen)); + lport->rnid_gen.rnid_atype = BNX2FC_RNID_HBA; + + /* alloc stats structure */ + if (fc_lport_init_stats(lport)) + return -ENOMEM; + + /* Finish fc_lport configuration */ + fc_lport_config(lport); + + return 0; +} + +/** + * bnx2fc_fip_recv - handle a received FIP frame. + * + * @skb: the received skb + * @dev: associated &net_device + * @ptype: the &packet_type structure which was used to register this handler. + * @orig_dev: original receive &net_device, in case @ dev is a bond. + * + * Returns: 0 for success + */ +static int bnx2fc_fip_recv(struct sk_buff *skb, struct net_device *dev, + struct packet_type *ptype, + struct net_device *orig_dev) +{ + struct bnx2fc_hba *hba; + hba = container_of(ptype, struct bnx2fc_hba, fip_packet_type); + fcoe_ctlr_recv(&hba->ctlr, skb); + return 0; +} + +/** + * bnx2fc_update_src_mac - Update Ethernet MAC filters. + * + * @fip: FCoE controller. + * @old: Unicast MAC address to delete if the MAC is non-zero. + * @new: Unicast MAC address to add. + * + * Remove any previously-set unicast MAC filter. + * Add secondary FCoE MAC address filter for our OUI. 
+ */ +static void bnx2fc_update_src_mac(struct fc_lport *lport, u8 *addr) +{ + struct fcoe_port *port = lport_priv(lport); + + memcpy(port->data_src_addr, addr, ETH_ALEN); +} + +/** + * bnx2fc_get_src_mac - return the ethernet source address for an lport + * + * @lport: libfc port + */ +static u8 *bnx2fc_get_src_mac(struct fc_lport *lport) +{ + struct fcoe_port *port; + + port = (struct fcoe_port *)lport_priv(lport); + return port->data_src_addr; +} + +/** + * bnx2fc_fip_send - send an Ethernet-encapsulated FIP frame. + * + * @fip: FCoE controller. + * @skb: FIP Packet. + */ +static void bnx2fc_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb) +{ + skb->dev = bnx2fc_from_ctlr(fip)->netdev; + dev_queue_xmit(skb); +} + +static int bnx2fc_vport_create(struct fc_vport *vport, bool disabled) +{ + struct Scsi_Host *shost = vport_to_shost(vport); + struct fc_lport *n_port = shost_priv(shost); + struct fcoe_port *port = lport_priv(n_port); + struct bnx2fc_hba *hba = port->priv; + struct net_device *netdev = hba->netdev; + struct fc_lport *vn_port; + + if (!test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) { + printk(KERN_ERR PFX "vn ports cannot be created on" + "this hba\n"); + return -EIO; + } + mutex_lock(&bnx2fc_dev_lock); + vn_port = bnx2fc_if_create(hba, &vport->dev, 1); + mutex_unlock(&bnx2fc_dev_lock); + + if (IS_ERR(vn_port)) { + printk(KERN_ERR PFX "bnx2fc_vport_create (%s) failed\n", + netdev->name); + return -EIO; + } + + if (disabled) { + fc_vport_set_state(vport, FC_VPORT_DISABLED); + } else { + vn_port->boot_time = jiffies; + fc_lport_init(vn_port); + fc_fabric_login(vn_port); + fc_vport_setlink(vn_port); + } + return 0; +} + +static int bnx2fc_vport_destroy(struct fc_vport *vport) +{ + struct Scsi_Host *shost = vport_to_shost(vport); + struct fc_lport *n_port = shost_priv(shost); + struct fc_lport *vn_port = vport->dd_data; + struct fcoe_port *port = lport_priv(vn_port); + + mutex_lock(&n_port->lp_mutex); + list_del(&vn_port->list); + mutex_unlock(&n_port->lp_mutex); + queue_work(bnx2fc_wq, &port->destroy_work); + return 0; +} + +static int bnx2fc_vport_disable(struct fc_vport *vport, bool disable) +{ + struct fc_lport *lport = vport->dd_data; + + if (disable) { + fc_vport_set_state(vport, FC_VPORT_DISABLED); + fc_fabric_logoff(lport); + } else { + lport->boot_time = jiffies; + fc_fabric_login(lport); + fc_vport_setlink(lport); + } + return 0; +} + + +static int bnx2fc_netdev_setup(struct bnx2fc_hba *hba) +{ + struct net_device *netdev = hba->netdev; + struct net_device *physdev = hba->phys_dev; + struct netdev_hw_addr *ha; + int sel_san_mac = 0; + + /* Do not support for bonding device */ + if ((netdev->priv_flags & IFF_MASTER_ALB) || + (netdev->priv_flags & IFF_SLAVE_INACTIVE) || + (netdev->priv_flags & IFF_MASTER_8023AD)) { + return -EOPNOTSUPP; + } + + /* setup Source MAC Address */ + rcu_read_lock(); + for_each_dev_addr(physdev, ha) { + BNX2FC_MISC_DBG("net_config: ha->type = %d, fip_mac = ", + ha->type); + printk(KERN_INFO "%2x:%2x:%2x:%2x:%2x:%2x\n", ha->addr[0], + ha->addr[1], ha->addr[2], ha->addr[3], + ha->addr[4], ha->addr[5]); + + if ((ha->type == NETDEV_HW_ADDR_T_SAN) && + (is_valid_ether_addr(ha->addr))) { + memcpy(hba->ctlr.ctl_src_addr, ha->addr, ETH_ALEN); + sel_san_mac = 1; + BNX2FC_MISC_DBG("Found SAN MAC\n"); + } + } + rcu_read_unlock(); + + if (!sel_san_mac) + return -ENODEV; + + hba->fip_packet_type.func = bnx2fc_fip_recv; + hba->fip_packet_type.type = htons(ETH_P_FIP); + hba->fip_packet_type.dev = netdev; + dev_add_pack(&hba->fip_packet_type); + + 
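+	/*
+	 * Two ethertype hooks are registered for this interface: ETH_P_FIP
+	 * (0x8914) above for FIP control frames and ETH_P_FCOE (0x8906)
+	 * below for FCoE data frames.  Both hooks are removed again in
+	 * bnx2fc_netdev_cleanup() via __dev_remove_pack().
+	 */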
hba->fcoe_packet_type.func = bnx2fc_rcv; + hba->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE); + hba->fcoe_packet_type.dev = netdev; + dev_add_pack(&hba->fcoe_packet_type); + + return 0; +} + +static int bnx2fc_attach_transport(void) +{ + bnx2fc_transport_template = + fc_attach_transport(&bnx2fc_transport_function); + + if (bnx2fc_transport_template == NULL) { + printk(KERN_ERR PFX "Failed to attach FC transport\n"); + return -ENODEV; + } + + bnx2fc_vport_xport_template = + fc_attach_transport(&bnx2fc_vport_xport_function); + if (bnx2fc_vport_xport_template == NULL) { + printk(KERN_ERR PFX + "Failed to attach FC transport for vport\n"); + fc_release_transport(bnx2fc_transport_template); + bnx2fc_transport_template = NULL; + return -ENODEV; + } + return 0; +} +static void bnx2fc_release_transport(void) +{ + fc_release_transport(bnx2fc_transport_template); + fc_release_transport(bnx2fc_vport_xport_template); + bnx2fc_transport_template = NULL; + bnx2fc_vport_xport_template = NULL; +} + +static void bnx2fc_interface_release(struct kref *kref) +{ + struct bnx2fc_hba *hba; + struct net_device *netdev; + struct net_device *phys_dev; + + hba = container_of(kref, struct bnx2fc_hba, kref); + BNX2FC_HBA_DBG(hba->ctlr.lp, "Interface is being released\n"); + + netdev = hba->netdev; + phys_dev = hba->phys_dev; + + /* tear-down FIP controller */ + if (test_and_clear_bit(BNX2FC_CTLR_INIT_DONE, &hba->init_done)) + fcoe_ctlr_destroy(&hba->ctlr); + + /* Free the command manager */ + if (hba->cmd_mgr) { + bnx2fc_cmd_mgr_free(hba->cmd_mgr); + hba->cmd_mgr = NULL; + } + dev_put(netdev); + module_put(THIS_MODULE); +} + +static inline void bnx2fc_interface_get(struct bnx2fc_hba *hba) +{ + kref_get(&hba->kref); +} + +static inline void bnx2fc_interface_put(struct bnx2fc_hba *hba) +{ + kref_put(&hba->kref, bnx2fc_interface_release); +} +static void bnx2fc_interface_destroy(struct bnx2fc_hba *hba) +{ + bnx2fc_unbind_pcidev(hba); + kfree(hba); +} + +/** + * bnx2fc_interface_create - create a new fcoe instance + * + * @cnic: pointer to cnic device + * + * Creates a new FCoE instance on the given device which include allocating + * hba structure, scsi_host and lport structures. 
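+ * The hba is bound to its underlying pci_dev here; the Scsi_Host and
+ * fc_lport themselves are allocated later, when bnx2fc_if_create() runs
+ * for this hba.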
+ */ +static struct bnx2fc_hba *bnx2fc_interface_create(struct cnic_dev *cnic) +{ + struct bnx2fc_hba *hba; + int rc; + + hba = kzalloc(sizeof(*hba), GFP_KERNEL); + if (!hba) { + printk(KERN_ERR PFX "Unable to allocate hba structure\n"); + return NULL; + } + spin_lock_init(&hba->hba_lock); + mutex_init(&hba->hba_mutex); + + hba->cnic = cnic; + rc = bnx2fc_bind_pcidev(hba); + if (rc) + goto bind_err; + hba->phys_dev = cnic->netdev; + /* will get overwritten after we do vlan discovery */ + hba->netdev = hba->phys_dev; + + init_waitqueue_head(&hba->shutdown_wait); + init_waitqueue_head(&hba->destroy_wait); + + return hba; +bind_err: + printk(KERN_ERR PFX "create_interface: bind error\n"); + kfree(hba); + return NULL; +} + +static int bnx2fc_interface_setup(struct bnx2fc_hba *hba, + enum fip_state fip_mode) +{ + int rc = 0; + struct net_device *netdev = hba->netdev; + struct fcoe_ctlr *fip = &hba->ctlr; + + dev_hold(netdev); + kref_init(&hba->kref); + + hba->flags = 0; + + /* Initialize FIP */ + memset(fip, 0, sizeof(*fip)); + fcoe_ctlr_init(fip, fip_mode); + hba->ctlr.send = bnx2fc_fip_send; + hba->ctlr.update_mac = bnx2fc_update_src_mac; + hba->ctlr.get_src_addr = bnx2fc_get_src_mac; + set_bit(BNX2FC_CTLR_INIT_DONE, &hba->init_done); + + rc = bnx2fc_netdev_setup(hba); + if (rc) + goto setup_err; + + hba->next_conn_id = 0; + + memset(hba->tgt_ofld_list, 0, sizeof(hba->tgt_ofld_list)); + hba->num_ofld_sess = 0; + + return 0; + +setup_err: + fcoe_ctlr_destroy(&hba->ctlr); + dev_put(netdev); + bnx2fc_interface_put(hba); + return rc; +} + +/** + * bnx2fc_if_create - Create FCoE instance on a given interface + * + * @hba: FCoE interface to create a local port on + * @parent: Device pointer to be the parent in sysfs for the SCSI host + * @npiv: Indicates if the port is vport or not + * + * Creates a fc_lport instance and a Scsi_Host instance and configure them. 
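+ * When @npiv is set, the lport comes from libfc_vport_create() and takes
+ * its WWNN/WWPN from the parent fc_vport; otherwise a new Scsi_Host is
+ * allocated from the bnx2fc template and an exchange manager is set up
+ * for the physical port.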
+ * + * Returns: Allocated fc_lport or an error pointer + */ +static struct fc_lport *bnx2fc_if_create(struct bnx2fc_hba *hba, + struct device *parent, int npiv) +{ + struct fc_lport *lport = NULL; + struct fcoe_port *port; + struct Scsi_Host *shost; + struct fc_vport *vport = dev_to_vport(parent); + int rc = 0; + + /* Allocate Scsi_Host structure */ + if (!npiv) { + lport = libfc_host_alloc(&bnx2fc_shost_template, + sizeof(struct fcoe_port)); + } else { + lport = libfc_vport_create(vport, + sizeof(struct fcoe_port)); + } + + if (!lport) { + printk(KERN_ERR PFX "could not allocate scsi host structure\n"); + return NULL; + } + shost = lport->host; + port = lport_priv(lport); + port->lport = lport; + port->priv = hba; + INIT_WORK(&port->destroy_work, bnx2fc_destroy_work); + + /* Configure fcoe_port */ + rc = bnx2fc_lport_config(lport); + if (rc) + goto lp_config_err; + + if (npiv) { + vport = dev_to_vport(parent); + printk(KERN_ERR PFX "Setting vport names, 0x%llX 0x%llX\n", + vport->node_name, vport->port_name); + fc_set_wwnn(lport, vport->node_name); + fc_set_wwpn(lport, vport->port_name); + } + /* Configure netdev and networking properties of the lport */ + rc = bnx2fc_net_config(lport); + if (rc) { + printk(KERN_ERR PFX "Error on bnx2fc_net_config\n"); + goto lp_config_err; + } + + rc = bnx2fc_shost_config(lport, parent); + if (rc) { + printk(KERN_ERR PFX "Couldnt configure shost for %s\n", + hba->netdev->name); + goto lp_config_err; + } + + /* Initialize the libfc library */ + rc = bnx2fc_libfc_config(lport); + if (rc) { + printk(KERN_ERR PFX "Couldnt configure libfc\n"); + goto shost_err; + } + fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN; + + /* Allocate exchange manager */ + if (!npiv) { + rc = bnx2fc_em_config(lport); + if (rc) { + printk(KERN_ERR PFX "Error on bnx2fc_em_config\n"); + goto shost_err; + } + } + + bnx2fc_interface_get(hba); + return lport; + +shost_err: + scsi_remove_host(shost); +lp_config_err: + scsi_host_put(lport->host); + return NULL; +} + +static void bnx2fc_netdev_cleanup(struct bnx2fc_hba *hba) +{ + /* Dont listen for Ethernet packets anymore */ + __dev_remove_pack(&hba->fcoe_packet_type); + __dev_remove_pack(&hba->fip_packet_type); + synchronize_net(); +} + +static void bnx2fc_if_destroy(struct fc_lport *lport) +{ + struct fcoe_port *port = lport_priv(lport); + struct bnx2fc_hba *hba = port->priv; + + BNX2FC_HBA_DBG(hba->ctlr.lp, "ENTERED bnx2fc_if_destroy\n"); + /* Stop the transmit retry timer */ + del_timer_sync(&port->timer); + + /* Free existing transmit skbs */ + fcoe_clean_pending_queue(lport); + + bnx2fc_interface_put(hba); + + /* Free queued packets for the receive thread */ + bnx2fc_clean_rx_queue(lport); + + /* Detach from scsi-ml */ + fc_remove_host(lport->host); + scsi_remove_host(lport->host); + + /* + * Note that only the physical lport will have the exchange manager. + * for vports, this function is NOP + */ + fc_exch_mgr_free(lport); + + /* Free memory used by statistical counters */ + fc_lport_free_stats(lport); + + /* Release Scsi_Host */ + scsi_host_put(lport->host); +} + +/** + * bnx2fc_destroy - Destroy a bnx2fc FCoE interface + * + * @buffer: The name of the Ethernet interface to be destroyed + * @kp: The associated kernel parameter + * + * Called from sysfs. 
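+ * The call arrives through the fcoe_transport ops (bnx2fc_transport.destroy)
+ * with the FCoE VLAN net_device as the argument.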
+ * + * Returns: 0 for success + */ +static int bnx2fc_destroy(struct net_device *netdev) +{ + struct bnx2fc_hba *hba = NULL; + struct net_device *phys_dev; + int rc = 0; + + if (!rtnl_trylock()) + return restart_syscall(); + + mutex_lock(&bnx2fc_dev_lock); +#ifdef CONFIG_SCSI_BNX2X_FCOE_MODULE + if (THIS_MODULE->state != MODULE_STATE_LIVE) { + rc = -ENODEV; + goto netdev_err; + } +#endif + /* obtain physical netdev */ + if (netdev->priv_flags & IFF_802_1Q_VLAN) + phys_dev = vlan_dev_real_dev(netdev); + else { + printk(KERN_ERR PFX "Not a vlan device\n"); + rc = -ENODEV; + goto netdev_err; + } + + hba = bnx2fc_hba_lookup(phys_dev); + if (!hba || !hba->ctlr.lp) { + rc = -ENODEV; + printk(KERN_ERR PFX "bnx2fc_destroy: hba or lport not found\n"); + goto netdev_err; + } + + if (!test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) { + printk(KERN_ERR PFX "bnx2fc_destroy: Create not called\n"); + goto netdev_err; + } + + bnx2fc_netdev_cleanup(hba); + + bnx2fc_stop(hba); + + bnx2fc_if_destroy(hba->ctlr.lp); + + destroy_workqueue(hba->timer_work_queue); + + if (test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) + bnx2fc_fw_destroy(hba); + + clear_bit(BNX2FC_CREATE_DONE, &hba->init_done); +netdev_err: + mutex_unlock(&bnx2fc_dev_lock); + rtnl_unlock(); + return rc; +} + +static void bnx2fc_destroy_work(struct work_struct *work) +{ + struct fcoe_port *port; + struct fc_lport *lport; + + port = container_of(work, struct fcoe_port, destroy_work); + lport = port->lport; + + BNX2FC_HBA_DBG(lport, "Entered bnx2fc_destroy_work\n"); + + bnx2fc_port_shutdown(lport); + rtnl_lock(); + mutex_lock(&bnx2fc_dev_lock); + bnx2fc_if_destroy(lport); + mutex_unlock(&bnx2fc_dev_lock); + rtnl_unlock(); +} + +static void bnx2fc_unbind_adapter_devices(struct bnx2fc_hba *hba) +{ + bnx2fc_free_fw_resc(hba); + bnx2fc_free_task_ctx(hba); +} + +/** + * bnx2fc_bind_adapter_devices - binds bnx2fc adapter with the associated + * pci structure + * + * @hba: Adapter instance + */ +static int bnx2fc_bind_adapter_devices(struct bnx2fc_hba *hba) +{ + if (bnx2fc_setup_task_ctx(hba)) + goto mem_err; + + if (bnx2fc_setup_fw_resc(hba)) + goto mem_err; + + return 0; +mem_err: + bnx2fc_unbind_adapter_devices(hba); + return -ENOMEM; +} + +static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba) +{ + struct cnic_dev *cnic; + + if (!hba->cnic) { + printk(KERN_ERR PFX "cnic is NULL\n"); + return -ENODEV; + } + cnic = hba->cnic; + hba->pcidev = cnic->pcidev; + if (hba->pcidev) + pci_dev_get(hba->pcidev); + + return 0; +} + +static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba) +{ + if (hba->pcidev) + pci_dev_put(hba->pcidev); + hba->pcidev = NULL; +} + + + +/** + * bnx2fc_ulp_start - cnic callback to initialize & start adapter instance + * + * @handle: transport handle pointing to adapter struture + * + * This function maps adapter structure to pcidev structure and initiates + * firmware handshake to enable/initialize on-chip FCoE components. + * This bnx2fc - cnic interface api callback is used after following + * conditions are met - + * a) underlying network interface is up (marked by event NETDEV_UP + * from netdev + * b) bnx2fc adatper structure is registered. 
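+ *
+ * Rough call sequence once both conditions hold (illustrative):
+ *
+ *	bnx2fc_ulp_start(hba)
+ *	    bnx2fc_fw_init(hba)
+ *	        bnx2fc_bind_adapter_devices(hba)
+ *	        bnx2fc_send_fw_fcoe_init_msg(hba)
+ *	        poll ADAPTER_STATE_UP for up to ~1 second
+ *	    bnx2fc_start_disc(hba)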
+ */ +static void bnx2fc_ulp_start(void *handle) +{ + struct bnx2fc_hba *hba = handle; + struct fc_lport *lport = hba->ctlr.lp; + + BNX2FC_MISC_DBG("Entered %s\n", __func__); + mutex_lock(&bnx2fc_dev_lock); + + if (test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) + goto start_disc; + + if (test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) + bnx2fc_fw_init(hba); + +start_disc: + mutex_unlock(&bnx2fc_dev_lock); + + BNX2FC_MISC_DBG("bnx2fc started.\n"); + + /* Kick off Fabric discovery*/ + if (test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) { + printk(KERN_ERR PFX "ulp_init: start discovery\n"); + lport->tt.frame_send = bnx2fc_xmit; + bnx2fc_start_disc(hba); + } +} + +static void bnx2fc_port_shutdown(struct fc_lport *lport) +{ + BNX2FC_MISC_DBG("Entered %s\n", __func__); + fc_fabric_logoff(lport); + fc_lport_destroy(lport); +} + +static void bnx2fc_stop(struct bnx2fc_hba *hba) +{ + struct fc_lport *lport; + struct fc_lport *vport; + + BNX2FC_MISC_DBG("ENTERED %s - init_done = %ld\n", __func__, + hba->init_done); + if (test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done) && + test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) { + lport = hba->ctlr.lp; + bnx2fc_port_shutdown(lport); + BNX2FC_HBA_DBG(lport, "bnx2fc_stop: waiting for %d " + "offloaded sessions\n", + hba->num_ofld_sess); + wait_event_interruptible(hba->shutdown_wait, + (hba->num_ofld_sess == 0)); + mutex_lock(&lport->lp_mutex); + list_for_each_entry(vport, &lport->vports, list) + fc_host_port_type(vport->host) = FC_PORTTYPE_UNKNOWN; + mutex_unlock(&lport->lp_mutex); + fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN; + fcoe_ctlr_link_down(&hba->ctlr); + fcoe_clean_pending_queue(lport); + + mutex_lock(&hba->hba_mutex); + clear_bit(ADAPTER_STATE_UP, &hba->adapter_state); + clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state); + + clear_bit(ADAPTER_STATE_READY, &hba->adapter_state); + mutex_unlock(&hba->hba_mutex); + } +} + +static int bnx2fc_fw_init(struct bnx2fc_hba *hba) +{ +#define BNX2FC_INIT_POLL_TIME (1000 / HZ) + int rc = -1; + int i = HZ; + + rc = bnx2fc_bind_adapter_devices(hba); + if (rc) { + printk(KERN_ALERT PFX + "bnx2fc_bind_adapter_devices failed - rc = %d\n", rc); + goto err_out; + } + + rc = bnx2fc_send_fw_fcoe_init_msg(hba); + if (rc) { + printk(KERN_ALERT PFX + "bnx2fc_send_fw_fcoe_init_msg failed - rc = %d\n", rc); + goto err_unbind; + } + + /* + * Wait until the adapter init message is complete, and adapter + * state is UP. + */ + while (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) && i--) + msleep(BNX2FC_INIT_POLL_TIME); + + if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state)) { + printk(KERN_ERR PFX "bnx2fc_start: %s failed to initialize. 
" + "Ignoring...\n", + hba->cnic->netdev->name); + rc = -1; + goto err_unbind; + } + + + /* Mark HBA to indicate that the FW INIT is done */ + set_bit(BNX2FC_FW_INIT_DONE, &hba->init_done); + return 0; + +err_unbind: + bnx2fc_unbind_adapter_devices(hba); +err_out: + return rc; +} + +static void bnx2fc_fw_destroy(struct bnx2fc_hba *hba) +{ + if (test_and_clear_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) { + if (bnx2fc_send_fw_fcoe_destroy_msg(hba) == 0) { + init_timer(&hba->destroy_timer); + hba->destroy_timer.expires = BNX2FC_FW_TIMEOUT + + jiffies; + hba->destroy_timer.function = bnx2fc_destroy_timer; + hba->destroy_timer.data = (unsigned long)hba; + add_timer(&hba->destroy_timer); + wait_event_interruptible(hba->destroy_wait, + (hba->flags & + BNX2FC_FLAG_DESTROY_CMPL)); + /* This should never happen */ + if (signal_pending(current)) + flush_signals(current); + + del_timer_sync(&hba->destroy_timer); + } + bnx2fc_unbind_adapter_devices(hba); + } +} + +/** + * bnx2fc_ulp_stop - cnic callback to shutdown adapter instance + * + * @handle: transport handle pointing to adapter structure + * + * Driver checks if adapter is already in shutdown mode, if not start + * the shutdown process. + */ +static void bnx2fc_ulp_stop(void *handle) +{ + struct bnx2fc_hba *hba = (struct bnx2fc_hba *)handle; + + printk(KERN_ERR "ULP_STOP\n"); + + mutex_lock(&bnx2fc_dev_lock); + bnx2fc_stop(hba); + bnx2fc_fw_destroy(hba); + mutex_unlock(&bnx2fc_dev_lock); +} + +static void bnx2fc_start_disc(struct bnx2fc_hba *hba) +{ + struct fc_lport *lport; + int wait_cnt = 0; + + BNX2FC_MISC_DBG("Entered %s\n", __func__); + /* Kick off FIP/FLOGI */ + if (!test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) { + printk(KERN_ERR PFX "Init not done yet\n"); + return; + } + + lport = hba->ctlr.lp; + BNX2FC_HBA_DBG(lport, "calling fc_fabric_login\n"); + + if (!bnx2fc_link_ok(lport)) { + BNX2FC_HBA_DBG(lport, "ctlr_link_up\n"); + fcoe_ctlr_link_up(&hba->ctlr); + fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT; + set_bit(ADAPTER_STATE_READY, &hba->adapter_state); + } + + /* wait for the FCF to be selected before issuing FLOGI */ + while (!hba->ctlr.sel_fcf) { + msleep(250); + /* give up after 3 secs */ + if (++wait_cnt > 12) + break; + } + fc_lport_init(lport); + fc_fabric_login(lport); +} + + +/** + * bnx2fc_ulp_init - Initialize an adapter instance + * + * @dev : cnic device handle + * Called from cnic_register_driver() context to initialize all + * enumerated cnic devices. This routine allocates adapter structure + * and other device specific resources. 
+ */ +static void bnx2fc_ulp_init(struct cnic_dev *dev) +{ + struct bnx2fc_hba *hba; + int rc = 0; + + BNX2FC_MISC_DBG("Entered %s\n", __func__); + /* bnx2fc works only when bnx2x is loaded */ + if (!test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) { + printk(KERN_ERR PFX "bnx2fc FCoE not supported on %s," + " flags: %lx\n", + dev->netdev->name, dev->flags); + return; + } + + /* Configure FCoE interface */ + hba = bnx2fc_interface_create(dev); + if (!hba) { + printk(KERN_ERR PFX "hba initialization failed\n"); + return; + } + + /* Add HBA to the adapter list */ + mutex_lock(&bnx2fc_dev_lock); + list_add_tail(&hba->link, &adapter_list); + adapter_count++; + mutex_unlock(&bnx2fc_dev_lock); + + clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic); + rc = dev->register_device(dev, CNIC_ULP_FCOE, + (void *) hba); + if (rc) + printk(KERN_ALERT PFX "register_device failed, rc = %d\n", rc); + else + set_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic); +} + + +static int bnx2fc_disable(struct net_device *netdev) +{ + struct bnx2fc_hba *hba; + struct net_device *phys_dev; + struct ethtool_drvinfo drvinfo; + int rc = 0; + + if (!rtnl_trylock()) { + printk(KERN_ERR PFX "retrying for rtnl_lock\n"); + return -EIO; + } + + mutex_lock(&bnx2fc_dev_lock); + + if (THIS_MODULE->state != MODULE_STATE_LIVE) { + rc = -ENODEV; + goto nodev; + } + + /* obtain physical netdev */ + if (netdev->priv_flags & IFF_802_1Q_VLAN) + phys_dev = vlan_dev_real_dev(netdev); + else { + printk(KERN_ERR PFX "Not a vlan device\n"); + rc = -ENODEV; + goto nodev; + } + + /* verify if the physical device is a netxtreme2 device */ + if (phys_dev->ethtool_ops && phys_dev->ethtool_ops->get_drvinfo) { + memset(&drvinfo, 0, sizeof(drvinfo)); + phys_dev->ethtool_ops->get_drvinfo(phys_dev, &drvinfo); + if (strcmp(drvinfo.driver, "bnx2x")) { + printk(KERN_ERR PFX "Not a netxtreme2 device\n"); + rc = -ENODEV; + goto nodev; + } + } else { + printk(KERN_ERR PFX "unable to obtain drv_info\n"); + rc = -ENODEV; + goto nodev; + } + + printk(KERN_ERR PFX "phys_dev is netxtreme2 device\n"); + + /* obtain hba and initialize rest of the structure */ + hba = bnx2fc_hba_lookup(phys_dev); + if (!hba || !hba->ctlr.lp) { + rc = -ENODEV; + printk(KERN_ERR PFX "bnx2fc_disable: hba or lport not found\n"); + } else { + fcoe_ctlr_link_down(&hba->ctlr); + fcoe_clean_pending_queue(hba->ctlr.lp); + } + +nodev: + mutex_unlock(&bnx2fc_dev_lock); + rtnl_unlock(); + return rc; +} + + +static int bnx2fc_enable(struct net_device *netdev) +{ + struct bnx2fc_hba *hba; + struct net_device *phys_dev; + struct ethtool_drvinfo drvinfo; + int rc = 0; + + if (!rtnl_trylock()) { + printk(KERN_ERR PFX "retrying for rtnl_lock\n"); + return -EIO; + } + + BNX2FC_MISC_DBG("Entered %s\n", __func__); + mutex_lock(&bnx2fc_dev_lock); + + if (THIS_MODULE->state != MODULE_STATE_LIVE) { + rc = -ENODEV; + goto nodev; + } + + /* obtain physical netdev */ + if (netdev->priv_flags & IFF_802_1Q_VLAN) + phys_dev = vlan_dev_real_dev(netdev); + else { + printk(KERN_ERR PFX "Not a vlan device\n"); + rc = -ENODEV; + goto nodev; + } + /* verify if the physical device is a netxtreme2 device */ + if (phys_dev->ethtool_ops && phys_dev->ethtool_ops->get_drvinfo) { + memset(&drvinfo, 0, sizeof(drvinfo)); + phys_dev->ethtool_ops->get_drvinfo(phys_dev, &drvinfo); + if (strcmp(drvinfo.driver, "bnx2x")) { + printk(KERN_ERR PFX "Not a netxtreme2 device\n"); + rc = -ENODEV; + goto nodev; + } + } else { + printk(KERN_ERR PFX "unable to obtain drv_info\n"); + rc = -ENODEV; + goto nodev; + } + + /* obtain hba and 
initialize rest of the structure */ + hba = bnx2fc_hba_lookup(phys_dev); + if (!hba || !hba->ctlr.lp) { + rc = -ENODEV; + printk(KERN_ERR PFX "bnx2fc_enable: hba or lport not found\n"); + } else if (!bnx2fc_link_ok(hba->ctlr.lp)) + fcoe_ctlr_link_up(&hba->ctlr); + +nodev: + mutex_unlock(&bnx2fc_dev_lock); + rtnl_unlock(); + return rc; +} + +/** + * bnx2fc_create - Create bnx2fc FCoE interface + * + * @buffer: The name of Ethernet interface to create on + * @kp: The associated kernel param + * + * Called from sysfs. + * + * Returns: 0 for success + */ +static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode) +{ + struct bnx2fc_hba *hba; + struct net_device *phys_dev; + struct fc_lport *lport; + struct ethtool_drvinfo drvinfo; + int rc = 0; + int vlan_id; + + BNX2FC_MISC_DBG("Entered bnx2fc_create\n"); + if (fip_mode != FIP_MODE_FABRIC) { + printk(KERN_ERR "fip mode not FABRIC\n"); + return -EIO; + } + + if (!rtnl_trylock()) { + printk(KERN_ERR "trying for rtnl_lock\n"); + return -EIO; + } + mutex_lock(&bnx2fc_dev_lock); + +#ifdef CONFIG_SCSI_BNX2X_FCOE_MODULE + if (THIS_MODULE->state != MODULE_STATE_LIVE) { + rc = -ENODEV; + goto mod_err; + } +#endif + + if (!try_module_get(THIS_MODULE)) { + rc = -EINVAL; + goto mod_err; + } + + /* obtain physical netdev */ + if (netdev->priv_flags & IFF_802_1Q_VLAN) { + phys_dev = vlan_dev_real_dev(netdev); + vlan_id = vlan_dev_vlan_id(netdev); + } else { + printk(KERN_ERR PFX "Not a vlan device\n"); + rc = -EINVAL; + goto netdev_err; + } + /* verify if the physical device is a netxtreme2 device */ + if (phys_dev->ethtool_ops && phys_dev->ethtool_ops->get_drvinfo) { + memset(&drvinfo, 0, sizeof(drvinfo)); + phys_dev->ethtool_ops->get_drvinfo(phys_dev, &drvinfo); + if (strcmp(drvinfo.driver, "bnx2x")) { + printk(KERN_ERR PFX "Not a netxtreme2 device\n"); + rc = -EINVAL; + goto netdev_err; + } + } else { + printk(KERN_ERR PFX "unable to obtain drv_info\n"); + rc = -EINVAL; + goto netdev_err; + } + + /* obtain hba and initialize rest of the structure */ + hba = bnx2fc_hba_lookup(phys_dev); + if (!hba) { + rc = -ENODEV; + printk(KERN_ERR PFX "bnx2fc_create: hba not found\n"); + goto netdev_err; + } + + if (!test_bit(BNX2FC_FW_INIT_DONE, &hba->init_done)) { + rc = bnx2fc_fw_init(hba); + if (rc) + goto netdev_err; + } + + if (test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) { + rc = -EEXIST; + goto netdev_err; + } + + /* update netdev with vlan netdev */ + hba->netdev = netdev; + hba->vlan_id = vlan_id; + hba->vlan_enabled = 1; + + rc = bnx2fc_interface_setup(hba, fip_mode); + if (rc) { + printk(KERN_ERR PFX "bnx2fc_interface_setup failed\n"); + goto ifput_err; + } + + hba->timer_work_queue = + create_singlethread_workqueue("bnx2fc_timer_wq"); + if (!hba->timer_work_queue) { + printk(KERN_ERR PFX "ulp_init could not create timer_wq\n"); + rc = -EINVAL; + goto ifput_err; + } + + lport = bnx2fc_if_create(hba, &hba->pcidev->dev, 0); + if (!lport) { + printk(KERN_ERR PFX "Failed to create interface (%s)\n", + netdev->name); + bnx2fc_netdev_cleanup(hba); + rc = -EINVAL; + goto if_create_err; + } + + lport->boot_time = jiffies; + + /* Make this master N_port */ + hba->ctlr.lp = lport; + + set_bit(BNX2FC_CREATE_DONE, &hba->init_done); + printk(KERN_ERR PFX "create: START DISC\n"); + bnx2fc_start_disc(hba); + /* + * Release from kref_init in bnx2fc_interface_setup, on success + * lport should be holding a reference taken in bnx2fc_if_create + */ + bnx2fc_interface_put(hba); + /* put netdev that was held while calling dev_get_by_name */ + 
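+	/*
+	 * Reference counting recap: bnx2fc_interface_setup() did kref_init()
+	 * on the hba and bnx2fc_if_create() took a second reference through
+	 * bnx2fc_interface_get().  The put above drops the setup reference,
+	 * leaving the lport holding the remaining one; it is dropped from
+	 * bnx2fc_if_destroy(), which ends in bnx2fc_interface_release().
+	 */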
mutex_unlock(&bnx2fc_dev_lock); + rtnl_unlock(); + return 0; + +if_create_err: + destroy_workqueue(hba->timer_work_queue); +ifput_err: + bnx2fc_interface_put(hba); +netdev_err: + module_put(THIS_MODULE); +mod_err: + mutex_unlock(&bnx2fc_dev_lock); + rtnl_unlock(); + return rc; +} + +/** + * bnx2fc_find_hba_for_cnic - maps cnic instance to bnx2fc adapter instance + * + * @cnic: Pointer to cnic device instance + * + **/ +static struct bnx2fc_hba *bnx2fc_find_hba_for_cnic(struct cnic_dev *cnic) +{ + struct list_head *list; + struct list_head *temp; + struct bnx2fc_hba *hba; + + /* Called with bnx2fc_dev_lock held */ + list_for_each_safe(list, temp, &adapter_list) { + hba = (struct bnx2fc_hba *)list; + if (hba->cnic == cnic) + return hba; + } + return NULL; +} + +static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev) +{ + struct list_head *list; + struct list_head *temp; + struct bnx2fc_hba *hba; + + /* Called with bnx2fc_dev_lock held */ + list_for_each_safe(list, temp, &adapter_list) { + hba = (struct bnx2fc_hba *)list; + if (hba->phys_dev == phys_dev) + return hba; + } + printk(KERN_ERR PFX "hba_lookup: hba NULL\n"); + return NULL; +} + +/** + * bnx2fc_ulp_exit - shuts down adapter instance and frees all resources + * + * @dev cnic device handle + */ +static void bnx2fc_ulp_exit(struct cnic_dev *dev) +{ + struct bnx2fc_hba *hba; + + BNX2FC_MISC_DBG("Entered bnx2fc_ulp_exit\n"); + + if (!test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) { + printk(KERN_ERR PFX "bnx2fc port check: %s, flags: %lx\n", + dev->netdev->name, dev->flags); + return; + } + + mutex_lock(&bnx2fc_dev_lock); + hba = bnx2fc_find_hba_for_cnic(dev); + if (!hba) { + printk(KERN_ERR PFX "bnx2fc_ulp_exit: hba not found, dev 0%p\n", + dev); + mutex_unlock(&bnx2fc_dev_lock); + return; + } + + list_del_init(&hba->link); + adapter_count--; + + if (test_bit(BNX2FC_CREATE_DONE, &hba->init_done)) { + /* destroy not called yet, move to quiesced list */ + bnx2fc_netdev_cleanup(hba); + bnx2fc_if_destroy(hba->ctlr.lp); + } + mutex_unlock(&bnx2fc_dev_lock); + + bnx2fc_ulp_stop(hba); + /* unregister cnic device */ + if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic)) + hba->cnic->unregister_device(hba->cnic, CNIC_ULP_FCOE); + bnx2fc_interface_destroy(hba); +} + +/** + * bnx2fc_fcoe_reset - Resets the fcoe + * + * @shost: shost the reset is from + * + * Returns: always 0 + */ +static int bnx2fc_fcoe_reset(struct Scsi_Host *shost) +{ + struct fc_lport *lport = shost_priv(shost); + fc_lport_reset(lport); + return 0; +} + + +static bool bnx2fc_match(struct net_device *netdev) +{ + mutex_lock(&bnx2fc_dev_lock); + if (netdev->priv_flags & IFF_802_1Q_VLAN) { + struct net_device *phys_dev = vlan_dev_real_dev(netdev); + + if (bnx2fc_hba_lookup(phys_dev)) { + mutex_unlock(&bnx2fc_dev_lock); + return true; + } + } + mutex_unlock(&bnx2fc_dev_lock); + return false; +} + + +static struct fcoe_transport bnx2fc_transport = { + .name = {"bnx2fc"}, + .attached = false, + .list = LIST_HEAD_INIT(bnx2fc_transport.list), + .match = bnx2fc_match, + .create = bnx2fc_create, + .destroy = bnx2fc_destroy, + .enable = bnx2fc_enable, + .disable = bnx2fc_disable, +}; + +/** + * bnx2fc_percpu_thread_create - Create a receive thread for an + * online CPU + * + * @cpu: cpu index for the online cpu + */ +static void bnx2fc_percpu_thread_create(unsigned int cpu) +{ + struct bnx2fc_percpu_s *p; + struct task_struct *thread; + + p = &per_cpu(bnx2fc_percpu, cpu); + + thread = kthread_create(bnx2fc_percpu_io_thread, + (void *)p, + "bnx2fc_thread/%d", 
			cpu);
+	/* bind thread to the cpu */
+	if (likely(!IS_ERR(thread))) {
+		kthread_bind(thread, cpu);
+		p->iothread = thread;
+		wake_up_process(thread);
+	}
+}
+
+static void bnx2fc_percpu_thread_destroy(unsigned int cpu)
+{
+	struct bnx2fc_percpu_s *p;
+	struct task_struct *thread;
+	struct bnx2fc_work *work, *tmp;
+	LIST_HEAD(work_list);
+
+	BNX2FC_MISC_DBG("destroying io thread for CPU %d\n", cpu);
+
+	/* Prevent any new work from being queued for this CPU */
+	p = &per_cpu(bnx2fc_percpu, cpu);
+	spin_lock_bh(&p->fp_work_lock);
+	thread = p->iothread;
+	p->iothread = NULL;
+
+	/* splice pending work onto the local list while the lock is held */
+	list_splice_init(&p->work_list, &work_list);
+
+	/* Free all work in the list */
+	list_for_each_entry_safe(work, tmp, &work_list, list) {
+		list_del_init(&work->list);
+		bnx2fc_process_cq_compl(work->tgt, work->wqe);
+		kfree(work);
+	}
+
+	spin_unlock_bh(&p->fp_work_lock);
+
+	if (thread)
+		kthread_stop(thread);
+}
+
+/**
+ * bnx2fc_cpu_callback - Handler for CPU hotplug events
+ *
+ * @nfb:    The callback data block
+ * @action: The event triggering the callback
+ * @hcpu:   The index of the CPU that the event is for
+ *
+ * This creates or destroys per-CPU data for fcoe
+ *
+ * Returns NOTIFY_OK always.
+ */
+static int bnx2fc_cpu_callback(struct notifier_block *nfb,
+			     unsigned long action, void *hcpu)
+{
+	unsigned cpu = (unsigned long)hcpu;
+
+	switch (action) {
+	case CPU_ONLINE:
+	case CPU_ONLINE_FROZEN:
+		printk(KERN_INFO PFX "CPU %x online: Create Rx thread\n", cpu);
+		bnx2fc_percpu_thread_create(cpu);
+		break;
+	case CPU_DEAD:
+	case CPU_DEAD_FROZEN:
+		printk(KERN_INFO PFX "CPU %x offline: Remove Rx thread\n", cpu);
+		bnx2fc_percpu_thread_destroy(cpu);
+		break;
+	default:
+		break;
+	}
+	return NOTIFY_OK;
+}
+
+/**
+ * bnx2fc_mod_init - module init entry point
+ *
+ * Initialize driver wide global data structures, and register
+ * with cnic module
+ **/
+static int __init bnx2fc_mod_init(void)
+{
+	struct fcoe_percpu_s *bg;
+	struct task_struct *l2_thread;
+	int rc = 0;
+	unsigned int cpu = 0;
+	struct bnx2fc_percpu_s *p;
+
+	printk(KERN_INFO PFX "%s", version);
+
+	/* register as a fcoe transport */
+	rc = fcoe_transport_attach(&bnx2fc_transport);
+	if (rc) {
+		printk(KERN_ERR "failed to register an fcoe transport, check "
+			"if libfcoe is loaded\n");
+		goto out;
+	}
+
+	INIT_LIST_HEAD(&adapter_list);
+	mutex_init(&bnx2fc_dev_lock);
+	adapter_count = 0;
+
+	/* Attach FC transport template */
+	rc = bnx2fc_attach_transport();
+	if (rc)
+		goto detach_ft;
+
+	bnx2fc_wq = alloc_workqueue("bnx2fc", 0, 0);
+	if (!bnx2fc_wq) {
+		rc = -ENOMEM;
+		goto release_bt;
+	}
+
+	bg = &bnx2fc_global;
+	skb_queue_head_init(&bg->fcoe_rx_list);
+	l2_thread = kthread_create(bnx2fc_l2_rcv_thread,
+				   (void *)bg,
+				   "bnx2fc_l2_thread");
+	if (IS_ERR(l2_thread)) {
+		rc = PTR_ERR(l2_thread);
+		goto free_wq;
+	}
+	wake_up_process(l2_thread);
+	spin_lock_bh(&bg->fcoe_rx_list.lock);
+	bg->thread = l2_thread;
+	spin_unlock_bh(&bg->fcoe_rx_list.lock);
+
+	for_each_possible_cpu(cpu) {
+		p = &per_cpu(bnx2fc_percpu, cpu);
+		INIT_LIST_HEAD(&p->work_list);
+		spin_lock_init(&p->fp_work_lock);
+	}
+
+	for_each_online_cpu(cpu) {
+		bnx2fc_percpu_thread_create(cpu);
+	}
+
+	/* Register hotplug notifier to manage the per-CPU Rx threads */
+	register_hotcpu_notifier(&bnx2fc_cpu_notifier);
+
+	cnic_register_driver(CNIC_ULP_FCOE, &bnx2fc_cnic_cb);
+
+	return 0;
+
+free_wq:
+	destroy_workqueue(bnx2fc_wq);
+release_bt:
+	bnx2fc_release_transport();
+detach_ft:
+	fcoe_transport_detach(&bnx2fc_transport);
+out:
+	return rc;
+}
+
+static void __exit bnx2fc_mod_exit(void)
+{
+	LIST_HEAD(to_be_deleted);
+	struct bnx2fc_hba *hba, *next;
+	struct 
fcoe_percpu_s *bg; + struct task_struct *l2_thread; + struct sk_buff *skb; + unsigned int cpu = 0; + + /* + * NOTE: Since cnic calls register_driver routine rtnl_lock, + * it will have higher precedence than bnx2fc_dev_lock. + * unregister_device() cannot be called with bnx2fc_dev_lock + * held. + */ + mutex_lock(&bnx2fc_dev_lock); + list_splice(&adapter_list, &to_be_deleted); + INIT_LIST_HEAD(&adapter_list); + adapter_count = 0; + mutex_unlock(&bnx2fc_dev_lock); + + /* Unregister with cnic */ + list_for_each_entry_safe(hba, next, &to_be_deleted, link) { + list_del_init(&hba->link); + printk(KERN_ERR PFX "MOD_EXIT:destroy hba = 0x%p, kref = %d\n", + hba, atomic_read(&hba->kref.refcount)); + bnx2fc_ulp_stop(hba); + /* unregister cnic device */ + if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED, + &hba->reg_with_cnic)) + hba->cnic->unregister_device(hba->cnic, CNIC_ULP_FCOE); + bnx2fc_interface_destroy(hba); + } + cnic_unregister_driver(CNIC_ULP_FCOE); + + /* Destroy global thread */ + bg = &bnx2fc_global; + spin_lock_bh(&bg->fcoe_rx_list.lock); + l2_thread = bg->thread; + bg->thread = NULL; + while ((skb = __skb_dequeue(&bg->fcoe_rx_list)) != NULL) + kfree_skb(skb); + + spin_unlock_bh(&bg->fcoe_rx_list.lock); + + if (l2_thread) + kthread_stop(l2_thread); + + unregister_hotcpu_notifier(&bnx2fc_cpu_notifier); + + /* Destroy per cpu threads */ + for_each_online_cpu(cpu) { + bnx2fc_percpu_thread_destroy(cpu); + } + + destroy_workqueue(bnx2fc_wq); + /* + * detach from scsi transport + * must happen after all destroys are done + */ + bnx2fc_release_transport(); + + /* detach from fcoe transport */ + fcoe_transport_detach(&bnx2fc_transport); +} + +module_init(bnx2fc_mod_init); +module_exit(bnx2fc_mod_exit); + +static struct fc_function_template bnx2fc_transport_function = { + .show_host_node_name = 1, + .show_host_port_name = 1, + .show_host_supported_classes = 1, + .show_host_supported_fc4s = 1, + .show_host_active_fc4s = 1, + .show_host_maxframe_size = 1, + + .show_host_port_id = 1, + .show_host_supported_speeds = 1, + .get_host_speed = fc_get_host_speed, + .show_host_speed = 1, + .show_host_port_type = 1, + .get_host_port_state = fc_get_host_port_state, + .show_host_port_state = 1, + .show_host_symbolic_name = 1, + + .dd_fcrport_size = (sizeof(struct fc_rport_libfc_priv) + + sizeof(struct bnx2fc_rport)), + .show_rport_maxframe_size = 1, + .show_rport_supported_classes = 1, + + .show_host_fabric_name = 1, + .show_starget_node_name = 1, + .show_starget_port_name = 1, + .show_starget_port_id = 1, + .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo, + .show_rport_dev_loss_tmo = 1, + .get_fc_host_stats = bnx2fc_get_host_stats, + + .issue_fc_host_lip = bnx2fc_fcoe_reset, + + .terminate_rport_io = fc_rport_terminate_io, + + .vport_create = bnx2fc_vport_create, + .vport_delete = bnx2fc_vport_destroy, + .vport_disable = bnx2fc_vport_disable, +}; + +static struct fc_function_template bnx2fc_vport_xport_function = { + .show_host_node_name = 1, + .show_host_port_name = 1, + .show_host_supported_classes = 1, + .show_host_supported_fc4s = 1, + .show_host_active_fc4s = 1, + .show_host_maxframe_size = 1, + + .show_host_port_id = 1, + .show_host_supported_speeds = 1, + .get_host_speed = fc_get_host_speed, + .show_host_speed = 1, + .show_host_port_type = 1, + .get_host_port_state = fc_get_host_port_state, + .show_host_port_state = 1, + .show_host_symbolic_name = 1, + + .dd_fcrport_size = (sizeof(struct fc_rport_libfc_priv) + + sizeof(struct bnx2fc_rport)), + .show_rport_maxframe_size = 1, + 
.show_rport_supported_classes = 1, + + .show_host_fabric_name = 1, + .show_starget_node_name = 1, + .show_starget_port_name = 1, + .show_starget_port_id = 1, + .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo, + .show_rport_dev_loss_tmo = 1, + .get_fc_host_stats = fc_get_host_stats, + .issue_fc_host_lip = bnx2fc_fcoe_reset, + .terminate_rport_io = fc_rport_terminate_io, +}; + +/** + * scsi_host_template structure used while registering with SCSI-ml + */ +static struct scsi_host_template bnx2fc_shost_template = { + .module = THIS_MODULE, + .name = "Broadcom Offload FCoE Initiator", + .queuecommand = bnx2fc_queuecommand, + .eh_abort_handler = bnx2fc_eh_abort, /* abts */ + .eh_device_reset_handler = bnx2fc_eh_device_reset, /* lun reset */ + .eh_target_reset_handler = bnx2fc_eh_target_reset, /* tgt reset */ + .eh_host_reset_handler = fc_eh_host_reset, + .slave_alloc = fc_slave_alloc, + .change_queue_depth = fc_change_queue_depth, + .change_queue_type = fc_change_queue_type, + .this_id = -1, + .cmd_per_lun = 3, + .can_queue = (BNX2FC_MAX_OUTSTANDING_CMNDS/2), + .use_clustering = ENABLE_CLUSTERING, + .sg_tablesize = BNX2FC_MAX_BDS_PER_CMD, + .max_sectors = 512, +}; + +static struct libfc_function_template bnx2fc_libfc_fcn_templ = { + .frame_send = bnx2fc_xmit, + .elsct_send = bnx2fc_elsct_send, + .fcp_abort_io = bnx2fc_abort_io, + .fcp_cleanup = bnx2fc_cleanup, + .rport_event_callback = bnx2fc_rport_event_handler, +}; + +/** + * bnx2fc_cnic_cb - global template of bnx2fc - cnic driver interface + * structure carrying callback function pointers + */ +static struct cnic_ulp_ops bnx2fc_cnic_cb = { + .owner = THIS_MODULE, + .cnic_init = bnx2fc_ulp_init, + .cnic_exit = bnx2fc_ulp_exit, + .cnic_start = bnx2fc_ulp_start, + .cnic_stop = bnx2fc_ulp_stop, + .indicate_kcqes = bnx2fc_indicate_kcqe, + .indicate_netevent = bnx2fc_indicate_netevent, +}; diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c new file mode 100644 index 000000000000..4f4096836742 --- /dev/null +++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c @@ -0,0 +1,1868 @@ +/* bnx2fc_hwi.c: Broadcom NetXtreme II Linux FCoE offload driver. + * This file contains the code that low level functions that interact + * with 57712 FCoE firmware. + * + * Copyright (c) 2008 - 2010 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ * + * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com) + */ + +#include "bnx2fc.h" + +DECLARE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu); + +static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba, + struct fcoe_kcqe *new_cqe_kcqe); +static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba, + struct fcoe_kcqe *ofld_kcqe); +static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba, + struct fcoe_kcqe *ofld_kcqe); +static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code); +static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba, + struct fcoe_kcqe *conn_destroy); + +int bnx2fc_send_stat_req(struct bnx2fc_hba *hba) +{ + struct fcoe_kwqe_stat stat_req; + struct kwqe *kwqe_arr[2]; + int num_kwqes = 1; + int rc = 0; + + memset(&stat_req, 0x00, sizeof(struct fcoe_kwqe_stat)); + stat_req.hdr.op_code = FCOE_KWQE_OPCODE_STAT; + stat_req.hdr.flags = + (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); + + stat_req.stat_params_addr_lo = (u32) hba->stats_buf_dma; + stat_req.stat_params_addr_hi = (u32) ((u64)hba->stats_buf_dma >> 32); + + kwqe_arr[0] = (struct kwqe *) &stat_req; + + if (hba->cnic && hba->cnic->submit_kwqes) + rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); + + return rc; +} + +/** + * bnx2fc_send_fw_fcoe_init_msg - initiates initial handshake with FCoE f/w + * + * @hba: adapter structure pointer + * + * Send down FCoE firmware init KWQEs which initiates the initial handshake + * with the f/w. + * + */ +int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba) +{ + struct fcoe_kwqe_init1 fcoe_init1; + struct fcoe_kwqe_init2 fcoe_init2; + struct fcoe_kwqe_init3 fcoe_init3; + struct kwqe *kwqe_arr[3]; + int num_kwqes = 3; + int rc = 0; + + if (!hba->cnic) { + printk(KERN_ALERT PFX "hba->cnic NULL during fcoe fw init\n"); + return -ENODEV; + } + + /* fill init1 KWQE */ + memset(&fcoe_init1, 0x00, sizeof(struct fcoe_kwqe_init1)); + fcoe_init1.hdr.op_code = FCOE_KWQE_OPCODE_INIT1; + fcoe_init1.hdr.flags = (FCOE_KWQE_LAYER_CODE << + FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); + + fcoe_init1.num_tasks = BNX2FC_MAX_TASKS; + fcoe_init1.sq_num_wqes = BNX2FC_SQ_WQES_MAX; + fcoe_init1.rq_num_wqes = BNX2FC_RQ_WQES_MAX; + fcoe_init1.rq_buffer_log_size = BNX2FC_RQ_BUF_LOG_SZ; + fcoe_init1.cq_num_wqes = BNX2FC_CQ_WQES_MAX; + fcoe_init1.dummy_buffer_addr_lo = (u32) hba->dummy_buf_dma; + fcoe_init1.dummy_buffer_addr_hi = (u32) ((u64)hba->dummy_buf_dma >> 32); + fcoe_init1.task_list_pbl_addr_lo = (u32) hba->task_ctx_bd_dma; + fcoe_init1.task_list_pbl_addr_hi = + (u32) ((u64) hba->task_ctx_bd_dma >> 32); + fcoe_init1.mtu = hba->netdev->mtu; + + fcoe_init1.flags = (PAGE_SHIFT << + FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT); + + fcoe_init1.num_sessions_log = BNX2FC_NUM_MAX_SESS_LOG; + + /* fill init2 KWQE */ + memset(&fcoe_init2, 0x00, sizeof(struct fcoe_kwqe_init2)); + fcoe_init2.hdr.op_code = FCOE_KWQE_OPCODE_INIT2; + fcoe_init2.hdr.flags = (FCOE_KWQE_LAYER_CODE << + FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); + + fcoe_init2.hash_tbl_pbl_addr_lo = (u32) hba->hash_tbl_pbl_dma; + fcoe_init2.hash_tbl_pbl_addr_hi = (u32) + ((u64) hba->hash_tbl_pbl_dma >> 32); + + fcoe_init2.t2_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_dma; + fcoe_init2.t2_hash_tbl_addr_hi = (u32) + ((u64) hba->t2_hash_tbl_dma >> 32); + + fcoe_init2.t2_ptr_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_ptr_dma; + fcoe_init2.t2_ptr_hash_tbl_addr_hi = (u32) + ((u64) hba->t2_hash_tbl_ptr_dma >> 32); + + fcoe_init2.free_list_count = BNX2FC_NUM_MAX_SESS; + + /* fill init3 KWQE */ + 
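+	/*
+	 * The error/warning bit maps below are set to all ones, which
+	 * appears to request reporting for every error class the firmware
+	 * can detect; the resulting error and warning CQEs are handled in
+	 * bnx2fc_process_unsol_compl().
+	 */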
memset(&fcoe_init3, 0x00, sizeof(struct fcoe_kwqe_init3)); + fcoe_init3.hdr.op_code = FCOE_KWQE_OPCODE_INIT3; + fcoe_init3.hdr.flags = (FCOE_KWQE_LAYER_CODE << + FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); + fcoe_init3.error_bit_map_lo = 0xffffffff; + fcoe_init3.error_bit_map_hi = 0xffffffff; + + + kwqe_arr[0] = (struct kwqe *) &fcoe_init1; + kwqe_arr[1] = (struct kwqe *) &fcoe_init2; + kwqe_arr[2] = (struct kwqe *) &fcoe_init3; + + if (hba->cnic && hba->cnic->submit_kwqes) + rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); + + return rc; +} +int bnx2fc_send_fw_fcoe_destroy_msg(struct bnx2fc_hba *hba) +{ + struct fcoe_kwqe_destroy fcoe_destroy; + struct kwqe *kwqe_arr[2]; + int num_kwqes = 1; + int rc = -1; + + /* fill destroy KWQE */ + memset(&fcoe_destroy, 0x00, sizeof(struct fcoe_kwqe_destroy)); + fcoe_destroy.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY; + fcoe_destroy.hdr.flags = (FCOE_KWQE_LAYER_CODE << + FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); + kwqe_arr[0] = (struct kwqe *) &fcoe_destroy; + + if (hba->cnic && hba->cnic->submit_kwqes) + rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); + return rc; +} + +/** + * bnx2fc_send_session_ofld_req - initiates FCoE Session offload process + * + * @port: port structure pointer + * @tgt: bnx2fc_rport structure pointer + */ +int bnx2fc_send_session_ofld_req(struct fcoe_port *port, + struct bnx2fc_rport *tgt) +{ + struct fc_lport *lport = port->lport; + struct bnx2fc_hba *hba = port->priv; + struct kwqe *kwqe_arr[4]; + struct fcoe_kwqe_conn_offload1 ofld_req1; + struct fcoe_kwqe_conn_offload2 ofld_req2; + struct fcoe_kwqe_conn_offload3 ofld_req3; + struct fcoe_kwqe_conn_offload4 ofld_req4; + struct fc_rport_priv *rdata = tgt->rdata; + struct fc_rport *rport = tgt->rport; + int num_kwqes = 4; + u32 port_id; + int rc = 0; + u16 conn_id; + + /* Initialize offload request 1 structure */ + memset(&ofld_req1, 0x00, sizeof(struct fcoe_kwqe_conn_offload1)); + + ofld_req1.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN1; + ofld_req1.hdr.flags = + (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); + + + conn_id = (u16)tgt->fcoe_conn_id; + ofld_req1.fcoe_conn_id = conn_id; + + + ofld_req1.sq_addr_lo = (u32) tgt->sq_dma; + ofld_req1.sq_addr_hi = (u32)((u64) tgt->sq_dma >> 32); + + ofld_req1.rq_pbl_addr_lo = (u32) tgt->rq_pbl_dma; + ofld_req1.rq_pbl_addr_hi = (u32)((u64) tgt->rq_pbl_dma >> 32); + + ofld_req1.rq_first_pbe_addr_lo = (u32) tgt->rq_dma; + ofld_req1.rq_first_pbe_addr_hi = + (u32)((u64) tgt->rq_dma >> 32); + + ofld_req1.rq_prod = 0x8000; + + /* Initialize offload request 2 structure */ + memset(&ofld_req2, 0x00, sizeof(struct fcoe_kwqe_conn_offload2)); + + ofld_req2.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN2; + ofld_req2.hdr.flags = + (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); + + ofld_req2.tx_max_fc_pay_len = rdata->maxframe_size; + + ofld_req2.cq_addr_lo = (u32) tgt->cq_dma; + ofld_req2.cq_addr_hi = (u32)((u64)tgt->cq_dma >> 32); + + ofld_req2.xferq_addr_lo = (u32) tgt->xferq_dma; + ofld_req2.xferq_addr_hi = (u32)((u64)tgt->xferq_dma >> 32); + + ofld_req2.conn_db_addr_lo = (u32)tgt->conn_db_dma; + ofld_req2.conn_db_addr_hi = (u32)((u64)tgt->conn_db_dma >> 32); + + /* Initialize offload request 3 structure */ + memset(&ofld_req3, 0x00, sizeof(struct fcoe_kwqe_conn_offload3)); + + ofld_req3.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN3; + ofld_req3.hdr.flags = + (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); + + ofld_req3.vlan_tag = hba->vlan_id << + FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT; + 
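+	/* 802.1p priority 3, the class of service conventionally used for
+	 * FCoE traffic, is OR'ed into the PCP bits of the tag below.
+	 */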
ofld_req3.vlan_tag |= 3 << FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT; + + port_id = fc_host_port_id(lport->host); + if (port_id == 0) { + BNX2FC_HBA_DBG(lport, "ofld_req: port_id = 0, link down?\n"); + return -EINVAL; + } + + /* + * Store s_id of the initiator for further reference. This will + * be used during disable/destroy during linkdown processing as + * when the lport is reset, the port_id also is reset to 0 + */ + tgt->sid = port_id; + ofld_req3.s_id[0] = (port_id & 0x000000FF); + ofld_req3.s_id[1] = (port_id & 0x0000FF00) >> 8; + ofld_req3.s_id[2] = (port_id & 0x00FF0000) >> 16; + + port_id = rport->port_id; + ofld_req3.d_id[0] = (port_id & 0x000000FF); + ofld_req3.d_id[1] = (port_id & 0x0000FF00) >> 8; + ofld_req3.d_id[2] = (port_id & 0x00FF0000) >> 16; + + ofld_req3.tx_total_conc_seqs = rdata->max_seq; + + ofld_req3.tx_max_conc_seqs_c3 = rdata->max_seq; + ofld_req3.rx_max_fc_pay_len = lport->mfs; + + ofld_req3.rx_total_conc_seqs = BNX2FC_MAX_SEQS; + ofld_req3.rx_max_conc_seqs_c3 = BNX2FC_MAX_SEQS; + ofld_req3.rx_open_seqs_exch_c3 = 1; + + ofld_req3.confq_first_pbe_addr_lo = tgt->confq_dma; + ofld_req3.confq_first_pbe_addr_hi = (u32)((u64) tgt->confq_dma >> 32); + + /* set mul_n_port_ids supported flag to 0, until it is supported */ + ofld_req3.flags = 0; + /* + ofld_req3.flags |= (((lport->send_sp_features & FC_SP_FT_MNA) ? 1:0) << + FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT); + */ + /* Info from PLOGI response */ + ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_EDTR) ? 1 : 0) << + FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT); + + ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_SEQC) ? 1 : 0) << + FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT); + + /* vlan flag */ + ofld_req3.flags |= (hba->vlan_enabled << + FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT); + + /* C2_VALID and ACK flags are not set as they are not suppported */ + + + /* Initialize offload request 4 structure */ + memset(&ofld_req4, 0x00, sizeof(struct fcoe_kwqe_conn_offload4)); + ofld_req4.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN4; + ofld_req4.hdr.flags = + (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); + + ofld_req4.e_d_tov_timer_val = lport->e_d_tov / 20; + + + ofld_req4.src_mac_addr_lo32[0] = port->data_src_addr[5]; + /* local mac */ + ofld_req4.src_mac_addr_lo32[1] = port->data_src_addr[4]; + ofld_req4.src_mac_addr_lo32[2] = port->data_src_addr[3]; + ofld_req4.src_mac_addr_lo32[3] = port->data_src_addr[2]; + ofld_req4.src_mac_addr_hi16[0] = port->data_src_addr[1]; + ofld_req4.src_mac_addr_hi16[1] = port->data_src_addr[0]; + ofld_req4.dst_mac_addr_lo32[0] = hba->ctlr.dest_addr[5];/* fcf mac */ + ofld_req4.dst_mac_addr_lo32[1] = hba->ctlr.dest_addr[4]; + ofld_req4.dst_mac_addr_lo32[2] = hba->ctlr.dest_addr[3]; + ofld_req4.dst_mac_addr_lo32[3] = hba->ctlr.dest_addr[2]; + ofld_req4.dst_mac_addr_hi16[0] = hba->ctlr.dest_addr[1]; + ofld_req4.dst_mac_addr_hi16[1] = hba->ctlr.dest_addr[0]; + + ofld_req4.lcq_addr_lo = (u32) tgt->lcq_dma; + ofld_req4.lcq_addr_hi = (u32)((u64) tgt->lcq_dma >> 32); + + ofld_req4.confq_pbl_base_addr_lo = (u32) tgt->confq_pbl_dma; + ofld_req4.confq_pbl_base_addr_hi = + (u32)((u64) tgt->confq_pbl_dma >> 32); + + kwqe_arr[0] = (struct kwqe *) &ofld_req1; + kwqe_arr[1] = (struct kwqe *) &ofld_req2; + kwqe_arr[2] = (struct kwqe *) &ofld_req3; + kwqe_arr[3] = (struct kwqe *) &ofld_req4; + + if (hba->cnic && hba->cnic->submit_kwqes) + rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); + + return rc; +} + +/** + * bnx2fc_send_session_enable_req - initiates 
FCoE Session enablement + * + * @port: port structure pointer + * @tgt: bnx2fc_rport structure pointer + */ +static int bnx2fc_send_session_enable_req(struct fcoe_port *port, + struct bnx2fc_rport *tgt) +{ + struct kwqe *kwqe_arr[2]; + struct bnx2fc_hba *hba = port->priv; + struct fcoe_kwqe_conn_enable_disable enbl_req; + struct fc_lport *lport = port->lport; + struct fc_rport *rport = tgt->rport; + int num_kwqes = 1; + int rc = 0; + u32 port_id; + + memset(&enbl_req, 0x00, + sizeof(struct fcoe_kwqe_conn_enable_disable)); + enbl_req.hdr.op_code = FCOE_KWQE_OPCODE_ENABLE_CONN; + enbl_req.hdr.flags = + (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); + + enbl_req.src_mac_addr_lo32[0] = port->data_src_addr[5]; + /* local mac */ + enbl_req.src_mac_addr_lo32[1] = port->data_src_addr[4]; + enbl_req.src_mac_addr_lo32[2] = port->data_src_addr[3]; + enbl_req.src_mac_addr_lo32[3] = port->data_src_addr[2]; + enbl_req.src_mac_addr_hi16[0] = port->data_src_addr[1]; + enbl_req.src_mac_addr_hi16[1] = port->data_src_addr[0]; + + enbl_req.dst_mac_addr_lo32[0] = hba->ctlr.dest_addr[5];/* fcf mac */ + enbl_req.dst_mac_addr_lo32[1] = hba->ctlr.dest_addr[4]; + enbl_req.dst_mac_addr_lo32[2] = hba->ctlr.dest_addr[3]; + enbl_req.dst_mac_addr_lo32[3] = hba->ctlr.dest_addr[2]; + enbl_req.dst_mac_addr_hi16[0] = hba->ctlr.dest_addr[1]; + enbl_req.dst_mac_addr_hi16[1] = hba->ctlr.dest_addr[0]; + + port_id = fc_host_port_id(lport->host); + if (port_id != tgt->sid) { + printk(KERN_ERR PFX "WARN: enable_req port_id = 0x%x," + "sid = 0x%x\n", port_id, tgt->sid); + port_id = tgt->sid; + } + enbl_req.s_id[0] = (port_id & 0x000000FF); + enbl_req.s_id[1] = (port_id & 0x0000FF00) >> 8; + enbl_req.s_id[2] = (port_id & 0x00FF0000) >> 16; + + port_id = rport->port_id; + enbl_req.d_id[0] = (port_id & 0x000000FF); + enbl_req.d_id[1] = (port_id & 0x0000FF00) >> 8; + enbl_req.d_id[2] = (port_id & 0x00FF0000) >> 16; + enbl_req.vlan_tag = hba->vlan_id << + FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT; + enbl_req.vlan_tag |= 3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT; + enbl_req.vlan_flag = hba->vlan_enabled; + enbl_req.context_id = tgt->context_id; + enbl_req.conn_id = tgt->fcoe_conn_id; + + kwqe_arr[0] = (struct kwqe *) &enbl_req; + + if (hba->cnic && hba->cnic->submit_kwqes) + rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); + return rc; +} + +/** + * bnx2fc_send_session_disable_req - initiates FCoE Session disable + * + * @port: port structure pointer + * @tgt: bnx2fc_rport structure pointer + */ +int bnx2fc_send_session_disable_req(struct fcoe_port *port, + struct bnx2fc_rport *tgt) +{ + struct bnx2fc_hba *hba = port->priv; + struct fcoe_kwqe_conn_enable_disable disable_req; + struct kwqe *kwqe_arr[2]; + struct fc_rport *rport = tgt->rport; + int num_kwqes = 1; + int rc = 0; + u32 port_id; + + memset(&disable_req, 0x00, + sizeof(struct fcoe_kwqe_conn_enable_disable)); + disable_req.hdr.op_code = FCOE_KWQE_OPCODE_DISABLE_CONN; + disable_req.hdr.flags = + (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); + + disable_req.src_mac_addr_lo32[0] = port->data_src_addr[5]; + disable_req.src_mac_addr_lo32[2] = port->data_src_addr[3]; + disable_req.src_mac_addr_lo32[3] = port->data_src_addr[2]; + disable_req.src_mac_addr_hi16[0] = port->data_src_addr[1]; + disable_req.src_mac_addr_hi16[1] = port->data_src_addr[0]; + + disable_req.dst_mac_addr_lo32[0] = hba->ctlr.dest_addr[5];/* fcf mac */ + disable_req.dst_mac_addr_lo32[1] = hba->ctlr.dest_addr[4]; + disable_req.dst_mac_addr_lo32[2] = 
hba->ctlr.dest_addr[3]; + disable_req.dst_mac_addr_lo32[3] = hba->ctlr.dest_addr[2]; + disable_req.dst_mac_addr_hi16[0] = hba->ctlr.dest_addr[1]; + disable_req.dst_mac_addr_hi16[1] = hba->ctlr.dest_addr[0]; + + port_id = tgt->sid; + disable_req.s_id[0] = (port_id & 0x000000FF); + disable_req.s_id[1] = (port_id & 0x0000FF00) >> 8; + disable_req.s_id[2] = (port_id & 0x00FF0000) >> 16; + + + port_id = rport->port_id; + disable_req.d_id[0] = (port_id & 0x000000FF); + disable_req.d_id[1] = (port_id & 0x0000FF00) >> 8; + disable_req.d_id[2] = (port_id & 0x00FF0000) >> 16; + disable_req.context_id = tgt->context_id; + disable_req.conn_id = tgt->fcoe_conn_id; + disable_req.vlan_tag = hba->vlan_id << + FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT; + disable_req.vlan_tag |= + 3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT; + disable_req.vlan_flag = hba->vlan_enabled; + + kwqe_arr[0] = (struct kwqe *) &disable_req; + + if (hba->cnic && hba->cnic->submit_kwqes) + rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); + + return rc; +} + +/** + * bnx2fc_send_session_destroy_req - initiates FCoE Session destroy + * + * @port: port structure pointer + * @tgt: bnx2fc_rport structure pointer + */ +int bnx2fc_send_session_destroy_req(struct bnx2fc_hba *hba, + struct bnx2fc_rport *tgt) +{ + struct fcoe_kwqe_conn_destroy destroy_req; + struct kwqe *kwqe_arr[2]; + int num_kwqes = 1; + int rc = 0; + + memset(&destroy_req, 0x00, sizeof(struct fcoe_kwqe_conn_destroy)); + destroy_req.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY_CONN; + destroy_req.hdr.flags = + (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); + + destroy_req.context_id = tgt->context_id; + destroy_req.conn_id = tgt->fcoe_conn_id; + + kwqe_arr[0] = (struct kwqe *) &destroy_req; + + if (hba->cnic && hba->cnic->submit_kwqes) + rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); + + return rc; +} + +static void bnx2fc_unsol_els_work(struct work_struct *work) +{ + struct bnx2fc_unsol_els *unsol_els; + struct fc_lport *lport; + struct fc_frame *fp; + + unsol_els = container_of(work, struct bnx2fc_unsol_els, unsol_els_work); + lport = unsol_els->lport; + fp = unsol_els->fp; + fc_exch_recv(lport, fp); + kfree(unsol_els); +} + +void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt, + unsigned char *buf, + u32 frame_len, u16 l2_oxid) +{ + struct fcoe_port *port = tgt->port; + struct fc_lport *lport = port->lport; + struct bnx2fc_unsol_els *unsol_els; + struct fc_frame_header *fh; + struct fc_frame *fp; + struct sk_buff *skb; + u32 payload_len; + u32 crc; + u8 op; + + + unsol_els = kzalloc(sizeof(*unsol_els), GFP_ATOMIC); + if (!unsol_els) { + BNX2FC_TGT_DBG(tgt, "Unable to allocate unsol_work\n"); + return; + } + + BNX2FC_TGT_DBG(tgt, "l2_frame_compl l2_oxid = 0x%x, frame_len = %d\n", + l2_oxid, frame_len); + + payload_len = frame_len - sizeof(struct fc_frame_header); + + fp = fc_frame_alloc(lport, payload_len); + if (!fp) { + printk(KERN_ERR PFX "fc_frame_alloc failure\n"); + return; + } + + fh = (struct fc_frame_header *) fc_frame_header_get(fp); + /* Copy FC Frame header and payload into the frame */ + memcpy(fh, buf, frame_len); + + if (l2_oxid != FC_XID_UNKNOWN) + fh->fh_ox_id = htons(l2_oxid); + + skb = fp_skb(fp); + + if ((fh->fh_r_ctl == FC_RCTL_ELS_REQ) || + (fh->fh_r_ctl == FC_RCTL_ELS_REP)) { + + if (fh->fh_type == FC_TYPE_ELS) { + op = fc_frame_payload_op(fp); + if ((op == ELS_TEST) || (op == ELS_ESTC) || + (op == ELS_FAN) || (op == ELS_CSU)) { + /* + * No need to reply for these + * ELS requests + */ + 
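+				/*
+				 * TEST, ESTC, FAN and CSU are unsolicited or
+				 * purely informational ELS commands, so they
+				 * can be dropped here without upsetting the
+				 * exchange layer.
+				 */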
printk(KERN_ERR PFX "dropping ELS 0x%x\n", op); + kfree_skb(skb); + return; + } + } + crc = fcoe_fc_crc(fp); + fc_frame_init(fp); + fr_dev(fp) = lport; + fr_sof(fp) = FC_SOF_I3; + fr_eof(fp) = FC_EOF_T; + fr_crc(fp) = cpu_to_le32(~crc); + unsol_els->lport = lport; + unsol_els->fp = fp; + INIT_WORK(&unsol_els->unsol_els_work, bnx2fc_unsol_els_work); + queue_work(bnx2fc_wq, &unsol_els->unsol_els_work); + } else { + BNX2FC_HBA_DBG(lport, "fh_r_ctl = 0x%x\n", fh->fh_r_ctl); + kfree_skb(skb); + } +} + +static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe) +{ + u8 num_rq; + struct fcoe_err_report_entry *err_entry; + unsigned char *rq_data; + unsigned char *buf = NULL, *buf1; + int i; + u16 xid; + u32 frame_len, len; + struct bnx2fc_cmd *io_req = NULL; + struct fcoe_task_ctx_entry *task, *task_page; + struct bnx2fc_hba *hba = tgt->port->priv; + int task_idx, index; + int rc = 0; + + + BNX2FC_TGT_DBG(tgt, "Entered UNSOL COMPLETION wqe = 0x%x\n", wqe); + switch (wqe & FCOE_UNSOLICITED_CQE_SUBTYPE) { + case FCOE_UNSOLICITED_FRAME_CQE_TYPE: + frame_len = (wqe & FCOE_UNSOLICITED_CQE_PKT_LEN) >> + FCOE_UNSOLICITED_CQE_PKT_LEN_SHIFT; + + num_rq = (frame_len + BNX2FC_RQ_BUF_SZ - 1) / BNX2FC_RQ_BUF_SZ; + + rq_data = (unsigned char *)bnx2fc_get_next_rqe(tgt, num_rq); + if (rq_data) { + buf = rq_data; + } else { + buf1 = buf = kmalloc((num_rq * BNX2FC_RQ_BUF_SZ), + GFP_ATOMIC); + + if (!buf1) { + BNX2FC_TGT_DBG(tgt, "Memory alloc failure\n"); + break; + } + + for (i = 0; i < num_rq; i++) { + rq_data = (unsigned char *) + bnx2fc_get_next_rqe(tgt, 1); + len = BNX2FC_RQ_BUF_SZ; + memcpy(buf1, rq_data, len); + buf1 += len; + } + } + bnx2fc_process_l2_frame_compl(tgt, buf, frame_len, + FC_XID_UNKNOWN); + + if (buf != rq_data) + kfree(buf); + bnx2fc_return_rqe(tgt, num_rq); + break; + + case FCOE_ERROR_DETECTION_CQE_TYPE: + /* + *In case of error reporting CQE a single RQ entry + * is consumes. + */ + spin_lock_bh(&tgt->tgt_lock); + num_rq = 1; + err_entry = (struct fcoe_err_report_entry *) + bnx2fc_get_next_rqe(tgt, 1); + xid = err_entry->fc_hdr.ox_id; + BNX2FC_TGT_DBG(tgt, "Unsol Error Frame OX_ID = 0x%x\n", xid); + BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x\n", + err_entry->err_warn_bitmap_hi, + err_entry->err_warn_bitmap_lo); + BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x\n", + err_entry->tx_buf_off, err_entry->rx_buf_off); + + bnx2fc_return_rqe(tgt, 1); + + if (xid > BNX2FC_MAX_XID) { + BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n", + xid); + spin_unlock_bh(&tgt->tgt_lock); + break; + } + + task_idx = xid / BNX2FC_TASKS_PER_PAGE; + index = xid % BNX2FC_TASKS_PER_PAGE; + task_page = (struct fcoe_task_ctx_entry *) + hba->task_ctx[task_idx]; + task = &(task_page[index]); + + io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid]; + if (!io_req) { + spin_unlock_bh(&tgt->tgt_lock); + break; + } + + if (io_req->cmd_type != BNX2FC_SCSI_CMD) { + printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n"); + spin_unlock_bh(&tgt->tgt_lock); + break; + } + + if (test_and_clear_bit(BNX2FC_FLAG_IO_CLEANUP, + &io_req->req_flags)) { + BNX2FC_IO_DBG(io_req, "unsol_err: cleanup in " + "progress.. ignore unsol err\n"); + spin_unlock_bh(&tgt->tgt_lock); + break; + } + + /* + * If ABTS is already in progress, and FW error is + * received after that, do not cancel the timeout_work + * and let the error recovery continue by explicitly + * logging out the target, when the ABTS eventually + * times out. 
+		 */
+		if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
+				      &io_req->req_flags)) {
+			/*
+			 * Cancel the timeout_work, as we received IO
+			 * completion with FW error.
+			 */
+			if (cancel_delayed_work(&io_req->timeout_work))
+				kref_put(&io_req->refcount,
+					 bnx2fc_cmd_release); /* timer hold */
+
+			rc = bnx2fc_initiate_abts(io_req);
+			if (rc != SUCCESS) {
+				BNX2FC_IO_DBG(io_req, "err_warn: initiate_abts "
+					"failed. issue cleanup\n");
+				rc = bnx2fc_initiate_cleanup(io_req);
+				BUG_ON(rc);
+			}
+		} else
+			printk(KERN_ERR PFX "err_warn: io_req (0x%x) already "
+					    "in ABTS processing\n", xid);
+		spin_unlock_bh(&tgt->tgt_lock);
+		break;
+
+	case FCOE_WARNING_DETECTION_CQE_TYPE:
+		/*
+		 * In case of warning reporting CQE a single RQ entry
+		 * is consumed.
+		 */
+		num_rq = 1;
+		err_entry = (struct fcoe_err_report_entry *)
+			     bnx2fc_get_next_rqe(tgt, 1);
+		xid = cpu_to_be16(err_entry->fc_hdr.ox_id);
+		BNX2FC_TGT_DBG(tgt, "Unsol Warning Frame OX_ID = 0x%x\n", xid);
+		BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x",
+			err_entry->err_warn_bitmap_hi,
+			err_entry->err_warn_bitmap_lo);
+		BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x",
+			err_entry->tx_buf_off, err_entry->rx_buf_off);
+
+		bnx2fc_return_rqe(tgt, 1);
+		break;
+
+	default:
+		printk(KERN_ERR PFX "Unsol Compl: Invalid CQE Subtype\n");
+		break;
+	}
+}
+
+void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
+{
+	struct fcoe_task_ctx_entry *task;
+	struct fcoe_task_ctx_entry *task_page;
+	struct fcoe_port *port = tgt->port;
+	struct bnx2fc_hba *hba = port->priv;
+	struct bnx2fc_cmd *io_req;
+	int task_idx, index;
+	u16 xid;
+	u8 cmd_type;
+	u8 rx_state = 0;
+	u8 num_rq;
+
+	spin_lock_bh(&tgt->tgt_lock);
+	xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID;
+	if (xid >= BNX2FC_MAX_TASKS) {
+		printk(KERN_ALERT PFX "ERROR:xid out of range\n");
+		spin_unlock_bh(&tgt->tgt_lock);
+		return;
+	}
+	task_idx = xid / BNX2FC_TASKS_PER_PAGE;
+	index = xid % BNX2FC_TASKS_PER_PAGE;
+	task_page = (struct fcoe_task_ctx_entry *)hba->task_ctx[task_idx];
+	task = &(task_page[index]);
+
+	num_rq = ((task->rx_wr_tx_rd.rx_flags &
+		   FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE) >>
+		   FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE_SHIFT);
+
+	io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
+
+	if (io_req == NULL) {
+		printk(KERN_ERR PFX "ERROR? cq_compl - io_req is NULL\n");
+		spin_unlock_bh(&tgt->tgt_lock);
+		return;
+	}
+
+	/* Timestamp IO completion time */
+	cmd_type = io_req->cmd_type;
+
+	/* optimized completion path */
+	if (cmd_type == BNX2FC_SCSI_CMD) {
+		rx_state = ((task->rx_wr_tx_rd.rx_flags &
+			    FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE) >>
+			    FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE_SHIFT);
+
+		if (rx_state == FCOE_TASK_RX_STATE_COMPLETED) {
+			bnx2fc_process_scsi_cmd_compl(io_req, task, num_rq);
+			spin_unlock_bh(&tgt->tgt_lock);
+			return;
+		}
+	}
+
+	/* Process other IO completion types */
+	switch (cmd_type) {
+	case BNX2FC_SCSI_CMD:
+		if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED)
+			bnx2fc_process_abts_compl(io_req, task, num_rq);
+		else if (rx_state ==
+			 FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED)
+			bnx2fc_process_cleanup_compl(io_req, task, num_rq);
+		else
+			printk(KERN_ERR PFX "Invalid rx state - %d\n",
+				rx_state);
+		break;
+
+	case BNX2FC_TASK_MGMT_CMD:
+		BNX2FC_IO_DBG(io_req, "Processing TM complete\n");
+		bnx2fc_process_tm_compl(io_req, task, num_rq);
+		break;
+
+	case BNX2FC_ABTS:
+		/*
+		 * ABTS request received by firmware.
ABTS response + * will be delivered to the task belonging to the IO + * that was aborted + */ + BNX2FC_IO_DBG(io_req, "cq_compl- ABTS sent out by fw\n"); + kref_put(&io_req->refcount, bnx2fc_cmd_release); + break; + + case BNX2FC_ELS: + BNX2FC_IO_DBG(io_req, "cq_compl - call process_els_compl\n"); + bnx2fc_process_els_compl(io_req, task, num_rq); + break; + + case BNX2FC_CLEANUP: + BNX2FC_IO_DBG(io_req, "cq_compl- cleanup resp rcvd\n"); + kref_put(&io_req->refcount, bnx2fc_cmd_release); + break; + + default: + printk(KERN_ERR PFX "Invalid cmd_type %d\n", cmd_type); + break; + } + spin_unlock_bh(&tgt->tgt_lock); +} + +struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe) +{ + struct bnx2fc_work *work; + work = kzalloc(sizeof(struct bnx2fc_work), GFP_ATOMIC); + if (!work) + return NULL; + + INIT_LIST_HEAD(&work->list); + work->tgt = tgt; + work->wqe = wqe; + return work; +} + +int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt) +{ + struct fcoe_cqe *cq; + u32 cq_cons; + struct fcoe_cqe *cqe; + u16 wqe; + bool more_cqes_found = false; + + /* + * cq_lock is a low contention lock used to protect + * the CQ data structure from being freed up during + * the upload operation + */ + spin_lock_bh(&tgt->cq_lock); + + if (!tgt->cq) { + printk(KERN_ERR PFX "process_new_cqes: cq is NULL\n"); + spin_unlock_bh(&tgt->cq_lock); + return 0; + } + cq = tgt->cq; + cq_cons = tgt->cq_cons_idx; + cqe = &cq[cq_cons]; + + do { + more_cqes_found ^= true; + + while (((wqe = cqe->wqe) & FCOE_CQE_TOGGLE_BIT) == + (tgt->cq_curr_toggle_bit << + FCOE_CQE_TOGGLE_BIT_SHIFT)) { + + /* new entry on the cq */ + if (wqe & FCOE_CQE_CQE_TYPE) { + /* Unsolicited event notification */ + bnx2fc_process_unsol_compl(tgt, wqe); + } else { + struct bnx2fc_work *work = NULL; + struct bnx2fc_percpu_s *fps = NULL; + unsigned int cpu = wqe % num_possible_cpus(); + + fps = &per_cpu(bnx2fc_percpu, cpu); + spin_lock_bh(&fps->fp_work_lock); + if (unlikely(!fps->iothread)) + goto unlock; + + work = bnx2fc_alloc_work(tgt, wqe); + if (work) + list_add_tail(&work->list, + &fps->work_list); +unlock: + spin_unlock_bh(&fps->fp_work_lock); + + /* Pending work request completion */ + if (fps->iothread && work) + wake_up_process(fps->iothread); + else + bnx2fc_process_cq_compl(tgt, wqe); + } + cqe++; + tgt->cq_cons_idx++; + + if (tgt->cq_cons_idx == BNX2FC_CQ_WQES_MAX) { + tgt->cq_cons_idx = 0; + cqe = cq; + tgt->cq_curr_toggle_bit = + 1 - tgt->cq_curr_toggle_bit; + } + } + /* Re-arm CQ */ + if (more_cqes_found) { + tgt->conn_db->cq_arm.lo = -1; + wmb(); + } + } while (more_cqes_found); + + /* + * Commit tgt->cq_cons_idx change to the memory + * spin_lock implies full memory barrier, no need to smp_wmb + */ + + spin_unlock_bh(&tgt->cq_lock); + return 0; +} + +/** + * bnx2fc_fastpath_notification - process global event queue (KCQ) + * + * @hba: adapter structure pointer + * @new_cqe_kcqe: pointer to newly DMA'd KCQ entry + * + * Fast path event notification handler + */ +static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba, + struct fcoe_kcqe *new_cqe_kcqe) +{ + u32 conn_id = new_cqe_kcqe->fcoe_conn_id; + struct bnx2fc_rport *tgt = hba->tgt_ofld_list[conn_id]; + + if (!tgt) { + printk(KERN_ALERT PFX "conn_id 0x%x not valid\n", conn_id); + return; + } + + bnx2fc_process_new_cqes(tgt); +} + +/** + * bnx2fc_process_ofld_cmpl - process FCoE session offload completion + * + * @hba: adapter structure pointer + * @ofld_kcqe: connection offload kcqe pointer + * + * handle session offload completion, enable the session if offload is 
+ * successful. + */ +static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba, + struct fcoe_kcqe *ofld_kcqe) +{ + struct bnx2fc_rport *tgt; + struct fcoe_port *port; + u32 conn_id; + u32 context_id; + int rc; + + conn_id = ofld_kcqe->fcoe_conn_id; + context_id = ofld_kcqe->fcoe_conn_context_id; + tgt = hba->tgt_ofld_list[conn_id]; + if (!tgt) { + printk(KERN_ALERT PFX "ERROR:ofld_cmpl: No pending ofld req\n"); + return; + } + BNX2FC_TGT_DBG(tgt, "Entered ofld compl - context_id = 0x%x\n", + ofld_kcqe->fcoe_conn_context_id); + port = tgt->port; + if (hba != tgt->port->priv) { + printk(KERN_ALERT PFX "ERROR:ofld_cmpl: HBA mis-match\n"); + goto ofld_cmpl_err; + } + /* + * cnic has allocated a context_id for this session; use this + * while enabling the session. + */ + tgt->context_id = context_id; + if (ofld_kcqe->completion_status) { + if (ofld_kcqe->completion_status == + FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE) { + printk(KERN_ERR PFX "unable to allocate FCoE context " + "resources\n"); + set_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE, &tgt->flags); + } + goto ofld_cmpl_err; + } else { + + /* now enable the session */ + rc = bnx2fc_send_session_enable_req(port, tgt); + if (rc) { + printk(KERN_ALERT PFX "enable session failed\n"); + goto ofld_cmpl_err; + } + } + return; +ofld_cmpl_err: + set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags); + wake_up_interruptible(&tgt->ofld_wait); +} + +/** + * bnx2fc_process_enable_conn_cmpl - process FCoE session enable completion + * + * @hba: adapter structure pointer + * @ofld_kcqe: connection offload kcqe pointer + * + * handle session enable completion, mark the rport as ready + */ + +static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba, + struct fcoe_kcqe *ofld_kcqe) +{ + struct bnx2fc_rport *tgt; + u32 conn_id; + u32 context_id; + + context_id = ofld_kcqe->fcoe_conn_context_id; + conn_id = ofld_kcqe->fcoe_conn_id; + tgt = hba->tgt_ofld_list[conn_id]; + if (!tgt) { + printk(KERN_ALERT PFX "ERROR:enbl_cmpl: No pending ofld req\n"); + return; + } + + BNX2FC_TGT_DBG(tgt, "Enable compl - context_id = 0x%x\n", + ofld_kcqe->fcoe_conn_context_id); + + /* + * context_id should be the same for this target during offload + * and enable + */ + if (tgt->context_id != context_id) { + printk(KERN_ALERT PFX "context id mis-match\n"); + return; + } + if (hba != tgt->port->priv) { + printk(KERN_ALERT PFX "bnx2fc-enbl_cmpl: HBA mis-match\n"); + goto enbl_cmpl_err; + } + if (ofld_kcqe->completion_status) { + goto enbl_cmpl_err; + } else { + /* enable successful - rport ready for issuing IOs */ + set_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags); + set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags); + wake_up_interruptible(&tgt->ofld_wait); + } + return; + +enbl_cmpl_err: + set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags); + wake_up_interruptible(&tgt->ofld_wait); +} + +static void bnx2fc_process_conn_disable_cmpl(struct bnx2fc_hba *hba, + struct fcoe_kcqe *disable_kcqe) +{ + + struct bnx2fc_rport *tgt; + u32 conn_id; + + conn_id = disable_kcqe->fcoe_conn_id; + tgt = hba->tgt_ofld_list[conn_id]; + if (!tgt) { + printk(KERN_ALERT PFX "ERROR: disable_cmpl: No disable req\n"); + return; + } + + BNX2FC_TGT_DBG(tgt, PFX "disable_cmpl: conn_id %d\n", conn_id); + + if (disable_kcqe->completion_status) { + printk(KERN_ALERT PFX "ERROR: Disable failed with cmpl status %d\n", + disable_kcqe->completion_status); + return; + } else { + /* disable successful */ + BNX2FC_TGT_DBG(tgt, "disable successful\n"); + clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags); + 
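+		/*
+		 * DISABLED + UPLD_REQ_COMPL lets the waiter on upld_wait
+		 * tell a disable completion apart from a destroy completion,
+		 * which sets DESTROYED instead (see
+		 * bnx2fc_process_conn_destroy_cmpl() below).
+		 */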
+		set_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
+		set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
+		wake_up_interruptible(&tgt->upld_wait);
+	}
+}
+
+static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
+					struct fcoe_kcqe *destroy_kcqe)
+{
+	struct bnx2fc_rport *tgt;
+	u32 conn_id;
+
+	conn_id = destroy_kcqe->fcoe_conn_id;
+	tgt = hba->tgt_ofld_list[conn_id];
+	if (!tgt) {
+		printk(KERN_ALERT PFX "destroy_cmpl: No destroy req\n");
+		return;
+	}
+
+	BNX2FC_TGT_DBG(tgt, "destroy_cmpl: conn_id %d\n", conn_id);
+
+	if (destroy_kcqe->completion_status) {
+		printk(KERN_ALERT PFX "Destroy conn failed, cmpl status %d\n",
+			destroy_kcqe->completion_status);
+		return;
+	} else {
+		/* destroy successful */
+		BNX2FC_TGT_DBG(tgt, "upload successful\n");
+		clear_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
+		set_bit(BNX2FC_FLAG_DESTROYED, &tgt->flags);
+		set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
+		wake_up_interruptible(&tgt->upld_wait);
+	}
+}
+
+static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code)
+{
+	switch (err_code) {
+	case FCOE_KCQE_COMPLETION_STATUS_INVALID_OPCODE:
+		printk(KERN_ERR PFX "init_failure due to invalid opcode\n");
+		break;
+
+	case FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE:
+		printk(KERN_ERR PFX "init failed due to ctx alloc failure\n");
+		break;
+
+	case FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR:
+		printk(KERN_ERR PFX "init_failure due to NIC error\n");
+		break;
+
+	default:
+		printk(KERN_ERR PFX "Unknown Error code %d\n", err_code);
+	}
+}
+
+/**
+ * bnx2fc_indicate_kcqe - process KCQE
+ *
+ * @hba: adapter structure pointer
+ * @kcqe: kcqe pointer
+ * @num_cqe: Number of completion queue elements
+ *
+ * Generic KCQ event handler
+ */
+void bnx2fc_indicate_kcqe(void *context, struct kcqe *kcq[],
+					u32 num_cqe)
+{
+	struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context;
+	int i = 0;
+	struct fcoe_kcqe *kcqe = NULL;
+
+	while (i < num_cqe) {
+		kcqe = (struct fcoe_kcqe *) kcq[i++];
+
+		switch (kcqe->op_code) {
+		case FCOE_KCQE_OPCODE_CQ_EVENT_NOTIFICATION:
+			bnx2fc_fastpath_notification(hba, kcqe);
+			break;
+
+		case FCOE_KCQE_OPCODE_OFFLOAD_CONN:
+			bnx2fc_process_ofld_cmpl(hba, kcqe);
+			break;
+
+		case FCOE_KCQE_OPCODE_ENABLE_CONN:
+			bnx2fc_process_enable_conn_cmpl(hba, kcqe);
+			break;
+
+		case FCOE_KCQE_OPCODE_INIT_FUNC:
+			if (kcqe->completion_status !=
+					FCOE_KCQE_COMPLETION_STATUS_SUCCESS) {
+				bnx2fc_init_failure(hba,
+						kcqe->completion_status);
+			} else {
+				set_bit(ADAPTER_STATE_UP, &hba->adapter_state);
+				bnx2fc_get_link_state(hba);
+				printk(KERN_INFO PFX "[%.2x]: FCOE_INIT passed\n",
+					(u8)hba->pcidev->bus->number);
+			}
+			break;
+
+		case FCOE_KCQE_OPCODE_DESTROY_FUNC:
+			if (kcqe->completion_status !=
+					FCOE_KCQE_COMPLETION_STATUS_SUCCESS) {
+
+				printk(KERN_ERR PFX "DESTROY failed\n");
+			} else {
+				printk(KERN_ERR PFX "DESTROY success\n");
+			}
+			hba->flags |= BNX2FC_FLAG_DESTROY_CMPL;
+			wake_up_interruptible(&hba->destroy_wait);
+			break;
+
+		case FCOE_KCQE_OPCODE_DISABLE_CONN:
+			bnx2fc_process_conn_disable_cmpl(hba, kcqe);
+			break;
+
+		case FCOE_KCQE_OPCODE_DESTROY_CONN:
+			bnx2fc_process_conn_destroy_cmpl(hba, kcqe);
+			break;
+
+		case FCOE_KCQE_OPCODE_STAT_FUNC:
+			if (kcqe->completion_status !=
+			    FCOE_KCQE_COMPLETION_STATUS_SUCCESS)
+				printk(KERN_ERR PFX "STAT failed\n");
+			complete(&hba->stat_req_done);
+			break;
+
+		case FCOE_KCQE_OPCODE_FCOE_ERROR:
+			/* fall thru */
+		default:
+			printk(KERN_ALERT PFX "unknown opcode 0x%x\n",
+				kcqe->op_code);
+		}
+	}
+}
+
+void bnx2fc_add_2_sq(struct bnx2fc_rport *tgt, u16 xid)
+{
+	struct fcoe_sqe *sqe;
+
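+	/*
+	 * Callers queue one or more WQEs with tgt->tgt_lock held and
+	 * then publish sq_prod_idx to the chip, e.g.:
+	 *
+	 *	bnx2fc_add_2_sq(tgt, xid);
+	 *	bnx2fc_ring_doorbell(tgt);
+	 */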
sqe = &tgt->sq[tgt->sq_prod_idx]; + + /* Fill SQ WQE */ + sqe->wqe = xid << FCOE_SQE_TASK_ID_SHIFT; + sqe->wqe |= tgt->sq_curr_toggle_bit << FCOE_SQE_TOGGLE_BIT_SHIFT; + + /* Advance SQ Prod Idx */ + if (++tgt->sq_prod_idx == BNX2FC_SQ_WQES_MAX) { + tgt->sq_prod_idx = 0; + tgt->sq_curr_toggle_bit = 1 - tgt->sq_curr_toggle_bit; + } +} + +void bnx2fc_ring_doorbell(struct bnx2fc_rport *tgt) +{ + struct b577xx_doorbell_set_prod ev_doorbell; + u32 msg; + + wmb(); + + memset(&ev_doorbell, 0, sizeof(struct b577xx_doorbell_set_prod)); + ev_doorbell.header.header = B577XX_DOORBELL_HDR_DB_TYPE; + + ev_doorbell.prod = tgt->sq_prod_idx | + (tgt->sq_curr_toggle_bit << 15); + ev_doorbell.header.header |= B577XX_FCOE_CONNECTION_TYPE << + B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT; + msg = *((u32 *)&ev_doorbell); + writel(cpu_to_le32(msg), tgt->ctx_base); + + mmiowb(); + +} + +int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt) +{ + u32 context_id = tgt->context_id; + struct fcoe_port *port = tgt->port; + u32 reg_off; + resource_size_t reg_base; + struct bnx2fc_hba *hba = port->priv; + + reg_base = pci_resource_start(hba->pcidev, + BNX2X_DOORBELL_PCI_BAR); + reg_off = BNX2FC_5771X_DB_PAGE_SIZE * + (context_id & 0x1FFFF) + DPM_TRIGER_TYPE; + tgt->ctx_base = ioremap_nocache(reg_base + reg_off, 4); + if (!tgt->ctx_base) + return -ENOMEM; + return 0; +} + +char *bnx2fc_get_next_rqe(struct bnx2fc_rport *tgt, u8 num_items) +{ + char *buf = (char *)tgt->rq + (tgt->rq_cons_idx * BNX2FC_RQ_BUF_SZ); + + if (tgt->rq_cons_idx + num_items > BNX2FC_RQ_WQES_MAX) + return NULL; + + tgt->rq_cons_idx += num_items; + + if (tgt->rq_cons_idx >= BNX2FC_RQ_WQES_MAX) + tgt->rq_cons_idx -= BNX2FC_RQ_WQES_MAX; + + return buf; +} + +void bnx2fc_return_rqe(struct bnx2fc_rport *tgt, u8 num_items) +{ + /* return the rq buffer */ + u32 next_prod_idx = tgt->rq_prod_idx + num_items; + if ((next_prod_idx & 0x7fff) == BNX2FC_RQ_WQES_MAX) { + /* Wrap around RQ */ + next_prod_idx += 0x8000 - BNX2FC_RQ_WQES_MAX; + } + tgt->rq_prod_idx = next_prod_idx; + tgt->conn_db->rq_prod = tgt->rq_prod_idx; +} + +void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req, + struct fcoe_task_ctx_entry *task, + u16 orig_xid) +{ + u8 task_type = FCOE_TASK_TYPE_EXCHANGE_CLEANUP; + struct bnx2fc_rport *tgt = io_req->tgt; + u32 context_id = tgt->context_id; + + memset(task, 0, sizeof(struct fcoe_task_ctx_entry)); + + /* Tx Write Rx Read */ + task->tx_wr_rx_rd.tx_flags = FCOE_TASK_TX_STATE_EXCHANGE_CLEANUP << + FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT; + task->tx_wr_rx_rd.init_flags = task_type << + FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT; + task->tx_wr_rx_rd.init_flags |= FCOE_TASK_CLASS_TYPE_3 << + FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT; + /* Common */ + task->cmn.common_flags = context_id << + FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT; + task->cmn.general.cleanup_info.task_id = orig_xid; + + +} + +void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req, + struct fcoe_task_ctx_entry *task) +{ + struct bnx2fc_mp_req *mp_req = &(io_req->mp_req); + struct bnx2fc_rport *tgt = io_req->tgt; + struct fc_frame_header *fc_hdr; + u8 task_type = 0; + u64 *hdr; + u64 temp_hdr[3]; + u32 context_id; + + + /* Obtain task_type */ + if ((io_req->cmd_type == BNX2FC_TASK_MGMT_CMD) || + (io_req->cmd_type == BNX2FC_ELS)) { + task_type = FCOE_TASK_TYPE_MIDPATH; + } else if (io_req->cmd_type == BNX2FC_ABTS) { + task_type = FCOE_TASK_TYPE_ABTS; + } + + memset(task, 0, sizeof(struct fcoe_task_ctx_entry)); + + /* Setup the task from io_req for easy reference */ + io_req->task = task; + 
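+	/*
+	 * @task points into the hba task context pages; callers locate
+	 * it as hba->task_ctx[xid / BNX2FC_TASKS_PER_PAGE] at index
+	 * (xid % BNX2FC_TASKS_PER_PAGE).
+	 */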
+ BNX2FC_IO_DBG(io_req, "Init MP task for cmd_type = %d task_type = %d\n", + io_req->cmd_type, task_type); + + /* Tx only */ + if ((task_type == FCOE_TASK_TYPE_MIDPATH) || + (task_type == FCOE_TASK_TYPE_UNSOLICITED)) { + task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo = + (u32)mp_req->mp_req_bd_dma; + task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi = + (u32)((u64)mp_req->mp_req_bd_dma >> 32); + task->tx_wr_only.sgl_ctx.mul_sges.sgl_size = 1; + BNX2FC_IO_DBG(io_req, "init_mp_task - bd_dma = 0x%llx\n", + (unsigned long long)mp_req->mp_req_bd_dma); + } + + /* Tx Write Rx Read */ + task->tx_wr_rx_rd.tx_flags = FCOE_TASK_TX_STATE_INIT << + FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT; + task->tx_wr_rx_rd.init_flags = task_type << + FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT; + task->tx_wr_rx_rd.init_flags |= FCOE_TASK_DEV_TYPE_DISK << + FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT; + task->tx_wr_rx_rd.init_flags |= FCOE_TASK_CLASS_TYPE_3 << + FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT; + + /* Common */ + task->cmn.data_2_trns = io_req->data_xfer_len; + context_id = tgt->context_id; + task->cmn.common_flags = context_id << + FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT; + task->cmn.common_flags |= 1 << + FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID_SHIFT; + task->cmn.common_flags |= 1 << + FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME_SHIFT; + + /* Rx Write Tx Read */ + fc_hdr = &(mp_req->req_fc_hdr); + if (task_type == FCOE_TASK_TYPE_MIDPATH) { + fc_hdr->fh_ox_id = cpu_to_be16(io_req->xid); + fc_hdr->fh_rx_id = htons(0xffff); + task->rx_wr_tx_rd.rx_id = 0xffff; + } else if (task_type == FCOE_TASK_TYPE_UNSOLICITED) { + fc_hdr->fh_rx_id = cpu_to_be16(io_req->xid); + } + + /* Fill FC Header into middle path buffer */ + hdr = (u64 *) &task->cmn.general.cmd_info.mp_fc_frame.fc_hdr; + memcpy(temp_hdr, fc_hdr, sizeof(temp_hdr)); + hdr[0] = cpu_to_be64(temp_hdr[0]); + hdr[1] = cpu_to_be64(temp_hdr[1]); + hdr[2] = cpu_to_be64(temp_hdr[2]); + + /* Rx Only */ + if (task_type == FCOE_TASK_TYPE_MIDPATH) { + + task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo = + (u32)mp_req->mp_resp_bd_dma; + task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi = + (u32)((u64)mp_req->mp_resp_bd_dma >> 32); + task->rx_wr_only.sgl_ctx.mul_sges.sgl_size = 1; + } +} + +void bnx2fc_init_task(struct bnx2fc_cmd *io_req, + struct fcoe_task_ctx_entry *task) +{ + u8 task_type; + struct scsi_cmnd *sc_cmd = io_req->sc_cmd; + struct io_bdt *bd_tbl = io_req->bd_tbl; + struct bnx2fc_rport *tgt = io_req->tgt; + u64 *fcp_cmnd; + u64 tmp_fcp_cmnd[4]; + u32 context_id; + int cnt, i; + int bd_count; + + memset(task, 0, sizeof(struct fcoe_task_ctx_entry)); + + /* Setup the task from io_req for easy reference */ + io_req->task = task; + + if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) + task_type = FCOE_TASK_TYPE_WRITE; + else + task_type = FCOE_TASK_TYPE_READ; + + /* Tx only */ + if (task_type == FCOE_TASK_TYPE_WRITE) { + task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo = + (u32)bd_tbl->bd_tbl_dma; + task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi = + (u32)((u64)bd_tbl->bd_tbl_dma >> 32); + task->tx_wr_only.sgl_ctx.mul_sges.sgl_size = + bd_tbl->bd_valid; + } + + /*Tx Write Rx Read */ + /* Init state to NORMAL */ + task->tx_wr_rx_rd.tx_flags = FCOE_TASK_TX_STATE_NORMAL << + FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT; + task->tx_wr_rx_rd.init_flags = task_type << + FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT; + task->tx_wr_rx_rd.init_flags |= FCOE_TASK_DEV_TYPE_DISK << + FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT; + 
task->tx_wr_rx_rd.init_flags |= FCOE_TASK_CLASS_TYPE_3 << + FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT; + + /* Common */ + task->cmn.data_2_trns = io_req->data_xfer_len; + context_id = tgt->context_id; + task->cmn.common_flags = context_id << + FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT; + task->cmn.common_flags |= 1 << + FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID_SHIFT; + task->cmn.common_flags |= 1 << + FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME_SHIFT; + + /* Set initiative ownership */ + task->cmn.common_flags |= FCOE_TASK_CTX_ENTRY_TX_RX_CMN_SEQ_INIT; + + /* Set initial seq counter */ + task->cmn.tx_low_seq_cnt = 1; + + /* Set state to "waiting for the first packet" */ + task->cmn.common_flags |= FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME; + + /* Fill FCP_CMND IU */ + fcp_cmnd = (u64 *) + task->cmn.general.cmd_info.fcp_cmd_payload.opaque; + bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)&tmp_fcp_cmnd); + + /* swap fcp_cmnd */ + cnt = sizeof(struct fcp_cmnd) / sizeof(u64); + + for (i = 0; i < cnt; i++) { + *fcp_cmnd = cpu_to_be64(tmp_fcp_cmnd[i]); + fcp_cmnd++; + } + + /* Rx Write Tx Read */ + task->rx_wr_tx_rd.rx_id = 0xffff; + + /* Rx Only */ + if (task_type == FCOE_TASK_TYPE_READ) { + + bd_count = bd_tbl->bd_valid; + if (bd_count == 1) { + + struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl; + + task->rx_wr_only.sgl_ctx.single_sge.cur_buf_addr.lo = + fcoe_bd_tbl->buf_addr_lo; + task->rx_wr_only.sgl_ctx.single_sge.cur_buf_addr.hi = + fcoe_bd_tbl->buf_addr_hi; + task->rx_wr_only.sgl_ctx.single_sge.cur_buf_rem = + fcoe_bd_tbl->buf_len; + task->tx_wr_rx_rd.init_flags |= 1 << + FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE_SHIFT; + } else { + + task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo = + (u32)bd_tbl->bd_tbl_dma; + task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi = + (u32)((u64)bd_tbl->bd_tbl_dma >> 32); + task->rx_wr_only.sgl_ctx.mul_sges.sgl_size = + bd_tbl->bd_valid; + } + } +} + +/** + * bnx2fc_setup_task_ctx - allocate and map task context + * + * @hba: pointer to adapter structure + * + * allocate memory for task context, and associated BD table to be used + * by firmware + * + */ +int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba) +{ + int rc = 0; + struct regpair *task_ctx_bdt; + dma_addr_t addr; + int i; + + /* + * Allocate task context bd table. A page size of bd table + * can map 256 buffers. Each buffer contains 32 task context + * entries. Hence the limit with one page is 8192 task context + * entries. 
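+	 * Each regpair filled in below holds the DMA address of one
+	 * such page of task context entries.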
+ */ + hba->task_ctx_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, + PAGE_SIZE, + &hba->task_ctx_bd_dma, + GFP_KERNEL); + if (!hba->task_ctx_bd_tbl) { + printk(KERN_ERR PFX "unable to allocate task context BDT\n"); + rc = -1; + goto out; + } + memset(hba->task_ctx_bd_tbl, 0, PAGE_SIZE); + + /* + * Allocate task_ctx which is an array of pointers pointing to + * a page containing 32 task contexts + */ + hba->task_ctx = kzalloc((BNX2FC_TASK_CTX_ARR_SZ * sizeof(void *)), + GFP_KERNEL); + if (!hba->task_ctx) { + printk(KERN_ERR PFX "unable to allocate task context array\n"); + rc = -1; + goto out1; + } + + /* + * Allocate task_ctx_dma which is an array of dma addresses + */ + hba->task_ctx_dma = kmalloc((BNX2FC_TASK_CTX_ARR_SZ * + sizeof(dma_addr_t)), GFP_KERNEL); + if (!hba->task_ctx_dma) { + printk(KERN_ERR PFX "unable to alloc context mapping array\n"); + rc = -1; + goto out2; + } + + task_ctx_bdt = (struct regpair *)hba->task_ctx_bd_tbl; + for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) { + + hba->task_ctx[i] = dma_alloc_coherent(&hba->pcidev->dev, + PAGE_SIZE, + &hba->task_ctx_dma[i], + GFP_KERNEL); + if (!hba->task_ctx[i]) { + printk(KERN_ERR PFX "unable to alloc task context\n"); + rc = -1; + goto out3; + } + memset(hba->task_ctx[i], 0, PAGE_SIZE); + addr = (u64)hba->task_ctx_dma[i]; + task_ctx_bdt->hi = cpu_to_le32((u64)addr >> 32); + task_ctx_bdt->lo = cpu_to_le32((u32)addr); + task_ctx_bdt++; + } + return 0; + +out3: + for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) { + if (hba->task_ctx[i]) { + + dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, + hba->task_ctx[i], hba->task_ctx_dma[i]); + hba->task_ctx[i] = NULL; + } + } + + kfree(hba->task_ctx_dma); + hba->task_ctx_dma = NULL; +out2: + kfree(hba->task_ctx); + hba->task_ctx = NULL; +out1: + dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, + hba->task_ctx_bd_tbl, hba->task_ctx_bd_dma); + hba->task_ctx_bd_tbl = NULL; +out: + return rc; +} + +void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba) +{ + int i; + + if (hba->task_ctx_bd_tbl) { + dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, + hba->task_ctx_bd_tbl, + hba->task_ctx_bd_dma); + hba->task_ctx_bd_tbl = NULL; + } + + if (hba->task_ctx) { + for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) { + if (hba->task_ctx[i]) { + dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, + hba->task_ctx[i], + hba->task_ctx_dma[i]); + hba->task_ctx[i] = NULL; + } + } + kfree(hba->task_ctx); + hba->task_ctx = NULL; + } + + kfree(hba->task_ctx_dma); + hba->task_ctx_dma = NULL; +} + +static void bnx2fc_free_hash_table(struct bnx2fc_hba *hba) +{ + int i; + int segment_count; + int hash_table_size; + u32 *pbl; + + segment_count = hba->hash_tbl_segment_count; + hash_table_size = BNX2FC_NUM_MAX_SESS * BNX2FC_MAX_ROWS_IN_HASH_TBL * + sizeof(struct fcoe_hash_table_entry); + + pbl = hba->hash_tbl_pbl; + for (i = 0; i < segment_count; ++i) { + dma_addr_t dma_address; + + dma_address = le32_to_cpu(*pbl); + ++pbl; + dma_address += ((u64)le32_to_cpu(*pbl)) << 32; + ++pbl; + dma_free_coherent(&hba->pcidev->dev, + BNX2FC_HASH_TBL_CHUNK_SIZE, + hba->hash_tbl_segments[i], + dma_address); + + } + + if (hba->hash_tbl_pbl) { + dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, + hba->hash_tbl_pbl, + hba->hash_tbl_pbl_dma); + hba->hash_tbl_pbl = NULL; + } +} + +static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba) +{ + int i; + int hash_table_size; + int segment_count; + int segment_array_size; + int dma_segment_array_size; + dma_addr_t *dma_segment_array; + u32 *pbl; + + hash_table_size = BNX2FC_NUM_MAX_SESS * 
BNX2FC_MAX_ROWS_IN_HASH_TBL * + sizeof(struct fcoe_hash_table_entry); + + segment_count = hash_table_size + BNX2FC_HASH_TBL_CHUNK_SIZE - 1; + segment_count /= BNX2FC_HASH_TBL_CHUNK_SIZE; + hba->hash_tbl_segment_count = segment_count; + + segment_array_size = segment_count * sizeof(*hba->hash_tbl_segments); + hba->hash_tbl_segments = kzalloc(segment_array_size, GFP_KERNEL); + if (!hba->hash_tbl_segments) { + printk(KERN_ERR PFX "hash table pointers alloc failed\n"); + return -ENOMEM; + } + dma_segment_array_size = segment_count * sizeof(*dma_segment_array); + dma_segment_array = kzalloc(dma_segment_array_size, GFP_KERNEL); + if (!dma_segment_array) { + printk(KERN_ERR PFX "hash table pointers (dma) alloc failed\n"); + return -ENOMEM; + } + + for (i = 0; i < segment_count; ++i) { + hba->hash_tbl_segments[i] = + dma_alloc_coherent(&hba->pcidev->dev, + BNX2FC_HASH_TBL_CHUNK_SIZE, + &dma_segment_array[i], + GFP_KERNEL); + if (!hba->hash_tbl_segments[i]) { + printk(KERN_ERR PFX "hash segment alloc failed\n"); + while (--i >= 0) { + dma_free_coherent(&hba->pcidev->dev, + BNX2FC_HASH_TBL_CHUNK_SIZE, + hba->hash_tbl_segments[i], + dma_segment_array[i]); + hba->hash_tbl_segments[i] = NULL; + } + kfree(dma_segment_array); + return -ENOMEM; + } + memset(hba->hash_tbl_segments[i], 0, + BNX2FC_HASH_TBL_CHUNK_SIZE); + } + + hba->hash_tbl_pbl = dma_alloc_coherent(&hba->pcidev->dev, + PAGE_SIZE, + &hba->hash_tbl_pbl_dma, + GFP_KERNEL); + if (!hba->hash_tbl_pbl) { + printk(KERN_ERR PFX "hash table pbl alloc failed\n"); + kfree(dma_segment_array); + return -ENOMEM; + } + memset(hba->hash_tbl_pbl, 0, PAGE_SIZE); + + pbl = hba->hash_tbl_pbl; + for (i = 0; i < segment_count; ++i) { + u64 paddr = dma_segment_array[i]; + *pbl = cpu_to_le32((u32) paddr); + ++pbl; + *pbl = cpu_to_le32((u32) (paddr >> 32)); + ++pbl; + } + pbl = hba->hash_tbl_pbl; + i = 0; + while (*pbl && *(pbl + 1)) { + u32 lo; + u32 hi; + lo = *pbl; + ++pbl; + hi = *pbl; + ++pbl; + ++i; + } + kfree(dma_segment_array); + return 0; +} + +/** + * bnx2fc_setup_fw_resc - Allocate and map hash table and dummy buffer + * + * @hba: Pointer to adapter structure + * + */ +int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba) +{ + u64 addr; + u32 mem_size; + int i; + + if (bnx2fc_allocate_hash_table(hba)) + return -ENOMEM; + + mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair); + hba->t2_hash_tbl_ptr = dma_alloc_coherent(&hba->pcidev->dev, mem_size, + &hba->t2_hash_tbl_ptr_dma, + GFP_KERNEL); + if (!hba->t2_hash_tbl_ptr) { + printk(KERN_ERR PFX "unable to allocate t2 hash table ptr\n"); + bnx2fc_free_fw_resc(hba); + return -ENOMEM; + } + memset(hba->t2_hash_tbl_ptr, 0x00, mem_size); + + mem_size = BNX2FC_NUM_MAX_SESS * + sizeof(struct fcoe_t2_hash_table_entry); + hba->t2_hash_tbl = dma_alloc_coherent(&hba->pcidev->dev, mem_size, + &hba->t2_hash_tbl_dma, + GFP_KERNEL); + if (!hba->t2_hash_tbl) { + printk(KERN_ERR PFX "unable to allocate t2 hash table\n"); + bnx2fc_free_fw_resc(hba); + return -ENOMEM; + } + memset(hba->t2_hash_tbl, 0x00, mem_size); + for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) { + addr = (unsigned long) hba->t2_hash_tbl_dma + + ((i+1) * sizeof(struct fcoe_t2_hash_table_entry)); + hba->t2_hash_tbl[i].next.lo = addr & 0xffffffff; + hba->t2_hash_tbl[i].next.hi = addr >> 32; + } + + hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev, + PAGE_SIZE, &hba->dummy_buf_dma, + GFP_KERNEL); + if (!hba->dummy_buffer) { + printk(KERN_ERR PFX "unable to alloc MP Dummy Buffer\n"); + bnx2fc_free_fw_resc(hba); + return -ENOMEM; + } + + hba->stats_buffer = 
dma_alloc_coherent(&hba->pcidev->dev,
+					       PAGE_SIZE,
+					       &hba->stats_buf_dma,
+					       GFP_KERNEL);
+	if (!hba->stats_buffer) {
+		printk(KERN_ERR PFX "unable to alloc Stats Buffer\n");
+		bnx2fc_free_fw_resc(hba);
+		return -ENOMEM;
+	}
+	memset(hba->stats_buffer, 0x00, PAGE_SIZE);
+
+	return 0;
+}
+
+void bnx2fc_free_fw_resc(struct bnx2fc_hba *hba)
+{
+	u32 mem_size;
+
+	if (hba->stats_buffer) {
+		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+				  hba->stats_buffer, hba->stats_buf_dma);
+		hba->stats_buffer = NULL;
+	}
+
+	if (hba->dummy_buffer) {
+		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+				  hba->dummy_buffer, hba->dummy_buf_dma);
+		hba->dummy_buffer = NULL;
+	}
+
+	if (hba->t2_hash_tbl_ptr) {
+		mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
+		dma_free_coherent(&hba->pcidev->dev, mem_size,
+				  hba->t2_hash_tbl_ptr,
+				  hba->t2_hash_tbl_ptr_dma);
+		hba->t2_hash_tbl_ptr = NULL;
+	}
+
+	if (hba->t2_hash_tbl) {
+		mem_size = BNX2FC_NUM_MAX_SESS *
+			    sizeof(struct fcoe_t2_hash_table_entry);
+		dma_free_coherent(&hba->pcidev->dev, mem_size,
+				  hba->t2_hash_tbl, hba->t2_hash_tbl_dma);
+		hba->t2_hash_tbl = NULL;
+	}
+	bnx2fc_free_hash_table(hba);
+}
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
new file mode 100644
index 000000000000..0f1dd23730db
--- /dev/null
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -0,0 +1,1833 @@
+/* bnx2fc_io.c: Broadcom NetXtreme II Linux FCoE offload driver.
+ * IO manager and SCSI IO processing.
+ *
+ * Copyright (c) 2008 - 2010 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
+ */
+
+#include "bnx2fc.h"
+static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len,
+			   int bd_index);
+static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req);
+static void bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req);
+static int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
+			       struct bnx2fc_cmd *io_req);
+static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req);
+static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req);
+static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
+				 struct fcoe_fcp_rsp_payload *fcp_rsp,
+				 u8 num_rq);
+
+void bnx2fc_cmd_timer_set(struct bnx2fc_cmd *io_req,
+			  unsigned int timer_msec)
+{
+	struct bnx2fc_hba *hba = io_req->port->priv;
+
+	if (queue_delayed_work(hba->timer_work_queue, &io_req->timeout_work,
+				  msecs_to_jiffies(timer_msec)))
+		kref_get(&io_req->refcount);
+}
+
+static void bnx2fc_cmd_timeout(struct work_struct *work)
+{
+	struct bnx2fc_cmd *io_req = container_of(work, struct bnx2fc_cmd,
+						 timeout_work.work);
+	struct fc_lport *lport;
+	struct fc_rport_priv *rdata;
+	u8 cmd_type = io_req->cmd_type;
+	struct bnx2fc_rport *tgt = io_req->tgt;
+	int logo_issued;
+	int rc;
+
+	BNX2FC_IO_DBG(io_req, "cmd_timeout, cmd_type = %d,"
+		      "req_flags = %lx\n", cmd_type, io_req->req_flags);
+
+	spin_lock_bh(&tgt->tgt_lock);
+	if (test_and_clear_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags)) {
+		clear_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags);
+		/*
+		 * ideally we should hold the io_req until RRQ completes,
+		 * and release io_req from timeout hold.
+		 */
+		spin_unlock_bh(&tgt->tgt_lock);
+		bnx2fc_send_rrq(io_req);
+		return;
+	}
+	if (test_and_clear_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags)) {
+		BNX2FC_IO_DBG(io_req, "IO ready for reuse now\n");
+		goto done;
+	}
+
+	switch (cmd_type) {
+	case BNX2FC_SCSI_CMD:
+		if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
+							&io_req->req_flags)) {
+			/* Handle eh_abort timeout */
+			BNX2FC_IO_DBG(io_req, "eh_abort timed out\n");
+			complete(&io_req->tm_done);
+		} else if (test_bit(BNX2FC_FLAG_ISSUE_ABTS,
+				    &io_req->req_flags)) {
+			/* Handle internally generated ABTS timeout */
+			BNX2FC_IO_DBG(io_req, "ABTS timed out refcnt = %d\n",
+					io_req->refcount.refcount.counter);
+			if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
+					       &io_req->req_flags))) {
+
+				lport = io_req->port->lport;
+				rdata = io_req->tgt->rdata;
+				logo_issued = test_and_set_bit(
+						BNX2FC_FLAG_EXPL_LOGO,
+						&tgt->flags);
+				kref_put(&io_req->refcount, bnx2fc_cmd_release);
+				spin_unlock_bh(&tgt->tgt_lock);
+
+				/* Explicitly logo the target */
+				if (!logo_issued) {
+					BNX2FC_IO_DBG(io_req, "Explicit "
+						   "logo - tgt flags = 0x%lx\n",
+						   tgt->flags);
+
+					mutex_lock(&lport->disc.disc_mutex);
+					lport->tt.rport_logoff(rdata);
+					mutex_unlock(&lport->disc.disc_mutex);
+				}
+				return;
+			}
+		} else {
+			/* Handle IO timeout */
+			BNX2FC_IO_DBG(io_req, "IO timed out. issue ABTS\n");
+			if (test_and_set_bit(BNX2FC_FLAG_IO_COMPL,
+					     &io_req->req_flags)) {
+				BNX2FC_IO_DBG(io_req, "IO completed before "
+					       " timer expiry\n");
+				goto done;
+			}
+
+			if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
+					      &io_req->req_flags)) {
+				rc = bnx2fc_initiate_abts(io_req);
+				if (rc == SUCCESS)
+					goto done;
+				/*
+				 * Explicitly logo the target if
+				 * abts initiation fails
+				 */
+				lport = io_req->port->lport;
+				rdata = io_req->tgt->rdata;
+				logo_issued = test_and_set_bit(
+						BNX2FC_FLAG_EXPL_LOGO,
+						&tgt->flags);
+				kref_put(&io_req->refcount, bnx2fc_cmd_release);
+				spin_unlock_bh(&tgt->tgt_lock);
+
+				if (!logo_issued) {
+					BNX2FC_IO_DBG(io_req, "Explicit "
+						   "logo - tgt flags = 0x%lx\n",
+						   tgt->flags);
+
+
+					mutex_lock(&lport->disc.disc_mutex);
+					lport->tt.rport_logoff(rdata);
+					mutex_unlock(&lport->disc.disc_mutex);
+				}
+				return;
+			} else {
+				BNX2FC_IO_DBG(io_req, "IO already in "
+					      "ABTS processing\n");
+			}
+		}
+		break;
+	case BNX2FC_ELS:
+
+		if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
+			BNX2FC_IO_DBG(io_req, "ABTS for ELS timed out\n");
+
+			if (!test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
+					      &io_req->req_flags)) {
+				lport = io_req->port->lport;
+				rdata = io_req->tgt->rdata;
+				logo_issued = test_and_set_bit(
+						BNX2FC_FLAG_EXPL_LOGO,
+						&tgt->flags);
+				kref_put(&io_req->refcount, bnx2fc_cmd_release);
+				spin_unlock_bh(&tgt->tgt_lock);
+
+				/* Explicitly logo the target */
+				if (!logo_issued) {
+					BNX2FC_IO_DBG(io_req, "Explicitly logo"
+						   "(els)\n");
+					mutex_lock(&lport->disc.disc_mutex);
+					lport->tt.rport_logoff(rdata);
+					mutex_unlock(&lport->disc.disc_mutex);
+				}
+				return;
+			}
+		} else {
+			/*
+			 * Handle ELS timeout.
+			 * tgt_lock is used to sync compl path and timeout
+			 * path.
If els compl path is processing this IO, we + * have nothing to do here, just release the timer hold + */ + BNX2FC_IO_DBG(io_req, "ELS timed out\n"); + if (test_and_set_bit(BNX2FC_FLAG_ELS_DONE, + &io_req->req_flags)) + goto done; + + /* Indicate the cb_func that this ELS is timed out */ + set_bit(BNX2FC_FLAG_ELS_TIMEOUT, &io_req->req_flags); + + if ((io_req->cb_func) && (io_req->cb_arg)) { + io_req->cb_func(io_req->cb_arg); + io_req->cb_arg = NULL; + } + } + break; + default: + printk(KERN_ERR PFX "cmd_timeout: invalid cmd_type %d\n", + cmd_type); + break; + } + +done: + /* release the cmd that was held when timer was set */ + kref_put(&io_req->refcount, bnx2fc_cmd_release); + spin_unlock_bh(&tgt->tgt_lock); +} + +static void bnx2fc_scsi_done(struct bnx2fc_cmd *io_req, int err_code) +{ + /* Called with host lock held */ + struct scsi_cmnd *sc_cmd = io_req->sc_cmd; + + /* + * active_cmd_queue may have other command types as well, + * and during flush operation, we want to error back only + * scsi commands. + */ + if (io_req->cmd_type != BNX2FC_SCSI_CMD) + return; + + BNX2FC_IO_DBG(io_req, "scsi_done. err_code = 0x%x\n", err_code); + bnx2fc_unmap_sg_list(io_req); + io_req->sc_cmd = NULL; + if (!sc_cmd) { + printk(KERN_ERR PFX "scsi_done - sc_cmd NULL. " + "IO(0x%x) already cleaned up\n", + io_req->xid); + return; + } + sc_cmd->result = err_code << 16; + + BNX2FC_IO_DBG(io_req, "sc=%p, result=0x%x, retries=%d, allowed=%d\n", + sc_cmd, host_byte(sc_cmd->result), sc_cmd->retries, + sc_cmd->allowed); + scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd)); + sc_cmd->SCp.ptr = NULL; + sc_cmd->scsi_done(sc_cmd); +} + +struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba, + u16 min_xid, u16 max_xid) +{ + struct bnx2fc_cmd_mgr *cmgr; + struct io_bdt *bdt_info; + struct bnx2fc_cmd *io_req; + size_t len; + u32 mem_size; + u16 xid; + int i; + int num_ios; + size_t bd_tbl_sz; + + if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) { + printk(KERN_ERR PFX "cmd_mgr_alloc: Invalid min_xid 0x%x \ + and max_xid 0x%x\n", min_xid, max_xid); + return NULL; + } + BNX2FC_MISC_DBG("min xid 0x%x, max xid 0x%x\n", min_xid, max_xid); + + num_ios = max_xid - min_xid + 1; + len = (num_ios * (sizeof(struct bnx2fc_cmd *))); + len += sizeof(struct bnx2fc_cmd_mgr); + + cmgr = kzalloc(len, GFP_KERNEL); + if (!cmgr) { + printk(KERN_ERR PFX "failed to alloc cmgr\n"); + return NULL; + } + + cmgr->free_list = kzalloc(sizeof(*cmgr->free_list) * + num_possible_cpus(), GFP_KERNEL); + if (!cmgr->free_list) { + printk(KERN_ERR PFX "failed to alloc free_list\n"); + goto mem_err; + } + + cmgr->free_list_lock = kzalloc(sizeof(*cmgr->free_list_lock) * + num_possible_cpus(), GFP_KERNEL); + if (!cmgr->free_list_lock) { + printk(KERN_ERR PFX "failed to alloc free_list_lock\n"); + goto mem_err; + } + + cmgr->hba = hba; + cmgr->cmds = (struct bnx2fc_cmd **)(cmgr + 1); + + for (i = 0; i < num_possible_cpus(); i++) { + INIT_LIST_HEAD(&cmgr->free_list[i]); + spin_lock_init(&cmgr->free_list_lock[i]); + } + + /* Pre-allocated pool of bnx2fc_cmds */ + xid = BNX2FC_MIN_XID; + for (i = 0; i < num_ios; i++) { + io_req = kzalloc(sizeof(*io_req), GFP_KERNEL); + + if (!io_req) { + printk(KERN_ERR PFX "failed to alloc io_req\n"); + goto mem_err; + } + + INIT_LIST_HEAD(&io_req->link); + INIT_DELAYED_WORK(&io_req->timeout_work, bnx2fc_cmd_timeout); + + io_req->xid = xid++; + if (io_req->xid >= BNX2FC_MAX_OUTSTANDING_CMNDS) + printk(KERN_ERR PFX "ERROR allocating xids - 0x%x\n", + io_req->xid); + list_add_tail(&io_req->link, + 
&cmgr->free_list[io_req->xid % num_possible_cpus()]); + io_req++; + } + + /* Allocate pool of io_bdts - one for each bnx2fc_cmd */ + mem_size = num_ios * sizeof(struct io_bdt *); + cmgr->io_bdt_pool = kmalloc(mem_size, GFP_KERNEL); + if (!cmgr->io_bdt_pool) { + printk(KERN_ERR PFX "failed to alloc io_bdt_pool\n"); + goto mem_err; + } + + mem_size = sizeof(struct io_bdt); + for (i = 0; i < num_ios; i++) { + cmgr->io_bdt_pool[i] = kmalloc(mem_size, GFP_KERNEL); + if (!cmgr->io_bdt_pool[i]) { + printk(KERN_ERR PFX "failed to alloc " + "io_bdt_pool[%d]\n", i); + goto mem_err; + } + } + + /* Allocate an map fcoe_bdt_ctx structures */ + bd_tbl_sz = BNX2FC_MAX_BDS_PER_CMD * sizeof(struct fcoe_bd_ctx); + for (i = 0; i < num_ios; i++) { + bdt_info = cmgr->io_bdt_pool[i]; + bdt_info->bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, + bd_tbl_sz, + &bdt_info->bd_tbl_dma, + GFP_KERNEL); + if (!bdt_info->bd_tbl) { + printk(KERN_ERR PFX "failed to alloc " + "bdt_tbl[%d]\n", i); + goto mem_err; + } + } + + return cmgr; + +mem_err: + bnx2fc_cmd_mgr_free(cmgr); + return NULL; +} + +void bnx2fc_cmd_mgr_free(struct bnx2fc_cmd_mgr *cmgr) +{ + struct io_bdt *bdt_info; + struct bnx2fc_hba *hba = cmgr->hba; + size_t bd_tbl_sz; + u16 min_xid = BNX2FC_MIN_XID; + u16 max_xid = BNX2FC_MAX_XID; + int num_ios; + int i; + + num_ios = max_xid - min_xid + 1; + + /* Free fcoe_bdt_ctx structures */ + if (!cmgr->io_bdt_pool) + goto free_cmd_pool; + + bd_tbl_sz = BNX2FC_MAX_BDS_PER_CMD * sizeof(struct fcoe_bd_ctx); + for (i = 0; i < num_ios; i++) { + bdt_info = cmgr->io_bdt_pool[i]; + if (bdt_info->bd_tbl) { + dma_free_coherent(&hba->pcidev->dev, bd_tbl_sz, + bdt_info->bd_tbl, + bdt_info->bd_tbl_dma); + bdt_info->bd_tbl = NULL; + } + } + + /* Destroy io_bdt pool */ + for (i = 0; i < num_ios; i++) { + kfree(cmgr->io_bdt_pool[i]); + cmgr->io_bdt_pool[i] = NULL; + } + + kfree(cmgr->io_bdt_pool); + cmgr->io_bdt_pool = NULL; + +free_cmd_pool: + kfree(cmgr->free_list_lock); + + /* Destroy cmd pool */ + if (!cmgr->free_list) + goto free_cmgr; + + for (i = 0; i < num_possible_cpus(); i++) { + struct list_head *list; + struct list_head *tmp; + + list_for_each_safe(list, tmp, &cmgr->free_list[i]) { + struct bnx2fc_cmd *io_req = (struct bnx2fc_cmd *)list; + list_del(&io_req->link); + kfree(io_req); + } + } + kfree(cmgr->free_list); +free_cmgr: + /* Free command manager itself */ + kfree(cmgr); +} + +struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type) +{ + struct fcoe_port *port = tgt->port; + struct bnx2fc_hba *hba = port->priv; + struct bnx2fc_cmd_mgr *cmd_mgr = hba->cmd_mgr; + struct bnx2fc_cmd *io_req; + struct list_head *listp; + struct io_bdt *bd_tbl; + u32 max_sqes; + u16 xid; + + max_sqes = tgt->max_sqes; + switch (type) { + case BNX2FC_TASK_MGMT_CMD: + max_sqes = BNX2FC_TM_MAX_SQES; + break; + case BNX2FC_ELS: + max_sqes = BNX2FC_ELS_MAX_SQES; + break; + default: + break; + } + + /* + * NOTE: Free list insertions and deletions are protected with + * cmgr lock + */ + spin_lock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]); + if ((list_empty(&(cmd_mgr->free_list[smp_processor_id()]))) || + (tgt->num_active_ios.counter >= max_sqes)) { + BNX2FC_TGT_DBG(tgt, "No free els_tm cmds available " + "ios(%d):sqes(%d)\n", + tgt->num_active_ios.counter, tgt->max_sqes); + if (list_empty(&(cmd_mgr->free_list[smp_processor_id()]))) + printk(KERN_ERR PFX "elstm_alloc: list_empty\n"); + spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]); + return NULL; + } + + listp = (struct list_head *) + 
cmd_mgr->free_list[smp_processor_id()].next; + list_del_init(listp); + io_req = (struct bnx2fc_cmd *) listp; + xid = io_req->xid; + cmd_mgr->cmds[xid] = io_req; + atomic_inc(&tgt->num_active_ios); + spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]); + + INIT_LIST_HEAD(&io_req->link); + + io_req->port = port; + io_req->cmd_mgr = cmd_mgr; + io_req->req_flags = 0; + io_req->cmd_type = type; + + /* Bind io_bdt for this io_req */ + /* Have a static link between io_req and io_bdt_pool */ + bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid]; + bd_tbl->io_req = io_req; + + /* Hold the io_req against deletion */ + kref_init(&io_req->refcount); + return io_req; +} +static struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt) +{ + struct fcoe_port *port = tgt->port; + struct bnx2fc_hba *hba = port->priv; + struct bnx2fc_cmd_mgr *cmd_mgr = hba->cmd_mgr; + struct bnx2fc_cmd *io_req; + struct list_head *listp; + struct io_bdt *bd_tbl; + u32 max_sqes; + u16 xid; + + max_sqes = BNX2FC_SCSI_MAX_SQES; + /* + * NOTE: Free list insertions and deletions are protected with + * cmgr lock + */ + spin_lock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]); + if ((list_empty(&cmd_mgr->free_list[smp_processor_id()])) || + (tgt->num_active_ios.counter >= max_sqes)) { + spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]); + return NULL; + } + + listp = (struct list_head *) + cmd_mgr->free_list[smp_processor_id()].next; + list_del_init(listp); + io_req = (struct bnx2fc_cmd *) listp; + xid = io_req->xid; + cmd_mgr->cmds[xid] = io_req; + atomic_inc(&tgt->num_active_ios); + spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]); + + INIT_LIST_HEAD(&io_req->link); + + io_req->port = port; + io_req->cmd_mgr = cmd_mgr; + io_req->req_flags = 0; + + /* Bind io_bdt for this io_req */ + /* Have a static link between io_req and io_bdt_pool */ + bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid]; + bd_tbl->io_req = io_req; + + /* Hold the io_req against deletion */ + kref_init(&io_req->refcount); + return io_req; +} + +void bnx2fc_cmd_release(struct kref *ref) +{ + struct bnx2fc_cmd *io_req = container_of(ref, + struct bnx2fc_cmd, refcount); + struct bnx2fc_cmd_mgr *cmd_mgr = io_req->cmd_mgr; + + spin_lock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]); + if (io_req->cmd_type != BNX2FC_SCSI_CMD) + bnx2fc_free_mp_resc(io_req); + cmd_mgr->cmds[io_req->xid] = NULL; + /* Delete IO from retire queue */ + list_del_init(&io_req->link); + /* Add it to the free list */ + list_add(&io_req->link, + &cmd_mgr->free_list[smp_processor_id()]); + atomic_dec(&io_req->tgt->num_active_ios); + spin_unlock_bh(&cmd_mgr->free_list_lock[smp_processor_id()]); +} + +static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req) +{ + struct bnx2fc_mp_req *mp_req = &(io_req->mp_req); + struct bnx2fc_hba *hba = io_req->port->priv; + size_t sz = sizeof(struct fcoe_bd_ctx); + + /* clear tm flags */ + mp_req->tm_flags = 0; + if (mp_req->mp_req_bd) { + dma_free_coherent(&hba->pcidev->dev, sz, + mp_req->mp_req_bd, + mp_req->mp_req_bd_dma); + mp_req->mp_req_bd = NULL; + } + if (mp_req->mp_resp_bd) { + dma_free_coherent(&hba->pcidev->dev, sz, + mp_req->mp_resp_bd, + mp_req->mp_resp_bd_dma); + mp_req->mp_resp_bd = NULL; + } + if (mp_req->req_buf) { + dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, + mp_req->req_buf, + mp_req->req_buf_dma); + mp_req->req_buf = NULL; + } + if (mp_req->resp_buf) { + dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, + mp_req->resp_buf, + mp_req->resp_buf_dma); + mp_req->resp_buf = NULL; + } +} + +int 
bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
+{
+	struct bnx2fc_mp_req *mp_req;
+	struct fcoe_bd_ctx *mp_req_bd;
+	struct fcoe_bd_ctx *mp_resp_bd;
+	struct bnx2fc_hba *hba = io_req->port->priv;
+	dma_addr_t addr;
+	size_t sz;
+
+	mp_req = (struct bnx2fc_mp_req *)&(io_req->mp_req);
+	memset(mp_req, 0, sizeof(struct bnx2fc_mp_req));
+
+	mp_req->req_len = sizeof(struct fcp_cmnd);
+	io_req->data_xfer_len = mp_req->req_len;
+	mp_req->req_buf = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+					     &mp_req->req_buf_dma,
+					     GFP_ATOMIC);
+	if (!mp_req->req_buf) {
+		printk(KERN_ERR PFX "unable to alloc MP req buffer\n");
+		bnx2fc_free_mp_resc(io_req);
+		return FAILED;
+	}
+
+	mp_req->resp_buf = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+					      &mp_req->resp_buf_dma,
+					      GFP_ATOMIC);
+	if (!mp_req->resp_buf) {
+		printk(KERN_ERR PFX "unable to alloc TM resp buffer\n");
+		bnx2fc_free_mp_resc(io_req);
+		return FAILED;
+	}
+	memset(mp_req->req_buf, 0, PAGE_SIZE);
+	memset(mp_req->resp_buf, 0, PAGE_SIZE);
+
+	/* Allocate and map mp_req_bd and mp_resp_bd */
+	sz = sizeof(struct fcoe_bd_ctx);
+	mp_req->mp_req_bd = dma_alloc_coherent(&hba->pcidev->dev, sz,
+						 &mp_req->mp_req_bd_dma,
+						 GFP_ATOMIC);
+	if (!mp_req->mp_req_bd) {
+		printk(KERN_ERR PFX "unable to alloc MP req bd\n");
+		bnx2fc_free_mp_resc(io_req);
+		return FAILED;
+	}
+	mp_req->mp_resp_bd = dma_alloc_coherent(&hba->pcidev->dev, sz,
+						 &mp_req->mp_resp_bd_dma,
+						 GFP_ATOMIC);
+	if (!mp_req->mp_resp_bd) {
+		printk(KERN_ERR PFX "unable to alloc MP resp bd\n");
+		bnx2fc_free_mp_resc(io_req);
+		return FAILED;
+	}
+	/* Fill bd table */
+	addr = mp_req->req_buf_dma;
+	mp_req_bd = mp_req->mp_req_bd;
+	mp_req_bd->buf_addr_lo = (u32)addr & 0xffffffff;
+	mp_req_bd->buf_addr_hi = (u32)((u64)addr >> 32);
+	mp_req_bd->buf_len = PAGE_SIZE;
+	mp_req_bd->flags = 0;
+
+	/*
+	 * MP buffer is either a task mgmt command or an ELS.
+ * So the assumption is that it consumes a single bd + * entry in the bd table + */ + mp_resp_bd = mp_req->mp_resp_bd; + addr = mp_req->resp_buf_dma; + mp_resp_bd->buf_addr_lo = (u32)addr & 0xffffffff; + mp_resp_bd->buf_addr_hi = (u32)((u64)addr >> 32); + mp_resp_bd->buf_len = PAGE_SIZE; + mp_resp_bd->flags = 0; + + return SUCCESS; +} + +static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags) +{ + struct fc_lport *lport; + struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); + struct fc_rport_libfc_priv *rp = rport->dd_data; + struct fcoe_port *port; + struct bnx2fc_hba *hba; + struct bnx2fc_rport *tgt; + struct bnx2fc_cmd *io_req; + struct bnx2fc_mp_req *tm_req; + struct fcoe_task_ctx_entry *task; + struct fcoe_task_ctx_entry *task_page; + struct Scsi_Host *host = sc_cmd->device->host; + struct fc_frame_header *fc_hdr; + struct fcp_cmnd *fcp_cmnd; + int task_idx, index; + int rc = SUCCESS; + u16 xid; + u32 sid, did; + unsigned long start = jiffies; + + lport = shost_priv(host); + port = lport_priv(lport); + hba = port->priv; + + if (rport == NULL) { + printk(KERN_ALERT PFX "device_reset: rport is NULL\n"); + rc = FAILED; + goto tmf_err; + } + + rc = fc_block_scsi_eh(sc_cmd); + if (rc) + return rc; + + if (lport->state != LPORT_ST_READY || !(lport->link_up)) { + printk(KERN_ERR PFX "device_reset: link is not ready\n"); + rc = FAILED; + goto tmf_err; + } + /* rport and tgt are allocated together, so tgt should be non-NULL */ + tgt = (struct bnx2fc_rport *)&rp[1]; + + if (!(test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags))) { + printk(KERN_ERR PFX "device_reset: tgt not offloaded\n"); + rc = FAILED; + goto tmf_err; + } +retry_tmf: + io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_TASK_MGMT_CMD); + if (!io_req) { + if (time_after(jiffies, start + HZ)) { + printk(KERN_ERR PFX "tmf: Failed TMF"); + rc = FAILED; + goto tmf_err; + } + msleep(20); + goto retry_tmf; + } + /* Initialize rest of io_req fields */ + io_req->sc_cmd = sc_cmd; + io_req->port = port; + io_req->tgt = tgt; + + tm_req = (struct bnx2fc_mp_req *)&(io_req->mp_req); + + rc = bnx2fc_init_mp_req(io_req); + if (rc == FAILED) { + printk(KERN_ERR PFX "Task mgmt MP request init failed\n"); + kref_put(&io_req->refcount, bnx2fc_cmd_release); + goto tmf_err; + } + + /* Set TM flags */ + io_req->io_req_flags = 0; + tm_req->tm_flags = tm_flags; + + /* Fill FCP_CMND */ + bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tm_req->req_buf); + fcp_cmnd = (struct fcp_cmnd *)tm_req->req_buf; + memset(fcp_cmnd->fc_cdb, 0, sc_cmd->cmd_len); + fcp_cmnd->fc_dl = 0; + + /* Fill FC header */ + fc_hdr = &(tm_req->req_fc_hdr); + sid = tgt->sid; + did = rport->port_id; + __fc_fill_fc_hdr(fc_hdr, FC_RCTL_DD_UNSOL_CMD, did, sid, + FC_TYPE_FCP, FC_FC_FIRST_SEQ | FC_FC_END_SEQ | + FC_FC_SEQ_INIT, 0); + /* Obtain exchange id */ + xid = io_req->xid; + + BNX2FC_TGT_DBG(tgt, "Initiate TMF - xid = 0x%x\n", xid); + task_idx = xid/BNX2FC_TASKS_PER_PAGE; + index = xid % BNX2FC_TASKS_PER_PAGE; + + /* Initialize task context for this IO request */ + task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx]; + task = &(task_page[index]); + bnx2fc_init_mp_task(io_req, task); + + sc_cmd->SCp.ptr = (char *)io_req; + + /* Obtain free SQ entry */ + spin_lock_bh(&tgt->tgt_lock); + bnx2fc_add_2_sq(tgt, xid); + + /* Enqueue the io_req to active_tm_queue */ + io_req->on_tmf_queue = 1; + list_add_tail(&io_req->link, &tgt->active_tm_queue); + + init_completion(&io_req->tm_done); + io_req->wait_for_comp = 1; + + /* Ring doorbell */ + 
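+	/*
+	 * The doorbell is a 32-bit write to the per-connection page
+	 * that bnx2fc_map_doorbell() ioremapped into tgt->ctx_base.
+	 */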
+	bnx2fc_ring_doorbell(tgt);
+	spin_unlock_bh(&tgt->tgt_lock);
+
+	rc = wait_for_completion_timeout(&io_req->tm_done,
+					 BNX2FC_TM_TIMEOUT * HZ);
+	spin_lock_bh(&tgt->tgt_lock);
+
+	io_req->wait_for_comp = 0;
+	if (!(test_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags)))
+		set_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags);
+
+	spin_unlock_bh(&tgt->tgt_lock);
+
+	if (!rc) {
+		printk(KERN_ERR PFX "task mgmt command failed...\n");
+		rc = FAILED;
+	} else {
+		printk(KERN_ERR PFX "task mgmt command success...\n");
+		rc = SUCCESS;
+	}
+tmf_err:
+	return rc;
+}
+
+int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req)
+{
+	struct fc_lport *lport;
+	struct bnx2fc_rport *tgt = io_req->tgt;
+	struct fc_rport *rport = tgt->rport;
+	struct fc_rport_priv *rdata = tgt->rdata;
+	struct bnx2fc_hba *hba;
+	struct fcoe_port *port;
+	struct bnx2fc_cmd *abts_io_req;
+	struct fcoe_task_ctx_entry *task;
+	struct fcoe_task_ctx_entry *task_page;
+	struct fc_frame_header *fc_hdr;
+	struct bnx2fc_mp_req *abts_req;
+	int task_idx, index;
+	u32 sid, did;
+	u16 xid;
+	int rc = SUCCESS;
+	u32 r_a_tov = rdata->r_a_tov;
+
+	/* called with tgt_lock held */
+	BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_abts\n");
+
+	port = io_req->port;
+	hba = port->priv;
+	lport = port->lport;
+
+	if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
+		printk(KERN_ERR PFX "initiate_abts: tgt not offloaded\n");
+		rc = FAILED;
+		goto abts_err;
+	}
+
+	if (rport == NULL) {
+		printk(KERN_ALERT PFX "initiate_abts: rport is NULL\n");
+		rc = FAILED;
+		goto abts_err;
+	}
+
+	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
+		printk(KERN_ERR PFX "initiate_abts: link is not ready\n");
+		rc = FAILED;
+		goto abts_err;
+	}
+
+	abts_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_ABTS);
+	if (!abts_io_req) {
+		printk(KERN_ERR PFX "abts: couldn't allocate cmd\n");
+		rc = FAILED;
+		goto abts_err;
+	}
+
+	/* Initialize rest of io_req fields */
+	abts_io_req->sc_cmd = NULL;
+	abts_io_req->port = port;
+	abts_io_req->tgt = tgt;
+	abts_io_req->data_xfer_len = 0; /* No data transfer for ABTS */
+
+	abts_req = (struct bnx2fc_mp_req *)&(abts_io_req->mp_req);
+	memset(abts_req, 0, sizeof(struct bnx2fc_mp_req));
+
+	/* Fill FC header */
+	fc_hdr = &(abts_req->req_fc_hdr);
+
+	/* Obtain oxid and rxid for the original exchange to be aborted */
+	fc_hdr->fh_ox_id = htons(io_req->xid);
+	fc_hdr->fh_rx_id = htons(io_req->task->rx_wr_tx_rd.rx_id);
+
+	sid = tgt->sid;
+	did = rport->port_id;
+
+	__fc_fill_fc_hdr(fc_hdr, FC_RCTL_BA_ABTS, did, sid,
+			   FC_TYPE_BLS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
+			   FC_FC_SEQ_INIT, 0);
+
+	xid = abts_io_req->xid;
+	BNX2FC_IO_DBG(abts_io_req, "ABTS io_req\n");
+	task_idx = xid/BNX2FC_TASKS_PER_PAGE;
+	index = xid % BNX2FC_TASKS_PER_PAGE;
+
+	/* Initialize task context for this IO request */
+	task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx];
+	task = &(task_page[index]);
+	bnx2fc_init_mp_task(abts_io_req, task);
+
+	/*
+	 * ABTS task is a temporary task that will be cleaned up
+	 * irrespective of ABTS response. We need to start the timer
+	 * for the original exchange, as the CQE is posted for the original
+	 * IO request.
+	 *
+	 * Timer for ABTS is started only when it is originated by a
+	 * TM request. For the ABTS issued as part of ULP timeout,
+	 * scsi-ml maintains the timers.
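+	 * The 2 * r_a_tov timer below follows the FC rule that an
+	 * aborted exchange may be retired only after 2 * R_A_TOV.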
+int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req)
+{
+	struct fc_lport *lport;
+	struct bnx2fc_rport *tgt = io_req->tgt;
+	struct fc_rport *rport = tgt->rport;
+	struct fc_rport_priv *rdata = tgt->rdata;
+	struct bnx2fc_hba *hba;
+	struct fcoe_port *port;
+	struct bnx2fc_cmd *abts_io_req;
+	struct fcoe_task_ctx_entry *task;
+	struct fcoe_task_ctx_entry *task_page;
+	struct fc_frame_header *fc_hdr;
+	struct bnx2fc_mp_req *abts_req;
+	int task_idx, index;
+	u32 sid, did;
+	u16 xid;
+	int rc = SUCCESS;
+	u32 r_a_tov = rdata->r_a_tov;
+
+	/* called with tgt_lock held */
+	BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_abts\n");
+
+	port = io_req->port;
+	hba = port->priv;
+	lport = port->lport;
+
+	if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
+		printk(KERN_ERR PFX "initiate_abts: tgt not offloaded\n");
+		rc = FAILED;
+		goto abts_err;
+	}
+
+	if (rport == NULL) {
+		printk(KERN_ALERT PFX "initiate_abts: rport is NULL\n");
+		rc = FAILED;
+		goto abts_err;
+	}
+
+	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
+		printk(KERN_ERR PFX "initiate_abts: link is not ready\n");
+		rc = FAILED;
+		goto abts_err;
+	}
+
+	abts_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_ABTS);
+	if (!abts_io_req) {
+		printk(KERN_ERR PFX "abts: couldn't allocate cmd\n");
+		rc = FAILED;
+		goto abts_err;
+	}
+
+	/* Initialize rest of io_req fields */
+	abts_io_req->sc_cmd = NULL;
+	abts_io_req->port = port;
+	abts_io_req->tgt = tgt;
+	abts_io_req->data_xfer_len = 0; /* No data transfer for ABTS */
+
+	abts_req = (struct bnx2fc_mp_req *)&(abts_io_req->mp_req);
+	memset(abts_req, 0, sizeof(struct bnx2fc_mp_req));
+
+	/* Fill FC header */
+	fc_hdr = &(abts_req->req_fc_hdr);
+
+	/* Obtain oxid and rxid for the original exchange to be aborted */
+	fc_hdr->fh_ox_id = htons(io_req->xid);
+	fc_hdr->fh_rx_id = htons(io_req->task->rx_wr_tx_rd.rx_id);
+
+	sid = tgt->sid;
+	did = rport->port_id;
+
+	__fc_fill_fc_hdr(fc_hdr, FC_RCTL_BA_ABTS, did, sid,
+			 FC_TYPE_BLS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
+			 FC_FC_SEQ_INIT, 0);
+
+	xid = abts_io_req->xid;
+	BNX2FC_IO_DBG(abts_io_req, "ABTS io_req\n");
+	task_idx = xid/BNX2FC_TASKS_PER_PAGE;
+	index = xid % BNX2FC_TASKS_PER_PAGE;
+
+	/* Initialize task context for this IO request */
+	task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx];
+	task = &(task_page[index]);
+	bnx2fc_init_mp_task(abts_io_req, task);
+
+	/*
+	 * ABTS task is a temporary task that will be cleaned up
+	 * irrespective of ABTS response. We need to start the timer
+	 * for the original exchange, as the CQE is posted for the original
+	 * IO request.
+	 *
+	 * Timer for ABTS is started only when it is originated by a
+	 * TM request. For the ABTS issued as part of ULP timeout,
+	 * scsi-ml maintains the timers.
+	 */
+
+	/* if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags))*/
+	bnx2fc_cmd_timer_set(io_req, 2 * r_a_tov);
+
+	/* Obtain free SQ entry */
+	bnx2fc_add_2_sq(tgt, xid);
+
+	/* Ring doorbell */
+	bnx2fc_ring_doorbell(tgt);
+
+abts_err:
+	return rc;
+}
+
+int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req)
+{
+	struct fc_lport *lport;
+	struct bnx2fc_rport *tgt = io_req->tgt;
+	struct bnx2fc_hba *hba;
+	struct fcoe_port *port;
+	struct bnx2fc_cmd *cleanup_io_req;
+	struct fcoe_task_ctx_entry *task;
+	struct fcoe_task_ctx_entry *task_page;
+	int task_idx, index;
+	u16 xid, orig_xid;
+	int rc = 0;
+
+	/* ASSUMPTION: called with tgt_lock held */
+	BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_cleanup\n");
+
+	port = io_req->port;
+	hba = port->priv;
+	lport = port->lport;
+
+	cleanup_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_CLEANUP);
+	if (!cleanup_io_req) {
+		printk(KERN_ERR PFX "cleanup: couldn't allocate cmd\n");
+		rc = -1;
+		goto cleanup_err;
+	}
+
+	/* Initialize rest of io_req fields */
+	cleanup_io_req->sc_cmd = NULL;
+	cleanup_io_req->port = port;
+	cleanup_io_req->tgt = tgt;
+	cleanup_io_req->data_xfer_len = 0; /* No data transfer for cleanup */
+
+	xid = cleanup_io_req->xid;
+
+	task_idx = xid/BNX2FC_TASKS_PER_PAGE;
+	index = xid % BNX2FC_TASKS_PER_PAGE;
+
+	/* Initialize task context for this IO request */
+	task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx];
+	task = &(task_page[index]);
+	orig_xid = io_req->xid;
+
+	BNX2FC_IO_DBG(io_req, "CLEANUP io_req xid = 0x%x\n", xid);
+
+	bnx2fc_init_cleanup_task(cleanup_io_req, task, orig_xid);
+
+	/* Obtain free SQ entry */
+	bnx2fc_add_2_sq(tgt, xid);
+
+	/* Ring doorbell */
+	bnx2fc_ring_doorbell(tgt);
+
+cleanup_err:
+	return rc;
+}
+
+/**
+ * bnx2fc_eh_target_reset - Reset a target
+ *
+ * @sc_cmd:	SCSI command
+ *
+ * Set from SCSI host template to send task mgmt command to the target
+ * and wait for the response
+ */
+int bnx2fc_eh_target_reset(struct scsi_cmnd *sc_cmd)
+{
+	return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_TGT_RESET);
+}
+
+/**
+ * bnx2fc_eh_device_reset - Reset a single LUN
+ *
+ * @sc_cmd:	SCSI command
+ *
+ * Set from SCSI host template to send task mgmt command to the target
+ * and wait for the response
+ */
+int bnx2fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
+{
+	return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET);
+}
+
+/**
+ * bnx2fc_eh_abort - eh_abort_handler API to abort an outstanding
+ *			SCSI command
+ *
+ * @sc_cmd:	SCSI_ML command pointer
+ *
+ * SCSI abort request handler
+ */
+int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
+{
+	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
+	struct fc_rport_libfc_priv *rp = rport->dd_data;
+	struct bnx2fc_cmd *io_req;
+	struct fc_lport *lport;
+	struct bnx2fc_rport *tgt;
+	int rc = FAILED;
+
+	rc = fc_block_scsi_eh(sc_cmd);
+	if (rc)
+		return rc;
+
+	lport = shost_priv(sc_cmd->device->host);
+	if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) {
+		printk(KERN_ALERT PFX "eh_abort: link not ready\n");
+		return rc;
+	}
+
+	tgt = (struct bnx2fc_rport *)&rp[1];
+
+	BNX2FC_TGT_DBG(tgt, "Entered bnx2fc_eh_abort\n");
+
+	spin_lock_bh(&tgt->tgt_lock);
+	io_req = (struct bnx2fc_cmd *)sc_cmd->SCp.ptr;
+	if (!io_req) {
+		/* Command might have just completed */
+		printk(KERN_ERR PFX "eh_abort: io_req is NULL\n");
+		spin_unlock_bh(&tgt->tgt_lock);
+		return SUCCESS;
+	}
+	BNX2FC_IO_DBG(io_req, "eh_abort - refcnt = %d\n",
+		      io_req->refcount.refcount.counter);
+
+	/* Hold IO request across abort processing */
+
kref_get(&io_req->refcount); + + BUG_ON(tgt != io_req->tgt); + + /* Remove the io_req from the active_q. */ + /* + * Task Mgmt functions (LUN RESET & TGT RESET) will not + * issue an ABTS on this particular IO req, as the + * io_req is no longer in the active_q. + */ + if (tgt->flush_in_prog) { + printk(KERN_ALERT PFX "eh_abort: io_req (xid = 0x%x) " + "flush in progress\n", io_req->xid); + kref_put(&io_req->refcount, bnx2fc_cmd_release); + spin_unlock_bh(&tgt->tgt_lock); + return SUCCESS; + } + + if (io_req->on_active_queue == 0) { + printk(KERN_ALERT PFX "eh_abort: io_req (xid = 0x%x) " + "not on active_q\n", io_req->xid); + /* + * This condition can happen only due to the FW bug, + * where we do not receive cleanup response from + * the FW. Handle this case gracefully by erroring + * back the IO request to SCSI-ml + */ + bnx2fc_scsi_done(io_req, DID_ABORT); + + kref_put(&io_req->refcount, bnx2fc_cmd_release); + spin_unlock_bh(&tgt->tgt_lock); + return SUCCESS; + } + + /* + * Only eh_abort processing will remove the IO from + * active_cmd_q before processing the request. this is + * done to avoid race conditions between IOs aborted + * as part of task management completion and eh_abort + * processing + */ + list_del_init(&io_req->link); + io_req->on_active_queue = 0; + /* Move IO req to retire queue */ + list_add_tail(&io_req->link, &tgt->io_retire_queue); + + init_completion(&io_req->tm_done); + io_req->wait_for_comp = 1; + + if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) { + /* Cancel the current timer running on this io_req */ + if (cancel_delayed_work(&io_req->timeout_work)) + kref_put(&io_req->refcount, + bnx2fc_cmd_release); /* drop timer hold */ + set_bit(BNX2FC_FLAG_EH_ABORT, &io_req->req_flags); + rc = bnx2fc_initiate_abts(io_req); + } else { + printk(KERN_ALERT PFX "eh_abort: io_req (xid = 0x%x) " + "already in abts processing\n", io_req->xid); + kref_put(&io_req->refcount, bnx2fc_cmd_release); + spin_unlock_bh(&tgt->tgt_lock); + return SUCCESS; + } + if (rc == FAILED) { + kref_put(&io_req->refcount, bnx2fc_cmd_release); + spin_unlock_bh(&tgt->tgt_lock); + return rc; + } + spin_unlock_bh(&tgt->tgt_lock); + + wait_for_completion(&io_req->tm_done); + + spin_lock_bh(&tgt->tgt_lock); + io_req->wait_for_comp = 0; + if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE, + &io_req->req_flags))) { + /* Let the scsi-ml try to recover this command */ + printk(KERN_ERR PFX "abort failed, xid = 0x%x\n", + io_req->xid); + rc = FAILED; + } else { + /* + * We come here even when there was a race condition + * between timeout and abts completion, and abts + * completion happens just in time. 
+		 */
+		BNX2FC_IO_DBG(io_req, "abort succeeded\n");
+		rc = SUCCESS;
+		bnx2fc_scsi_done(io_req, DID_ABORT);
+		kref_put(&io_req->refcount, bnx2fc_cmd_release);
+	}
+
+	/* release the reference taken in eh_abort */
+	kref_put(&io_req->refcount, bnx2fc_cmd_release);
+	spin_unlock_bh(&tgt->tgt_lock);
+	return rc;
+}
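
bnx2fc_eh_abort() above takes an extra reference before touching the io_req (kref_get() on entry) and drops it on every exit path, so a racing completion cannot free the request mid-abort. A reduced sketch of that hold/release idiom using the kernel's kref API; the demo_* names are invented for illustration:

#include <linux/kref.h>
#include <linux/slab.h>

struct demo_req {
	struct kref refcount;
	/* ... request state ... */
};

static void demo_req_release(struct kref *kref)
{
	struct demo_req *req = container_of(kref, struct demo_req, refcount);

	kfree(req);	/* last reference dropped */
}

static void demo_abort_handler(struct demo_req *req)
{
	kref_get(&req->refcount);	/* hold across abort processing */

	/* ... work that may race with a completion path ... */

	kref_put(&req->refcount, demo_req_release);	/* drop the hold */
}

The release callback runs only when the count hits zero, so the handler, the timer path, and the completion path can each drop their own reference without coordinating who frees the object.
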
+
+void bnx2fc_process_cleanup_compl(struct bnx2fc_cmd *io_req,
+				  struct fcoe_task_ctx_entry *task,
+				  u8 num_rq)
+{
+	BNX2FC_IO_DBG(io_req, "Entered process_cleanup_compl "
+			"refcnt = %d, cmd_type = %d\n",
+			io_req->refcount.refcount.counter, io_req->cmd_type);
+	bnx2fc_scsi_done(io_req, DID_ERROR);
+	kref_put(&io_req->refcount, bnx2fc_cmd_release);
+}
+
+void bnx2fc_process_abts_compl(struct bnx2fc_cmd *io_req,
+			       struct fcoe_task_ctx_entry *task,
+			       u8 num_rq)
+{
+	u32 r_ctl;
+	u32 r_a_tov = FC_DEF_R_A_TOV;
+	u8 issue_rrq = 0;
+	struct bnx2fc_rport *tgt = io_req->tgt;
+
+	BNX2FC_IO_DBG(io_req, "Entered process_abts_compl xid = 0x%x "
+			"refcnt = %d, cmd_type = %d\n",
+			io_req->xid,
+			io_req->refcount.refcount.counter, io_req->cmd_type);
+
+	if (test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
+			     &io_req->req_flags)) {
+		BNX2FC_IO_DBG(io_req, "Timer context finished processing"
+				" this io\n");
+		return;
+	}
+
+	/* Do not issue RRQ as this IO is already cleaned up */
+	if (test_and_set_bit(BNX2FC_FLAG_IO_CLEANUP,
+			     &io_req->req_flags))
+		goto io_compl;
+
+	/*
+	 * For ABTS issued due to SCSI eh_abort_handler, timeout
+	 * values are maintained by scsi-ml itself. Cancel timeout
+	 * in case ABTS issued as part of task management function
+	 * or due to FW error.
+	 */
+	if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags))
+		if (cancel_delayed_work(&io_req->timeout_work))
+			kref_put(&io_req->refcount,
+				 bnx2fc_cmd_release); /* drop timer hold */
+
+	r_ctl = task->cmn.general.rsp_info.abts_rsp.r_ctl;
+
+	switch (r_ctl) {
+	case FC_RCTL_BA_ACC:
+		/*
+		 * Don't release this cmd yet. It will be released
+		 * after we get the RRQ response
+		 */
+		BNX2FC_IO_DBG(io_req, "ABTS response - ACC Send RRQ\n");
+		issue_rrq = 1;
+		break;
+
+	case FC_RCTL_BA_RJT:
+		BNX2FC_IO_DBG(io_req, "ABTS response - RJT\n");
+		break;
+	default:
+		printk(KERN_ERR PFX "Unknown ABTS response\n");
+		break;
+	}
+
+	if (issue_rrq) {
+		BNX2FC_IO_DBG(io_req, "Issue RRQ after R_A_TOV\n");
+		set_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags);
+	}
+	set_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags);
+	bnx2fc_cmd_timer_set(io_req, r_a_tov);
+
+io_compl:
+	if (io_req->wait_for_comp) {
+		if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
+				       &io_req->req_flags))
+			complete(&io_req->tm_done);
+	} else {
+		/*
+		 * We end up here when the ABTS is issued in an
+		 * asynchronous context, i.e. as part of task
+		 * management completion, when an FW error is
+		 * received, or when the ABTS is issued because
+		 * the IO timed out.
+		 */
+
+		if (io_req->on_active_queue) {
+			list_del_init(&io_req->link);
+			io_req->on_active_queue = 0;
+			/* Move IO req to retire queue */
+			list_add_tail(&io_req->link, &tgt->io_retire_queue);
+		}
+		bnx2fc_scsi_done(io_req, DID_ERROR);
+		kref_put(&io_req->refcount, bnx2fc_cmd_release);
+	}
+}
+
+static void bnx2fc_lun_reset_cmpl(struct bnx2fc_cmd *io_req)
+{
+	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
+	struct bnx2fc_rport *tgt = io_req->tgt;
+	struct list_head *list;
+	struct list_head *tmp;
+	struct bnx2fc_cmd *cmd;
+	int tm_lun = sc_cmd->device->lun;
+	int rc = 0;
+	int lun;
+
+	/* called with tgt_lock held */
+	BNX2FC_IO_DBG(io_req, "Entered bnx2fc_lun_reset_cmpl\n");
+	/*
+	 * Walk through the active_ios queue and ABORT each IO
+	 * that matches the LUN that was reset
+	 */
+	list_for_each_safe(list, tmp, &tgt->active_cmd_queue) {
+		BNX2FC_TGT_DBG(tgt, "LUN RST cmpl: scan for pending IOs\n");
+		cmd = (struct bnx2fc_cmd *)list;
+		lun = cmd->sc_cmd->device->lun;
+		if (lun == tm_lun) {
+			/* Initiate ABTS on this cmd */
+			if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
+					      &cmd->req_flags)) {
+				/* cancel the timeout of this cmd, not of
+				 * the TMF io_req whose timer is handled
+				 * in the TM completion path */
+				if (cancel_delayed_work(&cmd->timeout_work))
+					kref_put(&cmd->refcount,
+						 bnx2fc_cmd_release);
+							/* timer hold */
+				rc = bnx2fc_initiate_abts(cmd);
+				/* ABTS shouldn't fail in this context */
+				WARN_ON(rc != SUCCESS);
+			} else
+				printk(KERN_ERR PFX "lun_rst: abts already in"
+					" progress for this IO 0x%x\n",
+					cmd->xid);
+		}
+	}
+}
+
+static void bnx2fc_tgt_reset_cmpl(struct bnx2fc_cmd *io_req)
+{
+	struct bnx2fc_rport *tgt = io_req->tgt;
+	struct list_head *list;
+	struct list_head *tmp;
+	struct bnx2fc_cmd *cmd;
+	int rc = 0;
+
+	/* called with tgt_lock held */
+	BNX2FC_IO_DBG(io_req, "Entered bnx2fc_tgt_reset_cmpl\n");
+	/*
+	 * Walk through the active_ios queue and ABORT every IO,
+	 * as the whole target is being reset
+	 */
+	list_for_each_safe(list, tmp, &tgt->active_cmd_queue) {
+		BNX2FC_TGT_DBG(tgt, "TGT RST cmpl: scan for pending IOs\n");
+		cmd = (struct bnx2fc_cmd *)list;
+		/* Initiate ABTS */
+		if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
+				      &cmd->req_flags)) {
+			/* cancel the timeout of this cmd (see
+			 * bnx2fc_lun_reset_cmpl above) */
+			if (cancel_delayed_work(&cmd->timeout_work))
+				kref_put(&cmd->refcount,
+					 bnx2fc_cmd_release); /* timer hold */
+			rc = bnx2fc_initiate_abts(cmd);
+			/* ABTS shouldn't fail in this context */
+			WARN_ON(rc != SUCCESS);
+
+		} else
+			printk(KERN_ERR PFX "tgt_rst: abts already in progress"
+				" for this IO 0x%x\n", cmd->xid);
+	}
+}
+
+void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
+			     struct fcoe_task_ctx_entry *task, u8 num_rq)
+{
+	struct bnx2fc_mp_req *tm_req;
+	struct fc_frame_header *fc_hdr;
+	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
+	u64 *hdr;
+	u64 *temp_hdr;
+	void *rsp_buf;
+
+	/* Called with tgt_lock held */
+	BNX2FC_IO_DBG(io_req, "Entered process_tm_compl\n");
+
+	if (!(test_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags)))
+		set_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags);
+	else {
+		/* TM has already timed out and we got a
+		 * delayed completion. Ignore completion
+		 * processing.
+ */ + return; + } + + tm_req = &(io_req->mp_req); + fc_hdr = &(tm_req->resp_fc_hdr); + hdr = (u64 *)fc_hdr; + temp_hdr = (u64 *) + &task->cmn.general.cmd_info.mp_fc_frame.fc_hdr; + hdr[0] = cpu_to_be64(temp_hdr[0]); + hdr[1] = cpu_to_be64(temp_hdr[1]); + hdr[2] = cpu_to_be64(temp_hdr[2]); + + tm_req->resp_len = task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_off; + + rsp_buf = tm_req->resp_buf; + + if (fc_hdr->fh_r_ctl == FC_RCTL_DD_CMD_STATUS) { + bnx2fc_parse_fcp_rsp(io_req, + (struct fcoe_fcp_rsp_payload *) + rsp_buf, num_rq); + if (io_req->fcp_rsp_code == 0) { + /* TM successful */ + if (tm_req->tm_flags & FCP_TMF_LUN_RESET) + bnx2fc_lun_reset_cmpl(io_req); + else if (tm_req->tm_flags & FCP_TMF_TGT_RESET) + bnx2fc_tgt_reset_cmpl(io_req); + } + } else { + printk(KERN_ERR PFX "tmf's fc_hdr r_ctl = 0x%x\n", + fc_hdr->fh_r_ctl); + } + if (!sc_cmd->SCp.ptr) { + printk(KERN_ALERT PFX "tm_compl: SCp.ptr is NULL\n"); + return; + } + switch (io_req->fcp_status) { + case FC_GOOD: + if (io_req->cdb_status == 0) { + /* Good IO completion */ + sc_cmd->result = DID_OK << 16; + } else { + /* Transport status is good, SCSI status not good */ + sc_cmd->result = (DID_OK << 16) | io_req->cdb_status; + } + if (io_req->fcp_resid) + scsi_set_resid(sc_cmd, io_req->fcp_resid); + break; + + default: + BNX2FC_IO_DBG(io_req, "process_tm_compl: fcp_status = %d\n", + io_req->fcp_status); + break; + } + + sc_cmd = io_req->sc_cmd; + io_req->sc_cmd = NULL; + + /* check if the io_req exists in tgt's tmf_q */ + if (io_req->on_tmf_queue) { + + list_del_init(&io_req->link); + io_req->on_tmf_queue = 0; + } else { + + printk(KERN_ALERT PFX "Command not on active_cmd_queue!\n"); + return; + } + + sc_cmd->SCp.ptr = NULL; + sc_cmd->scsi_done(sc_cmd); + + kref_put(&io_req->refcount, bnx2fc_cmd_release); + if (io_req->wait_for_comp) { + BNX2FC_IO_DBG(io_req, "tm_compl - wake up the waiter\n"); + complete(&io_req->tm_done); + } +} + +static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len, + int bd_index) +{ + struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl; + int frag_size, sg_frags; + + sg_frags = 0; + while (sg_len) { + if (sg_len >= BNX2FC_BD_SPLIT_SZ) + frag_size = BNX2FC_BD_SPLIT_SZ; + else + frag_size = sg_len; + bd[bd_index + sg_frags].buf_addr_lo = addr & 0xffffffff; + bd[bd_index + sg_frags].buf_addr_hi = addr >> 32; + bd[bd_index + sg_frags].buf_len = (u16)frag_size; + bd[bd_index + sg_frags].flags = 0; + + addr += (u64) frag_size; + sg_frags++; + sg_len -= frag_size; + } + return sg_frags; + +} + +static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req) +{ + struct scsi_cmnd *sc = io_req->sc_cmd; + struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl; + struct scatterlist *sg; + int byte_count = 0; + int sg_count = 0; + int bd_count = 0; + int sg_frags; + unsigned int sg_len; + u64 addr; + int i; + + sg_count = scsi_dma_map(sc); + scsi_for_each_sg(sc, sg, sg_count, i) { + sg_len = sg_dma_len(sg); + addr = sg_dma_address(sg); + if (sg_len > BNX2FC_MAX_BD_LEN) { + sg_frags = bnx2fc_split_bd(io_req, addr, sg_len, + bd_count); + } else { + + sg_frags = 1; + bd[bd_count].buf_addr_lo = addr & 0xffffffff; + bd[bd_count].buf_addr_hi = addr >> 32; + bd[bd_count].buf_len = (u16)sg_len; + bd[bd_count].flags = 0; + } + bd_count += sg_frags; + byte_count += sg_len; + } + if (byte_count != scsi_bufflen(sc)) + printk(KERN_ERR PFX "byte_count = %d != scsi_bufflen = %d, " + "task_id = 0x%x\n", byte_count, scsi_bufflen(sc), + io_req->xid); + return bd_count; +} + +static void bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req) 
+{
+	struct scsi_cmnd *sc = io_req->sc_cmd;
+	struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl;
+	int bd_count;
+
+	if (scsi_sg_count(sc))
+		bd_count = bnx2fc_map_sg(io_req);
+	else {
+		bd_count = 0;
+		bd[0].buf_addr_lo = bd[0].buf_addr_hi = 0;
+		bd[0].buf_len = bd[0].flags = 0;
+	}
+	io_req->bd_tbl->bd_valid = bd_count;
+}
+
+static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req)
+{
+	struct scsi_cmnd *sc = io_req->sc_cmd;
+
+	if (io_req->bd_tbl->bd_valid && sc) {
+		scsi_dma_unmap(sc);
+		io_req->bd_tbl->bd_valid = 0;
+	}
+}
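
bnx2fc_map_sg() above calls bnx2fc_split_bd() whenever a single scatter-gather element exceeds BNX2FC_MAX_BD_LEN, emitting one buffer descriptor per BNX2FC_BD_SPLIT_SZ-sized piece. The fragment count is just a ceiling division; a stand-alone sketch, with an illustrative split size rather than the driver's real constant from bnx2fc.h:

#include <stdio.h>

#define BNX2FC_BD_SPLIT_SZ	0x8000	/* illustrative: 32 KiB per BD */

static int bd_frags(unsigned int sg_len)
{
	/* ceiling division: BDs needed for one SG element */
	return (sg_len + BNX2FC_BD_SPLIT_SZ - 1) / BNX2FC_BD_SPLIT_SZ;
}

int main(void)
{
	/* 80 KiB -> two full 32 KiB BDs plus one 16 KiB tail BD */
	printf("%u bytes -> %d BDs\n", 0x14000u, bd_frags(0x14000));
	return 0;
}

bnx2fc_split_bd() reaches the same count by looping and subtracting, which also lets it fill in each fragment's address and length as it goes.
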
+
+void bnx2fc_build_fcp_cmnd(struct bnx2fc_cmd *io_req,
+			   struct fcp_cmnd *fcp_cmnd)
+{
+	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
+	char tag[2];
+
+	memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
+
+	int_to_scsilun(sc_cmd->device->lun,
+			(struct scsi_lun *) fcp_cmnd->fc_lun);
+
+	fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len);
+	memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);
+
+	fcp_cmnd->fc_cmdref = 0;
+	fcp_cmnd->fc_pri_ta = 0;
+	fcp_cmnd->fc_tm_flags = io_req->mp_req.tm_flags;
+	fcp_cmnd->fc_flags = io_req->io_req_flags;
+
+	if (scsi_populate_tag_msg(sc_cmd, tag)) {
+		switch (tag[0]) {
+		case HEAD_OF_QUEUE_TAG:
+			fcp_cmnd->fc_pri_ta = FCP_PTA_HEADQ;
+			break;
+		case ORDERED_QUEUE_TAG:
+			fcp_cmnd->fc_pri_ta = FCP_PTA_ORDERED;
+			break;
+		default:
+			fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
+			break;
+		}
+	} else {
+		fcp_cmnd->fc_pri_ta = 0;
+	}
+}
+
+static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
+				 struct fcoe_fcp_rsp_payload *fcp_rsp,
+				 u8 num_rq)
+{
+	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
+	struct bnx2fc_rport *tgt = io_req->tgt;
+	u8 rsp_flags = fcp_rsp->fcp_flags.flags;
+	u32 rq_buff_len = 0;
+	int i;
+	unsigned char *rq_data;
+	unsigned char *dummy;
+	int fcp_sns_len = 0;
+	int fcp_rsp_len = 0;
+
+	io_req->fcp_status = FC_GOOD;
+	io_req->fcp_resid = fcp_rsp->fcp_resid;
+
+	io_req->scsi_comp_flags = rsp_flags;
+	CMD_SCSI_STATUS(sc_cmd) = io_req->cdb_status =
+				fcp_rsp->scsi_status_code;
+
+	/* Fetch fcp_rsp_info and fcp_sns_info if available */
+	if (num_rq) {
+
+		/*
+		 * We do not anticipate num_rq > 1: the Linux-defined
+		 * SCSI_SENSE_BUFFERSIZE is 96 bytes plus 8 bytes of
+		 * FCP_RSP_INFO, so a single 256-byte RQ buffer is
+		 * large enough to hold both.
+		 */
+
+		if (rsp_flags &
+		    FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID) {
+			fcp_rsp_len = rq_buff_len
+					= fcp_rsp->fcp_rsp_len;
+		}
+
+		if (rsp_flags &
+		    FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID) {
+			fcp_sns_len = fcp_rsp->fcp_sns_len;
+			rq_buff_len += fcp_rsp->fcp_sns_len;
+		}
+
+		io_req->fcp_rsp_len = fcp_rsp_len;
+		io_req->fcp_sns_len = fcp_sns_len;
+
+		if (rq_buff_len > num_rq * BNX2FC_RQ_BUF_SZ) {
+			/* Invalid sense length. */
+			printk(KERN_ALERT PFX "invalid sns length %d\n",
+				rq_buff_len);
+			/* reset rq_buff_len */
+			rq_buff_len =  num_rq * BNX2FC_RQ_BUF_SZ;
+		}
+
+		rq_data = bnx2fc_get_next_rqe(tgt, 1);
+
+		if (num_rq > 1) {
+			/* We do not need extra sense data */
+			for (i = 1; i < num_rq; i++)
+				dummy = bnx2fc_get_next_rqe(tgt, 1);
+		}
+
+		/* fetch fcp_rsp_code */
+		if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) {
+			/* Only for task management function */
+			io_req->fcp_rsp_code = rq_data[3];
+			printk(KERN_ERR PFX "fcp_rsp_code = %d\n",
+				io_req->fcp_rsp_code);
+		}
+
+		/* fetch sense data */
+		rq_data += fcp_rsp_len;
+
+		if (fcp_sns_len > SCSI_SENSE_BUFFERSIZE) {
+			printk(KERN_ERR PFX "Truncating sense buffer\n");
+			fcp_sns_len = SCSI_SENSE_BUFFERSIZE;
+		}
+
+		/* sense_buffer is a pointer; clear the whole buffer,
+		 * not sizeof(pointer) bytes */
+		memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+		if (fcp_sns_len)
+			memcpy(sc_cmd->sense_buffer, rq_data, fcp_sns_len);
+
+		/* return RQ entries */
+		for (i = 0; i < num_rq; i++)
+			bnx2fc_return_rqe(tgt, 1);
+	}
+}
+
+/**
+ * bnx2fc_queuecommand - Queuecommand function of the scsi template
+ *
+ * @host:	The Scsi_Host the command was issued to
+ * @sc_cmd:	struct scsi_cmnd to be executed
+ *
+ * This is the IO strategy routine, called by SCSI-ML
+ **/
+int bnx2fc_queuecommand(struct Scsi_Host *host,
+			struct scsi_cmnd *sc_cmd)
+{
+	struct fc_lport *lport = shost_priv(host);
+	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
+	struct fc_rport_libfc_priv *rp = rport->dd_data;
+	struct bnx2fc_rport *tgt;
+	struct bnx2fc_cmd *io_req;
+	int rc = 0;
+	int rval;
+
+	rval = fc_remote_port_chkready(rport);
+	if (rval) {
+		sc_cmd->result = rval;
+		sc_cmd->scsi_done(sc_cmd);
+		return 0;
+	}
+
+	if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) {
+		rc = SCSI_MLQUEUE_HOST_BUSY;
+		goto exit_qcmd;
+	}
+
+	/* rport and tgt are allocated together, so tgt should be non-NULL */
+	tgt = (struct bnx2fc_rport *)&rp[1];
+
+	if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
+		/*
+		 * Session is not offloaded yet. Let SCSI-ml retry
+		 * the command.
+ */ + rc = SCSI_MLQUEUE_TARGET_BUSY; + goto exit_qcmd; + } + + io_req = bnx2fc_cmd_alloc(tgt); + if (!io_req) { + rc = SCSI_MLQUEUE_HOST_BUSY; + goto exit_qcmd; + } + io_req->sc_cmd = sc_cmd; + + if (bnx2fc_post_io_req(tgt, io_req)) { + printk(KERN_ERR PFX "Unable to post io_req\n"); + rc = SCSI_MLQUEUE_HOST_BUSY; + goto exit_qcmd; + } +exit_qcmd: + return rc; +} + +void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req, + struct fcoe_task_ctx_entry *task, + u8 num_rq) +{ + struct fcoe_fcp_rsp_payload *fcp_rsp; + struct bnx2fc_rport *tgt = io_req->tgt; + struct scsi_cmnd *sc_cmd; + struct Scsi_Host *host; + + + /* scsi_cmd_cmpl is called with tgt lock held */ + + if (test_and_set_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags)) { + /* we will not receive ABTS response for this IO */ + BNX2FC_IO_DBG(io_req, "Timer context finished processing " + "this scsi cmd\n"); + } + + /* Cancel the timeout_work, as we received IO completion */ + if (cancel_delayed_work(&io_req->timeout_work)) + kref_put(&io_req->refcount, + bnx2fc_cmd_release); /* drop timer hold */ + + sc_cmd = io_req->sc_cmd; + if (sc_cmd == NULL) { + printk(KERN_ERR PFX "scsi_cmd_compl - sc_cmd is NULL\n"); + return; + } + + /* Fetch fcp_rsp from task context and perform cmd completion */ + fcp_rsp = (struct fcoe_fcp_rsp_payload *) + &(task->cmn.general.rsp_info.fcp_rsp.payload); + + /* parse fcp_rsp and obtain sense data from RQ if available */ + bnx2fc_parse_fcp_rsp(io_req, fcp_rsp, num_rq); + + host = sc_cmd->device->host; + if (!sc_cmd->SCp.ptr) { + printk(KERN_ERR PFX "SCp.ptr is NULL\n"); + return; + } + io_req->sc_cmd = NULL; + + if (io_req->on_active_queue) { + list_del_init(&io_req->link); + io_req->on_active_queue = 0; + /* Move IO req to retire queue */ + list_add_tail(&io_req->link, &tgt->io_retire_queue); + } else { + /* This should not happen, but could have been pulled + * by bnx2fc_flush_active_ios(), or during a race + * between command abort and (late) completion. 
+ */ + BNX2FC_IO_DBG(io_req, "xid not on active_cmd_queue\n"); + if (io_req->wait_for_comp) + if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT, + &io_req->req_flags)) + complete(&io_req->tm_done); + } + + bnx2fc_unmap_sg_list(io_req); + + switch (io_req->fcp_status) { + case FC_GOOD: + if (io_req->cdb_status == 0) { + /* Good IO completion */ + sc_cmd->result = DID_OK << 16; + } else { + /* Transport status is good, SCSI status not good */ + BNX2FC_IO_DBG(io_req, "scsi_cmpl: cdb_status = %d" + " fcp_resid = 0x%x\n", + io_req->cdb_status, io_req->fcp_resid); + sc_cmd->result = (DID_OK << 16) | io_req->cdb_status; + } + if (io_req->fcp_resid) + scsi_set_resid(sc_cmd, io_req->fcp_resid); + break; + default: + printk(KERN_ALERT PFX "scsi_cmd_compl: fcp_status = %d\n", + io_req->fcp_status); + break; + } + sc_cmd->SCp.ptr = NULL; + sc_cmd->scsi_done(sc_cmd); + kref_put(&io_req->refcount, bnx2fc_cmd_release); +} + +static int bnx2fc_post_io_req(struct bnx2fc_rport *tgt, + struct bnx2fc_cmd *io_req) +{ + struct fcoe_task_ctx_entry *task; + struct fcoe_task_ctx_entry *task_page; + struct scsi_cmnd *sc_cmd = io_req->sc_cmd; + struct fcoe_port *port = tgt->port; + struct bnx2fc_hba *hba = port->priv; + struct fc_lport *lport = port->lport; + struct fcoe_dev_stats *stats; + int task_idx, index; + u16 xid; + + /* Initialize rest of io_req fields */ + io_req->cmd_type = BNX2FC_SCSI_CMD; + io_req->port = port; + io_req->tgt = tgt; + io_req->data_xfer_len = scsi_bufflen(sc_cmd); + sc_cmd->SCp.ptr = (char *)io_req; + + stats = per_cpu_ptr(lport->dev_stats, get_cpu()); + if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) { + io_req->io_req_flags = BNX2FC_READ; + stats->InputRequests++; + stats->InputBytes += io_req->data_xfer_len; + } else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) { + io_req->io_req_flags = BNX2FC_WRITE; + stats->OutputRequests++; + stats->OutputBytes += io_req->data_xfer_len; + } else { + io_req->io_req_flags = 0; + stats->ControlRequests++; + } + put_cpu(); + + xid = io_req->xid; + + /* Build buffer descriptor list for firmware from sg list */ + bnx2fc_build_bd_list_from_sg(io_req); + + task_idx = xid / BNX2FC_TASKS_PER_PAGE; + index = xid % BNX2FC_TASKS_PER_PAGE; + + /* Initialize task context for this IO request */ + task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx]; + task = &(task_page[index]); + bnx2fc_init_task(io_req, task); + + spin_lock_bh(&tgt->tgt_lock); + + if (tgt->flush_in_prog) { + printk(KERN_ERR PFX "Flush in progress..Host Busy\n"); + kref_put(&io_req->refcount, bnx2fc_cmd_release); + spin_unlock_bh(&tgt->tgt_lock); + return -EAGAIN; + } + + if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) { + printk(KERN_ERR PFX "Session not ready...post_io\n"); + kref_put(&io_req->refcount, bnx2fc_cmd_release); + spin_unlock_bh(&tgt->tgt_lock); + return -EAGAIN; + } + + /* Time IO req */ + bnx2fc_cmd_timer_set(io_req, BNX2FC_IO_TIMEOUT); + /* Obtain free SQ entry */ + bnx2fc_add_2_sq(tgt, xid); + + /* Enqueue the io_req to active_cmd_queue */ + + io_req->on_active_queue = 1; + /* move io_req from pending_queue to active_queue */ + list_add_tail(&io_req->link, &tgt->active_cmd_queue); + + /* Ring doorbell */ + bnx2fc_ring_doorbell(tgt); + spin_unlock_bh(&tgt->tgt_lock); + return 0; +} diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c new file mode 100644 index 000000000000..7ea93af60260 --- /dev/null +++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c @@ -0,0 +1,844 @@ +/* bnx2fc_tgt.c: Broadcom NetXtreme II Linux FCoE offload driver. 
+ * Handles operations such as session offload/upload, and manages
+ * session resources such as connection id and qp resources.
+ *
+ * Copyright (c) 2008 - 2010 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
+ */
+
+#include "bnx2fc.h"
+static void bnx2fc_upld_timer(unsigned long data);
+static void bnx2fc_ofld_timer(unsigned long data);
+static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt,
+			   struct fcoe_port *port,
+			   struct fc_rport_priv *rdata);
+static u32 bnx2fc_alloc_conn_id(struct bnx2fc_hba *hba,
+				struct bnx2fc_rport *tgt);
+static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
+				     struct bnx2fc_rport *tgt);
+static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba,
+				     struct bnx2fc_rport *tgt);
+static void bnx2fc_free_conn_id(struct bnx2fc_hba *hba, u32 conn_id);
+
+static void bnx2fc_upld_timer(unsigned long data)
+{
+
+	struct bnx2fc_rport *tgt = (struct bnx2fc_rport *)data;
+
+	BNX2FC_TGT_DBG(tgt, "upld_timer - Upload compl not received!!\n");
+	/* fake upload completion */
+	clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
+	set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
+	wake_up_interruptible(&tgt->upld_wait);
+}
+
+static void bnx2fc_ofld_timer(unsigned long data)
+{
+
+	struct bnx2fc_rport *tgt = (struct bnx2fc_rport *)data;
+
+	BNX2FC_TGT_DBG(tgt, "entered bnx2fc_ofld_timer\n");
+	/* NOTE: This function should never be called, as
+	 * offload should never time out
+	 */
+	/*
+	 * If the timer has expired, this session is dead.
+	 * Clear the offloaded flag and log out of this device.
+	 * Since the OFFLOADED flag is cleared, this case
+	 * will be treated as an offload error and the
+	 * port will be logged off; conn_id and session
+	 * resources are freed up in bnx2fc_offload_session
+	 */
+	clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
+	set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
+	wake_up_interruptible(&tgt->ofld_wait);
+}
+
+static void bnx2fc_offload_session(struct fcoe_port *port,
+					struct bnx2fc_rport *tgt,
+					struct fc_rport_priv *rdata)
+{
+	struct fc_lport *lport = rdata->local_port;
+	struct fc_rport *rport = rdata->rport;
+	struct bnx2fc_hba *hba = port->priv;
+	int rval;
+	int i = 0;
+
+	/* Initialize bnx2fc_rport */
+	/* NOTE: tgt is already bzero'd */
+	rval = bnx2fc_init_tgt(tgt, port, rdata);
+	if (rval) {
+		printk(KERN_ERR PFX "Failed to allocate conn id for "
+			"port_id (%6x)\n", rport->port_id);
+		goto ofld_err;
+	}
+
+	/* Allocate session resources */
+	rval = bnx2fc_alloc_session_resc(hba, tgt);
+	if (rval) {
+		printk(KERN_ERR PFX "Failed to allocate resources\n");
+		goto ofld_err;
+	}
+
+	/*
+	 * Initialize FCoE session offload process.
+	 * Upon completion of offload process add
+	 * rport to list of rports
+	 */
+retry_ofld:
+	clear_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
+	rval = bnx2fc_send_session_ofld_req(port, tgt);
+	if (rval) {
+		printk(KERN_ERR PFX "ofld_req failed\n");
+		goto ofld_err;
+	}
+
+	/*
+	 * wait for the session to be offloaded and enabled. 3 seconds
+	 * should be ample time for this process to complete.
+ */ + setup_timer(&tgt->ofld_timer, bnx2fc_ofld_timer, (unsigned long)tgt); + mod_timer(&tgt->ofld_timer, jiffies + BNX2FC_FW_TIMEOUT); + + wait_event_interruptible(tgt->ofld_wait, + (test_bit( + BNX2FC_FLAG_OFLD_REQ_CMPL, + &tgt->flags))); + if (signal_pending(current)) + flush_signals(current); + + del_timer_sync(&tgt->ofld_timer); + + if (!(test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags))) { + if (test_and_clear_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE, + &tgt->flags)) { + BNX2FC_TGT_DBG(tgt, "ctx_alloc_failure, " + "retry ofld..%d\n", i++); + msleep_interruptible(1000); + if (i > 3) { + i = 0; + goto ofld_err; + } + goto retry_ofld; + } + goto ofld_err; + } + if (bnx2fc_map_doorbell(tgt)) { + printk(KERN_ERR PFX "map doorbell failed - no mem\n"); + /* upload will take care of cleaning up sess resc */ + lport->tt.rport_logoff(rdata); + } + return; + +ofld_err: + /* couldn't offload the session. log off from this rport */ + BNX2FC_TGT_DBG(tgt, "bnx2fc_offload_session - offload error\n"); + lport->tt.rport_logoff(rdata); + /* Free session resources */ + bnx2fc_free_session_resc(hba, tgt); + if (tgt->fcoe_conn_id != -1) + bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id); +} + +void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt) +{ + struct bnx2fc_cmd *io_req; + struct list_head *list; + struct list_head *tmp; + int rc; + int i = 0; + BNX2FC_TGT_DBG(tgt, "Entered flush_active_ios - %d\n", + tgt->num_active_ios.counter); + + spin_lock_bh(&tgt->tgt_lock); + tgt->flush_in_prog = 1; + + list_for_each_safe(list, tmp, &tgt->active_cmd_queue) { + i++; + io_req = (struct bnx2fc_cmd *)list; + list_del_init(&io_req->link); + io_req->on_active_queue = 0; + BNX2FC_IO_DBG(io_req, "cmd_queue cleanup\n"); + + if (cancel_delayed_work(&io_req->timeout_work)) { + if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT, + &io_req->req_flags)) { + /* Handle eh_abort timeout */ + BNX2FC_IO_DBG(io_req, "eh_abort for IO " + "cleaned up\n"); + complete(&io_req->tm_done); + } + kref_put(&io_req->refcount, + bnx2fc_cmd_release); /* drop timer hold */ + } + + set_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags); + set_bit(BNX2FC_FLAG_IO_CLEANUP, &io_req->req_flags); + rc = bnx2fc_initiate_cleanup(io_req); + BUG_ON(rc); + } + + list_for_each_safe(list, tmp, &tgt->els_queue) { + i++; + io_req = (struct bnx2fc_cmd *)list; + list_del_init(&io_req->link); + io_req->on_active_queue = 0; + + BNX2FC_IO_DBG(io_req, "els_queue cleanup\n"); + + if (cancel_delayed_work(&io_req->timeout_work)) + kref_put(&io_req->refcount, + bnx2fc_cmd_release); /* drop timer hold */ + + if ((io_req->cb_func) && (io_req->cb_arg)) { + io_req->cb_func(io_req->cb_arg); + io_req->cb_arg = NULL; + } + + rc = bnx2fc_initiate_cleanup(io_req); + BUG_ON(rc); + } + + list_for_each_safe(list, tmp, &tgt->io_retire_queue) { + i++; + io_req = (struct bnx2fc_cmd *)list; + list_del_init(&io_req->link); + + BNX2FC_IO_DBG(io_req, "retire_queue flush\n"); + + if (cancel_delayed_work(&io_req->timeout_work)) + kref_put(&io_req->refcount, bnx2fc_cmd_release); + + clear_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags); + } + + BNX2FC_TGT_DBG(tgt, "IOs flushed = %d\n", i); + i = 0; + spin_unlock_bh(&tgt->tgt_lock); + /* wait for active_ios to go to 0 */ + while ((tgt->num_active_ios.counter != 0) && (i++ < BNX2FC_WAIT_CNT)) + msleep(25); + if (tgt->num_active_ios.counter != 0) + printk(KERN_ERR PFX "CLEANUP on port 0x%x:" + " active_ios = %d\n", + tgt->rdata->ids.port_id, tgt->num_active_ios.counter); + spin_lock_bh(&tgt->tgt_lock); + tgt->flush_in_prog = 0; + spin_unlock_bh(&tgt->tgt_lock); +} + 
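
bnx2fc_flush_active_ios() above walks each per-target queue with list_for_each_safe() because every loop body unlinks the current entry; the non-safe walker would chase a next pointer out of a node it just removed. A minimal sketch of the safe-removal idiom, with demo types invented for illustration:

#include <linux/list.h>

struct demo_io {
	struct list_head link;
};

static void demo_flush(struct list_head *queue)
{
	struct list_head *pos, *tmp;
	struct demo_io *io;

	/* 'tmp' caches pos->next, so unlinking 'pos' mid-walk is safe */
	list_for_each_safe(pos, tmp, queue) {
		io = list_entry(pos, struct demo_io, link);
		list_del_init(&io->link);
		/* ... retire or clean up 'io' ... */
	}
}

The flush routine relies on exactly this property three times in a row, once per queue, before polling num_active_ios down to zero.
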
+static void bnx2fc_upload_session(struct fcoe_port *port,
+					struct bnx2fc_rport *tgt)
+{
+	struct bnx2fc_hba *hba = port->priv;
+
+	BNX2FC_TGT_DBG(tgt, "upload_session: active_ios = %d\n",
+		tgt->num_active_ios.counter);
+
+	/*
+	 * Called with hba->hba_mutex held.
+	 * This is a blocking call
+	 */
+	clear_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
+	bnx2fc_send_session_disable_req(port, tgt);
+
+	/*
+	 * wait for upload to complete. 3 seconds
+	 * should be sufficient time for this process to complete.
+	 */
+	setup_timer(&tgt->upld_timer, bnx2fc_upld_timer, (unsigned long)tgt);
+	mod_timer(&tgt->upld_timer, jiffies + BNX2FC_FW_TIMEOUT);
+
+	BNX2FC_TGT_DBG(tgt, "waiting for disable compl\n");
+	wait_event_interruptible(tgt->upld_wait,
+				 (test_bit(
+					BNX2FC_FLAG_UPLD_REQ_COMPL,
+					&tgt->flags)));
+
+	if (signal_pending(current))
+		flush_signals(current);
+
+	del_timer_sync(&tgt->upld_timer);
+
+	/*
+	 * traverse through the active_q and tmf_q and clean up
+	 * IOs in these lists
+	 */
+	BNX2FC_TGT_DBG(tgt, "flush/upload - disable wait flags = 0x%lx\n",
+		       tgt->flags);
+	bnx2fc_flush_active_ios(tgt);
+
+	/* Issue destroy KWQE */
+	if (test_bit(BNX2FC_FLAG_DISABLED, &tgt->flags)) {
+		BNX2FC_TGT_DBG(tgt, "send destroy req\n");
+		clear_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
+		bnx2fc_send_session_destroy_req(hba, tgt);
+
+		/* wait for destroy to complete */
+		setup_timer(&tgt->upld_timer,
+			    bnx2fc_upld_timer, (unsigned long)tgt);
+		mod_timer(&tgt->upld_timer, jiffies + BNX2FC_FW_TIMEOUT);
+
+		wait_event_interruptible(tgt->upld_wait,
+					 (test_bit(
+						BNX2FC_FLAG_UPLD_REQ_COMPL,
+						&tgt->flags)));
+
+		if (!(test_bit(BNX2FC_FLAG_DESTROYED, &tgt->flags)))
+			printk(KERN_ERR PFX "ERROR!! destroy timed out\n");
+
+		BNX2FC_TGT_DBG(tgt, "destroy wait complete flags = 0x%lx\n",
+			tgt->flags);
+		if (signal_pending(current))
+			flush_signals(current);
+
+		del_timer_sync(&tgt->upld_timer);
+
+	} else
+		printk(KERN_ERR PFX "ERROR!! DISABLE req timed out, destroy"
+			" not sent to FW\n");
+
+	/* Free session resources */
+	spin_lock_bh(&tgt->cq_lock);
+	bnx2fc_free_session_resc(hba, tgt);
+	bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id);
+	spin_unlock_bh(&tgt->cq_lock);
+}
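
Both the offload wait in bnx2fc_offload_session() and the upload wait above pair wait_event_interruptible() with a one-shot timer whose handler fakes the completion flag, so a firmware event that never arrives cannot strand the thread. A reduced sketch of the pattern, assuming the setup_timer()/mod_timer() API this 2.6.38-era file uses; all demo_* names are illustrative:

#include <linux/timer.h>
#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/sched.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wait);
static unsigned long demo_flags;
#define DEMO_DONE	0

static struct timer_list demo_timer;

static void demo_timeout(unsigned long data)
{
	/* timer fired: fake the completion so the waiter wakes up */
	set_bit(DEMO_DONE, &demo_flags);
	wake_up_interruptible(&demo_wait);
}

static void demo_wait_for_fw(void)
{
	setup_timer(&demo_timer, demo_timeout, 0);
	mod_timer(&demo_timer, jiffies + 3 * HZ);	/* bounded wait */

	/* wakes on the real completion or on the fake one */
	wait_event_interruptible(demo_wait,
				 test_bit(DEMO_DONE, &demo_flags));
	if (signal_pending(current))
		flush_signals(current);

	del_timer_sync(&demo_timer);	/* timer may or may not have fired */
}

Because the timer handler only sets the same flag the real completion sets, the waiter does not need to distinguish the two cases at wake-up; the caller checks a separate status bit (here, BNX2FC_FLAG_DISABLED/DESTROYED) to tell success from timeout.
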
+
+static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt,
+			   struct fcoe_port *port,
+			   struct fc_rport_priv *rdata)
+{
+
+	struct fc_rport *rport = rdata->rport;
+	struct bnx2fc_hba *hba = port->priv;
+
+	tgt->rport = rport;
+	tgt->rdata = rdata;
+	tgt->port = port;
+
+	if (hba->num_ofld_sess >= BNX2FC_NUM_MAX_SESS) {
+		BNX2FC_TGT_DBG(tgt, "exceeded max sessions. logoff this tgt\n");
+		tgt->fcoe_conn_id = -1;
+		return -1;
+	}
+
+	tgt->fcoe_conn_id = bnx2fc_alloc_conn_id(hba, tgt);
+	if (tgt->fcoe_conn_id == -1)
+		return -1;
+
+	BNX2FC_TGT_DBG(tgt, "init_tgt - conn_id = 0x%x\n", tgt->fcoe_conn_id);
+
+	tgt->max_sqes = BNX2FC_SQ_WQES_MAX;
+	tgt->max_rqes = BNX2FC_RQ_WQES_MAX;
+	tgt->max_cqes = BNX2FC_CQ_WQES_MAX;
+
+	/* Initialize the toggle bit */
+	tgt->sq_curr_toggle_bit = 1;
+	tgt->cq_curr_toggle_bit = 1;
+	tgt->sq_prod_idx = 0;
+	tgt->cq_cons_idx = 0;
+	tgt->rq_prod_idx = 0x8000;
+	tgt->rq_cons_idx = 0;
+	atomic_set(&tgt->num_active_ios, 0);
+
+	tgt->work_time_slice = 2;
+
+	spin_lock_init(&tgt->tgt_lock);
+	spin_lock_init(&tgt->cq_lock);
+
+	/* Initialize active_cmd_queue list */
+	INIT_LIST_HEAD(&tgt->active_cmd_queue);
+
+	/* Initialize IO retire queue */
+	INIT_LIST_HEAD(&tgt->io_retire_queue);
+
+	INIT_LIST_HEAD(&tgt->els_queue);
+
+	/* Initialize active_tm_queue list */
+	INIT_LIST_HEAD(&tgt->active_tm_queue);
+
+	init_waitqueue_head(&tgt->ofld_wait);
+	init_waitqueue_head(&tgt->upld_wait);
+
+	return 0;
+}
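
bnx2fc_init_tgt() above seeds the queue indices to zero and the SQ/CQ toggle bits to 1; producer and consumer then agree on which entries are new by flipping the expected toggle value each time an index wraps. A small sketch of how such an index/toggle pair advances; the depth and exact semantics here are illustrative, not the firmware's precise contract:

#include <stdio.h>

#define QUEUE_DEPTH	16	/* illustrative */

struct demo_queue {
	unsigned int cons_idx;	/* next entry to consume */
	unsigned int toggle;	/* expected toggle bit of a valid entry */
};

static void demo_advance(struct demo_queue *q)
{
	q->cons_idx++;
	if (q->cons_idx == QUEUE_DEPTH) {	/* wrapped: flip expectation */
		q->cons_idx = 0;
		q->toggle ^= 1;
	}
}

int main(void)
{
	struct demo_queue q = { .cons_idx = 0, .toggle = 1 };
	int i;

	for (i = 0; i < QUEUE_DEPTH + 2; i++)
		demo_advance(&q);
	printf("idx=%u toggle=%u\n", q.cons_idx, q.toggle);	/* idx=2 toggle=0 */
	return 0;
}

An entry whose toggle bit does not match the expected value is stale from the previous lap, which is why both sides must start from the same seed values set here.
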
+
+/**
+ * This event_callback is called after successful completion of libfc
+ * initiated target login. bnx2fc can proceed with initiating the session
+ * establishment.
+ */
+void bnx2fc_rport_event_handler(struct fc_lport *lport,
+				struct fc_rport_priv *rdata,
+				enum fc_rport_event event)
+{
+	struct fcoe_port *port = lport_priv(lport);
+	struct bnx2fc_hba *hba = port->priv;
+	struct fc_rport *rport = rdata->rport;
+	struct fc_rport_libfc_priv *rp;
+	struct bnx2fc_rport *tgt;
+	u32 port_id;
+
+	BNX2FC_HBA_DBG(lport, "rport_event_hdlr: event = %d, port_id = 0x%x\n",
+		event, rdata->ids.port_id);
+	switch (event) {
+	case RPORT_EV_READY:
+		if (!rport) {
+			printk(KERN_ALERT PFX "rport is NULL: ERROR!\n");
+			break;
+		}
+
+		rp = rport->dd_data;
+		if (rport->port_id == FC_FID_DIR_SERV) {
+			/*
+			 * bnx2fc_rport structure doesn't exist for
+			 * the directory server.
+			 * We should not come here, as lport will
+			 * take care of fabric login
+			 */
+			printk(KERN_ALERT PFX "%x - rport_event_handler ERROR\n",
+				rdata->ids.port_id);
+			break;
+		}
+
+		if (rdata->spp_type != FC_TYPE_FCP) {
+			BNX2FC_HBA_DBG(lport, "not FCP type target."
+				" not offloading\n");
+			break;
+		}
+		if (!(rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET)) {
+			BNX2FC_HBA_DBG(lport, "not FCP_TARGET"
+				" not offloading\n");
+			break;
+		}
+
+		/*
+		 * Offload process is protected with the hba mutex.
+		 * Use the same mutex_lock for the upload process too
+		 */
+		mutex_lock(&hba->hba_mutex);
+		tgt = (struct bnx2fc_rport *)&rp[1];
+
+		/* This can happen when ADISC finds the same target */
+		if (test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags)) {
+			BNX2FC_TGT_DBG(tgt, "already offloaded\n");
+			mutex_unlock(&hba->hba_mutex);
+			return;
+		}
+
+		/*
+		 * Offload the session. This is a blocking call, and will
+		 * wait until the session is offloaded.
+		 */
+		bnx2fc_offload_session(port, tgt, rdata);
+
+		BNX2FC_TGT_DBG(tgt, "OFFLOAD num_ofld_sess = %d\n",
+			hba->num_ofld_sess);
+
+		if (test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags)) {
+			/*
+			 * Session is offloaded and enabled. Map
+			 * doorbell register for this target
+			 */
+			BNX2FC_TGT_DBG(tgt, "sess offloaded\n");
+			/* This counter is protected with hba mutex */
+			hba->num_ofld_sess++;
+
+			set_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags);
+		} else {
+			/*
+			 * Offload or enable would have failed.
+			 * In the offload/enable completion path, the
+			 * rport would have already been removed
+			 */
+			BNX2FC_TGT_DBG(tgt, "Port is being logged off as "
+				"offloaded flag not set\n");
+		}
+		mutex_unlock(&hba->hba_mutex);
+		break;
+	case RPORT_EV_LOGO:
+	case RPORT_EV_FAILED:
+	case RPORT_EV_STOP:
+		port_id = rdata->ids.port_id;
+		if (port_id == FC_FID_DIR_SERV)
+			break;
+
+		if (!rport) {
+			printk(KERN_ALERT PFX "%x - rport not created yet!!\n",
+				port_id);
+			break;
+		}
+		rp = rport->dd_data;
+		mutex_lock(&hba->hba_mutex);
+		/*
+		 * Perform session upload. Note that rdata->peers is already
+		 * removed from disc->rports list before we get this event.
+		 */
+		tgt = (struct bnx2fc_rport *)&rp[1];
+
+		if (!(test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags))) {
+			mutex_unlock(&hba->hba_mutex);
+			break;
+		}
+		clear_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags);
+
+		bnx2fc_upload_session(port, tgt);
+		hba->num_ofld_sess--;
+		BNX2FC_TGT_DBG(tgt, "UPLOAD num_ofld_sess = %d\n",
+			hba->num_ofld_sess);
+		/*
+		 * Try to wake up the linkdown wait thread. If num_ofld_sess
+		 * is 0, the waiting thread wakes up
+		 */
+		if ((hba->wait_for_link_down) &&
+		    (hba->num_ofld_sess == 0)) {
+			wake_up_interruptible(&hba->shutdown_wait);
+		}
+		if (test_bit(BNX2FC_FLAG_EXPL_LOGO, &tgt->flags)) {
+			printk(KERN_ERR PFX "Relogin to the tgt\n");
+			mutex_lock(&lport->disc.disc_mutex);
+			lport->tt.rport_login(rdata);
+			mutex_unlock(&lport->disc.disc_mutex);
+		}
+		mutex_unlock(&hba->hba_mutex);
+
+		break;
+
+	case RPORT_EV_NONE:
+		break;
+	}
+}
+
+/**
+ * bnx2fc_tgt_lookup() - Lookup a bnx2fc_rport by port_id
+ *
+ * @port:  fcoe_port struct to lookup the target port on
+ * @port_id: The remote port ID to look up
+ */
+struct bnx2fc_rport *bnx2fc_tgt_lookup(struct fcoe_port *port,
+					     u32 port_id)
+{
+	struct bnx2fc_hba *hba = port->priv;
+	struct bnx2fc_rport *tgt;
+	struct fc_rport_priv *rdata;
+	int i;
+
+	for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) {
+		tgt = hba->tgt_ofld_list[i];
+		if ((tgt) && (tgt->port == port)) {
+			rdata = tgt->rdata;
+			if (rdata->ids.port_id == port_id) {
+				if (rdata->rp_state != RPORT_ST_DELETE) {
+					BNX2FC_TGT_DBG(tgt, "rport "
+						"obtained\n");
+					return tgt;
+				} else {
+					printk(KERN_ERR PFX "rport 0x%x "
+						"is in DELETED state\n",
+						rdata->ids.port_id);
+					return NULL;
+				}
+			}
+		}
+	}
+	return NULL;
+}
+
+
+/**
+ * bnx2fc_alloc_conn_id - allocates FCOE Connection id
+ *
+ * @hba:	pointer to adapter structure
+ * @tgt:	pointer to bnx2fc_rport structure
+ */
+static u32 bnx2fc_alloc_conn_id(struct bnx2fc_hba *hba,
+				struct bnx2fc_rport *tgt)
+{
+	u32 conn_id, next;
+
+	/* called with hba mutex held */
+
+	/*
+	 * tgt_ofld_list access is synchronized using
+	 * both the hba mutex and the hba lock. At least the hba mutex
+	 * or the hba lock needs to be held for read access.
+ */ + + spin_lock_bh(&hba->hba_lock); + next = hba->next_conn_id; + conn_id = hba->next_conn_id++; + if (hba->next_conn_id == BNX2FC_NUM_MAX_SESS) + hba->next_conn_id = 0; + + while (hba->tgt_ofld_list[conn_id] != NULL) { + conn_id++; + if (conn_id == BNX2FC_NUM_MAX_SESS) + conn_id = 0; + + if (conn_id == next) { + /* No free conn_ids are available */ + spin_unlock_bh(&hba->hba_lock); + return -1; + } + } + hba->tgt_ofld_list[conn_id] = tgt; + tgt->fcoe_conn_id = conn_id; + spin_unlock_bh(&hba->hba_lock); + return conn_id; +} + +static void bnx2fc_free_conn_id(struct bnx2fc_hba *hba, u32 conn_id) +{ + /* called with hba mutex held */ + spin_lock_bh(&hba->hba_lock); + hba->tgt_ofld_list[conn_id] = NULL; + hba->next_conn_id = conn_id; + spin_unlock_bh(&hba->hba_lock); +} + +/** + *bnx2fc_alloc_session_resc - Allocate qp resources for the session + * + */ +static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, + struct bnx2fc_rport *tgt) +{ + dma_addr_t page; + int num_pages; + u32 *pbl; + + /* Allocate and map SQ */ + tgt->sq_mem_size = tgt->max_sqes * BNX2FC_SQ_WQE_SIZE; + tgt->sq_mem_size = (tgt->sq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; + + tgt->sq = dma_alloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size, + &tgt->sq_dma, GFP_KERNEL); + if (!tgt->sq) { + printk(KERN_ALERT PFX "unable to allocate SQ memory %d\n", + tgt->sq_mem_size); + goto mem_alloc_failure; + } + memset(tgt->sq, 0, tgt->sq_mem_size); + + /* Allocate and map CQ */ + tgt->cq_mem_size = tgt->max_cqes * BNX2FC_CQ_WQE_SIZE; + tgt->cq_mem_size = (tgt->cq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; + + tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size, + &tgt->cq_dma, GFP_KERNEL); + if (!tgt->cq) { + printk(KERN_ALERT PFX "unable to allocate CQ memory %d\n", + tgt->cq_mem_size); + goto mem_alloc_failure; + } + memset(tgt->cq, 0, tgt->cq_mem_size); + + /* Allocate and map RQ and RQ PBL */ + tgt->rq_mem_size = tgt->max_rqes * BNX2FC_RQ_WQE_SIZE; + tgt->rq_mem_size = (tgt->rq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; + + tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size, + &tgt->rq_dma, GFP_KERNEL); + if (!tgt->rq) { + printk(KERN_ALERT PFX "unable to allocate RQ memory %d\n", + tgt->rq_mem_size); + goto mem_alloc_failure; + } + memset(tgt->rq, 0, tgt->rq_mem_size); + + tgt->rq_pbl_size = (tgt->rq_mem_size / PAGE_SIZE) * sizeof(void *); + tgt->rq_pbl_size = (tgt->rq_pbl_size + (PAGE_SIZE - 1)) & PAGE_MASK; + + tgt->rq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size, + &tgt->rq_pbl_dma, GFP_KERNEL); + if (!tgt->rq_pbl) { + printk(KERN_ALERT PFX "unable to allocate RQ PBL %d\n", + tgt->rq_pbl_size); + goto mem_alloc_failure; + } + + memset(tgt->rq_pbl, 0, tgt->rq_pbl_size); + num_pages = tgt->rq_mem_size / PAGE_SIZE; + page = tgt->rq_dma; + pbl = (u32 *)tgt->rq_pbl; + + while (num_pages--) { + *pbl = (u32)page; + pbl++; + *pbl = (u32)((u64)page >> 32); + pbl++; + page += PAGE_SIZE; + } + + /* Allocate and map XFERQ */ + tgt->xferq_mem_size = tgt->max_sqes * BNX2FC_XFERQ_WQE_SIZE; + tgt->xferq_mem_size = (tgt->xferq_mem_size + (PAGE_SIZE - 1)) & + PAGE_MASK; + + tgt->xferq = dma_alloc_coherent(&hba->pcidev->dev, tgt->xferq_mem_size, + &tgt->xferq_dma, GFP_KERNEL); + if (!tgt->xferq) { + printk(KERN_ALERT PFX "unable to allocate XFERQ %d\n", + tgt->xferq_mem_size); + goto mem_alloc_failure; + } + memset(tgt->xferq, 0, tgt->xferq_mem_size); + + /* Allocate and map CONFQ & CONFQ PBL */ + tgt->confq_mem_size = tgt->max_sqes * BNX2FC_CONFQ_WQE_SIZE; + tgt->confq_mem_size = 
(tgt->confq_mem_size + (PAGE_SIZE - 1)) &
+			       PAGE_MASK;
+
+	tgt->confq = dma_alloc_coherent(&hba->pcidev->dev, tgt->confq_mem_size,
+					&tgt->confq_dma, GFP_KERNEL);
+	if (!tgt->confq) {
+		printk(KERN_ALERT PFX "unable to allocate CONFQ %d\n",
+			tgt->confq_mem_size);
+		goto mem_alloc_failure;
+	}
+	memset(tgt->confq, 0, tgt->confq_mem_size);
+
+	tgt->confq_pbl_size =
+		(tgt->confq_mem_size / PAGE_SIZE) * sizeof(void *);
+	tgt->confq_pbl_size =
+		(tgt->confq_pbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+
+	tgt->confq_pbl = dma_alloc_coherent(&hba->pcidev->dev,
+					    tgt->confq_pbl_size,
+					    &tgt->confq_pbl_dma, GFP_KERNEL);
+	if (!tgt->confq_pbl) {
+		printk(KERN_ALERT PFX "unable to allocate CONFQ PBL %d\n",
+			tgt->confq_pbl_size);
+		goto mem_alloc_failure;
+	}
+
+	memset(tgt->confq_pbl, 0, tgt->confq_pbl_size);
+	num_pages = tgt->confq_mem_size / PAGE_SIZE;
+	page = tgt->confq_dma;
+	pbl = (u32 *)tgt->confq_pbl;
+
+	while (num_pages--) {
+		*pbl = (u32)page;
+		pbl++;
+		*pbl = (u32)((u64)page >> 32);
+		pbl++;
+		page += PAGE_SIZE;
+	}
+
+	/* Allocate and map ConnDB */
+	tgt->conn_db_mem_size = sizeof(struct fcoe_conn_db);
+
+	tgt->conn_db = dma_alloc_coherent(&hba->pcidev->dev,
+					  tgt->conn_db_mem_size,
+					  &tgt->conn_db_dma, GFP_KERNEL);
+	if (!tgt->conn_db) {
+		printk(KERN_ALERT PFX "unable to allocate conn_db %d\n",
+			tgt->conn_db_mem_size);
+		goto mem_alloc_failure;
+	}
+	memset(tgt->conn_db, 0, tgt->conn_db_mem_size);
+
+
+	/* Allocate and map LCQ */
+	tgt->lcq_mem_size = (tgt->max_sqes + 8) * BNX2FC_SQ_WQE_SIZE;
+	tgt->lcq_mem_size = (tgt->lcq_mem_size + (PAGE_SIZE - 1)) &
+			     PAGE_MASK;
+
+	tgt->lcq = dma_alloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
+				      &tgt->lcq_dma, GFP_KERNEL);
+
+	if (!tgt->lcq) {
+		printk(KERN_ALERT PFX "unable to allocate lcq %d\n",
+			tgt->lcq_mem_size);
+		goto mem_alloc_failure;
+	}
+	memset(tgt->lcq, 0, tgt->lcq_mem_size);
+
+	/* Arm CQ */
+	tgt->conn_db->cq_arm.lo = -1;
+	tgt->conn_db->rq_prod = 0x8000;
+
+	return 0;
+
+mem_alloc_failure:
+	bnx2fc_free_session_resc(hba, tgt);
+	bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id);
+	return -ENOMEM;
+}
+
+/**
+ * bnx2fc_free_session_resc - free qp resources for the session
+ *
+ * @hba:	adapter structure pointer
+ * @tgt:	bnx2fc_rport structure pointer
+ *
+ * Free QP resources - SQ/RQ/CQ/XFERQ memory and PBL
+ */
+static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba,
+				     struct bnx2fc_rport *tgt)
+{
+	BNX2FC_TGT_DBG(tgt, "Freeing up session resources\n");
+
+	if (tgt->ctx_base) {
+		iounmap(tgt->ctx_base);
+		tgt->ctx_base = NULL;
+	}
+	/* Free LCQ */
+	if (tgt->lcq) {
+		dma_free_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
+				    tgt->lcq, tgt->lcq_dma);
+		tgt->lcq = NULL;
+	}
+	/* Free connDB */
+	if (tgt->conn_db) {
+		dma_free_coherent(&hba->pcidev->dev, tgt->conn_db_mem_size,
+				    tgt->conn_db, tgt->conn_db_dma);
+		tgt->conn_db = NULL;
+	}
+	/* Free confq and confq pbl */
+	if (tgt->confq_pbl) {
+		dma_free_coherent(&hba->pcidev->dev, tgt->confq_pbl_size,
+				    tgt->confq_pbl, tgt->confq_pbl_dma);
+		tgt->confq_pbl = NULL;
+	}
+	if (tgt->confq) {
+		dma_free_coherent(&hba->pcidev->dev, tgt->confq_mem_size,
+				    tgt->confq, tgt->confq_dma);
+		tgt->confq = NULL;
+	}
+	/* Free XFERQ */
+	if (tgt->xferq) {
+		dma_free_coherent(&hba->pcidev->dev, tgt->xferq_mem_size,
+				    tgt->xferq, tgt->xferq_dma);
+		tgt->xferq = NULL;
+	}
+	/* Free RQ PBL and RQ */
+	if (tgt->rq_pbl) {
+		dma_free_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
+				    tgt->rq_pbl, tgt->rq_pbl_dma);
+		tgt->rq_pbl = NULL;
+	}
+	if (tgt->rq) {
+		dma_free_coherent(&hba->pcidev->dev,
tgt->rq_mem_size, + tgt->rq, tgt->rq_dma); + tgt->rq = NULL; + } + /* Free CQ */ + if (tgt->cq) { + dma_free_coherent(&hba->pcidev->dev, tgt->cq_mem_size, + tgt->cq, tgt->cq_dma); + tgt->cq = NULL; + } + /* Free SQ */ + if (tgt->sq) { + dma_free_coherent(&hba->pcidev->dev, tgt->sq_mem_size, + tgt->sq, tgt->sq_dma); + tgt->sq = NULL; + } +} diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h index e1ca5fe7e6bb..cfd59023227b 100644 --- a/drivers/scsi/bnx2i/bnx2i.h +++ b/drivers/scsi/bnx2i/bnx2i.h @@ -360,7 +360,7 @@ struct bnx2i_hba { #define ADAPTER_STATE_LINK_DOWN 2 #define ADAPTER_STATE_INIT_FAILED 31 unsigned int mtu_supported; - #define BNX2I_MAX_MTU_SUPPORTED 1500 + #define BNX2I_MAX_MTU_SUPPORTED 9000 struct Scsi_Host *shost; @@ -751,6 +751,8 @@ extern int bnx2i_send_iscsi_login(struct bnx2i_conn *conn, struct iscsi_task *mtask); extern int bnx2i_send_iscsi_tmf(struct bnx2i_conn *conn, struct iscsi_task *mtask); +extern int bnx2i_send_iscsi_text(struct bnx2i_conn *conn, + struct iscsi_task *mtask); extern int bnx2i_send_iscsi_scsicmd(struct bnx2i_conn *conn, struct bnx2i_cmd *cmnd); extern int bnx2i_send_iscsi_nopout(struct bnx2i_conn *conn, diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c index 96505e3ab986..1da34c019b8a 100644 --- a/drivers/scsi/bnx2i/bnx2i_hwi.c +++ b/drivers/scsi/bnx2i/bnx2i_hwi.c @@ -445,6 +445,56 @@ int bnx2i_send_iscsi_tmf(struct bnx2i_conn *bnx2i_conn, } /** + * bnx2i_send_iscsi_text - post iSCSI text WQE to hardware + * @conn: iscsi connection + * @mtask: driver command structure which is requesting + * a WQE to sent to chip for further processing + * + * prepare and post an iSCSI Text request WQE to CNIC firmware + */ +int bnx2i_send_iscsi_text(struct bnx2i_conn *bnx2i_conn, + struct iscsi_task *mtask) +{ + struct bnx2i_cmd *bnx2i_cmd; + struct bnx2i_text_request *text_wqe; + struct iscsi_text *text_hdr; + u32 dword; + + bnx2i_cmd = (struct bnx2i_cmd *)mtask->dd_data; + text_hdr = (struct iscsi_text *)mtask->hdr; + text_wqe = (struct bnx2i_text_request *) bnx2i_conn->ep->qp.sq_prod_qe; + + memset(text_wqe, 0, sizeof(struct bnx2i_text_request)); + + text_wqe->op_code = text_hdr->opcode; + text_wqe->op_attr = text_hdr->flags; + text_wqe->data_length = ntoh24(text_hdr->dlength); + text_wqe->itt = mtask->itt | + (ISCSI_TASK_TYPE_MPATH << ISCSI_TEXT_REQUEST_TYPE_SHIFT); + text_wqe->ttt = be32_to_cpu(text_hdr->ttt); + + text_wqe->cmd_sn = be32_to_cpu(text_hdr->cmdsn); + + text_wqe->resp_bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.resp_bd_dma; + text_wqe->resp_bd_list_addr_hi = + (u32) ((u64) bnx2i_conn->gen_pdu.resp_bd_dma >> 32); + + dword = ((1 << ISCSI_TEXT_REQUEST_NUM_RESP_BDS_SHIFT) | + (bnx2i_conn->gen_pdu.resp_buf_size << + ISCSI_TEXT_REQUEST_RESP_BUFFER_LENGTH_SHIFT)); + text_wqe->resp_buffer = dword; + text_wqe->bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.req_bd_dma; + text_wqe->bd_list_addr_hi = + (u32) ((u64) bnx2i_conn->gen_pdu.req_bd_dma >> 32); + text_wqe->num_bds = 1; + text_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */ + + bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1); + return 0; +} + + +/** * bnx2i_send_iscsi_scsicmd - post iSCSI scsicmd request WQE to hardware * @conn: iscsi connection * @cmd: driver command structure which is requesting @@ -490,15 +540,18 @@ int bnx2i_send_iscsi_nopout(struct bnx2i_conn *bnx2i_conn, bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data; nopout_hdr = (struct iscsi_nopout *)task->hdr; nopout_wqe = (struct bnx2i_nop_out_request *)ep->qp.sq_prod_qe; + + 
memset(nopout_wqe, 0x00, sizeof(struct bnx2i_nop_out_request)); + nopout_wqe->op_code = nopout_hdr->opcode; nopout_wqe->op_attr = ISCSI_FLAG_CMD_FINAL; memcpy(nopout_wqe->lun, nopout_hdr->lun, 8); if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) { - u32 tmp = nopout_hdr->lun[0]; + u32 tmp = nopout_wqe->lun[0]; /* 57710 requires LUN field to be swapped */ - nopout_hdr->lun[0] = nopout_hdr->lun[1]; - nopout_hdr->lun[1] = tmp; + nopout_wqe->lun[0] = nopout_wqe->lun[1]; + nopout_wqe->lun[1] = tmp; } nopout_wqe->itt = ((u16)task->itt | @@ -1425,6 +1478,68 @@ done: return 0; } + +/** + * bnx2i_process_text_resp - this function handles iscsi text response + * @session: iscsi session pointer + * @bnx2i_conn: iscsi connection pointer + * @cqe: pointer to newly DMA'ed CQE entry for processing + * + * process iSCSI Text Response CQE& complete it to open-iscsi user daemon + */ +static int bnx2i_process_text_resp(struct iscsi_session *session, + struct bnx2i_conn *bnx2i_conn, + struct cqe *cqe) +{ + struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data; + struct iscsi_task *task; + struct bnx2i_text_response *text; + struct iscsi_text_rsp *resp_hdr; + int pld_len; + int pad_len; + + text = (struct bnx2i_text_response *) cqe; + spin_lock(&session->lock); + task = iscsi_itt_to_task(conn, text->itt & ISCSI_LOGIN_RESPONSE_INDEX); + if (!task) + goto done; + + resp_hdr = (struct iscsi_text_rsp *)&bnx2i_conn->gen_pdu.resp_hdr; + memset(resp_hdr, 0, sizeof(struct iscsi_hdr)); + resp_hdr->opcode = text->op_code; + resp_hdr->flags = text->response_flags; + resp_hdr->hlength = 0; + + hton24(resp_hdr->dlength, text->data_length); + resp_hdr->itt = task->hdr->itt; + resp_hdr->ttt = cpu_to_be32(text->ttt); + resp_hdr->statsn = task->hdr->exp_statsn; + resp_hdr->exp_cmdsn = cpu_to_be32(text->exp_cmd_sn); + resp_hdr->max_cmdsn = cpu_to_be32(text->max_cmd_sn); + pld_len = text->data_length; + bnx2i_conn->gen_pdu.resp_wr_ptr = bnx2i_conn->gen_pdu.resp_buf + + pld_len; + pad_len = 0; + if (pld_len & 0x3) + pad_len = 4 - (pld_len % 4); + + if (pad_len) { + int i = 0; + for (i = 0; i < pad_len; i++) { + bnx2i_conn->gen_pdu.resp_wr_ptr[0] = 0; + bnx2i_conn->gen_pdu.resp_wr_ptr++; + } + } + __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, + bnx2i_conn->gen_pdu.resp_buf, + bnx2i_conn->gen_pdu.resp_wr_ptr - + bnx2i_conn->gen_pdu.resp_buf); +done: + spin_unlock(&session->lock); + return 0; +} + + /** * bnx2i_process_tmf_resp - this function handles iscsi TMF response * @session: iscsi session pointer @@ -1766,6 +1881,10 @@ static void bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn) bnx2i_process_tmf_resp(session, bnx2i_conn, qp->cq_cons_qe); break; + case ISCSI_OP_TEXT_RSP: + bnx2i_process_text_resp(session, bnx2i_conn, + qp->cq_cons_qe); + break; case ISCSI_OP_LOGOUT_RSP: bnx2i_process_logout_resp(session, bnx2i_conn, qp->cq_cons_qe); diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c index 72a7b2d4a439..1d24a2819736 100644 --- a/drivers/scsi/bnx2i/bnx2i_init.c +++ b/drivers/scsi/bnx2i/bnx2i_init.c @@ -18,8 +18,8 @@ static struct list_head adapter_list = LIST_HEAD_INIT(adapter_list); static u32 adapter_count; #define DRV_MODULE_NAME "bnx2i" -#define DRV_MODULE_VERSION "2.6.2.2" -#define DRV_MODULE_RELDATE "Nov 23, 2010" +#define DRV_MODULE_VERSION "2.6.2.3" +#define DRV_MODULE_RELDATE "Dec 31, 2010" static char version[] __devinitdata = "Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \ @@ -29,7 +29,7 @@ static char version[] __devinitdata = MODULE_AUTHOR("Anil 
Veerabhadrappa <anilgv@broadcom.com> and " "Eddie Wai <eddie.wai@broadcom.com>"); -MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/57710/57711" +MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/57710/57711/57712" " iSCSI Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_MODULE_VERSION); @@ -88,9 +88,11 @@ void bnx2i_identify_device(struct bnx2i_hba *hba) (hba->pci_did == PCI_DEVICE_ID_NX2_5709S)) { set_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type); hba->mail_queue_access = BNX2I_MQ_BIN_MODE; - } else if (hba->pci_did == PCI_DEVICE_ID_NX2_57710 || - hba->pci_did == PCI_DEVICE_ID_NX2_57711 || - hba->pci_did == PCI_DEVICE_ID_NX2_57711E) + } else if (hba->pci_did == PCI_DEVICE_ID_NX2_57710 || + hba->pci_did == PCI_DEVICE_ID_NX2_57711 || + hba->pci_did == PCI_DEVICE_ID_NX2_57711E || + hba->pci_did == PCI_DEVICE_ID_NX2_57712 || + hba->pci_did == PCI_DEVICE_ID_NX2_57712E) set_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type); else printk(KERN_ALERT "bnx2i: unknown device, 0x%x\n", @@ -161,6 +163,21 @@ void bnx2i_start(void *handle) struct bnx2i_hba *hba = handle; int i = HZ; + if (!hba->cnic->max_iscsi_conn) { + printk(KERN_ALERT "bnx2i: dev %s does not support " + "iSCSI\n", hba->netdev->name); + + if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) { + mutex_lock(&bnx2i_dev_lock); + list_del_init(&hba->link); + adapter_count--; + hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI); + clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic); + mutex_unlock(&bnx2i_dev_lock); + bnx2i_free_hba(hba); + } + return; + } bnx2i_send_fw_iscsi_init_msg(hba); while (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) && i--) msleep(BNX2I_INIT_POLL_TIME); diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c index f0dce26593eb..1809f9ccc4ce 100644 --- a/drivers/scsi/bnx2i/bnx2i_iscsi.c +++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c @@ -1092,6 +1092,9 @@ static int bnx2i_iscsi_send_generic_request(struct iscsi_task *task) case ISCSI_OP_SCSI_TMFUNC: rc = bnx2i_send_iscsi_tmf(bnx2i_conn, task); break; + case ISCSI_OP_TEXT: + rc = bnx2i_send_iscsi_text(bnx2i_conn, task); + break; default: iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data, "send_gen: unsupported op 0x%x\n", @@ -1455,42 +1458,40 @@ static void bnx2i_conn_destroy(struct iscsi_cls_conn *cls_conn) /** - * bnx2i_conn_get_param - return iscsi connection parameter to caller - * @cls_conn: pointer to iscsi cls conn + * bnx2i_ep_get_param - return iscsi ep parameter to caller + * @ep: pointer to iscsi endpoint * @param: parameter type identifier * @buf: buffer pointer * - * returns iSCSI connection parameters + * returns iSCSI ep parameters */ -static int bnx2i_conn_get_param(struct iscsi_cls_conn *cls_conn, - enum iscsi_param param, char *buf) +static int bnx2i_ep_get_param(struct iscsi_endpoint *ep, + enum iscsi_param param, char *buf) { - struct iscsi_conn *conn = cls_conn->dd_data; - struct bnx2i_conn *bnx2i_conn = conn->dd_data; - int len = 0; + struct bnx2i_endpoint *bnx2i_ep = ep->dd_data; + struct bnx2i_hba *hba = bnx2i_ep->hba; + int len = -ENOTCONN; - if (!(bnx2i_conn && bnx2i_conn->ep && bnx2i_conn->ep->hba)) - goto out; + if (!hba) + return -ENOTCONN; switch (param) { case ISCSI_PARAM_CONN_PORT: - mutex_lock(&bnx2i_conn->ep->hba->net_dev_lock); - if (bnx2i_conn->ep->cm_sk) - len = sprintf(buf, "%hu\n", - bnx2i_conn->ep->cm_sk->dst_port); - mutex_unlock(&bnx2i_conn->ep->hba->net_dev_lock); + mutex_lock(&hba->net_dev_lock); + if (bnx2i_ep->cm_sk) + len = sprintf(buf, "%hu\n", 
bnx2i_ep->cm_sk->dst_port); + mutex_unlock(&hba->net_dev_lock); break; case ISCSI_PARAM_CONN_ADDRESS: - mutex_lock(&bnx2i_conn->ep->hba->net_dev_lock); - if (bnx2i_conn->ep->cm_sk) - len = sprintf(buf, "%pI4\n", - &bnx2i_conn->ep->cm_sk->dst_ip); - mutex_unlock(&bnx2i_conn->ep->hba->net_dev_lock); + mutex_lock(&hba->net_dev_lock); + if (bnx2i_ep->cm_sk) + len = sprintf(buf, "%pI4\n", &bnx2i_ep->cm_sk->dst_ip); + mutex_unlock(&hba->net_dev_lock); break; default: - return iscsi_conn_get_param(cls_conn, param, buf); + return -ENOSYS; } -out: + return len; } @@ -1935,13 +1936,13 @@ static int bnx2i_ep_tcp_conn_active(struct bnx2i_endpoint *bnx2i_ep) cnic_dev_10g = 1; switch (bnx2i_ep->state) { - case EP_STATE_CONNECT_FAILED: case EP_STATE_CLEANUP_FAILED: case EP_STATE_OFLD_FAILED: case EP_STATE_DISCONN_TIMEDOUT: ret = 0; break; case EP_STATE_CONNECT_START: + case EP_STATE_CONNECT_FAILED: case EP_STATE_CONNECT_COMPL: case EP_STATE_ULP_UPDATE_START: case EP_STATE_ULP_UPDATE_COMPL: @@ -2167,7 +2168,8 @@ struct iscsi_transport bnx2i_iscsi_transport = { .name = "bnx2i", .caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_MULTI_R2T | CAP_DATADGST | - CAP_DATA_PATH_OFFLOAD, + CAP_DATA_PATH_OFFLOAD | + CAP_TEXT_NEGO, .param_mask = ISCSI_MAX_RECV_DLENGTH | ISCSI_MAX_XMIT_DLENGTH | ISCSI_HDRDGST_EN | @@ -2200,7 +2202,7 @@ struct iscsi_transport bnx2i_iscsi_transport = { .bind_conn = bnx2i_conn_bind, .destroy_conn = bnx2i_conn_destroy, .set_param = iscsi_set_param, - .get_conn_param = bnx2i_conn_get_param, + .get_conn_param = iscsi_conn_get_param, .get_session_param = iscsi_session_get_param, .get_host_param = bnx2i_host_get_param, .start_conn = bnx2i_conn_start, @@ -2209,6 +2211,7 @@ struct iscsi_transport bnx2i_iscsi_transport = { .xmit_task = bnx2i_task_xmit, .get_stats = bnx2i_conn_get_stats, /* TCP connect - disconnect - option-2 interface calls */ + .get_ep_param = bnx2i_ep_get_param, .ep_connect = bnx2i_ep_connect, .ep_poll = bnx2i_ep_poll, .ep_disconnect = bnx2i_ep_disconnect, diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c index a129a170b47b..fc2cdb62f53b 100644 --- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c +++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c @@ -105,7 +105,7 @@ static struct iscsi_transport cxgb3i_iscsi_transport = { /* owner and name should be set already */ .caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST | CAP_DATADGST | CAP_DIGEST_OFFLOAD | - CAP_PADDING_OFFLOAD, + CAP_PADDING_OFFLOAD | CAP_TEXT_NEGO, .param_mask = ISCSI_MAX_RECV_DLENGTH | ISCSI_MAX_XMIT_DLENGTH | ISCSI_HDRDGST_EN | ISCSI_DATADGST_EN | ISCSI_INITIAL_R2T_EN | ISCSI_MAX_R2T | @@ -137,7 +137,7 @@ static struct iscsi_transport cxgb3i_iscsi_transport = { .destroy_conn = iscsi_tcp_conn_teardown, .start_conn = iscsi_conn_start, .stop_conn = iscsi_conn_stop, - .get_conn_param = cxgbi_get_conn_param, + .get_conn_param = iscsi_conn_get_param, .set_param = cxgbi_set_conn_param, .get_stats = cxgbi_get_conn_stats, /* pdu xmit req from user space */ @@ -152,6 +152,7 @@ static struct iscsi_transport cxgb3i_iscsi_transport = { .xmit_pdu = cxgbi_conn_xmit_pdu, .parse_pdu_itt = cxgbi_parse_pdu_itt, /* TCP connect/disconnect */ + .get_ep_param = cxgbi_get_ep_param, .ep_connect = cxgbi_ep_connect, .ep_poll = cxgbi_ep_poll, .ep_disconnect = cxgbi_ep_disconnect, @@ -1108,10 +1109,11 @@ static int ddp_set_map(struct cxgbi_sock *csk, struct cxgbi_pagepod_hdr *hdr, csk, idx, npods, gl); for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) { - struct sk_buff *skb = ddp->gl_skb[idx]; + struct sk_buff *skb = 
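The assignment being rewritten at this point drops the driver's preallocated per-pagepod skb cache (ddp->gl_skb) in favor of allocating each write request on demand with GFP_ATOMIC, as the continuation below shows; the alloc/free helpers for the cache are deleted a few hunks further on. In miniature, and with stand-in names, the new shape is:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/skbuff.h>

#define WR_LEN 64	/* stand-in for sizeof(struct ulp_mem_io) + PPOD_SIZE */

/* Build one write request per pagepod, allocating as we go. Under
 * memory pressure this fails cleanly mid-loop instead of depending on
 * a cache that had to be sized and freed in lockstep with the tag
 * reservation. The caller unwinds the reservation on -ENOMEM. */
static int ppod_write_all(int npods,
			  struct sk_buff *(*alloc_fn)(int len, gfp_t gfp),
			  void (*send_fn)(struct sk_buff *skb))
{
	int i;

	for (i = 0; i < npods; i++) {
		struct sk_buff *skb = alloc_fn(WR_LEN, GFP_ATOMIC);

		if (!skb)
			return -ENOMEM;
		/* ... set up the ULP header and pagepod payload ... */
		send_fn(skb);
	}
	return 0;
}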
alloc_wr(sizeof(struct ulp_mem_io) + + PPOD_SIZE, 0, GFP_ATOMIC); - /* hold on to the skb until we clear the ddp mapping */ - skb_get(skb); + if (!skb) + return -ENOMEM; ulp_mem_io_set_hdr(skb, pm_addr); cxgbi_ddp_ppod_set((struct cxgbi_pagepod *)(skb->head + @@ -1136,56 +1138,20 @@ static void ddp_clear_map(struct cxgbi_hba *chba, unsigned int tag, cdev, idx, npods, tag); for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) { - struct sk_buff *skb = ddp->gl_skb[idx]; + struct sk_buff *skb = alloc_wr(sizeof(struct ulp_mem_io) + + PPOD_SIZE, 0, GFP_ATOMIC); if (!skb) { - pr_err("tag 0x%x, 0x%x, %d/%u, skb NULL.\n", + pr_err("tag 0x%x, 0x%x, %d/%u, skb OOM.\n", tag, idx, i, npods); continue; } - ddp->gl_skb[idx] = NULL; - memset(skb->head + sizeof(struct ulp_mem_io), 0, PPOD_SIZE); ulp_mem_io_set_hdr(skb, pm_addr); skb->priority = CPL_PRIORITY_CONTROL; cxgb3_ofld_send(cdev->lldev, skb); } } -static void ddp_free_gl_skb(struct cxgbi_ddp_info *ddp, int idx, int cnt) -{ - int i; - - log_debug(1 << CXGBI_DBG_DDP, - "ddp 0x%p, idx %d, cnt %d.\n", ddp, idx, cnt); - - for (i = 0; i < cnt; i++, idx++) - if (ddp->gl_skb[idx]) { - kfree_skb(ddp->gl_skb[idx]); - ddp->gl_skb[idx] = NULL; - } -} - -static int ddp_alloc_gl_skb(struct cxgbi_ddp_info *ddp, int idx, - int cnt, gfp_t gfp) -{ - int i; - - log_debug(1 << CXGBI_DBG_DDP, - "ddp 0x%p, idx %d, cnt %d.\n", ddp, idx, cnt); - - for (i = 0; i < cnt; i++) { - struct sk_buff *skb = alloc_wr(sizeof(struct ulp_mem_io) + - PPOD_SIZE, 0, gfp); - if (skb) - ddp->gl_skb[idx + i] = skb; - else { - ddp_free_gl_skb(ddp, idx, i); - return -ENOMEM; - } - } - return 0; -} - static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid, int pg_idx, bool reply) { @@ -1316,8 +1282,6 @@ static int cxgb3i_ddp_init(struct cxgbi_device *cdev) } tdev->ulp_iscsi = ddp; - cdev->csk_ddp_free_gl_skb = ddp_free_gl_skb; - cdev->csk_ddp_alloc_gl_skb = ddp_alloc_gl_skb; cdev->csk_ddp_setup_digest = ddp_setup_conn_digest; cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx; cdev->csk_ddp_set = ddp_set_map; diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.h b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.h index 5f5e3394b594..20593fd69d8f 100644 --- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.h +++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.h @@ -24,10 +24,21 @@ extern cxgb3_cpl_handler_func cxgb3i_cpl_handlers[NUM_CPL_CMDS]; -#define cxgb3i_get_private_ipv4addr(ndev) \ - (((struct port_info *)(netdev_priv(ndev)))->iscsi_ipv4addr) -#define cxgb3i_set_private_ipv4addr(ndev, addr) \ - (((struct port_info *)(netdev_priv(ndev)))->iscsi_ipv4addr) = addr +static inline unsigned int cxgb3i_get_private_ipv4addr(struct net_device *ndev) +{ + return ((struct port_info *)(netdev_priv(ndev)))->iscsi_ipv4addr; +} + +static inline void cxgb3i_set_private_ipv4addr(struct net_device *ndev, + unsigned int addr) +{ + struct port_info *pi = (struct port_info *)netdev_priv(ndev); + + pi->iscsic.flags = addr ? 
1 : 0; + pi->iscsi_ipv4addr = addr; + if (addr) + memcpy(pi->iscsic.mac_addr, ndev->dev_addr, ETH_ALEN); +} struct cpl_iscsi_hdr_norss { union opcode_tid ot; diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c index 8c04fada710b..f3a4cd7cf782 100644 --- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c +++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c @@ -106,7 +106,7 @@ static struct iscsi_transport cxgb4i_iscsi_transport = { .name = DRV_MODULE_NAME, .caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST | CAP_DATADGST | CAP_DIGEST_OFFLOAD | - CAP_PADDING_OFFLOAD, + CAP_PADDING_OFFLOAD | CAP_TEXT_NEGO, .param_mask = ISCSI_MAX_RECV_DLENGTH | ISCSI_MAX_XMIT_DLENGTH | ISCSI_HDRDGST_EN | ISCSI_DATADGST_EN | ISCSI_INITIAL_R2T_EN | ISCSI_MAX_R2T | @@ -138,7 +138,7 @@ static struct iscsi_transport cxgb4i_iscsi_transport = { .destroy_conn = iscsi_tcp_conn_teardown, .start_conn = iscsi_conn_start, .stop_conn = iscsi_conn_stop, - .get_conn_param = cxgbi_get_conn_param, + .get_conn_param = iscsi_conn_get_param, .set_param = cxgbi_set_conn_param, .get_stats = cxgbi_get_conn_stats, /* pdu xmit req from user space */ @@ -153,6 +153,7 @@ static struct iscsi_transport cxgb4i_iscsi_transport = { .xmit_pdu = cxgbi_conn_xmit_pdu, .parse_pdu_itt = cxgbi_parse_pdu_itt, /* TCP connect/disconnect */ + .get_ep_param = cxgbi_get_ep_param, .ep_connect = cxgbi_ep_connect, .ep_poll = cxgbi_ep_poll, .ep_disconnect = cxgbi_ep_disconnect, @@ -1425,8 +1426,6 @@ static int cxgb4i_ddp_init(struct cxgbi_device *cdev) cxgbi_ddp_page_size_factor(pgsz_factor); cxgb4_iscsi_init(lldi->ports[0], tagmask, pgsz_factor); - cdev->csk_ddp_free_gl_skb = NULL; - cdev->csk_ddp_alloc_gl_skb = NULL; cdev->csk_ddp_setup_digest = ddp_setup_conn_digest; cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx; cdev->csk_ddp_set = ddp_set_map; diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c index a24dff9f9163..de764ea7419d 100644 --- a/drivers/scsi/cxgbi/libcxgbi.c +++ b/drivers/scsi/cxgbi/libcxgbi.c @@ -530,6 +530,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr) csk->dst = dst; csk->daddr.sin_addr.s_addr = daddr->sin_addr.s_addr; csk->daddr.sin_port = daddr->sin_port; + csk->daddr.sin_family = daddr->sin_family; csk->saddr.sin_addr.s_addr = rt->rt_src; return csk; @@ -1264,12 +1265,6 @@ static int ddp_tag_reserve(struct cxgbi_sock *csk, unsigned int tid, return idx; } - if (cdev->csk_ddp_alloc_gl_skb) { - err = cdev->csk_ddp_alloc_gl_skb(ddp, idx, npods, gfp); - if (err < 0) - goto unmark_entries; - } - tag = cxgbi_ddp_tag_base(tformat, sw_tag); tag |= idx << PPOD_IDX_SHIFT; @@ -1280,11 +1275,8 @@ static int ddp_tag_reserve(struct cxgbi_sock *csk, unsigned int tid, hdr.page_offset = htonl(gl->offset); err = cdev->csk_ddp_set(csk, &hdr, idx, npods, gl); - if (err < 0) { - if (cdev->csk_ddp_free_gl_skb) - cdev->csk_ddp_free_gl_skb(ddp, idx, npods); + if (err < 0) goto unmark_entries; - } ddp->idx_last = idx; log_debug(1 << CXGBI_DBG_DDP, @@ -1350,8 +1342,6 @@ static void ddp_destroy(struct kref *kref) >> PPOD_PAGES_SHIFT; pr_info("cdev 0x%p, ddp %d + %d.\n", cdev, i, npods); kfree(gl); - if (cdev->csk_ddp_free_gl_skb) - cdev->csk_ddp_free_gl_skb(ddp, i, npods); i += npods; } else i++; @@ -1394,8 +1384,6 @@ int cxgbi_ddp_init(struct cxgbi_device *cdev, return -ENOMEM; } ddp->gl_map = (struct cxgbi_gather_list **)(ddp + 1); - ddp->gl_skb = (struct sk_buff **)(((char *)ddp->gl_map) + - ppmax * sizeof(struct cxgbi_gather_list *)); cdev->ddp = ddp; spin_lock_init(&ddp->map_lock); @@ -1895,13 
+1883,16 @@ EXPORT_SYMBOL_GPL(cxgbi_conn_alloc_pdu); static inline void tx_skb_setmode(struct sk_buff *skb, int hcrc, int dcrc) { - u8 submode = 0; + if (hcrc || dcrc) { + u8 submode = 0; - if (hcrc) - submode |= 1; - if (dcrc) - submode |= 2; - cxgbi_skcb_ulp_mode(skb) = (ULP2_MODE_ISCSI << 4) | submode; + if (hcrc) + submode |= 1; + if (dcrc) + submode |= 2; + cxgbi_skcb_ulp_mode(skb) = (ULP2_MODE_ISCSI << 4) | submode; + } else + cxgbi_skcb_ulp_mode(skb) = 0; } int cxgbi_conn_init_pdu(struct iscsi_task *task, unsigned int offset, @@ -2197,32 +2188,34 @@ int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn, } EXPORT_SYMBOL_GPL(cxgbi_set_conn_param); -int cxgbi_get_conn_param(struct iscsi_cls_conn *cls_conn, - enum iscsi_param param, char *buf) +int cxgbi_get_ep_param(struct iscsi_endpoint *ep, enum iscsi_param param, + char *buf) { - struct iscsi_conn *iconn = cls_conn->dd_data; + struct cxgbi_endpoint *cep = ep->dd_data; + struct cxgbi_sock *csk; int len; log_debug(1 << CXGBI_DBG_ISCSI, - "cls_conn 0x%p, param %d.\n", cls_conn, param); + "cls_conn 0x%p, param %d.\n", ep, param); switch (param) { case ISCSI_PARAM_CONN_PORT: - spin_lock_bh(&iconn->session->lock); - len = sprintf(buf, "%hu\n", iconn->portal_port); - spin_unlock_bh(&iconn->session->lock); - break; case ISCSI_PARAM_CONN_ADDRESS: - spin_lock_bh(&iconn->session->lock); - len = sprintf(buf, "%s\n", iconn->portal_address); - spin_unlock_bh(&iconn->session->lock); - break; + if (!cep) + return -ENOTCONN; + + csk = cep->csk; + if (!csk) + return -ENOTCONN; + + return iscsi_conn_get_addr_param((struct sockaddr_storage *) + &csk->daddr, param, buf); default: - return iscsi_conn_get_param(cls_conn, param, buf); + return -ENOSYS; } return len; } -EXPORT_SYMBOL_GPL(cxgbi_get_conn_param); +EXPORT_SYMBOL_GPL(cxgbi_get_ep_param); struct iscsi_cls_conn * cxgbi_create_conn(struct iscsi_cls_session *cls_session, u32 cid) @@ -2289,11 +2282,6 @@ int cxgbi_bind_conn(struct iscsi_cls_session *cls_session, cxgbi_conn_max_xmit_dlength(conn); cxgbi_conn_max_recv_dlength(conn); - spin_lock_bh(&conn->session->lock); - sprintf(conn->portal_address, "%pI4", &csk->daddr.sin_addr.s_addr); - conn->portal_port = ntohs(csk->daddr.sin_port); - spin_unlock_bh(&conn->session->lock); - log_debug(1 << CXGBI_DBG_ISCSI, "cls 0x%p,0x%p, ep 0x%p, cconn 0x%p, csk 0x%p.\n", cls_session, cls_conn, ep, cconn, csk); diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h index c57d59db000c..0a20fd5f7102 100644 --- a/drivers/scsi/cxgbi/libcxgbi.h +++ b/drivers/scsi/cxgbi/libcxgbi.h @@ -131,7 +131,6 @@ struct cxgbi_ddp_info { unsigned int rsvd_tag_mask; spinlock_t map_lock; struct cxgbi_gather_list **gl_map; - struct sk_buff **gl_skb; }; #define DDP_PGIDX_MAX 4 @@ -536,8 +535,6 @@ struct cxgbi_device { struct cxgbi_ddp_info *ddp; void (*dev_ddp_cleanup)(struct cxgbi_device *); - void (*csk_ddp_free_gl_skb)(struct cxgbi_ddp_info *, int, int); - int (*csk_ddp_alloc_gl_skb)(struct cxgbi_ddp_info *, int, int, gfp_t); int (*csk_ddp_set)(struct cxgbi_sock *, struct cxgbi_pagepod_hdr *, unsigned int, unsigned int, struct cxgbi_gather_list *); @@ -715,7 +712,7 @@ void cxgbi_cleanup_task(struct iscsi_task *task); void cxgbi_get_conn_stats(struct iscsi_cls_conn *, struct iscsi_stats *); int cxgbi_set_conn_param(struct iscsi_cls_conn *, enum iscsi_param, char *, int); -int cxgbi_get_conn_param(struct iscsi_cls_conn *, enum iscsi_param, char *); +int cxgbi_get_ep_param(struct iscsi_endpoint *ep, enum iscsi_param, char *); struct iscsi_cls_conn 
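With cxgbi_get_conn_param() replaced by cxgbi_get_ep_param() above, the PORT/ADDRESS queries no longer read strings cached in the connection; the endpoint hands libiscsi the stored peer sockaddr, and the iscsi_conn_get_addr_param() helper referenced in the new code formats the answer. A sketch of that shape, using a hypothetical private struct rather than the real cxgbi types:

#include <linux/errno.h>
#include <linux/socket.h>
#include <scsi/libiscsi.h>
#include <scsi/scsi_transport_iscsi.h>

/* Hypothetical endpoint-private data; only the saved peer address
 * matters for this sketch. */
struct ep_priv_sketch {
	struct sockaddr_storage peer;
	bool connected;
};

static int sketch_ep_get_param(struct iscsi_endpoint *ep,
			       enum iscsi_param param, char *buf)
{
	struct ep_priv_sketch *p = ep->dd_data;

	switch (param) {
	case ISCSI_PARAM_CONN_PORT:
	case ISCSI_PARAM_CONN_ADDRESS:
		if (!p || !p->connected)
			return -ENOTCONN;
		/* libiscsi formats both the port and the address from
		 * the same stored sockaddr_storage */
		return iscsi_conn_get_addr_param(&p->peer, param, buf);
	default:
		return -ENOSYS;	/* not an endpoint-level parameter */
	}
}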
*cxgbi_create_conn(struct iscsi_cls_session *, u32); int cxgbi_bind_conn(struct iscsi_cls_session *, struct iscsi_cls_conn *, u64, int); diff --git a/drivers/scsi/device_handler/scsi_dh.c b/drivers/scsi/device_handler/scsi_dh.c index b837c5b3c8f9..564e6ecd17c2 100644 --- a/drivers/scsi/device_handler/scsi_dh.c +++ b/drivers/scsi/device_handler/scsi_dh.c @@ -25,16 +25,9 @@ #include <scsi/scsi_dh.h> #include "../scsi_priv.h" -struct scsi_dh_devinfo_list { - struct list_head node; - char vendor[9]; - char model[17]; - struct scsi_device_handler *handler; -}; - static DEFINE_SPINLOCK(list_lock); static LIST_HEAD(scsi_dh_list); -static LIST_HEAD(scsi_dh_dev_list); +static int scsi_dh_list_idx = 1; static struct scsi_device_handler *get_device_handler(const char *name) { @@ -51,40 +44,18 @@ static struct scsi_device_handler *get_device_handler(const char *name) return found; } - -static struct scsi_device_handler * -scsi_dh_cache_lookup(struct scsi_device *sdev) +static struct scsi_device_handler *get_device_handler_by_idx(int idx) { - struct scsi_dh_devinfo_list *tmp; - struct scsi_device_handler *found_dh = NULL; + struct scsi_device_handler *tmp, *found = NULL; spin_lock(&list_lock); - list_for_each_entry(tmp, &scsi_dh_dev_list, node) { - if (!strncmp(sdev->vendor, tmp->vendor, strlen(tmp->vendor)) && - !strncmp(sdev->model, tmp->model, strlen(tmp->model))) { - found_dh = tmp->handler; + list_for_each_entry(tmp, &scsi_dh_list, list) { + if (tmp->idx == idx) { + found = tmp; break; } } spin_unlock(&list_lock); - - return found_dh; -} - -static int scsi_dh_handler_lookup(struct scsi_device_handler *scsi_dh, - struct scsi_device *sdev) -{ - int i, found = 0; - - for(i = 0; scsi_dh->devlist[i].vendor; i++) { - if (!strncmp(sdev->vendor, scsi_dh->devlist[i].vendor, - strlen(scsi_dh->devlist[i].vendor)) && - !strncmp(sdev->model, scsi_dh->devlist[i].model, - strlen(scsi_dh->devlist[i].model))) { - found = 1; - break; - } - } return found; } @@ -102,41 +73,14 @@ device_handler_match(struct scsi_device_handler *scsi_dh, struct scsi_device *sdev) { struct scsi_device_handler *found_dh = NULL; - struct scsi_dh_devinfo_list *tmp; + int idx; - found_dh = scsi_dh_cache_lookup(sdev); - if (found_dh) - return found_dh; + idx = scsi_get_device_flags_keyed(sdev, sdev->vendor, sdev->model, + SCSI_DEVINFO_DH); + found_dh = get_device_handler_by_idx(idx); - if (scsi_dh) { - if (scsi_dh_handler_lookup(scsi_dh, sdev)) - found_dh = scsi_dh; - } else { - struct scsi_device_handler *tmp_dh; - - spin_lock(&list_lock); - list_for_each_entry(tmp_dh, &scsi_dh_list, list) { - if (scsi_dh_handler_lookup(tmp_dh, sdev)) - found_dh = tmp_dh; - } - spin_unlock(&list_lock); - } - - if (found_dh) { /* If device is found, add it to the cache */ - tmp = kmalloc(sizeof(*tmp), GFP_KERNEL); - if (tmp) { - strncpy(tmp->vendor, sdev->vendor, 8); - strncpy(tmp->model, sdev->model, 16); - tmp->vendor[8] = '\0'; - tmp->model[16] = '\0'; - tmp->handler = found_dh; - spin_lock(&list_lock); - list_add(&tmp->node, &scsi_dh_dev_list); - spin_unlock(&list_lock); - } else { - found_dh = NULL; - } - } + if (scsi_dh && found_dh != scsi_dh) + found_dh = NULL; return found_dh; } @@ -373,12 +317,25 @@ static int scsi_dh_notifier_remove(struct device *dev, void *data) */ int scsi_register_device_handler(struct scsi_device_handler *scsi_dh) { + int i; + if (get_device_handler(scsi_dh->name)) return -EBUSY; spin_lock(&list_lock); + scsi_dh->idx = scsi_dh_list_idx++; list_add(&scsi_dh->list, &scsi_dh_list); spin_unlock(&list_lock); + + for (i = 0; 
scsi_dh->devlist[i].vendor; i++) { + scsi_dev_info_list_add_keyed(0, + scsi_dh->devlist[i].vendor, + scsi_dh->devlist[i].model, + NULL, + scsi_dh->idx, + SCSI_DEVINFO_DH); + } + bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh, scsi_dh_notifier_add); printk(KERN_INFO "%s: device handler registered\n", scsi_dh->name); @@ -395,7 +352,7 @@ EXPORT_SYMBOL_GPL(scsi_register_device_handler); */ int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh) { - struct scsi_dh_devinfo_list *tmp, *pos; + int i; if (!get_device_handler(scsi_dh->name)) return -ENODEV; @@ -403,14 +360,14 @@ int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh) bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh, scsi_dh_notifier_remove); + for (i = 0; scsi_dh->devlist[i].vendor; i++) { + scsi_dev_info_list_del_keyed(scsi_dh->devlist[i].vendor, + scsi_dh->devlist[i].model, + SCSI_DEVINFO_DH); + } + spin_lock(&list_lock); list_del(&scsi_dh->list); - list_for_each_entry_safe(pos, tmp, &scsi_dh_dev_list, node) { - if (pos->handler == scsi_dh) { - list_del(&pos->node); - kfree(pos); - } - } spin_unlock(&list_lock); printk(KERN_INFO "%s: device handler unregistered\n", scsi_dh->name); @@ -576,6 +533,10 @@ static int __init scsi_dh_init(void) { int r; + r = scsi_dev_info_add_list(SCSI_DEVINFO_DH, "SCSI Device Handler"); + if (r) + return r; + r = bus_register_notifier(&scsi_bus_type, &scsi_dh_nb); if (!r) @@ -590,6 +551,7 @@ static void __exit scsi_dh_exit(void) bus_for_each_dev(&scsi_bus_type, NULL, NULL, scsi_dh_sysfs_attr_remove); bus_unregister_notifier(&scsi_bus_type, &scsi_dh_nb); + scsi_dev_info_remove_list(SCSI_DEVINFO_DH); } module_init(scsi_dh_init); diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c index 6b729324b8d3..7cae0bc85390 100644 --- a/drivers/scsi/device_handler/scsi_dh_alua.c +++ b/drivers/scsi/device_handler/scsi_dh_alua.c @@ -253,13 +253,15 @@ static void stpg_endio(struct request *req, int error) { struct alua_dh_data *h = req->end_io_data; struct scsi_sense_hdr sense_hdr; - unsigned err = SCSI_DH_IO; + unsigned err = SCSI_DH_OK; if (error || host_byte(req->errors) != DID_OK || - msg_byte(req->errors) != COMMAND_COMPLETE) + msg_byte(req->errors) != COMMAND_COMPLETE) { + err = SCSI_DH_IO; goto done; + } - if (err == SCSI_DH_IO && h->senselen > 0) { + if (h->senselen > 0) { err = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE, &sense_hdr); if (!err) { @@ -285,7 +287,8 @@ static void stpg_endio(struct request *req, int error) print_alua_state(h->state)); } done: - blk_put_request(req); + req->end_io_data = NULL; + __blk_put_request(req->q, req); if (h->callback_fn) { h->callback_fn(h->callback_data, err); h->callback_fn = h->callback_data = NULL; @@ -303,7 +306,6 @@ done: static unsigned submit_stpg(struct alua_dh_data *h) { struct request *rq; - int err = SCSI_DH_RES_TEMP_UNAVAIL; int stpg_len = 8; struct scsi_device *sdev = h->sdev; @@ -332,7 +334,7 @@ static unsigned submit_stpg(struct alua_dh_data *h) rq->end_io_data = h; blk_execute_rq_nowait(rq->q, NULL, rq, 1, stpg_endio); - return err; + return SCSI_DH_OK; } /* @@ -730,7 +732,9 @@ static const struct scsi_dh_devlist alua_dev_list[] = { {"Pillar", "Axiom" }, {"Intel", "Multi-Flex"}, {"NETAPP", "LUN"}, + {"NETAPP", "LUN C-Mode"}, {"AIX", "NVDISK"}, + {"Promise", "VTrak"}, {NULL, NULL} }; @@ -759,7 +763,7 @@ static int alua_bus_attach(struct scsi_device *sdev) unsigned long flags; int err = SCSI_DH_OK; - scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *) + 
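The kzalloc() change being made right here (and repeated below for the emc, hp_sw and rdac handlers) fixes a classic C slip: the allocation was sized with sizeof(struct scsi_device_handler *), the size of a pointer, rather than the size of the structure being allocated. The sizeof(*var) spelling keeps the expression tied to the object assigned. An isolated illustration with stand-in types:

#include <linux/slab.h>

struct dh_data_sketch { long fields[8]; };	/* stand-in */
struct handler_priv_sketch { long more[16]; };	/* stand-in */

static struct dh_data_sketch *attach_alloc(void)
{
	struct dh_data_sketch *d;

	/* Wrong: kzalloc(sizeof(struct dh_data_sketch *) + ...) would
	 * reserve only 4 or 8 bytes for the leading structure. Right:
	 * size it from the object itself, so a later type change
	 * cannot silently shrink the allocation. */
	d = kzalloc(sizeof(*d) + sizeof(struct handler_priv_sketch),
		    GFP_KERNEL);
	return d;
}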
scsi_dh_data = kzalloc(sizeof(*scsi_dh_data) + sizeof(*h) , GFP_KERNEL); if (!scsi_dh_data) { sdev_printk(KERN_ERR, sdev, "%s: Attach failed\n", diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c index 6faf472f7537..48441f6908a4 100644 --- a/drivers/scsi/device_handler/scsi_dh_emc.c +++ b/drivers/scsi/device_handler/scsi_dh_emc.c @@ -650,7 +650,7 @@ static int clariion_bus_attach(struct scsi_device *sdev) unsigned long flags; int err; - scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *) + scsi_dh_data = kzalloc(sizeof(*scsi_dh_data) + sizeof(*h) , GFP_KERNEL); if (!scsi_dh_data) { sdev_printk(KERN_ERR, sdev, "%s: Attach failed\n", diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c index e3916641e627..b479f1eef968 100644 --- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c +++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c @@ -225,7 +225,8 @@ static void start_stop_endio(struct request *req, int error) } } done: - blk_put_request(req); + req->end_io_data = NULL; + __blk_put_request(req->q, req); if (h->callback_fn) { h->callback_fn(h->callback_data, err); h->callback_fn = h->callback_data = NULL; @@ -338,8 +339,8 @@ static int hp_sw_bus_attach(struct scsi_device *sdev) unsigned long flags; int ret; - scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *) - + sizeof(struct hp_sw_dh_data) , GFP_KERNEL); + scsi_dh_data = kzalloc(sizeof(*scsi_dh_data) + + sizeof(*h) , GFP_KERNEL); if (!scsi_dh_data) { sdev_printk(KERN_ERR, sdev, "%s: Attach Failed\n", HP_SW_NAME); diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c index 5be3ae15cb71..293c183dfe6d 100644 --- a/drivers/scsi/device_handler/scsi_dh_rdac.c +++ b/drivers/scsi/device_handler/scsi_dh_rdac.c @@ -281,11 +281,13 @@ static struct request *get_rdac_req(struct scsi_device *sdev, } static struct request *rdac_failover_get(struct scsi_device *sdev, - struct rdac_dh_data *h) + struct rdac_dh_data *h, struct list_head *list) { struct request *rq; struct rdac_mode_common *common; unsigned data_size; + struct rdac_queue_data *qdata; + u8 *lun_table; if (h->ctlr->use_ms10) { struct rdac_pg_expanded *rdac_pg; @@ -298,6 +300,7 @@ static struct request *rdac_failover_get(struct scsi_device *sdev, rdac_pg->subpage_code = 0x1; rdac_pg->page_len[0] = 0x01; rdac_pg->page_len[1] = 0x28; + lun_table = rdac_pg->lun_table; } else { struct rdac_pg_legacy *rdac_pg; @@ -307,11 +310,16 @@ static struct request *rdac_failover_get(struct scsi_device *sdev, common = &rdac_pg->common; rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER; rdac_pg->page_len = 0x68; + lun_table = rdac_pg->lun_table; } common->rdac_mode[1] = RDAC_MODE_TRANSFER_SPECIFIED_LUNS; common->quiescence_timeout = RDAC_QUIESCENCE_TIME; common->rdac_options = RDAC_FORCED_QUIESENCE; + list_for_each_entry(qdata, list, entry) { + lun_table[qdata->h->lun] = 0x81; + } + /* get request for block layer packet command */ rq = get_rdac_req(sdev, &h->ctlr->mode_select, data_size, WRITE); if (!rq) @@ -565,7 +573,6 @@ static void send_mode_select(struct work_struct *work) int err, retry_cnt = RDAC_RETRY_COUNT; struct rdac_queue_data *tmp, *qdata; LIST_HEAD(list); - u8 *lun_table; spin_lock(&ctlr->ms_lock); list_splice_init(&ctlr->ms_head, &list); @@ -573,21 +580,12 @@ static void send_mode_select(struct work_struct *work) ctlr->ms_sdev = NULL; spin_unlock(&ctlr->ms_lock); - if (ctlr->use_ms10) - lun_table = ctlr->mode_select.expanded.lun_table; - 
else - lun_table = ctlr->mode_select.legacy.lun_table; - retry: err = SCSI_DH_RES_TEMP_UNAVAIL; - rq = rdac_failover_get(sdev, h); + rq = rdac_failover_get(sdev, h, &list); if (!rq) goto done; - list_for_each_entry(qdata, &list, entry) { - lun_table[qdata->h->lun] = 0x81; - } - RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, " "%s MODE_SELECT command", (char *) h->ctlr->array_name, h->ctlr->index, @@ -769,6 +767,7 @@ static const struct scsi_dh_devlist rdac_dev_list[] = { {"DELL", "MD32xx"}, {"DELL", "MD32xxi"}, {"DELL", "MD36xxi"}, + {"DELL", "MD36xxf"}, {"LSI", "INF-01-00"}, {"ENGENIO", "INF-01-00"}, {"STK", "FLEXLINE 380"}, @@ -800,7 +799,7 @@ static int rdac_bus_attach(struct scsi_device *sdev) int err; char array_name[ARRAY_LABEL_LEN]; - scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *) + scsi_dh_data = kzalloc(sizeof(*scsi_dh_data) + sizeof(*h) , GFP_KERNEL); if (!scsi_dh_data) { sdev_printk(KERN_ERR, sdev, "%s: Attach failed\n", @@ -906,4 +905,5 @@ module_exit(rdac_exit); MODULE_DESCRIPTION("Multipath LSI/Engenio RDAC driver"); MODULE_AUTHOR("Mike Christie, Chandra Seetharaman"); +MODULE_VERSION("01.00.0000.0000"); MODULE_LICENSE("GPL"); diff --git a/drivers/scsi/fcoe/Makefile b/drivers/scsi/fcoe/Makefile index 950f27615c76..f6d37d0271f7 100644 --- a/drivers/scsi/fcoe/Makefile +++ b/drivers/scsi/fcoe/Makefile @@ -1,2 +1,4 @@ obj-$(CONFIG_FCOE) += fcoe.o obj-$(CONFIG_LIBFCOE) += libfcoe.o + +libfcoe-objs := fcoe_ctlr.o fcoe_transport.o diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 3becc6a20a4f..bde6ee5333eb 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -31,6 +31,7 @@ #include <linux/fs.h> #include <linux/sysfs.h> #include <linux/ctype.h> +#include <linux/workqueue.h> #include <scsi/scsi_tcq.h> #include <scsi/scsicam.h> #include <scsi/scsi_transport.h> @@ -58,6 +59,8 @@ MODULE_PARM_DESC(ddp_min, "Minimum I/O size in bytes for " \ DEFINE_MUTEX(fcoe_config_mutex); +static struct workqueue_struct *fcoe_wq; + /* fcoe_percpu_clean completion. 
Waiter protected by fcoe_create_mutex */ static DECLARE_COMPLETION(fcoe_flush_completion); @@ -72,7 +75,6 @@ static int fcoe_xmit(struct fc_lport *, struct fc_frame *); static int fcoe_rcv(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); static int fcoe_percpu_receive_thread(void *); -static void fcoe_clean_pending_queue(struct fc_lport *); static void fcoe_percpu_clean(struct fc_lport *); static int fcoe_link_speed_update(struct fc_lport *); static int fcoe_link_ok(struct fc_lport *); @@ -80,7 +82,6 @@ static int fcoe_link_ok(struct fc_lport *); static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *); static int fcoe_hostlist_add(const struct fc_lport *); -static void fcoe_check_wait_queue(struct fc_lport *, struct sk_buff *); static int fcoe_device_notification(struct notifier_block *, ulong, void *); static void fcoe_dev_setup(void); static void fcoe_dev_cleanup(void); @@ -101,10 +102,11 @@ static int fcoe_ddp_done(struct fc_lport *, u16); static int fcoe_cpu_callback(struct notifier_block *, unsigned long, void *); -static int fcoe_create(const char *, struct kernel_param *); -static int fcoe_destroy(const char *, struct kernel_param *); -static int fcoe_enable(const char *, struct kernel_param *); -static int fcoe_disable(const char *, struct kernel_param *); +static bool fcoe_match(struct net_device *netdev); +static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode); +static int fcoe_destroy(struct net_device *netdev); +static int fcoe_enable(struct net_device *netdev); +static int fcoe_disable(struct net_device *netdev); static struct fc_seq *fcoe_elsct_send(struct fc_lport *, u32 did, struct fc_frame *, @@ -117,24 +119,6 @@ static void fcoe_recv_frame(struct sk_buff *skb); static void fcoe_get_lesb(struct fc_lport *, struct fc_els_lesb *); -module_param_call(create, fcoe_create, NULL, (void *)FIP_MODE_FABRIC, S_IWUSR); -__MODULE_PARM_TYPE(create, "string"); -MODULE_PARM_DESC(create, " Creates fcoe instance on a ethernet interface"); -module_param_call(create_vn2vn, fcoe_create, NULL, - (void *)FIP_MODE_VN2VN, S_IWUSR); -__MODULE_PARM_TYPE(create_vn2vn, "string"); -MODULE_PARM_DESC(create_vn2vn, " Creates a VN_node to VN_node FCoE instance " - "on an Ethernet interface"); -module_param_call(destroy, fcoe_destroy, NULL, NULL, S_IWUSR); -__MODULE_PARM_TYPE(destroy, "string"); -MODULE_PARM_DESC(destroy, " Destroys fcoe instance on a ethernet interface"); -module_param_call(enable, fcoe_enable, NULL, NULL, S_IWUSR); -__MODULE_PARM_TYPE(enable, "string"); -MODULE_PARM_DESC(enable, " Enables fcoe on a ethernet interface."); -module_param_call(disable, fcoe_disable, NULL, NULL, S_IWUSR); -__MODULE_PARM_TYPE(disable, "string"); -MODULE_PARM_DESC(disable, " Disables fcoe on a ethernet interface."); - /* notification function for packets from net device */ static struct notifier_block fcoe_notifier = { .notifier_call = fcoe_device_notification, @@ -145,8 +129,8 @@ static struct notifier_block fcoe_cpu_notifier = { .notifier_call = fcoe_cpu_callback, }; -static struct scsi_transport_template *fcoe_transport_template; -static struct scsi_transport_template *fcoe_vport_transport_template; +static struct scsi_transport_template *fcoe_nport_scsi_transport; +static struct scsi_transport_template *fcoe_vport_scsi_transport; static int fcoe_vport_destroy(struct fc_vport *); static int fcoe_vport_create(struct fc_vport *, bool disabled); @@ -163,7 +147,7 @@ static struct libfc_function_template fcoe_libfc_fcn_templ = { 
.lport_set_port_id = fcoe_set_port_id, }; -struct fc_function_template fcoe_transport_function = { +struct fc_function_template fcoe_nport_fc_functions = { .show_host_node_name = 1, .show_host_port_name = 1, .show_host_supported_classes = 1, @@ -203,7 +187,7 @@ struct fc_function_template fcoe_transport_function = { .bsg_request = fc_lport_bsg_request, }; -struct fc_function_template fcoe_vport_transport_function = { +struct fc_function_template fcoe_vport_fc_functions = { .show_host_node_name = 1, .show_host_port_name = 1, .show_host_supported_classes = 1, @@ -354,10 +338,18 @@ static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev, struct fcoe_interface *fcoe; int err; + if (!try_module_get(THIS_MODULE)) { + FCOE_NETDEV_DBG(netdev, + "Could not get a reference to the module\n"); + fcoe = ERR_PTR(-EBUSY); + goto out; + } + fcoe = kzalloc(sizeof(*fcoe), GFP_KERNEL); if (!fcoe) { FCOE_NETDEV_DBG(netdev, "Could not allocate fcoe structure\n"); - return NULL; + fcoe = ERR_PTR(-ENOMEM); + goto out_nomod; } dev_hold(netdev); @@ -376,9 +368,15 @@ static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev, fcoe_ctlr_destroy(&fcoe->ctlr); kfree(fcoe); dev_put(netdev); - return NULL; + fcoe = ERR_PTR(err); + goto out_nomod; } + goto out; + +out_nomod: + module_put(THIS_MODULE); +out: return fcoe; } @@ -440,6 +438,7 @@ static void fcoe_interface_release(struct kref *kref) fcoe_ctlr_destroy(&fcoe->ctlr); kfree(fcoe); dev_put(netdev); + module_put(THIS_MODULE); } /** @@ -503,7 +502,7 @@ static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb) static void fcoe_update_src_mac(struct fc_lport *lport, u8 *addr) { struct fcoe_port *port = lport_priv(lport); - struct fcoe_interface *fcoe = port->fcoe; + struct fcoe_interface *fcoe = port->priv; rtnl_lock(); if (!is_zero_ether_addr(port->data_src_addr)) @@ -559,17 +558,6 @@ static int fcoe_lport_config(struct fc_lport *lport) } /** - * fcoe_queue_timer() - The fcoe queue timer - * @lport: The local port - * - * Calls fcoe_check_wait_queue on timeout - */ -static void fcoe_queue_timer(ulong lport) -{ - fcoe_check_wait_queue((struct fc_lport *)lport, NULL); -} - -/** * fcoe_get_wwn() - Get the world wide name from LLD if it supports it * @netdev: the associated net device * @wwn: the output WWN @@ -648,7 +636,7 @@ static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev) /* Setup lport private data to point to fcoe softc */ port = lport_priv(lport); - fcoe = port->fcoe; + fcoe = port->priv; /* * Determine max frame size based on underlying device and optional @@ -706,9 +694,9 @@ static int fcoe_shost_config(struct fc_lport *lport, struct device *dev) lport->host->max_cmd_len = FCOE_MAX_CMD_LEN; if (lport->vport) - lport->host->transportt = fcoe_vport_transport_template; + lport->host->transportt = fcoe_vport_scsi_transport; else - lport->host->transportt = fcoe_transport_template; + lport->host->transportt = fcoe_nport_scsi_transport; /* add the new host to the SCSI-ml */ rc = scsi_add_host(lport->host, dev); @@ -758,7 +746,7 @@ bool fcoe_oem_match(struct fc_frame *fp) static inline int fcoe_em_config(struct fc_lport *lport) { struct fcoe_port *port = lport_priv(lport); - struct fcoe_interface *fcoe = port->fcoe; + struct fcoe_interface *fcoe = port->priv; struct fcoe_interface *oldfcoe = NULL; struct net_device *old_real_dev, *cur_real_dev; u16 min_xid = FCOE_MIN_XID; @@ -842,7 +830,7 @@ skip_oem: static void fcoe_if_destroy(struct fc_lport *lport) { struct fcoe_port *port = 
lport_priv(lport); - struct fcoe_interface *fcoe = port->fcoe; + struct fcoe_interface *fcoe = port->priv; struct net_device *netdev = fcoe->netdev; FCOE_NETDEV_DBG(netdev, "Destroying interface\n"); @@ -884,7 +872,6 @@ static void fcoe_if_destroy(struct fc_lport *lport) /* Release the Scsi_Host */ scsi_host_put(lport->host); - module_put(THIS_MODULE); } /** @@ -939,8 +926,9 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe, struct device *parent, int npiv) { struct net_device *netdev = fcoe->netdev; - struct fc_lport *lport = NULL; + struct fc_lport *lport, *n_port; struct fcoe_port *port; + struct Scsi_Host *shost; int rc; /* * parent is only a vport if npiv is 1, @@ -950,13 +938,11 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe, FCOE_NETDEV_DBG(netdev, "Create Interface\n"); - if (!npiv) { - lport = libfc_host_alloc(&fcoe_shost_template, - sizeof(struct fcoe_port)); - } else { - lport = libfc_vport_create(vport, - sizeof(struct fcoe_port)); - } + if (!npiv) + lport = libfc_host_alloc(&fcoe_shost_template, sizeof(*port)); + else + lport = libfc_vport_create(vport, sizeof(*port)); + if (!lport) { FCOE_NETDEV_DBG(netdev, "Could not allocate host structure\n"); rc = -ENOMEM; @@ -964,7 +950,9 @@ } port = lport_priv(lport); port->lport = lport; - port->fcoe = fcoe; + port->priv = fcoe; + port->max_queue_depth = FCOE_MAX_QUEUE_DEPTH; + port->min_queue_depth = FCOE_MIN_QUEUE_DEPTH; INIT_WORK(&port->destroy_work, fcoe_destroy_work); /* configure a fc_lport including the exchange manager */ @@ -1007,24 +995,27 @@ goto out_lp_destroy; } - if (!npiv) { - /* - * fcoe_em_alloc() and fcoe_hostlist_add() both - * need to be atomic with respect to other changes to the - * hostlist since fcoe_em_alloc() looks for an existing EM - * instance on host list updated by fcoe_hostlist_add(). - * - * This is currently handled through the fcoe_config_mutex - * begin held. - */ - + /* + * fcoe_em_alloc() and fcoe_hostlist_add() both + * need to be atomic with respect to other changes to the + * hostlist since fcoe_em_alloc() looks for an existing EM + * instance on host list updated by fcoe_hostlist_add(). + * + * This is currently handled through the fcoe_config_mutex + * being held.
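A hypothetical reduction of the invariant restated in the comment above: the lookup for an existing exchange manager and the insertion of a new one must run under one lock, otherwise two racing creates can both miss and double-allocate. None of these names are the driver's; the mutex plays the fcoe_config_mutex role.

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/slab.h>

struct em_entry {
	struct list_head node;
	struct net_device *real_dev;	/* the sharing key */
	void *em;			/* stand-in for an exchange manager */
};

static LIST_HEAD(em_hostlist);
static DEFINE_MUTEX(em_cfg_mutex);

static void *em_lookup_or_create(struct net_device *real_dev)
{
	struct em_entry *e;
	void *em = NULL;

	mutex_lock(&em_cfg_mutex);
	list_for_each_entry(e, &em_hostlist, node) {
		if (e->real_dev == real_dev) {
			em = e->em;	/* share the sibling port's EM */
			goto out;
		}
	}
	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		goto out;
	e->em = kzalloc(128, GFP_KERNEL);	/* stand-in allocation */
	if (!e->em) {
		kfree(e);
		goto out;
	}
	e->real_dev = real_dev;
	list_add_tail(&e->node, &em_hostlist);
	em = e->em;
out:
	mutex_unlock(&em_cfg_mutex);
	return em;
}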
+ */ + if (!npiv) /* lport exch manager allocation */ rc = fcoe_em_config(lport); - if (rc) { - FCOE_NETDEV_DBG(netdev, "Could not configure the EM " - "for the interface\n"); - goto out_lp_destroy; - } + else { + shost = vport_to_shost(vport); + n_port = shost_priv(shost); + rc = fc_exch_mgr_list_clone(n_port, lport); + } + + if (rc) { + FCOE_NETDEV_DBG(netdev, "Could not configure the EM\n"); + goto out_lp_destroy; } fcoe_interface_get(fcoe); @@ -1048,11 +1039,12 @@ out: static int __init fcoe_if_init(void) { /* attach to scsi transport */ - fcoe_transport_template = fc_attach_transport(&fcoe_transport_function); - fcoe_vport_transport_template = - fc_attach_transport(&fcoe_vport_transport_function); + fcoe_nport_scsi_transport = + fc_attach_transport(&fcoe_nport_fc_functions); + fcoe_vport_scsi_transport = + fc_attach_transport(&fcoe_vport_fc_functions); - if (!fcoe_transport_template) { + if (!fcoe_nport_scsi_transport) { printk(KERN_ERR "fcoe: Failed to attach to the FC transport\n"); return -ENODEV; } @@ -1069,10 +1061,10 @@ static int __init fcoe_if_init(void) */ int __exit fcoe_if_exit(void) { - fc_release_transport(fcoe_transport_template); - fc_release_transport(fcoe_vport_transport_template); - fcoe_transport_template = NULL; - fcoe_vport_transport_template = NULL; + fc_release_transport(fcoe_nport_scsi_transport); + fc_release_transport(fcoe_vport_scsi_transport); + fcoe_nport_scsi_transport = NULL; + fcoe_vport_scsi_transport = NULL; return 0; } @@ -1359,108 +1351,22 @@ err2: } /** - * fcoe_start_io() - Start FCoE I/O - * @skb: The packet to be transmitted - * - * This routine is called from the net device to start transmitting - * FCoE packets. - * - * Returns: 0 for success - */ -static inline int fcoe_start_io(struct sk_buff *skb) -{ - struct sk_buff *nskb; - int rc; - - nskb = skb_clone(skb, GFP_ATOMIC); - rc = dev_queue_xmit(nskb); - if (rc != 0) - return rc; - kfree_skb(skb); - return 0; -} - -/** - * fcoe_get_paged_crc_eof() - Allocate a page to be used for the trailer CRC + * fcoe_alloc_paged_crc_eof() - Allocate a page to be used for the trailer CRC * @skb: The packet to be transmitted * @tlen: The total length of the trailer * - * This routine allocates a page for frame trailers. The page is re-used if - * there is enough room left on it for the current trailer. If there isn't - * enough buffer left a new page is allocated for the trailer. Reference to - * the page from this function as well as the skbs using the page fragments - * ensure that the page is freed at the appropriate time. 
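The kernel-doc deleted above describes the trailer-page scheme that fcoe_alloc_paged_crc_eof() now delegates to a shared fcoe_get_paged_crc_eof() helper; the old open-coded body is visible as removed lines in the next hunk. A condensed sketch of the scheme, with illustrative names:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/skbuff.h>

struct crc_eof_cache {
	struct page *page;	/* current trailer page, if any */
	int offset;		/* next free slice within it */
};

static DEFINE_PER_CPU(struct crc_eof_cache, crc_eof_cache);

/* Append a tlen-byte trailer to skb from a per-CPU page shared across
 * frames. Each skb takes its own page reference, so the page survives
 * until the last frame using it has been freed. */
static int crc_eof_attach(struct sk_buff *skb, int tlen, int slice)
{
	struct crc_eof_cache *c = &get_cpu_var(crc_eof_cache);
	int err = 0;

	if (!c->page) {
		c->page = alloc_page(GFP_ATOMIC);
		if (!c->page) {
			err = -ENOMEM;
			goto out;
		}
		c->offset = 0;
	}

	get_page(c->page);		/* reference held by this skb */
	skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, c->page,
			   c->offset, tlen);
	skb->len += tlen;
	skb->data_len += tlen;
	skb->truesize += tlen;
	c->offset += slice;

	/* Page exhausted: drop the cache's reference; the next caller
	 * starts a fresh page. */
	if (c->offset >= PAGE_SIZE) {
		put_page(c->page);
		c->page = NULL;
		c->offset = 0;
	}
out:
	put_cpu_var(crc_eof_cache);
	return err;
}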
- * * Returns: 0 for success */ -static int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen) +static int fcoe_alloc_paged_crc_eof(struct sk_buff *skb, int tlen) { struct fcoe_percpu_s *fps; - struct page *page; + int rc; fps = &get_cpu_var(fcoe_percpu); - page = fps->crc_eof_page; - if (!page) { - page = alloc_page(GFP_ATOMIC); - if (!page) { - put_cpu_var(fcoe_percpu); - return -ENOMEM; - } - fps->crc_eof_page = page; - fps->crc_eof_offset = 0; - } - - get_page(page); - skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, - fps->crc_eof_offset, tlen); - skb->len += tlen; - skb->data_len += tlen; - skb->truesize += tlen; - fps->crc_eof_offset += sizeof(struct fcoe_crc_eof); - - if (fps->crc_eof_offset >= PAGE_SIZE) { - fps->crc_eof_page = NULL; - fps->crc_eof_offset = 0; - put_page(page); - } + rc = fcoe_get_paged_crc_eof(skb, tlen, fps); put_cpu_var(fcoe_percpu); - return 0; -} -/** - * fcoe_fc_crc() - Calculates the CRC for a given frame - * @fp: The frame to be checksumed - * - * This uses crc32() routine to calculate the CRC for a frame - * - * Return: The 32 bit CRC value - */ -u32 fcoe_fc_crc(struct fc_frame *fp) -{ - struct sk_buff *skb = fp_skb(fp); - struct skb_frag_struct *frag; - unsigned char *data; - unsigned long off, len, clen; - u32 crc; - unsigned i; - - crc = crc32(~0, skb->data, skb_headlen(skb)); - - for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - frag = &skb_shinfo(skb)->frags[i]; - off = frag->page_offset; - len = frag->size; - while (len > 0) { - clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK)); - data = kmap_atomic(frag->page + (off >> PAGE_SHIFT), - KM_SKB_DATA_SOFTIRQ); - crc = crc32(crc, data + (off & ~PAGE_MASK), clen); - kunmap_atomic(data, KM_SKB_DATA_SOFTIRQ); - off += clen; - len -= clen; - } - } - return crc; + return rc; } /** @@ -1483,7 +1389,7 @@ int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp) unsigned int tlen; /* trailer length */ unsigned int elen; /* eth header, may include vlan */ struct fcoe_port *port = lport_priv(lport); - struct fcoe_interface *fcoe = port->fcoe; + struct fcoe_interface *fcoe = port->priv; u8 sof, eof; struct fcoe_hdr *hp; @@ -1524,7 +1430,7 @@ int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp) /* copy port crc and eof to the skb buff */ if (skb_is_nonlinear(skb)) { skb_frag_t *frag; - if (fcoe_get_paged_crc_eof(skb, tlen)) { + if (fcoe_alloc_paged_crc_eof(skb, tlen)) { kfree_skb(skb); return -ENOMEM; } @@ -1604,6 +1510,56 @@ static void fcoe_percpu_flush_done(struct sk_buff *skb) } /** + * fcoe_filter_frames() - filter out bad fcoe frames, i.e. bad CRC + * @lport: The local port the frame was received on + * @fp: The received frame + * + * Return: 0 on passing filtering checks + */ +static inline int fcoe_filter_frames(struct fc_lport *lport, + struct fc_frame *fp) +{ + struct fcoe_interface *fcoe; + struct fc_frame_header *fh; + struct sk_buff *skb = (struct sk_buff *)fp; + struct fcoe_dev_stats *stats; + + /* + * We only check CRC if no offload is available and if it is + * solicited data, in which case, the FCP layer would + * check it during the copy.
+ */ + if (lport->crc_offload && skb->ip_summed == CHECKSUM_UNNECESSARY) + fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED; + else + fr_flags(fp) |= FCPHF_CRC_UNCHECKED; + + fh = (struct fc_frame_header *) skb_transport_header(skb); + fh = fc_frame_header_get(fp); + if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA && fh->fh_type == FC_TYPE_FCP) + return 0; + + fcoe = ((struct fcoe_port *)lport_priv(lport))->priv; + if (is_fip_mode(&fcoe->ctlr) && fc_frame_payload_op(fp) == ELS_LOGO && + ntoh24(fh->fh_s_id) == FC_FID_FLOGI) { + FCOE_DBG("fcoe: dropping FCoE lport LOGO in fip mode\n"); + return -EINVAL; + } + + if (!(fr_flags(fp) & FCPHF_CRC_UNCHECKED) || + le32_to_cpu(fr_crc(fp)) == ~crc32(~0, skb->data, skb->len)) { + fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED; + return 0; + } + + stats = per_cpu_ptr(lport->dev_stats, get_cpu()); + stats->InvalidCRCCount++; + if (stats->InvalidCRCCount < 5) + printk(KERN_WARNING "fcoe: dropping frame with CRC error\n"); + return -EINVAL; +} + +/** * fcoe_recv_frame() - process a single received frame * @skb: frame to process */ @@ -1613,7 +1569,6 @@ static void fcoe_recv_frame(struct sk_buff *skb) struct fc_lport *lport; struct fcoe_rcv_info *fr; struct fcoe_dev_stats *stats; - struct fc_frame_header *fh; struct fcoe_crc_eof crc_eof; struct fc_frame *fp; struct fcoe_port *port; @@ -1644,7 +1599,6 @@ static void fcoe_recv_frame(struct sk_buff *skb) * was done in fcoe_rcv already. */ hp = (struct fcoe_hdr *) skb_network_header(skb); - fh = (struct fc_frame_header *) skb_transport_header(skb); stats = per_cpu_ptr(lport->dev_stats, get_cpu()); if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) { @@ -1677,35 +1631,11 @@ static void fcoe_recv_frame(struct sk_buff *skb) if (pskb_trim(skb, fr_len)) goto drop; - /* - * We only check CRC if no offload is available and if it is - * it's solicited data, in which case, the FCP layer would - * check it during the copy. - */ - if (lport->crc_offload && - skb->ip_summed == CHECKSUM_UNNECESSARY) - fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED; - else - fr_flags(fp) |= FCPHF_CRC_UNCHECKED; - - fh = fc_frame_header_get(fp); - if ((fh->fh_r_ctl != FC_RCTL_DD_SOL_DATA || - fh->fh_type != FC_TYPE_FCP) && - (fr_flags(fp) & FCPHF_CRC_UNCHECKED)) { - if (le32_to_cpu(fr_crc(fp)) != - ~crc32(~0, skb->data, fr_len)) { - if (stats->InvalidCRCCount < 5) - printk(KERN_WARNING "fcoe: dropping " - "frame with CRC error\n"); - stats->InvalidCRCCount++; - goto drop; - } - fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED; + if (!fcoe_filter_frames(lport, fp)) { + put_cpu(); + fc_exch_recv(lport, fp); + return; } - put_cpu(); - fc_exch_recv(lport, fp); - return; - drop: stats->ErrorFrames++; put_cpu(); @@ -1744,64 +1674,6 @@ int fcoe_percpu_receive_thread(void *arg) } /** - * fcoe_check_wait_queue() - Attempt to clear the transmit backlog - * @lport: The local port whose backlog is to be cleared - * - * This empties the wait_queue, dequeues the head of the wait_queue queue - * and calls fcoe_start_io() for each packet. If all skb have been - * transmitted it returns the qlen. If an error occurs it restores - * wait_queue (to try again later) and returns -1. - * - * The wait_queue is used when the skb transmit fails. The failed skb - * will go in the wait_queue which will be emptied by the timer function or - * by the next skb transmit. 
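The helper whose kernel-doc is being removed above (its body follows in the next hunk) is not lost: fcoe_clean_pending_queue() is still called from fcoe_reset() later in this diff, so the backlog machinery has evidently moved into the shared transport code. Reduced to its core, the drain loop works like this (a sketch, not the relocated implementation):

#include <linux/skbuff.h>
#include <linux/spinlock.h>

/* Drain a transmit backlog protected by the queue's own lock. The
 * qlen++/qlen-- dance keeps qlen nonzero while one skb is in flight
 * outside the lock, so concurrent observers still see a busy queue. */
static void backlog_drain(struct sk_buff_head *q,
			  int (*xmit)(struct sk_buff *skb))
{
	struct sk_buff *skb;

	spin_lock_bh(&q->lock);
	while (q->qlen) {
		q->qlen++;		/* hold qlen > 0 during xmit */
		skb = __skb_dequeue(q);

		spin_unlock_bh(&q->lock);
		if (xmit(skb)) {
			spin_lock_bh(&q->lock);
			__skb_queue_head(q, skb);	/* retry later */
			q->qlen--;	/* undo the temporary hold */
			break;
		}
		spin_lock_bh(&q->lock);
		q->qlen--;		/* undo the temporary hold */
	}
	spin_unlock_bh(&q->lock);
}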
- */ -static void fcoe_check_wait_queue(struct fc_lport *lport, struct sk_buff *skb) -{ - struct fcoe_port *port = lport_priv(lport); - int rc; - - spin_lock_bh(&port->fcoe_pending_queue.lock); - - if (skb) - __skb_queue_tail(&port->fcoe_pending_queue, skb); - - if (port->fcoe_pending_queue_active) - goto out; - port->fcoe_pending_queue_active = 1; - - while (port->fcoe_pending_queue.qlen) { - /* keep qlen > 0 until fcoe_start_io succeeds */ - port->fcoe_pending_queue.qlen++; - skb = __skb_dequeue(&port->fcoe_pending_queue); - - spin_unlock_bh(&port->fcoe_pending_queue.lock); - rc = fcoe_start_io(skb); - spin_lock_bh(&port->fcoe_pending_queue.lock); - - if (rc) { - __skb_queue_head(&port->fcoe_pending_queue, skb); - /* undo temporary increment above */ - port->fcoe_pending_queue.qlen--; - break; - } - /* undo temporary increment above */ - port->fcoe_pending_queue.qlen--; - } - - if (port->fcoe_pending_queue.qlen < FCOE_LOW_QUEUE_DEPTH) - lport->qfull = 0; - if (port->fcoe_pending_queue.qlen && !timer_pending(&port->timer)) - mod_timer(&port->timer, jiffies + 2); - port->fcoe_pending_queue_active = 0; -out: - if (port->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH) - lport->qfull = 1; - spin_unlock_bh(&port->fcoe_pending_queue.lock); - return; -} - -/** * fcoe_dev_setup() - Setup the link change notification interface */ static void fcoe_dev_setup(void) @@ -1872,7 +1744,7 @@ static int fcoe_device_notification(struct notifier_block *notifier, list_del(&fcoe->list); port = lport_priv(fcoe->ctlr.lp); fcoe_interface_cleanup(fcoe); - schedule_work(&port->destroy_work); + queue_work(fcoe_wq, &port->destroy_work); goto out; break; case NETDEV_FEAT_CHANGE: @@ -1898,39 +1770,16 @@ out: } /** - * fcoe_if_to_netdev() - Parse a name buffer to get a net device - * @buffer: The name of the net device - * - * Returns: NULL or a ptr to net_device - */ -static struct net_device *fcoe_if_to_netdev(const char *buffer) -{ - char *cp; - char ifname[IFNAMSIZ + 2]; - - if (buffer) { - strlcpy(ifname, buffer, IFNAMSIZ); - cp = ifname + strlen(ifname); - while (--cp >= ifname && *cp == '\n') - *cp = '\0'; - return dev_get_by_name(&init_net, ifname); - } - return NULL; -} - -/** * fcoe_disable() - Disables a FCoE interface - * @buffer: The name of the Ethernet interface to be disabled - * @kp: The associated kernel parameter + * @netdev: The net_device of the Ethernet interface to be disabled * - * Called from sysfs. + * Called from fcoe transport.
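A detail worth noting in the conversions below: each entry point takes fcoe_config_mutex first and then only tries to take RTNL, backing out with -ERESTARTSYS when it cannot, instead of blocking with the mutex held and risking an inverted lock order against paths that take RTNL first. The shape in isolation (lock names are stand-ins):

#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/rtnetlink.h>

static DEFINE_MUTEX(cfg_mutex);	/* plays the fcoe_config_mutex role */

static int cfg_op_under_both_locks(void)
{
	mutex_lock(&cfg_mutex);
	if (!rtnl_trylock()) {
		/* Someone holds RTNL; give both locks back rather than
		 * sleeping on RTNL with cfg_mutex held. The caller (or
		 * the syscall layer) may retry. */
		mutex_unlock(&cfg_mutex);
		return -ERESTARTSYS;
	}

	/* ... netdev-affecting work happens here ... */

	rtnl_unlock();
	mutex_unlock(&cfg_mutex);
	return 0;
}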
* * Returns: 0 for success */ -static int fcoe_disable(const char *buffer, struct kernel_param *kp) +static int fcoe_disable(struct net_device *netdev) { struct fcoe_interface *fcoe; - struct net_device *netdev; int rc = 0; mutex_lock(&fcoe_config_mutex); @@ -1946,16 +1795,9 @@ } #endif - netdev = fcoe_if_to_netdev(buffer); - if (!netdev) { - rc = -ENODEV; - goto out_nodev; - } - if (!rtnl_trylock()) { - dev_put(netdev); mutex_unlock(&fcoe_config_mutex); - return restart_syscall(); + return -ERESTARTSYS; } fcoe = fcoe_hostlist_lookup_port(netdev); @@ -1967,7 +1809,6 @@ } else rc = -ENODEV; - dev_put(netdev); out_nodev: mutex_unlock(&fcoe_config_mutex); return rc; @@ -1975,17 +1816,15 @@ out_nodev: /** * fcoe_enable() - Enables a FCoE interface - * @buffer: The name of the Ethernet interface to be enabled - * @kp: The associated kernel parameter + * @netdev: The net_device of the Ethernet interface to be enabled * - * Called from sysfs. + * Called from fcoe transport. * * Returns: 0 for success */ -static int fcoe_enable(const char *buffer, struct kernel_param *kp) +static int fcoe_enable(struct net_device *netdev) { struct fcoe_interface *fcoe; - struct net_device *netdev; int rc = 0; mutex_lock(&fcoe_config_mutex); @@ -2000,17 +1839,9 @@ goto out_nodev; } #endif - - netdev = fcoe_if_to_netdev(buffer); - if (!netdev) { - rc = -ENODEV; - goto out_nodev; - } - if (!rtnl_trylock()) { - dev_put(netdev); mutex_unlock(&fcoe_config_mutex); - return restart_syscall(); + return -ERESTARTSYS; } fcoe = fcoe_hostlist_lookup_port(netdev); @@ -2021,7 +1852,6 @@ else if (!fcoe_link_ok(fcoe->ctlr.lp)) fcoe_ctlr_link_up(&fcoe->ctlr); - dev_put(netdev); out_nodev: mutex_unlock(&fcoe_config_mutex); return rc; @@ -2029,17 +1859,15 @@ /** * fcoe_destroy() - Destroy a FCoE interface - * @buffer: The name of the Ethernet interface to be destroyed - * @kp: The associated kernel parameter + * @netdev: The net_device of the Ethernet interface to be destroyed * - * Called from sysfs. + * Called from fcoe transport. * * Returns: 0 for success */ -static int fcoe_destroy(const char *buffer, struct kernel_param *kp) +static int fcoe_destroy(struct net_device *netdev) { struct fcoe_interface *fcoe; - struct net_device *netdev; int rc = 0; mutex_lock(&fcoe_config_mutex); @@ -2054,32 +1882,21 @@ goto out_nodev; } #endif - - netdev = fcoe_if_to_netdev(buffer); - if (!netdev) { - rc = -ENODEV; - goto out_nodev; - } - if (!rtnl_trylock()) { - dev_put(netdev); mutex_unlock(&fcoe_config_mutex); - return restart_syscall(); + return -ERESTARTSYS; } fcoe = fcoe_hostlist_lookup_port(netdev); if (!fcoe) { rtnl_unlock(); rc = -ENODEV; - goto out_putdev; + goto out_nodev; } fcoe_interface_cleanup(fcoe); list_del(&fcoe->list); /* RTNL mutex is dropped by fcoe_if_destroy */ fcoe_if_destroy(fcoe->ctlr.lp); - -out_putdev: - dev_put(netdev); out_nodev: mutex_unlock(&fcoe_config_mutex); return rc; @@ -2102,27 +1919,39 @@ static void fcoe_destroy_work(struct work_struct *work) } /** + * fcoe_match() - Check if the FCoE is supported on the given netdevice + * @netdev: The net_device being checked for FCoE support + * + * Called from fcoe transport.
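fcoe_match() below is the last of the callbacks that turn fcoe.c into one pluggable "fcoe transport" among several; the concrete ops table (fcoe_sw_transport) and its fcoe_transport_attach() registration appear further down in this diff. Schematically, and assuming none of the real libfcoe type names, the plumbing reduces to a callback table plus first-match dispatch:

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/types.h>

struct transport_sketch {
	struct list_head list;
	bool (*match)(struct net_device *netdev);
	int (*create)(struct net_device *netdev, int fip_mode);
	int (*destroy)(struct net_device *netdev);
	int (*enable)(struct net_device *netdev);
	int (*disable)(struct net_device *netdev);
};

static LIST_HEAD(transport_list);

/* The first registered transport whose match() accepts the netdev
 * wins; fcoe.c registers a catch-all whose match() always says yes,
 * making it the fallback for any interface no other transport claims. */
static int transport_create(struct net_device *netdev, int fip_mode)
{
	struct transport_sketch *t;

	list_for_each_entry(t, &transport_list, list)
		if (t->match(netdev))
			return t->create(netdev, fip_mode);
	return -ENODEV;
}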
+ * + * Returns: always returns true as this is the default FCoE transport, + * i.e., support all netdevs. + */ +static bool fcoe_match(struct net_device *netdev) +{ + return true; +} + +/** * fcoe_create() - Create a fcoe interface - * @buffer: The name of the Ethernet interface to create on - * @kp: The associated kernel param + * @netdev : The net_device object the Ethernet interface to create on + * @fip_mode: The FIP mode for this creation * - * Called from sysfs. + * Called from fcoe transport * * Returns: 0 for success */ -static int fcoe_create(const char *buffer, struct kernel_param *kp) +static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode) { - enum fip_state fip_mode = (enum fip_state)(long)kp->arg; int rc; struct fcoe_interface *fcoe; struct fc_lport *lport; - struct net_device *netdev; mutex_lock(&fcoe_config_mutex); if (!rtnl_trylock()) { mutex_unlock(&fcoe_config_mutex); - return restart_syscall(); + return -ERESTARTSYS; } #ifdef CONFIG_FCOE_MODULE @@ -2133,31 +1962,20 @@ static int fcoe_create(const char *buffer, struct kernel_param *kp) */ if (THIS_MODULE->state != MODULE_STATE_LIVE) { rc = -ENODEV; - goto out_nomod; - } -#endif - - if (!try_module_get(THIS_MODULE)) { - rc = -EINVAL; - goto out_nomod; - } - - netdev = fcoe_if_to_netdev(buffer); - if (!netdev) { - rc = -ENODEV; goto out_nodev; } +#endif /* look for existing lport */ if (fcoe_hostlist_lookup(netdev)) { rc = -EEXIST; - goto out_putdev; + goto out_nodev; } fcoe = fcoe_interface_create(netdev, fip_mode); - if (!fcoe) { - rc = -ENOMEM; - goto out_putdev; + if (IS_ERR(fcoe)) { + rc = PTR_ERR(fcoe); + goto out_nodev; } lport = fcoe_if_create(fcoe, &netdev->dev, 0); @@ -2186,18 +2004,13 @@ static int fcoe_create(const char *buffer, struct kernel_param *kp) * should be holding a reference taken in fcoe_if_create(). 
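fcoe_interface_create() now reports why it failed by encoding an errno into the returned pointer (ERR_PTR(-EBUSY) and ERR_PTR(-ENOMEM) earlier in this diff), and the caller above recovers it with IS_ERR()/PTR_ERR() instead of mapping every failure to -ENOMEM. The idiom in isolation:

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/types.h>

struct thing { int x; };

static struct thing *thing_create(bool busy)
{
	struct thing *t;

	if (busy)
		return ERR_PTR(-EBUSY);	/* a specific reason, not NULL */

	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return ERR_PTR(-ENOMEM);
	return t;
}

static int thing_user(void)
{
	struct thing *t = thing_create(false);

	if (IS_ERR(t))
		return PTR_ERR(t);	/* propagate the encoded errno */
	/* ... use t ... */
	kfree(t);
	return 0;
}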
*/ fcoe_interface_put(fcoe); - dev_put(netdev); rtnl_unlock(); mutex_unlock(&fcoe_config_mutex); return 0; out_free: fcoe_interface_put(fcoe); -out_putdev: - dev_put(netdev); out_nodev: - module_put(THIS_MODULE); -out_nomod: rtnl_unlock(); mutex_unlock(&fcoe_config_mutex); return rc; @@ -2212,8 +2025,7 @@ out_nomod: */ int fcoe_link_speed_update(struct fc_lport *lport) { - struct fcoe_port *port = lport_priv(lport); - struct net_device *netdev = port->fcoe->netdev; + struct net_device *netdev = fcoe_netdev(lport); struct ethtool_cmd ecmd = { ETHTOOL_GSET }; if (!dev_ethtool_get_settings(netdev, &ecmd)) { @@ -2244,8 +2056,7 @@ int fcoe_link_speed_update(struct fc_lport *lport) */ int fcoe_link_ok(struct fc_lport *lport) { - struct fcoe_port *port = lport_priv(lport); - struct net_device *netdev = port->fcoe->netdev; + struct net_device *netdev = fcoe_netdev(lport); if (netif_oper_up(netdev)) return 0; @@ -2309,24 +2120,6 @@ void fcoe_percpu_clean(struct fc_lport *lport) } /** - * fcoe_clean_pending_queue() - Dequeue a skb and free it - * @lport: The local port to dequeue a skb on - */ -void fcoe_clean_pending_queue(struct fc_lport *lport) -{ - struct fcoe_port *port = lport_priv(lport); - struct sk_buff *skb; - - spin_lock_bh(&port->fcoe_pending_queue.lock); - while ((skb = __skb_dequeue(&port->fcoe_pending_queue)) != NULL) { - spin_unlock_bh(&port->fcoe_pending_queue.lock); - kfree_skb(skb); - spin_lock_bh(&port->fcoe_pending_queue.lock); - } - spin_unlock_bh(&port->fcoe_pending_queue.lock); -} - -/** * fcoe_reset() - Reset a local port * @shost: The SCSI host associated with the local port to be reset * @@ -2335,7 +2128,13 @@ void fcoe_clean_pending_queue(struct fc_lport *lport) int fcoe_reset(struct Scsi_Host *shost) { struct fc_lport *lport = shost_priv(shost); - fc_lport_reset(lport); + struct fcoe_port *port = lport_priv(lport); + struct fcoe_interface *fcoe = port->priv; + + fcoe_ctlr_link_down(&fcoe->ctlr); + fcoe_clean_pending_queue(fcoe->ctlr.lp); + if (!fcoe_link_ok(fcoe->ctlr.lp)) + fcoe_ctlr_link_up(&fcoe->ctlr); return 0; } @@ -2393,12 +2192,24 @@ static int fcoe_hostlist_add(const struct fc_lport *lport) fcoe = fcoe_hostlist_lookup_port(fcoe_netdev(lport)); if (!fcoe) { port = lport_priv(lport); - fcoe = port->fcoe; + fcoe = port->priv; list_add_tail(&fcoe->list, &fcoe_hostlist); } return 0; } + +static struct fcoe_transport fcoe_sw_transport = { + .name = {FCOE_TRANSPORT_DEFAULT}, + .attached = false, + .list = LIST_HEAD_INIT(fcoe_sw_transport.list), + .match = fcoe_match, + .create = fcoe_create, + .destroy = fcoe_destroy, + .enable = fcoe_enable, + .disable = fcoe_disable, +}; + /** * fcoe_init() - Initialize fcoe.ko * @@ -2410,6 +2221,18 @@ static int __init fcoe_init(void) unsigned int cpu; int rc = 0; + fcoe_wq = alloc_workqueue("fcoe", 0, 0); + if (!fcoe_wq) + return -ENOMEM; + + /* register as a fcoe transport */ + rc = fcoe_transport_attach(&fcoe_sw_transport); + if (rc) { + printk(KERN_ERR "failed to register an fcoe transport, check " + "if libfcoe is loaded\n"); + return rc; + } + mutex_lock(&fcoe_config_mutex); for_each_possible_cpu(cpu) { @@ -2440,6 +2263,7 @@ out_free: fcoe_percpu_thread_destroy(cpu); } mutex_unlock(&fcoe_config_mutex); + destroy_workqueue(fcoe_wq); return rc; } module_init(fcoe_init); @@ -2465,7 +2289,7 @@ static void __exit fcoe_exit(void) list_del(&fcoe->list); port = lport_priv(fcoe->ctlr.lp); fcoe_interface_cleanup(fcoe); - schedule_work(&port->destroy_work); + queue_work(fcoe_wq, &port->destroy_work); } rtnl_unlock(); @@ -2476,16 +2300,21 
@@ static void __exit fcoe_exit(void) mutex_unlock(&fcoe_config_mutex); - /* flush any asyncronous interface destroys, - * this should happen after the netdev notifier is unregistered */ - flush_scheduled_work(); - /* That will flush out all the N_Ports on the hostlist, but now we - * may have NPIV VN_Ports scheduled for destruction */ - flush_scheduled_work(); + /* + * destroy_work items may be chained, but destroy_workqueue() + * can take care of them. Just kill the fcoe_wq. + */ + destroy_workqueue(fcoe_wq); - /* detach from scsi transport - * must happen after all destroys are done, therefor after the flush */ + /* + * Detaching from the scsi transport must happen after all + * destroys are done on the fcoe_wq. destroy_workqueue will + * ensure the fcoe_wq is flushed. + */ fcoe_if_exit(); + + /* detach from fcoe transport */ + fcoe_transport_detach(&fcoe_sw_transport); } module_exit(fcoe_exit); @@ -2557,7 +2386,7 @@ static struct fc_seq *fcoe_elsct_send(struct fc_lport *lport, u32 did, void *arg, u32 timeout) { struct fcoe_port *port = lport_priv(lport); - struct fcoe_interface *fcoe = port->fcoe; + struct fcoe_interface *fcoe = port->priv; struct fcoe_ctlr *fip = &fcoe->ctlr; struct fc_frame_header *fh = fc_frame_header_get(fp); @@ -2590,7 +2419,7 @@ static int fcoe_vport_create(struct fc_vport *vport, bool disabled) { struct Scsi_Host *shost = vport_to_shost(vport); struct fc_lport *n_port = shost_priv(shost); struct fcoe_port *port = lport_priv(n_port); - struct fcoe_interface *fcoe = port->fcoe; + struct fcoe_interface *fcoe = port->priv; struct net_device *netdev = fcoe->netdev; struct fc_lport *vn_port; @@ -2630,7 +2459,7 @@ static int fcoe_vport_destroy(struct fc_vport *vport) mutex_lock(&n_port->lp_mutex); list_del(&vn_port->list); mutex_unlock(&n_port->lp_mutex); - schedule_work(&port->destroy_work); + queue_work(fcoe_wq, &port->destroy_work); return 0; } @@ -2734,7 +2563,7 @@ static void fcoe_set_port_id(struct fc_lport *lport, u32 port_id, struct fc_frame *fp) { struct fcoe_port *port = lport_priv(lport); - struct fcoe_interface *fcoe = port->fcoe; + struct fcoe_interface *fcoe = port->priv; if (fp && fc_frame_payload_op(fp) == ELS_FLOGI) fcoe_ctlr_recv_flogi(&fcoe->ctlr, lport, fp); diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h index c69b2c56c2d1..408a6fd78fb4 100644 --- a/drivers/scsi/fcoe/fcoe.h +++ b/drivers/scsi/fcoe/fcoe.h @@ -24,7 +24,7 @@ #include <linux/kthread.h> #define FCOE_MAX_QUEUE_DEPTH 256 -#define FCOE_LOW_QUEUE_DEPTH 32 +#define FCOE_MIN_QUEUE_DEPTH 32 #define FCOE_WORD_TO_BYTE 4 @@ -40,12 +40,6 @@ #define FCOE_MIN_XID 0x0000 /* the min xid supported by fcoe_sw */ #define FCOE_MAX_XID 0x0FFF /* the max xid supported by fcoe_sw */ -/* - * Max MTU for FCoE: 14 (FCoE header) + 24 (FC header) + 2112 (max FC payload) - * + 4 (FC CRC) + 4 (FCoE trailer) = 2158 bytes - */ -#define FCOE_MTU 2158 - unsigned int fcoe_debug_logging; module_param_named(debug_logging, fcoe_debug_logging, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels"); @@ -71,21 +65,6 @@ do { \ netdev->name, ##args);) /** - * struct fcoe_percpu_s - The per-CPU context for FCoE receive threads - * @thread: The thread context - * @fcoe_rx_list: The queue of pending packets to process - * @page: The memory page for calculating frame trailer CRCs - * @crc_eof_offset: The offset into the CRC page pointing to available - * memory for a new trailer - */ -struct fcoe_percpu_s { - struct task_struct *thread; - struct sk_buff_head fcoe_rx_list; - struct page
*crc_eof_page; - int crc_eof_offset; -}; - -/** * struct fcoe_interface - A FCoE interface * @list: Handle for a list of FCoE interfaces * @netdev: The associated net device @@ -108,30 +87,6 @@ struct fcoe_interface { struct kref kref; }; -/** - * struct fcoe_port - The FCoE private structure - * @fcoe: The associated fcoe interface - * @lport: The associated local port - * @fcoe_pending_queue: The pending Rx queue of skbs - * @fcoe_pending_queue_active: Indicates if the pending queue is active - * @timer: The queue timer - * @destroy_work: Handle for work context - * (to prevent RTNL deadlocks) - * @data_srt_addr: Source address for data - * - * An instance of this structure is to be allocated along with the - * Scsi_Host and libfc fc_lport structures. - */ -struct fcoe_port { - struct fcoe_interface *fcoe; - struct fc_lport *lport; - struct sk_buff_head fcoe_pending_queue; - u8 fcoe_pending_queue_active; - struct timer_list timer; - struct work_struct destroy_work; - u8 data_src_addr[ETH_ALEN]; -}; - #define fcoe_from_ctlr(fip) container_of(fip, struct fcoe_interface, ctlr) /** @@ -140,7 +95,8 @@ struct fcoe_port { */ static inline struct net_device *fcoe_netdev(const struct fc_lport *lport) { - return ((struct fcoe_port *)lport_priv(lport))->fcoe->netdev; + return ((struct fcoe_interface *) + ((struct fcoe_port *)lport_priv(lport))->priv)->netdev; } #endif /* _FCOE_H_ */ diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/fcoe_ctlr.c index 625c6be25396..c93f007e702f 100644 --- a/drivers/scsi/fcoe/libfcoe.c +++ b/drivers/scsi/fcoe/fcoe_ctlr.c @@ -44,9 +44,7 @@ #include <scsi/libfc.h> #include <scsi/libfcoe.h> -MODULE_AUTHOR("Open-FCoE.org"); -MODULE_DESCRIPTION("FIP discovery protocol support for FCoE HBAs"); -MODULE_LICENSE("GPL v2"); +#include "libfcoe.h" #define FCOE_CTLR_MIN_FKA 500 /* min keep alive (mS) */ #define FCOE_CTLR_DEF_FKA FIP_DEF_FKA /* default keep alive (mS) */ @@ -66,31 +64,7 @@ static u8 fcoe_all_enode[ETH_ALEN] = FIP_ALL_ENODE_MACS; static u8 fcoe_all_vn2vn[ETH_ALEN] = FIP_ALL_VN2VN_MACS; static u8 fcoe_all_p2p[ETH_ALEN] = FIP_ALL_P2P_MACS; -unsigned int libfcoe_debug_logging; -module_param_named(debug_logging, libfcoe_debug_logging, int, S_IRUGO|S_IWUSR); -MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels"); - -#define LIBFCOE_LOGGING 0x01 /* General logging, not categorized */ -#define LIBFCOE_FIP_LOGGING 0x02 /* FIP logging */ - -#define LIBFCOE_CHECK_LOGGING(LEVEL, CMD) \ -do { \ - if (unlikely(libfcoe_debug_logging & LEVEL)) \ - do { \ - CMD; \ - } while (0); \ -} while (0) - -#define LIBFCOE_DBG(fmt, args...) \ - LIBFCOE_CHECK_LOGGING(LIBFCOE_LOGGING, \ - printk(KERN_INFO "libfcoe: " fmt, ##args);) - -#define LIBFCOE_FIP_DBG(fip, fmt, args...) 
\ - LIBFCOE_CHECK_LOGGING(LIBFCOE_FIP_LOGGING, \ - printk(KERN_INFO "host%d: fip: " fmt, \ - (fip)->lp->host->host_no, ##args);) - -static const char *fcoe_ctlr_states[] = { +static const char * const fcoe_ctlr_states[] = { [FIP_ST_DISABLED] = "DISABLED", [FIP_ST_LINK_WAIT] = "LINK_WAIT", [FIP_ST_AUTO] = "AUTO", @@ -308,8 +282,8 @@ static void fcoe_ctlr_solicit(struct fcoe_ctlr *fip, struct fcoe_fcf *fcf) struct fip_mac_desc mac; struct fip_wwn_desc wwnn; struct fip_size_desc size; - } __attribute__((packed)) desc; - } __attribute__((packed)) *sol; + } __packed desc; + } __packed * sol; u32 fcoe_size; skb = dev_alloc_skb(sizeof(*sol)); @@ -456,7 +430,7 @@ static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip, struct ethhdr eth; struct fip_header fip; struct fip_mac_desc mac; - } __attribute__((packed)) *kal; + } __packed * kal; struct fip_vn_desc *vn; u32 len; struct fc_lport *lp; @@ -527,7 +501,7 @@ static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip, struct fc_lport *lport, struct ethhdr eth; struct fip_header fip; struct fip_encaps encaps; - } __attribute__((packed)) *cap; + } __packed * cap; struct fc_frame_header *fh; struct fip_mac_desc *mac; struct fcoe_fcf *fcf; @@ -1819,7 +1793,7 @@ static void fcoe_ctlr_vn_send(struct fcoe_ctlr *fip, struct fip_mac_desc mac; struct fip_wwn_desc wwnn; struct fip_vn_desc vn; - } __attribute__((packed)) *frame; + } __packed * frame; struct fip_fc4_feat *ff; struct fip_size_desc *size; u32 fcp_feat; diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c new file mode 100644 index 000000000000..258684101bfd --- /dev/null +++ b/drivers/scsi/fcoe/fcoe_transport.c @@ -0,0 +1,770 @@ +/* + * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
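
With the module split, fcoe_transport.c becomes the owner of the sysfs-facing plumbing: the create/destroy/enable/disable parameters below are routed to whichever attached transport claims the named netdev, through the ops table seen in fcoe_sw_transport earlier in this patch. A minimal sketch of how another FCoE LLD might plug into this interface; every foo_* name is hypothetical, and the callback signatures are inferred from the ft->match()/ft->create()/ft->destroy() call sites in this file:

#include <linux/netdevice.h>
#include <scsi/libfcoe.h>

static bool foo_match(struct net_device *netdev)
{
	return false;	/* claim only netdevs this LLD can drive */
}

static int foo_create(struct net_device *netdev, enum fip_state fip_mode)
{
	return -ENODEV;	/* allocate the ctlr/lport pair here */
}

static int foo_destroy(struct net_device *netdev)
{
	return 0;	/* tear down the instance created above */
}

static struct fcoe_transport foo_transport = {
	.name	  = {"foo"},
	.attached = false,
	.list	  = LIST_HEAD_INIT(foo_transport.list),
	.match	  = foo_match,
	.create	  = foo_create,
	.destroy  = foo_destroy,
};

static int __init foo_init(void)
{
	return fcoe_transport_attach(&foo_transport);	/* serialized by ft_mutex */
}

static void __exit foo_exit(void)
{
	fcoe_transport_detach(&foo_transport);
}

Non-default transports are deliberately list_add()ed at the head of the list, so fcoe_transport_lookup() tries them before falling through to the default fcoe_sw_transport kept at the tail.
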
+ * + * Maintained at www.Open-FCoE.org + */ + +#include <linux/types.h> +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/netdevice.h> +#include <linux/errno.h> +#include <linux/crc32.h> +#include <scsi/libfcoe.h> + +#include "libfcoe.h" + +MODULE_AUTHOR("Open-FCoE.org"); +MODULE_DESCRIPTION("FIP discovery protocol and FCoE transport for FCoE HBAs"); +MODULE_LICENSE("GPL v2"); + +static int fcoe_transport_create(const char *, struct kernel_param *); +static int fcoe_transport_destroy(const char *, struct kernel_param *); +static int fcoe_transport_show(char *buffer, const struct kernel_param *kp); +static struct fcoe_transport *fcoe_transport_lookup(struct net_device *device); +static struct fcoe_transport *fcoe_netdev_map_lookup(struct net_device *device); +static int fcoe_transport_enable(const char *, struct kernel_param *); +static int fcoe_transport_disable(const char *, struct kernel_param *); +static int libfcoe_device_notification(struct notifier_block *notifier, + ulong event, void *ptr); + +static LIST_HEAD(fcoe_transports); +static DEFINE_MUTEX(ft_mutex); +static LIST_HEAD(fcoe_netdevs); +static DEFINE_MUTEX(fn_mutex); + +unsigned int libfcoe_debug_logging; +module_param_named(debug_logging, libfcoe_debug_logging, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels"); + +module_param_call(show, NULL, fcoe_transport_show, NULL, S_IRUSR); +__MODULE_PARM_TYPE(show, "string"); +MODULE_PARM_DESC(show, " Show attached FCoE transports"); + +module_param_call(create, fcoe_transport_create, NULL, + (void *)FIP_MODE_FABRIC, S_IWUSR); +__MODULE_PARM_TYPE(create, "string"); +MODULE_PARM_DESC(create, " Creates an fcoe instance on an Ethernet interface"); + +module_param_call(create_vn2vn, fcoe_transport_create, NULL, + (void *)FIP_MODE_VN2VN, S_IWUSR); +__MODULE_PARM_TYPE(create_vn2vn, "string"); +MODULE_PARM_DESC(create_vn2vn, " Creates a VN_node to VN_node FCoE instance " + "on an Ethernet interface"); + +module_param_call(destroy, fcoe_transport_destroy, NULL, NULL, S_IWUSR); +__MODULE_PARM_TYPE(destroy, "string"); +MODULE_PARM_DESC(destroy, " Destroys the fcoe instance on an Ethernet interface"); + +module_param_call(enable, fcoe_transport_enable, NULL, NULL, S_IWUSR); +__MODULE_PARM_TYPE(enable, "string"); +MODULE_PARM_DESC(enable, " Enables fcoe on an Ethernet interface."); + +module_param_call(disable, fcoe_transport_disable, NULL, NULL, S_IWUSR); +__MODULE_PARM_TYPE(disable, "string"); +MODULE_PARM_DESC(disable, " Disables fcoe on an Ethernet interface."); + +/* notifier for net device events */ +static struct notifier_block libfcoe_notifier = { + .notifier_call = libfcoe_device_notification, +}; + +/** + * fcoe_fc_crc() - Calculates the CRC for a given frame + * @fp: The frame to be checksummed + * + * This uses the crc32() routine to calculate the CRC for a frame + * + * Return: The 32 bit CRC value + */ +u32 fcoe_fc_crc(struct fc_frame *fp) +{ + struct sk_buff *skb = fp_skb(fp); + struct skb_frag_struct *frag; + unsigned char *data; + unsigned long off, len, clen; + u32 crc; + unsigned i; + + crc = crc32(~0, skb->data, skb_headlen(skb)); + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + frag = &skb_shinfo(skb)->frags[i]; + off = frag->page_offset; + len = frag->size; + while (len > 0) { + clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK)); + data = kmap_atomic(frag->page + (off >> PAGE_SHIFT), + KM_SKB_DATA_SOFTIRQ); + crc = crc32(crc, data + (off & ~PAGE_MASK), clen); + kunmap_atomic(data,
KM_SKB_DATA_SOFTIRQ); + off += clen; + len -= clen; + } + } + return crc; +} +EXPORT_SYMBOL_GPL(fcoe_fc_crc); + + +/** + * fcoe_start_io() - Start FCoE I/O + * @skb: The packet to be transmitted + * + * This routine clones the packet and hands the clone to the net device + * for transmission. On success the original skb is freed; on failure it + * is left to the caller so that it can be requeued. + * + * Returns: 0 for success + */ +int fcoe_start_io(struct sk_buff *skb) +{ + struct sk_buff *nskb; + int rc; + + nskb = skb_clone(skb, GFP_ATOMIC); + if (!nskb) + return -ENOMEM; + rc = dev_queue_xmit(nskb); + if (rc != 0) + return rc; + kfree_skb(skb); + return 0; +} +EXPORT_SYMBOL_GPL(fcoe_start_io); + + +/** + * fcoe_clean_pending_queue() - Dequeue and free all skbs on the pending queue + * @lport: The local port whose pending queue is to be drained + */ +void fcoe_clean_pending_queue(struct fc_lport *lport) +{ + struct fcoe_port *port = lport_priv(lport); + struct sk_buff *skb; + + spin_lock_bh(&port->fcoe_pending_queue.lock); + while ((skb = __skb_dequeue(&port->fcoe_pending_queue)) != NULL) { + spin_unlock_bh(&port->fcoe_pending_queue.lock); + kfree_skb(skb); + spin_lock_bh(&port->fcoe_pending_queue.lock); + } + spin_unlock_bh(&port->fcoe_pending_queue.lock); +} +EXPORT_SYMBOL_GPL(fcoe_clean_pending_queue); + +/** + * fcoe_check_wait_queue() - Attempt to clear the transmit backlog + * @lport: The local port whose backlog is to be cleared + * + * This dequeues each skb on the pending queue in turn and hands it to + * fcoe_start_io(). If a transmit fails, the skb is requeued at the head + * and the queue timer is armed to retry later. lport->qfull is updated + * against the minimum and maximum queue depths. + * + * The pending queue is used when an skb transmit fails. The failed skb + * will go on the pending queue, which is emptied by the timer function or + * by the next skb transmit. + */ +void fcoe_check_wait_queue(struct fc_lport *lport, struct sk_buff *skb) +{ + struct fcoe_port *port = lport_priv(lport); + int rc; + + spin_lock_bh(&port->fcoe_pending_queue.lock); + + if (skb) + __skb_queue_tail(&port->fcoe_pending_queue, skb); + + if (port->fcoe_pending_queue_active) + goto out; + port->fcoe_pending_queue_active = 1; + + while (port->fcoe_pending_queue.qlen) { + /* keep qlen > 0 until fcoe_start_io succeeds */ + port->fcoe_pending_queue.qlen++; + skb = __skb_dequeue(&port->fcoe_pending_queue); + + spin_unlock_bh(&port->fcoe_pending_queue.lock); + rc = fcoe_start_io(skb); + spin_lock_bh(&port->fcoe_pending_queue.lock); + + if (rc) { + __skb_queue_head(&port->fcoe_pending_queue, skb); + /* undo temporary increment above */ + port->fcoe_pending_queue.qlen--; + break; + } + /* undo temporary increment above */ + port->fcoe_pending_queue.qlen--; + } + + if (port->fcoe_pending_queue.qlen < port->min_queue_depth) + lport->qfull = 0; + if (port->fcoe_pending_queue.qlen && !timer_pending(&port->timer)) + mod_timer(&port->timer, jiffies + 2); + port->fcoe_pending_queue_active = 0; +out: + if (port->fcoe_pending_queue.qlen > port->max_queue_depth) + lport->qfull = 1; + spin_unlock_bh(&port->fcoe_pending_queue.lock); +} +EXPORT_SYMBOL_GPL(fcoe_check_wait_queue); + +/** + * fcoe_queue_timer() - The fcoe queue timer + * @lport: The local port + * + * Calls fcoe_check_wait_queue on timeout + */ +void fcoe_queue_timer(ulong lport) +{ + fcoe_check_wait_queue((struct fc_lport *)lport, NULL); +} +EXPORT_SYMBOL_GPL(fcoe_queue_timer); + +/** + * fcoe_get_paged_crc_eof() - Allocate a page to be used for the trailer CRC + * @skb: The packet to be transmitted + * @tlen: The total length of the trailer + * @fps: The fcoe context + * + * This routine allocates a
page for frame trailers. The page is re-used if + * there is enough room left on it for the current trailer. If there isn't + * enough room left, a new page is allocated for the trailer. References to + * the page from this function, as well as from the skbs using the page + * fragments, ensure that the page is freed at the appropriate time. + * + * Returns: 0 for success + */ +int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen, + struct fcoe_percpu_s *fps) +{ + struct page *page; + + page = fps->crc_eof_page; + if (!page) { + page = alloc_page(GFP_ATOMIC); + if (!page) + return -ENOMEM; + + fps->crc_eof_page = page; + fps->crc_eof_offset = 0; + } + + get_page(page); + skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, + fps->crc_eof_offset, tlen); + skb->len += tlen; + skb->data_len += tlen; + skb->truesize += tlen; + fps->crc_eof_offset += sizeof(struct fcoe_crc_eof); + + if (fps->crc_eof_offset >= PAGE_SIZE) { + fps->crc_eof_page = NULL; + fps->crc_eof_offset = 0; + put_page(page); + } + + return 0; +} +EXPORT_SYMBOL_GPL(fcoe_get_paged_crc_eof); + +/** + * fcoe_transport_lookup - find an fcoe transport that matches a netdev + * @netdev: The netdev to look for from all attached transports + * + * Returns : ptr to the fcoe transport that supports this netdev or NULL + * if not found. + * + * The ft_mutex should be held when this is called + */ +static struct fcoe_transport *fcoe_transport_lookup(struct net_device *netdev) +{ + struct fcoe_transport *ft = NULL; + + list_for_each_entry(ft, &fcoe_transports, list) + if (ft->match && ft->match(netdev)) + return ft; + return NULL; +} + +/** + * fcoe_transport_attach - Attaches an FCoE transport + * @ft: The fcoe transport to be attached + * + * Returns : 0 for success + */ +int fcoe_transport_attach(struct fcoe_transport *ft) +{ + int rc = 0; + + mutex_lock(&ft_mutex); + if (ft->attached) { + LIBFCOE_TRANSPORT_DBG("transport %s already attached\n", + ft->name); + rc = -EEXIST; + goto out_attach; + } + + /* Add default transport to the tail */ + if (strcmp(ft->name, FCOE_TRANSPORT_DEFAULT)) + list_add(&ft->list, &fcoe_transports); + else + list_add_tail(&ft->list, &fcoe_transports); + + ft->attached = true; + LIBFCOE_TRANSPORT_DBG("attaching transport %s\n", ft->name); + +out_attach: + mutex_unlock(&ft_mutex); + return rc; +} +EXPORT_SYMBOL(fcoe_transport_attach); + +/** + * fcoe_transport_detach - Detaches an FCoE transport + * @ft: The fcoe transport to be detached + * + * Returns : 0 for success + */ +int fcoe_transport_detach(struct fcoe_transport *ft) +{ + int rc = 0; + + mutex_lock(&ft_mutex); + if (!ft->attached) { + LIBFCOE_TRANSPORT_DBG("transport %s already detached\n", + ft->name); + rc = -ENODEV; + goto out_attach; + } + + list_del(&ft->list); + ft->attached = false; + LIBFCOE_TRANSPORT_DBG("detaching transport %s\n", ft->name); + +out_attach: + mutex_unlock(&ft_mutex); + return rc; + +} +EXPORT_SYMBOL(fcoe_transport_detach); + +static int fcoe_transport_show(char *buffer, const struct kernel_param *kp) +{ + int i, j; + struct fcoe_transport *ft = NULL; + + i = j = sprintf(buffer, "Attached FCoE transports:"); + mutex_lock(&ft_mutex); + list_for_each_entry(ft, &fcoe_transports, list) { + i += snprintf(&buffer[i], IFNAMSIZ, "%s ", ft->name); + if (i >= PAGE_SIZE) + break; + } + mutex_unlock(&ft_mutex); + if (i == j) + i += snprintf(&buffer[i], IFNAMSIZ, "none"); + return i; +} + +static int __init fcoe_transport_init(void) +{ + register_netdevice_notifier(&libfcoe_notifier); + return 0; +} + +static int __exit
fcoe_transport_exit(void) +{ + struct fcoe_transport *ft; + + unregister_netdevice_notifier(&libfcoe_notifier); + mutex_lock(&ft_mutex); + list_for_each_entry(ft, &fcoe_transports, list) + printk(KERN_ERR "FCoE transport %s is still attached!\n", + ft->name); + mutex_unlock(&ft_mutex); + return 0; +} + + +static int fcoe_add_netdev_mapping(struct net_device *netdev, + struct fcoe_transport *ft) +{ + struct fcoe_netdev_mapping *nm; + + nm = kmalloc(sizeof(*nm), GFP_KERNEL); + if (!nm) { + printk(KERN_ERR "Unable to allocate netdev_mapping"); + return -ENOMEM; + } + + nm->netdev = netdev; + nm->ft = ft; + + mutex_lock(&fn_mutex); + list_add(&nm->list, &fcoe_netdevs); + mutex_unlock(&fn_mutex); + return 0; +} + + +static void fcoe_del_netdev_mapping(struct net_device *netdev) +{ + struct fcoe_netdev_mapping *nm = NULL, *tmp; + + mutex_lock(&fn_mutex); + list_for_each_entry_safe(nm, tmp, &fcoe_netdevs, list) { + if (nm->netdev == netdev) { + list_del(&nm->list); + kfree(nm); + mutex_unlock(&fn_mutex); + return; + } + } + mutex_unlock(&fn_mutex); +} + + +/** + * fcoe_netdev_map_lookup - find the fcoe transport that matches the netdev on which + * it was created + * + * Returns : ptr to the fcoe transport that supports this netdev or NULL + * if not found. + * + * The ft_mutex should be held when this is called + */ +static struct fcoe_transport *fcoe_netdev_map_lookup(struct net_device *netdev) +{ + struct fcoe_transport *ft = NULL; + struct fcoe_netdev_mapping *nm; + + mutex_lock(&fn_mutex); + list_for_each_entry(nm, &fcoe_netdevs, list) { + if (netdev == nm->netdev) { + ft = nm->ft; + mutex_unlock(&fn_mutex); + return ft; + } + } + + mutex_unlock(&fn_mutex); + return NULL; +} + +/** + * fcoe_if_to_netdev() - Parse a name buffer to get a net device + * @buffer: The name of the net device + * + * Returns: NULL or a ptr to net_device + */ +static struct net_device *fcoe_if_to_netdev(const char *buffer) +{ + char *cp; + char ifname[IFNAMSIZ + 2]; + + if (buffer) { + strlcpy(ifname, buffer, IFNAMSIZ); + cp = ifname + strlen(ifname); + while (--cp >= ifname && *cp == '\n') + *cp = '\0'; + return dev_get_by_name(&init_net, ifname); + } + return NULL; +} + +/** + * libfcoe_device_notification() - Handler for net device events + * @notifier: The context of the notification + * @event: The type of event + * @ptr: The net device that the event was on + * + * This function is called by the Ethernet driver in case of link change event. + * + * Returns: 0 for success + */ +static int libfcoe_device_notification(struct notifier_block *notifier, + ulong event, void *ptr) +{ + struct net_device *netdev = ptr; + + switch (event) { + case NETDEV_UNREGISTER: + printk(KERN_ERR "libfcoe_device_notification: NETDEV_UNREGISTER %s\n", + netdev->name); + fcoe_del_netdev_mapping(netdev); + break; + } + return NOTIFY_OK; +} + + +/** + * fcoe_transport_create() - Create a fcoe interface + * @buffer: The name of the Ethernet interface to create on + * @kp: The associated kernel param + * + * Called from sysfs. This holds the ft_mutex while calling the + * registered fcoe transport's create function. 
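
The add/del/lookup helpers above form a small ownership registry: each successful create records which transport drives which netdev, so later destroy/enable/disable writes are routed back to the same transport, and the NETDEV_UNREGISTER notifier can prune stale entries. The element type is not visible in this hunk; judging from the nm->list/nm->netdev/nm->ft accesses, it is presumably shaped like this:

/* presumed layout of the mapping element, inferred from its uses above */
struct fcoe_netdev_mapping {
	struct list_head list;		/* linked on fcoe_netdevs */
	struct net_device *netdev;	/* the key */
	struct fcoe_transport *ft;	/* the owning transport */
};

Note that the registry has its own lock: fn_mutex nests inside ft_mutex on the create path, while the netdev notifier takes only fn_mutex, so unregister events never contend for the transport list.
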
+ * + * Returns: 0 for success + */ +static int fcoe_transport_create(const char *buffer, struct kernel_param *kp) +{ + int rc = -ENODEV; + struct net_device *netdev = NULL; + struct fcoe_transport *ft = NULL; + enum fip_state fip_mode = (enum fip_state)(long)kp->arg; + + if (!mutex_trylock(&ft_mutex)) + return restart_syscall(); + +#ifdef CONFIG_LIBFCOE_MODULE + /* + * Make sure the module has been initialized, and is not about to be + * removed. Module parameter sysfs files are writable before the + * module_init function is called and after module_exit. + */ + if (THIS_MODULE->state != MODULE_STATE_LIVE) + goto out_nodev; +#endif + + netdev = fcoe_if_to_netdev(buffer); + if (!netdev) { + LIBFCOE_TRANSPORT_DBG("Invalid device %s.\n", buffer); + goto out_nodev; + } + + ft = fcoe_netdev_map_lookup(netdev); + if (ft) { + LIBFCOE_TRANSPORT_DBG("transport %s already has existing " + "FCoE instance on %s.\n", + ft->name, netdev->name); + rc = -EEXIST; + goto out_putdev; + } + + ft = fcoe_transport_lookup(netdev); + if (!ft) { + LIBFCOE_TRANSPORT_DBG("no FCoE transport found for %s.\n", + netdev->name); + goto out_putdev; + } + + rc = fcoe_add_netdev_mapping(netdev, ft); + if (rc) { + LIBFCOE_TRANSPORT_DBG("failed to add new netdev mapping " + "for FCoE transport %s for %s.\n", + ft->name, netdev->name); + goto out_putdev; + } + + /* pass to transport create */ + rc = ft->create ? ft->create(netdev, fip_mode) : -ENODEV; + if (rc) + fcoe_del_netdev_mapping(netdev); + + LIBFCOE_TRANSPORT_DBG("transport %s %s to create fcoe on %s.\n", + ft->name, (rc) ? "failed" : "succeeded", + netdev->name); + +out_putdev: + dev_put(netdev); +out_nodev: + mutex_unlock(&ft_mutex); + if (rc == -ERESTARTSYS) + return restart_syscall(); + else + return rc; +} + +/** + * fcoe_transport_destroy() - Destroy a FCoE interface + * @buffer: The name of the Ethernet interface to be destroyed + * @kp: The associated kernel parameter + * + * Called from sysfs. This holds the ft_mutex while calling the + * registered fcoe transport's destroy function. + * + * Returns: 0 for success + */ +static int fcoe_transport_destroy(const char *buffer, struct kernel_param *kp) +{ + int rc = -ENODEV; + struct net_device *netdev = NULL; + struct fcoe_transport *ft = NULL; + + if (!mutex_trylock(&ft_mutex)) + return restart_syscall(); + +#ifdef CONFIG_LIBFCOE_MODULE + /* + * Make sure the module has been initialized, and is not about to be + * removed. Module parameter sysfs files are writable before the + * module_init function is called and after module_exit. + */ + if (THIS_MODULE->state != MODULE_STATE_LIVE) + goto out_nodev; +#endif + + netdev = fcoe_if_to_netdev(buffer); + if (!netdev) { + LIBFCOE_TRANSPORT_DBG("invalid device %s.\n", buffer); + goto out_nodev; + } + + ft = fcoe_netdev_map_lookup(netdev); + if (!ft) { + LIBFCOE_TRANSPORT_DBG("no FCoE transport found for %s.\n", + netdev->name); + goto out_putdev; + } + + /* pass to transport destroy */ + rc = ft->destroy ? ft->destroy(netdev) : -ENODEV; + fcoe_del_netdev_mapping(netdev); + LIBFCOE_TRANSPORT_DBG("transport %s %s to destroy fcoe on %s.\n", + ft->name, (rc) ? "failed" : "succeeded", + netdev->name); + +out_putdev: + dev_put(netdev); +out_nodev: + mutex_unlock(&ft_mutex); + + if (rc == -ERESTARTSYS) + return restart_syscall(); + else + return rc; +} + +/** + * fcoe_transport_disable() - Disables a FCoE interface + * @buffer: The name of the Ethernet interface to be disabled + * @kp: The associated kernel parameter + * + * Called from sysfs. 
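
All four parameter handlers in this file open and close with the same guard: try ft_mutex, and if it is contended, bounce the syscall instead of sleeping, which keeps a sysfs write from deadlocking against module load/unload paths that also hold the mutex. Reduced to a skeleton (fcoe_transport_op is an invented name; the transport dispatch is elided):

static int fcoe_transport_op(const char *buffer, struct kernel_param *kp)
{
	struct net_device *netdev;
	int rc = -ENODEV;

	if (!mutex_trylock(&ft_mutex))
		return restart_syscall();	/* the write() is retried transparently */

	netdev = fcoe_if_to_netdev(buffer);
	if (!netdev)
		goto out_nodev;

	/* ... look up the owning transport and call into it ... */

	dev_put(netdev);
out_nodev:
	mutex_unlock(&ft_mutex);
	if (rc == -ERESTARTSYS)
		return restart_syscall();
	return rc;
}
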
+ * + * Returns: 0 for success + */ +static int fcoe_transport_disable(const char *buffer, struct kernel_param *kp) +{ + int rc = -ENODEV; + struct net_device *netdev = NULL; + struct fcoe_transport *ft = NULL; + + if (!mutex_trylock(&ft_mutex)) + return restart_syscall(); + +#ifdef CONFIG_LIBFCOE_MODULE + /* + * Make sure the module has been initialized, and is not about to be + * removed. Module parameter sysfs files are writable before the + * module_init function is called and after module_exit. + */ + if (THIS_MODULE->state != MODULE_STATE_LIVE) + goto out_nodev; +#endif + + netdev = fcoe_if_to_netdev(buffer); + if (!netdev) + goto out_nodev; + + ft = fcoe_netdev_map_lookup(netdev); + if (!ft) + goto out_putdev; + + rc = ft->disable ? ft->disable(netdev) : -ENODEV; + +out_putdev: + dev_put(netdev); +out_nodev: + mutex_unlock(&ft_mutex); + + if (rc == -ERESTARTSYS) + return restart_syscall(); + else + return rc; +} + +/** + * fcoe_transport_enable() - Enables a FCoE interface + * @buffer: The name of the Ethernet interface to be enabled + * @kp: The associated kernel parameter + * + * Called from sysfs. + * + * Returns: 0 for success + */ +static int fcoe_transport_enable(const char *buffer, struct kernel_param *kp) +{ + int rc = -ENODEV; + struct net_device *netdev = NULL; + struct fcoe_transport *ft = NULL; + + if (!mutex_trylock(&ft_mutex)) + return restart_syscall(); + +#ifdef CONFIG_LIBFCOE_MODULE + /* + * Make sure the module has been initialized, and is not about to be + * removed. Module parameter sysfs files are writable before the + * module_init function is called and after module_exit. + */ + if (THIS_MODULE->state != MODULE_STATE_LIVE) + goto out_nodev; +#endif + + netdev = fcoe_if_to_netdev(buffer); + if (!netdev) + goto out_nodev; + + ft = fcoe_netdev_map_lookup(netdev); + if (!ft) + goto out_putdev; + + rc = ft->enable ? ft->enable(netdev) : -ENODEV; + +out_putdev: + dev_put(netdev); +out_nodev: + mutex_unlock(&ft_mutex); + if (rc == -ERESTARTSYS) + return restart_syscall(); + else + return rc; +} + +/** + * libfcoe_init() - Initialization routine for libfcoe.ko + */ +static int __init libfcoe_init(void) +{ + fcoe_transport_init(); + + return 0; +} +module_init(libfcoe_init); + +/** + * libfcoe_exit() - Tear down libfcoe.ko + */ +static void __exit libfcoe_exit(void) +{ + fcoe_transport_exit(); +} +module_exit(libfcoe_exit); diff --git a/drivers/scsi/fcoe/libfcoe.h b/drivers/scsi/fcoe/libfcoe.h new file mode 100644 index 000000000000..6af5fc3a17d8 --- /dev/null +++ b/drivers/scsi/fcoe/libfcoe.h @@ -0,0 +1,31 @@ +#ifndef _FCOE_LIBFCOE_H_ +#define _FCOE_LIBFCOE_H_ + +extern unsigned int libfcoe_debug_logging; +#define LIBFCOE_LOGGING 0x01 /* General logging, not categorized */ +#define LIBFCOE_FIP_LOGGING 0x02 /* FIP logging */ +#define LIBFCOE_TRANSPORT_LOGGING 0x04 /* FCoE transport logging */ + +#define LIBFCOE_CHECK_LOGGING(LEVEL, CMD) \ +do { \ + if (unlikely(libfcoe_debug_logging & LEVEL)) \ + do { \ + CMD; \ + } while (0); \ +} while (0) + +#define LIBFCOE_DBG(fmt, args...) \ + LIBFCOE_CHECK_LOGGING(LIBFCOE_LOGGING, \ + printk(KERN_INFO "libfcoe: " fmt, ##args);) + +#define LIBFCOE_FIP_DBG(fip, fmt, args...) \ + LIBFCOE_CHECK_LOGGING(LIBFCOE_FIP_LOGGING, \ + printk(KERN_INFO "host%d: fip: " fmt, \ + (fip)->lp->host->host_no, ##args);) + +#define LIBFCOE_TRANSPORT_DBG(fmt, args...) 
\ + LIBFCOE_CHECK_LOGGING(LIBFCOE_TRANSPORT_LOGGING, \ + printk(KERN_INFO "%s: " fmt, \ + __func__, ##args);) + +#endif /* _FCOE_LIBFCOE_H_ */ diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h index 92f185081e62..671cde9d4060 100644 --- a/drivers/scsi/fnic/fnic.h +++ b/drivers/scsi/fnic/fnic.h @@ -37,7 +37,7 @@ #define DRV_NAME "fnic" #define DRV_DESCRIPTION "Cisco FCoE HBA Driver" -#define DRV_VERSION "1.4.0.145" +#define DRV_VERSION "1.5.0.1" #define PFX DRV_NAME ": " #define DFX DRV_NAME "%d: " diff --git a/drivers/scsi/fnic/vnic_dev.c b/drivers/scsi/fnic/vnic_dev.c index db710148d156..b576be734e2e 100644 --- a/drivers/scsi/fnic/vnic_dev.c +++ b/drivers/scsi/fnic/vnic_dev.c @@ -654,7 +654,7 @@ void vnic_dev_unregister(struct vnic_dev *vdev) vdev->linkstatus_pa); if (vdev->stats) pci_free_consistent(vdev->pdev, - sizeof(struct vnic_dev), + sizeof(struct vnic_stats), vdev->stats, vdev->stats_pa); if (vdev->fw_info) pci_free_consistent(vdev->pdev, diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 12deffccb8da..415ad4fb50d4 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -74,6 +74,10 @@ static int hpsa_allow_any; module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(hpsa_allow_any, "Allow hpsa driver to access unknown HP Smart Array hardware"); +static int hpsa_simple_mode; +module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(hpsa_simple_mode, + "Use 'simple mode' rather than 'performant mode'"); /* define the PCI info for the cards we can control */ static const struct pci_device_id hpsa_pci_device_id[] = { @@ -85,11 +89,13 @@ static const struct pci_device_id hpsa_pci_device_id[] = { {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324a}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324b}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233}, - {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3250}, - {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3251}, - {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3252}, - {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3253}, - {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3254}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356}, {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0}, {0,} @@ -109,11 +115,13 @@ static struct board_type products[] = { {0x3249103C, "Smart Array P812", &SA5_access}, {0x324a103C, "Smart Array P712m", &SA5_access}, {0x324b103C, "Smart Array P711m", &SA5_access}, - {0x3250103C, "Smart Array", &SA5_access}, - {0x3250113C, "Smart Array", &SA5_access}, - {0x3250123C, "Smart Array", &SA5_access}, - {0x3250133C, "Smart Array", &SA5_access}, - {0x3250143C, "Smart Array", &SA5_access}, + {0x3350103C, "Smart Array", &SA5_access}, + {0x3351103C, "Smart Array", &SA5_access}, + {0x3352103C, "Smart Array", &SA5_access}, + {0x3353103C, "Smart Array", &SA5_access}, + {0x3354103C, "Smart Array", &SA5_access}, + {0x3355103C, "Smart Array", &SA5_access}, + {0x3356103C, "Smart Array", &SA5_access}, {0xFFFF103C, "Unknown Smart Array", &SA5_access}, }; @@ -147,17 +155,7 @@ static int 
hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd); static int hpsa_slave_alloc(struct scsi_device *sdev); static void hpsa_slave_destroy(struct scsi_device *sdev); -static ssize_t raid_level_show(struct device *dev, - struct device_attribute *attr, char *buf); -static ssize_t lunid_show(struct device *dev, - struct device_attribute *attr, char *buf); -static ssize_t unique_id_show(struct device *dev, - struct device_attribute *attr, char *buf); -static ssize_t host_show_firmware_revision(struct device *dev, - struct device_attribute *attr, char *buf); static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno); -static ssize_t host_store_rescan(struct device *dev, - struct device_attribute *attr, const char *buf, size_t count); static int check_for_unit_attention(struct ctlr_info *h, struct CommandList *c); static void check_ioctl_unit_attention(struct ctlr_info *h, @@ -173,47 +171,10 @@ static int __devinit hpsa_find_cfg_addrs(struct pci_dev *pdev, static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev, unsigned long *memory_bar); static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id); - -static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL); -static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL); -static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL); -static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan); -static DEVICE_ATTR(firmware_revision, S_IRUGO, - host_show_firmware_revision, NULL); - -static struct device_attribute *hpsa_sdev_attrs[] = { - &dev_attr_raid_level, - &dev_attr_lunid, - &dev_attr_unique_id, - NULL, -}; - -static struct device_attribute *hpsa_shost_attrs[] = { - &dev_attr_rescan, - &dev_attr_firmware_revision, - NULL, -}; - -static struct scsi_host_template hpsa_driver_template = { - .module = THIS_MODULE, - .name = "hpsa", - .proc_name = "hpsa", - .queuecommand = hpsa_scsi_queue_command, - .scan_start = hpsa_scan_start, - .scan_finished = hpsa_scan_finished, - .change_queue_depth = hpsa_change_queue_depth, - .this_id = -1, - .use_clustering = ENABLE_CLUSTERING, - .eh_device_reset_handler = hpsa_eh_device_reset_handler, - .ioctl = hpsa_ioctl, - .slave_alloc = hpsa_slave_alloc, - .slave_destroy = hpsa_slave_destroy, -#ifdef CONFIG_COMPAT - .compat_ioctl = hpsa_compat_ioctl, -#endif - .sdev_attrs = hpsa_sdev_attrs, - .shost_attrs = hpsa_shost_attrs, -}; +static int __devinit hpsa_wait_for_board_state(struct pci_dev *pdev, + void __iomem *vaddr, int wait_for_ready); +#define BOARD_NOT_READY 0 +#define BOARD_READY 1 static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev) { @@ -291,67 +252,63 @@ static ssize_t host_show_firmware_revision(struct device *dev, fwrev[0], fwrev[1], fwrev[2], fwrev[3]); } -/* Enqueuing and dequeuing functions for cmdlists. 
*/ -static inline void addQ(struct hlist_head *list, struct CommandList *c) +static ssize_t host_show_commands_outstanding(struct device *dev, + struct device_attribute *attr, char *buf) { - hlist_add_head(&c->list, list); + struct Scsi_Host *shost = class_to_shost(dev); + struct ctlr_info *h = shost_to_hba(shost); + + return snprintf(buf, 20, "%d\n", h->commands_outstanding); } -static inline u32 next_command(struct ctlr_info *h) +static ssize_t host_show_transport_mode(struct device *dev, + struct device_attribute *attr, char *buf) { - u32 a; - - if (unlikely(h->transMethod != CFGTBL_Trans_Performant)) - return h->access.command_completed(h); + struct ctlr_info *h; + struct Scsi_Host *shost = class_to_shost(dev); - if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) { - a = *(h->reply_pool_head); /* Next cmd in ring buffer */ - (h->reply_pool_head)++; - h->commands_outstanding--; - } else { - a = FIFO_EMPTY; - } - /* Check for wraparound */ - if (h->reply_pool_head == (h->reply_pool + h->max_commands)) { - h->reply_pool_head = h->reply_pool; - h->reply_pool_wraparound ^= 1; - } - return a; + h = shost_to_hba(shost); + return snprintf(buf, 20, "%s\n", + h->transMethod & CFGTBL_Trans_Performant ? + "performant" : "simple"); } -/* set_performant_mode: Modify the tag for cciss performant - * set bit 0 for pull model, bits 3-1 for block fetch - * register number - */ -static void set_performant_mode(struct ctlr_info *h, struct CommandList *c) -{ - if (likely(h->transMethod == CFGTBL_Trans_Performant)) - c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1); -} +/* List of controllers which cannot be reset on kexec with reset_devices */ +static u32 unresettable_controller[] = { + 0x324a103C, /* Smart Array P712m */ + 0x324b103C, /* SmartArray P711m */ + 0x3223103C, /* Smart Array P800 */ + 0x3234103C, /* Smart Array P400 */ + 0x3235103C, /* Smart Array P400i */ + 0x3211103C, /* Smart Array E200i */ + 0x3212103C, /* Smart Array E200 */ + 0x3213103C, /* Smart Array E200i */ + 0x3214103C, /* Smart Array E200i */ + 0x3215103C, /* Smart Array E200i */ + 0x3237103C, /* Smart Array E500 */ + 0x323D103C, /* Smart Array P700m */ + 0x409C0E11, /* Smart Array 6400 */ + 0x409D0E11, /* Smart Array 6400 EM */ +}; -static void enqueue_cmd_and_start_io(struct ctlr_info *h, - struct CommandList *c) +static int ctlr_is_resettable(struct ctlr_info *h) { - unsigned long flags; + int i; - set_performant_mode(h, c); - spin_lock_irqsave(&h->lock, flags); - addQ(&h->reqQ, c); - h->Qdepth++; - start_io(h); - spin_unlock_irqrestore(&h->lock, flags); + for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++) + if (unresettable_controller[i] == h->board_id) + return 0; + return 1; } -static inline void removeQ(struct CommandList *c) +static ssize_t host_show_resettable(struct device *dev, + struct device_attribute *attr, char *buf) { - if (WARN_ON(hlist_unhashed(&c->list))) - return; - hlist_del_init(&c->list); -} + struct ctlr_info *h; + struct Scsi_Host *shost = class_to_shost(dev); -static inline int is_hba_lunid(unsigned char scsi3addr[]) -{ - return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0; + h = shost_to_hba(shost); + return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h)); } static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[]) @@ -359,15 +316,6 @@ static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[]) return (scsi3addr[3] & 0xC0) == 0x40; } -static inline int is_scsi_rev_5(struct ctlr_info *h) -{ - if (!h->hba_inquiry_data) - return 0; - if 
((h->hba_inquiry_data[2] & 0x07) == 5) - return 1; - return 0; -} - static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG", "UNKNOWN" }; @@ -459,6 +407,129 @@ static ssize_t unique_id_show(struct device *dev, sn[12], sn[13], sn[14], sn[15]); } +static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL); +static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL); +static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL); +static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan); +static DEVICE_ATTR(firmware_revision, S_IRUGO, + host_show_firmware_revision, NULL); +static DEVICE_ATTR(commands_outstanding, S_IRUGO, + host_show_commands_outstanding, NULL); +static DEVICE_ATTR(transport_mode, S_IRUGO, + host_show_transport_mode, NULL); +static DEVICE_ATTR(resettable, S_IRUGO, + host_show_resettable, NULL); + +static struct device_attribute *hpsa_sdev_attrs[] = { + &dev_attr_raid_level, + &dev_attr_lunid, + &dev_attr_unique_id, + NULL, +}; + +static struct device_attribute *hpsa_shost_attrs[] = { + &dev_attr_rescan, + &dev_attr_firmware_revision, + &dev_attr_commands_outstanding, + &dev_attr_transport_mode, + &dev_attr_resettable, + NULL, +}; + +static struct scsi_host_template hpsa_driver_template = { + .module = THIS_MODULE, + .name = "hpsa", + .proc_name = "hpsa", + .queuecommand = hpsa_scsi_queue_command, + .scan_start = hpsa_scan_start, + .scan_finished = hpsa_scan_finished, + .change_queue_depth = hpsa_change_queue_depth, + .this_id = -1, + .use_clustering = ENABLE_CLUSTERING, + .eh_device_reset_handler = hpsa_eh_device_reset_handler, + .ioctl = hpsa_ioctl, + .slave_alloc = hpsa_slave_alloc, + .slave_destroy = hpsa_slave_destroy, +#ifdef CONFIG_COMPAT + .compat_ioctl = hpsa_compat_ioctl, +#endif + .sdev_attrs = hpsa_sdev_attrs, + .shost_attrs = hpsa_shost_attrs, +}; + + +/* Enqueuing and dequeuing functions for cmdlists. 
*/ +static inline void addQ(struct list_head *list, struct CommandList *c) +{ + list_add_tail(&c->list, list); +} + +static inline u32 next_command(struct ctlr_info *h) +{ + u32 a; + + if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant))) + return h->access.command_completed(h); + + if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) { + a = *(h->reply_pool_head); /* Next cmd in ring buffer */ + (h->reply_pool_head)++; + h->commands_outstanding--; + } else { + a = FIFO_EMPTY; + } + /* Check for wraparound */ + if (h->reply_pool_head == (h->reply_pool + h->max_commands)) { + h->reply_pool_head = h->reply_pool; + h->reply_pool_wraparound ^= 1; + } + return a; +} + +/* set_performant_mode: Modify the tag for cciss performant + * set bit 0 for pull model, bits 3-1 for block fetch + * register number + */ +static void set_performant_mode(struct ctlr_info *h, struct CommandList *c) +{ + if (likely(h->transMethod & CFGTBL_Trans_Performant)) + c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1); +} + +static void enqueue_cmd_and_start_io(struct ctlr_info *h, + struct CommandList *c) +{ + unsigned long flags; + + set_performant_mode(h, c); + spin_lock_irqsave(&h->lock, flags); + addQ(&h->reqQ, c); + h->Qdepth++; + start_io(h); + spin_unlock_irqrestore(&h->lock, flags); +} + +static inline void removeQ(struct CommandList *c) +{ + if (WARN_ON(list_empty(&c->list))) + return; + list_del_init(&c->list); +} + +static inline int is_hba_lunid(unsigned char scsi3addr[]) +{ + return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0; +} + +static inline int is_scsi_rev_5(struct ctlr_info *h) +{ + if (!h->hba_inquiry_data) + return 0; + if ((h->hba_inquiry_data[2] & 0x07) == 5) + return 1; + return 0; +} + static int hpsa_find_target_lun(struct ctlr_info *h, unsigned char scsi3addr[], int bus, int *target, int *lun) { @@ -1130,6 +1201,10 @@ static void complete_scsi_command(struct CommandList *cp, cmd->result = DID_TIME_OUT << 16; dev_warn(&h->pdev->dev, "cp %p timedout\n", cp); break; + case CMD_UNABORTABLE: + cmd->result = DID_ERROR << 16; + dev_warn(&h->pdev->dev, "Command unabortable\n"); + break; default: cmd->result = DID_ERROR << 16; dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n", @@ -1160,7 +1235,7 @@ static int hpsa_scsi_detect(struct ctlr_info *h) sh->sg_tablesize = h->maxsgentries; h->scsi_host = sh; sh->hostdata[0] = (unsigned long) h; - sh->irq = h->intr[PERF_MODE_INT]; + sh->irq = h->intr[h->intr_mode]; sh->unique_id = sh->irq; error = scsi_add_host(sh, &h->pdev->dev); if (error) @@ -1295,6 +1370,9 @@ static void hpsa_scsi_interpret_error(struct CommandList *cp) case CMD_TIMEOUT: dev_warn(d, "cp %p timed out\n", cp); break; + case CMD_UNABORTABLE: + dev_warn(d, "Command unabortable\n"); + break; default: dev_warn(d, "cp %p returned unknown status %x\n", cp, ei->CommandStatus); @@ -1595,6 +1673,8 @@ static int add_msa2xxx_enclosure_device(struct ctlr_info *h, if (lun == 0) /* if lun is 0, then obviously we have a lun 0. */ return 0; + memset(scsi3addr, 0, 8); + scsi3addr[3] = target; if (is_hba_lunid(scsi3addr)) return 0; /* Don't add the RAID controller here. 
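
The queue conversion above is more than a type swap: the old hlist-based addQ() pushed commands at the head, while the list_head version appends with list_add_tail(), so start_io() now drains reqQ in submission order. The idiom in isolation, assuming only <linux/list.h> (cmd_node is an invented stand-in for struct CommandList):

#include <linux/list.h>

struct cmd_node {
	struct list_head list;
	int tag;
};

static LIST_HEAD(reqQ);

static void enqueue(struct cmd_node *c)
{
	list_add_tail(&c->list, &reqQ);		/* FIFO: append at the tail */
}

static struct cmd_node *dequeue(void)
{
	struct cmd_node *c;

	if (list_empty(&reqQ))
		return NULL;
	c = list_entry(reqQ.next, struct cmd_node, list);
	list_del_init(&c->list);		/* node points at itself afterwards */
	return c;
}

Using list_del_init() rather than list_del() is what lets removeQ()'s WARN_ON(list_empty(&c->list)) catch a double removal: an already-removed node is self-linked and therefore reads as empty.
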
*/ @@ -1609,8 +1689,6 @@ static int add_msa2xxx_enclosure_device(struct ctlr_info *h, return 0; } - memset(scsi3addr, 0, 8); - scsi3addr[3] = target; if (hpsa_update_device_info(h, scsi3addr, this_device)) return 0; (*nmsa2xxx_enclosures)++; @@ -2199,7 +2277,7 @@ static struct CommandList *cmd_alloc(struct ctlr_info *h) c->cmdindex = i; - INIT_HLIST_NODE(&c->list); + INIT_LIST_HEAD(&c->list); c->busaddr = (u32) cmd_dma_handle; temp64.val = (u64) err_dma_handle; c->ErrDesc.Addr.lower = temp64.val32.lower; @@ -2237,7 +2315,7 @@ static struct CommandList *cmd_special_alloc(struct ctlr_info *h) } memset(c->err_info, 0, sizeof(*c->err_info)); - INIT_HLIST_NODE(&c->list); + INIT_LIST_HEAD(&c->list); c->busaddr = (u32) cmd_dma_handle; temp64.val = (u64) err_dma_handle; c->ErrDesc.Addr.lower = temp64.val32.lower; @@ -2267,7 +2345,7 @@ static void cmd_special_free(struct ctlr_info *h, struct CommandList *c) pci_free_consistent(h->pdev, sizeof(*c->err_info), c->err_info, (dma_addr_t) temp64.val); pci_free_consistent(h->pdev, sizeof(*c), - c, (dma_addr_t) c->busaddr); + c, (dma_addr_t) (c->busaddr & DIRECT_LOOKUP_MASK)); } #ifdef CONFIG_COMPAT @@ -2281,6 +2359,7 @@ static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg) int err; u32 cp; + memset(&arg64, 0, sizeof(arg64)); err = 0; err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, sizeof(arg64.LUN_info)); @@ -2317,6 +2396,7 @@ static int hpsa_ioctl32_big_passthru(struct scsi_device *dev, int err; u32 cp; + memset(&arg64, 0, sizeof(arg64)); err = 0; err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, sizeof(arg64.LUN_info)); @@ -2433,15 +2513,17 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp) buff = kmalloc(iocommand.buf_size, GFP_KERNEL); if (buff == NULL) return -EFAULT; - } - if (iocommand.Request.Type.Direction == XFER_WRITE) { - /* Copy the data into the buffer we created */ - if (copy_from_user(buff, iocommand.buf, iocommand.buf_size)) { - kfree(buff); - return -EFAULT; + if (iocommand.Request.Type.Direction == XFER_WRITE) { + /* Copy the data into the buffer we created */ + if (copy_from_user(buff, iocommand.buf, + iocommand.buf_size)) { + kfree(buff); + return -EFAULT; + } + } else { + memset(buff, 0, iocommand.buf_size); } - } else - memset(buff, 0, iocommand.buf_size); + } c = cmd_special_alloc(h); if (c == NULL) { kfree(buff); @@ -2487,8 +2569,8 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp) cmd_special_free(h, c); return -EFAULT; } - - if (iocommand.Request.Type.Direction == XFER_READ) { + if (iocommand.Request.Type.Direction == XFER_READ && + iocommand.buf_size > 0) { /* Copy the data out of the buffer we created */ if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) { kfree(buff); @@ -2581,14 +2663,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp) } c->cmd_type = CMD_IOCTL_PEND; c->Header.ReplyQueue = 0; - - if (ioc->buf_size > 0) { - c->Header.SGList = sg_used; - c->Header.SGTotal = sg_used; - } else { - c->Header.SGList = 0; - c->Header.SGTotal = 0; - } + c->Header.SGList = c->Header.SGTotal = sg_used; memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN)); c->Header.Tag.lower = c->busaddr; memcpy(&c->Request, &ioc->Request, sizeof(c->Request)); @@ -2605,7 +2680,8 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp) } } hpsa_scsi_do_simple_cmd_core(h, c); - hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL); + if (sg_used) + hpsa_pci_unmap(h->pdev, c, sg_used, 
PCI_DMA_BIDIRECTIONAL); check_ioctl_unit_attention(h, c); /* Copy the error information out */ memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info)); @@ -2614,7 +2690,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp) status = -EFAULT; goto cleanup1; } - if (ioc->Request.Type.Direction == XFER_READ) { + if (ioc->Request.Type.Direction == XFER_READ && ioc->buf_size > 0) { /* Copy the data out of the buffer we created */ BYTE __user *ptr = ioc->buf; for (i = 0; i < sg_used; i++) { @@ -2810,8 +2886,8 @@ static void start_io(struct ctlr_info *h) { struct CommandList *c; - while (!hlist_empty(&h->reqQ)) { - c = hlist_entry(h->reqQ.first, struct CommandList, list); + while (!list_empty(&h->reqQ)) { + c = list_entry(h->reqQ.next, struct CommandList, list); /* can't do anything if fifo is full */ if ((h->access.fifo_full(h))) { dev_warn(&h->pdev->dev, "fifo full\n"); @@ -2867,20 +2943,22 @@ static inline void finish_cmd(struct CommandList *c, u32 raw_tag) static inline u32 hpsa_tag_contains_index(u32 tag) { -#define DIRECT_LOOKUP_BIT 0x10 return tag & DIRECT_LOOKUP_BIT; } static inline u32 hpsa_tag_to_index(u32 tag) { -#define DIRECT_LOOKUP_SHIFT 5 return tag >> DIRECT_LOOKUP_SHIFT; } -static inline u32 hpsa_tag_discard_error_bits(u32 tag) + +static inline u32 hpsa_tag_discard_error_bits(struct ctlr_info *h, u32 tag) { -#define HPSA_ERROR_BITS 0x03 - return tag & ~HPSA_ERROR_BITS; +#define HPSA_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1) +#define HPSA_SIMPLE_ERROR_BITS 0x03 + if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant))) + return tag & ~HPSA_SIMPLE_ERROR_BITS; + return tag & ~HPSA_PERF_ERROR_BITS; } /* process completion of an indexed ("direct lookup") command */ @@ -2904,10 +2982,9 @@ static inline u32 process_nonindexed_cmd(struct ctlr_info *h, { u32 tag; struct CommandList *c = NULL; - struct hlist_node *tmp; - tag = hpsa_tag_discard_error_bits(raw_tag); - hlist_for_each_entry(c, tmp, &h->cmpQ, list) { + tag = hpsa_tag_discard_error_bits(h, raw_tag); + list_for_each_entry(c, &h->cmpQ, list) { if ((c->busaddr & 0xFFFFFFE0) == (tag & 0xFFFFFFE0)) { finish_cmd(c, raw_tag); return next_command(h); @@ -2957,7 +3034,10 @@ static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id) return IRQ_HANDLED; } -/* Send a message CDB to the firmware. */ +/* Send a message CDB to the firmware. Careful, this only works + * in simple mode, not performant mode due to the tag lookup. + * We only ever use this immediately after a controller reset. + */ static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode, unsigned char type) { @@ -3023,7 +3103,7 @@ static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode, for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) { tag = readl(vaddr + SA5_REPLY_PORT_OFFSET); - if (hpsa_tag_discard_error_bits(tag) == paddr32) + if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr32) break; msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS); } @@ -3055,38 +3135,6 @@ static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode, #define hpsa_soft_reset_controller(p) hpsa_message(p, 1, 0) #define hpsa_noop(p) hpsa_message(p, 3, 0) -static __devinit int hpsa_reset_msi(struct pci_dev *pdev) -{ -/* the #defines are stolen from drivers/pci/msi.h. 
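
The tag rework above reflects how hpsa packs state into the low bits of a command's bus address, which the controller echoes back on completion. In performant mode, bit 0 selects the pull model, bits 1-3 carry the block-fetch table entry (see set_performant_mode() above), bit 4 is DIRECT_LOOKUP_BIT, and the command index begins at DIRECT_LOOKUP_SHIFT, hence the two different error masks. A small worked decode, reusing the constants from the hpsa_cmd.h hunk below; the sample values are made up:

#include <stdint.h>
#include <stdio.h>

#define DIRECT_LOOKUP_SHIFT	5
#define DIRECT_LOOKUP_BIT	0x10
#define HPSA_PERF_ERROR_BITS	((1 << DIRECT_LOOKUP_SHIFT) - 1)	/* 0x1f */
#define HPSA_SIMPLE_ERROR_BITS	0x03

int main(void)
{
	/* performant mode: command index 42, flagged for direct lookup */
	uint32_t perf_tag = (42u << DIRECT_LOOKUP_SHIFT) | DIRECT_LOOKUP_BIT;
	/* simple mode: a bus address with a hardware error bit reported */
	uint32_t simple_tag = 0x1000u | 0x02u;

	printf("indexed=%d index=%u\n",
	       !!(perf_tag & DIRECT_LOOKUP_BIT),
	       perf_tag >> DIRECT_LOOKUP_SHIFT);	/* indexed=1 index=42 */
	printf("addr=0x%x\n",
	       simple_tag & ~HPSA_SIMPLE_ERROR_BITS);	/* addr=0x1000 */
	return 0;
}

This layout is also why cmd_special_free() above now masks with DIRECT_LOOKUP_MASK before handing c->busaddr to pci_free_consistent(): the low five bits are tag state, not address.
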
*/ -#define msi_control_reg(base) (base + PCI_MSI_FLAGS) -#define PCI_MSIX_FLAGS_ENABLE (1 << 15) - - int pos; - u16 control = 0; - - pos = pci_find_capability(pdev, PCI_CAP_ID_MSI); - if (pos) { - pci_read_config_word(pdev, msi_control_reg(pos), &control); - if (control & PCI_MSI_FLAGS_ENABLE) { - dev_info(&pdev->dev, "resetting MSI\n"); - pci_write_config_word(pdev, msi_control_reg(pos), - control & ~PCI_MSI_FLAGS_ENABLE); - } - } - - pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX); - if (pos) { - pci_read_config_word(pdev, msi_control_reg(pos), &control); - if (control & PCI_MSIX_FLAGS_ENABLE) { - dev_info(&pdev->dev, "resetting MSI-X\n"); - pci_write_config_word(pdev, msi_control_reg(pos), - control & ~PCI_MSIX_FLAGS_ENABLE); - } - } - - return 0; -} - static int hpsa_controller_hard_reset(struct pci_dev *pdev, void * __iomem vaddr, bool use_doorbell) { @@ -3142,17 +3190,17 @@ static int hpsa_controller_hard_reset(struct pci_dev *pdev, */ static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev) { - u16 saved_config_space[32]; u64 cfg_offset; u32 cfg_base_addr; u64 cfg_base_addr_index; void __iomem *vaddr; unsigned long paddr; u32 misc_fw_support, active_transport; - int rc, i; + int rc; struct CfgTable __iomem *cfgtable; bool use_doorbell; u32 board_id; + u16 command_register; /* For controllers as old as the P600, this is very nearly * the same thing as @@ -3162,14 +3210,6 @@ static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev) * pci_set_power_state(pci_dev, PCI_D0); * pci_restore_state(pci_dev); * - * but we can't use these nice canned kernel routines on - * kexec, because they also check the MSI/MSI-X state in PCI - * configuration space and do the wrong thing when it is - * set/cleared. Also, the pci_save/restore_state functions - * violate the ordering requirements for restoring the - * configuration space from the CCISS document (see the - * comment below). So we roll our own .... - * * For controllers newer than the P600, the pci power state * method of resetting doesn't work so we have another way * using the doorbell register. @@ -3182,13 +3222,21 @@ static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev) * likely not be happy. Just forbid resetting this conjoined mess. * The 640x isn't really supported by hpsa anyway. */ - hpsa_lookup_board_id(pdev, &board_id); + rc = hpsa_lookup_board_id(pdev, &board_id); + if (rc < 0) { + dev_warn(&pdev->dev, "Not resetting device.\n"); + return -ENODEV; + } if (board_id == 0x409C0E11 || board_id == 0x409D0E11) return -ENOTSUPP; - for (i = 0; i < 32; i++) - pci_read_config_word(pdev, 2*i, &saved_config_space[i]); - + /* Save the PCI command register */ + pci_read_config_word(pdev, 4, &command_register); + /* Turn the board off. This is so that later pci_restore_state() + * won't turn the board on before the rest of config space is ready. + */ + pci_disable_device(pdev); + pci_save_state(pdev); /* find the first memory BAR, so we can find the cfg table */ rc = hpsa_pci_find_memory_BAR(pdev, &paddr); @@ -3214,46 +3262,47 @@ static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev) misc_fw_support = readl(&cfgtable->misc_fw_support); use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET; - /* The doorbell reset seems to cause lockups on some Smart - * Arrays (e.g. P410, P410i, maybe others). Until this is - * fixed or at least isolated, avoid the doorbell reset. 
- */ - use_doorbell = 0; - rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell); if (rc) goto unmap_cfgtable; - /* Restore the PCI configuration space. The Open CISS - * Specification says, "Restore the PCI Configuration - * Registers, offsets 00h through 60h. It is important to - * restore the command register, 16-bits at offset 04h, - * last. Do not restore the configuration status register, - * 16-bits at offset 06h." Note that the offset is 2*i. - */ - for (i = 0; i < 32; i++) { - if (i == 2 || i == 3) - continue; - pci_write_config_word(pdev, 2*i, saved_config_space[i]); + pci_restore_state(pdev); + rc = pci_enable_device(pdev); + if (rc) { + dev_warn(&pdev->dev, "failed to enable device.\n"); + goto unmap_cfgtable; } - wmb(); - pci_write_config_word(pdev, 4, saved_config_space[2]); + pci_write_config_word(pdev, 4, command_register); /* Some devices (notably the HP Smart Array 5i Controller) need a little pause here */ msleep(HPSA_POST_RESET_PAUSE_MSECS); + /* Wait for board to become not ready, then ready. */ + dev_info(&pdev->dev, "Waiting for board to become ready.\n"); + rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_NOT_READY); + if (rc) + dev_warn(&pdev->dev, + "failed waiting for board to become not ready\n"); + rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY); + if (rc) { + dev_warn(&pdev->dev, + "failed waiting for board to become ready\n"); + goto unmap_cfgtable; + } + dev_info(&pdev->dev, "board ready.\n"); + /* Controller should be in simple mode at this point. If it's not, * It means we're on one of those controllers which doesn't support * the doorbell reset method and on which the PCI power management reset * method doesn't work (P800, for example.) - * In those cases, pretend the reset worked and hope for the best. + * In those cases, don't try to proceed, as it generally doesn't work. 
*/ active_transport = readl(&cfgtable->TransportActive); if (active_transport & PERFORMANT_MODE) { dev_warn(&pdev->dev, "Unable to successfully reset controller," - " proceeding anyway.\n"); - rc = -ENOTSUPP; + " Ignoring controller.\n"); + rc = -ENODEV; } unmap_cfgtable: @@ -3386,7 +3435,7 @@ static void __devinit hpsa_interrupt_mode(struct ctlr_info *h) default_int_mode: #endif /* CONFIG_PCI_MSI */ /* if we get here we're going to use the default interrupt mode */ - h->intr[PERF_MODE_INT] = h->pdev->irq; + h->intr[h->intr_mode] = h->pdev->irq; } static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id) @@ -3438,18 +3487,28 @@ static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev, return -ENODEV; } -static int __devinit hpsa_wait_for_board_ready(struct ctlr_info *h) +static int __devinit hpsa_wait_for_board_state(struct pci_dev *pdev, + void __iomem *vaddr, int wait_for_ready) { - int i; + int i, iterations; u32 scratchpad; + if (wait_for_ready) + iterations = HPSA_BOARD_READY_ITERATIONS; + else + iterations = HPSA_BOARD_NOT_READY_ITERATIONS; - for (i = 0; i < HPSA_BOARD_READY_ITERATIONS; i++) { - scratchpad = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET); - if (scratchpad == HPSA_FIRMWARE_READY) - return 0; + for (i = 0; i < iterations; i++) { + scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET); + if (wait_for_ready) { + if (scratchpad == HPSA_FIRMWARE_READY) + return 0; + } else { + if (scratchpad != HPSA_FIRMWARE_READY) + return 0; + } msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS); } - dev_warn(&h->pdev->dev, "board not ready, timed out.\n"); + dev_warn(&pdev->dev, "board not ready, timed out.\n"); return -ENODEV; } @@ -3497,6 +3556,11 @@ static int __devinit hpsa_find_cfgtables(struct ctlr_info *h) static void __devinit hpsa_get_max_perf_mode_cmds(struct ctlr_info *h) { h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands)); + + /* Limit commands in memory limited kdump scenario. */ + if (reset_devices && h->max_commands > 32) + h->max_commands = 32; + if (h->max_commands < 16) { dev_warn(&h->pdev->dev, "Controller reports " "max supported commands of %d, an obvious lie. " @@ -3571,16 +3635,21 @@ static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h) static void __devinit hpsa_wait_for_mode_change_ack(struct ctlr_info *h) { int i; + u32 doorbell_value; + unsigned long flags; /* under certain very rare conditions, this can take awhile. * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right * as we enter this code.) */ for (i = 0; i < MAX_CONFIG_WAIT; i++) { - if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq)) + spin_lock_irqsave(&h->lock, flags); + doorbell_value = readl(h->vaddr + SA5_DOORBELL); + spin_unlock_irqrestore(&h->lock, flags); + if (!(doorbell_value & CFGTBL_ChangeReq)) break; /* delay and try again */ - msleep(10); + usleep_range(10000, 20000); } } @@ -3603,6 +3672,7 @@ static int __devinit hpsa_enter_simple_mode(struct ctlr_info *h) "unable to get board into simple mode\n"); return -ENODEV; } + h->transMethod = CFGTBL_Trans_Simple; return 0; } @@ -3641,7 +3711,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h) err = -ENOMEM; goto err_out_free_res; } - err = hpsa_wait_for_board_ready(h); + err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY); if (err) goto err_out_free_res; err = hpsa_find_cfgtables(h); @@ -3710,8 +3780,6 @@ static __devinit int hpsa_init_reset_devices(struct pci_dev *pdev) return 0; /* just try to do the kdump anyhow. 
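
hpsa_wait_for_board_state() above generalizes the old ready-wait so the reset path can first confirm the firmware actually went down (the scratchpad register leaving HPSA_FIRMWARE_READY) before waiting for it to come back. The polling shape, stripped of the hpsa register names; the register, magic value, and bounds are parameters in this sketch, with the real ones coming from hpsa.h:

/* poll a status register until it equals (or stops equalling) a value */
static int wait_for_state(void __iomem *reg, u32 ready_val,
			  bool wait_for_ready, int iterations,
			  unsigned int interval_ms)
{
	int i;

	for (i = 0; i < iterations; i++) {
		u32 v = readl(reg);

		if (wait_for_ready ? (v == ready_val) : (v != ready_val))
			return 0;
		msleep(interval_ms);
	}
	return -ENODEV;	/* timed out */
}

The "not ready" wait is bounded by the new HPSA_BOARD_NOT_READY_ITERATIONS (10 seconds at the 100ms poll interval, per the hpsa.h hunk below), versus 120 seconds for the ready wait.
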
*/ if (rc) return -ENODEV; - if (hpsa_reset_msi(pdev)) - return -ENODEV; /* Now try to get the controller to respond to a no-op */ for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) { @@ -3749,8 +3817,11 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev, h->pdev = pdev; h->busy_initializing = 1; - INIT_HLIST_HEAD(&h->cmpQ); - INIT_HLIST_HEAD(&h->reqQ); + h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT; + INIT_LIST_HEAD(&h->cmpQ); + INIT_LIST_HEAD(&h->reqQ); + spin_lock_init(&h->lock); + spin_lock_init(&h->scan_lock); rc = hpsa_pci_init(h); if (rc != 0) goto clean1; @@ -3777,20 +3848,20 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev, h->access.set_intr_mask(h, HPSA_INTR_OFF); if (h->msix_vector || h->msi_vector) - rc = request_irq(h->intr[PERF_MODE_INT], do_hpsa_intr_msi, + rc = request_irq(h->intr[h->intr_mode], do_hpsa_intr_msi, IRQF_DISABLED, h->devname, h); else - rc = request_irq(h->intr[PERF_MODE_INT], do_hpsa_intr_intx, + rc = request_irq(h->intr[h->intr_mode], do_hpsa_intr_intx, IRQF_DISABLED, h->devname, h); if (rc) { dev_err(&pdev->dev, "unable to get irq %d for %s\n", - h->intr[PERF_MODE_INT], h->devname); + h->intr[h->intr_mode], h->devname); goto clean2; } dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n", h->devname, pdev->device, - h->intr[PERF_MODE_INT], dac ? "" : " not"); + h->intr[h->intr_mode], dac ? "" : " not"); h->cmd_pool_bits = kmalloc(((h->nr_cmds + BITS_PER_LONG - @@ -3810,8 +3881,6 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev, } if (hpsa_allocate_sg_chain_blocks(h)) goto clean4; - spin_lock_init(&h->lock); - spin_lock_init(&h->scan_lock); init_waitqueue_head(&h->scan_wait_queue); h->scan_finished = 1; /* no scan currently in progress */ @@ -3843,7 +3912,7 @@ clean4: h->nr_cmds * sizeof(struct ErrorInfo), h->errinfo_pool, h->errinfo_pool_dhandle); - free_irq(h->intr[PERF_MODE_INT], h); + free_irq(h->intr[h->intr_mode], h); clean2: clean1: h->busy_initializing = 0; @@ -3887,7 +3956,7 @@ static void hpsa_shutdown(struct pci_dev *pdev) */ hpsa_flush_cache(h); h->access.set_intr_mask(h, HPSA_INTR_OFF); - free_irq(h->intr[PERF_MODE_INT], h); + free_irq(h->intr[h->intr_mode], h); #ifdef CONFIG_PCI_MSI if (h->msix_vector) pci_disable_msix(h->pdev); @@ -3989,7 +4058,8 @@ static void calc_bucket_map(int bucket[], int num_buckets, } } -static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h) +static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h, + u32 use_short_tags) { int i; unsigned long register_value; @@ -4037,7 +4107,7 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h) writel(0, &h->transtable->RepQCtrAddrHigh32); writel(h->reply_pool_dhandle, &h->transtable->RepQAddr0Low32); writel(0, &h->transtable->RepQAddr0High32); - writel(CFGTBL_Trans_Performant, + writel(CFGTBL_Trans_Performant | use_short_tags, &(h->cfgtable->HostWrite.TransportRequest)); writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); hpsa_wait_for_mode_change_ack(h); @@ -4047,12 +4117,18 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h) " performant mode\n"); return; } + /* Change the access methods to the performant access methods */ + h->access = SA5_performant_access; + h->transMethod = CFGTBL_Trans_Performant; } static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h) { u32 trans_support; + if (hpsa_simple_mode) + return; + trans_support = readl(&(h->cfgtable->TransportSupport)); if (!(trans_support & PERFORMANT_MODE)) return; @@ -4072,11 +4148,8 @@ 
static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h) || (h->blockFetchTable == NULL)) goto clean_up; - hpsa_enter_performant_mode(h); - - /* Change the access methods to the performant access methods */ - h->access = SA5_performant_access; - h->transMethod = CFGTBL_Trans_Performant; + hpsa_enter_performant_mode(h, + trans_support & CFGTBL_Trans_use_short_tags); return; diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h index 19586e189f0f..621a1530054a 100644 --- a/drivers/scsi/hpsa.h +++ b/drivers/scsi/hpsa.h @@ -72,11 +72,12 @@ struct ctlr_info { unsigned int intr[4]; unsigned int msix_vector; unsigned int msi_vector; + int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */ struct access_method access; /* queue and queue Info */ - struct hlist_head reqQ; - struct hlist_head cmpQ; + struct list_head reqQ; + struct list_head cmpQ; unsigned int Qdepth; unsigned int maxQsinceinit; unsigned int maxSG; @@ -154,12 +155,16 @@ struct ctlr_info { * HPSA_BOARD_READY_ITERATIONS are derived from those. */ #define HPSA_BOARD_READY_WAIT_SECS (120) +#define HPSA_BOARD_NOT_READY_WAIT_SECS (10) #define HPSA_BOARD_READY_POLL_INTERVAL_MSECS (100) #define HPSA_BOARD_READY_POLL_INTERVAL \ ((HPSA_BOARD_READY_POLL_INTERVAL_MSECS * HZ) / 1000) #define HPSA_BOARD_READY_ITERATIONS \ ((HPSA_BOARD_READY_WAIT_SECS * 1000) / \ HPSA_BOARD_READY_POLL_INTERVAL_MSECS) +#define HPSA_BOARD_NOT_READY_ITERATIONS \ + ((HPSA_BOARD_NOT_READY_WAIT_SECS * 1000) / \ + HPSA_BOARD_READY_POLL_INTERVAL_MSECS) #define HPSA_POST_RESET_PAUSE_MSECS (3000) #define HPSA_POST_RESET_NOOP_RETRIES (12) diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h index f5c4c3cc0530..18464900e761 100644 --- a/drivers/scsi/hpsa_cmd.h +++ b/drivers/scsi/hpsa_cmd.h @@ -104,6 +104,7 @@ #define CFGTBL_Trans_Simple 0x00000002l #define CFGTBL_Trans_Performant 0x00000004l +#define CFGTBL_Trans_use_short_tags 0x20000000l #define CFGTBL_BusType_Ultra2 0x00000001l #define CFGTBL_BusType_Ultra3 0x00000002l @@ -265,6 +266,7 @@ struct ErrorInfo { #define DIRECT_LOOKUP_SHIFT 5 #define DIRECT_LOOKUP_BIT 0x10 +#define DIRECT_LOOKUP_MASK (~((1 << DIRECT_LOOKUP_SHIFT) - 1)) #define HPSA_ERROR_BIT 0x02 struct ctlr_info; /* defined in hpsa.h */ @@ -291,7 +293,7 @@ struct CommandList { struct ctlr_info *h; int cmd_type; long cmdindex; - struct hlist_node list; + struct list_head list; struct request *rq; struct completion *waiting; void *scsi_cmd; diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index d841e98a8bd5..0621238fac4a 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -1301,7 +1301,7 @@ static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg, ipr_clear_res_target(res); list_move_tail(&res->queue, &ioa_cfg->free_res_q); } - } else if (!res->sdev) { + } else if (!res->sdev || res->del_from_ml) { res->add_to_ml = 1; if (ioa_cfg->allow_ml_add_del) schedule_work(&ioa_cfg->work_q); @@ -3104,7 +3104,10 @@ restart: did_work = 1; sdev = res->sdev; if (!scsi_device_get(sdev)) { - list_move_tail(&res->queue, &ioa_cfg->free_res_q); + if (!res->add_to_ml) + list_move_tail(&res->queue, &ioa_cfg->free_res_q); + else + res->del_from_ml = 0; spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); scsi_remove_device(sdev); scsi_device_put(sdev); @@ -8864,7 +8867,7 @@ static void __ipr_remove(struct pci_dev *pdev) spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); - flush_scheduled_work(); + flush_work_sync(&ioa_cfg->work_q); 
spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); spin_lock(&ipr_driver_lock); diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index fec47de72535..a860452a8f71 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c @@ -608,54 +608,12 @@ static void iscsi_sw_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag) iscsi_sw_tcp_release_conn(conn); } -static int iscsi_sw_tcp_get_addr(struct iscsi_conn *conn, struct socket *sock, - char *buf, int *port, - int (*getname)(struct socket *, - struct sockaddr *, - int *addrlen)) -{ - struct sockaddr_storage *addr; - struct sockaddr_in6 *sin6; - struct sockaddr_in *sin; - int rc = 0, len; - - addr = kmalloc(sizeof(*addr), GFP_KERNEL); - if (!addr) - return -ENOMEM; - - if (getname(sock, (struct sockaddr *) addr, &len)) { - rc = -ENODEV; - goto free_addr; - } - - switch (addr->ss_family) { - case AF_INET: - sin = (struct sockaddr_in *)addr; - spin_lock_bh(&conn->session->lock); - sprintf(buf, "%pI4", &sin->sin_addr.s_addr); - *port = be16_to_cpu(sin->sin_port); - spin_unlock_bh(&conn->session->lock); - break; - case AF_INET6: - sin6 = (struct sockaddr_in6 *)addr; - spin_lock_bh(&conn->session->lock); - sprintf(buf, "%pI6", &sin6->sin6_addr); - *port = be16_to_cpu(sin6->sin6_port); - spin_unlock_bh(&conn->session->lock); - break; - } -free_addr: - kfree(addr); - return rc; -} - static int iscsi_sw_tcp_conn_bind(struct iscsi_cls_session *cls_session, struct iscsi_cls_conn *cls_conn, uint64_t transport_eph, int is_leading) { - struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); - struct iscsi_host *ihost = shost_priv(shost); + struct iscsi_session *session = cls_session->dd_data; struct iscsi_conn *conn = cls_conn->dd_data; struct iscsi_tcp_conn *tcp_conn = conn->dd_data; struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data; @@ -670,27 +628,15 @@ iscsi_sw_tcp_conn_bind(struct iscsi_cls_session *cls_session, "sockfd_lookup failed %d\n", err); return -EEXIST; } - /* - * copy these values now because if we drop the session - * userspace may still want to query the values since we will - * be using them for the reconnect - */ - err = iscsi_sw_tcp_get_addr(conn, sock, conn->portal_address, - &conn->portal_port, kernel_getpeername); - if (err) - goto free_socket; - - err = iscsi_sw_tcp_get_addr(conn, sock, ihost->local_address, - &ihost->local_port, kernel_getsockname); - if (err) - goto free_socket; err = iscsi_conn_bind(cls_session, cls_conn, is_leading); if (err) goto free_socket; + spin_lock_bh(&session->lock); /* bind iSCSI connection and socket */ tcp_sw_conn->sock = sock; + spin_unlock_bh(&session->lock); /* setup Socket parameters */ sk = sock->sk; @@ -752,24 +698,74 @@ static int iscsi_sw_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param, char *buf) { struct iscsi_conn *conn = cls_conn->dd_data; - int len; + struct iscsi_tcp_conn *tcp_conn = conn->dd_data; + struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data; + struct sockaddr_in6 addr; + int rc, len; switch(param) { case ISCSI_PARAM_CONN_PORT: - spin_lock_bh(&conn->session->lock); - len = sprintf(buf, "%hu\n", conn->portal_port); - spin_unlock_bh(&conn->session->lock); - break; case ISCSI_PARAM_CONN_ADDRESS: spin_lock_bh(&conn->session->lock); - len = sprintf(buf, "%s\n", conn->portal_address); + if (!tcp_sw_conn || !tcp_sw_conn->sock) { + spin_unlock_bh(&conn->session->lock); + return -ENOTCONN; + } + rc = kernel_getpeername(tcp_sw_conn->sock, + (struct sockaddr *)&addr, &len); 
spin_unlock_bh(&conn->session->lock); - break; + if (rc) + return rc; + + return iscsi_conn_get_addr_param((struct sockaddr_storage *) + &addr, param, buf); default: return iscsi_conn_get_param(cls_conn, param, buf); } - return len; + return 0; +} + +static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost, + enum iscsi_host_param param, char *buf) +{ + struct iscsi_sw_tcp_host *tcp_sw_host = iscsi_host_priv(shost); + struct iscsi_session *session = tcp_sw_host->session; + struct iscsi_conn *conn; + struct iscsi_tcp_conn *tcp_conn; + struct iscsi_sw_tcp_conn *tcp_sw_conn; + struct sockaddr_in6 addr; + int rc, len; + + switch (param) { + case ISCSI_HOST_PARAM_IPADDRESS: + spin_lock_bh(&session->lock); + conn = session->leadconn; + if (!conn) { + spin_unlock_bh(&session->lock); + return -ENOTCONN; + } + tcp_conn = conn->dd_data; + + tcp_sw_conn = tcp_conn->dd_data; + if (!tcp_sw_conn->sock) { + spin_unlock_bh(&session->lock); + return -ENOTCONN; + } + + rc = kernel_getsockname(tcp_sw_conn->sock, + (struct sockaddr *)&addr, &len); + spin_unlock_bh(&session->lock); + if (rc) + return rc; + + return iscsi_conn_get_addr_param((struct sockaddr_storage *) + &addr, param, buf); + default: + return iscsi_host_get_param(shost, param, buf); + } + + return 0; } static void @@ -797,6 +793,7 @@ iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max, { struct iscsi_cls_session *cls_session; struct iscsi_session *session; + struct iscsi_sw_tcp_host *tcp_sw_host; struct Scsi_Host *shost; if (ep) { @@ -804,7 +801,8 @@ iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max, return NULL; } - shost = iscsi_host_alloc(&iscsi_sw_tcp_sht, 0, 1); + shost = iscsi_host_alloc(&iscsi_sw_tcp_sht, + sizeof(struct iscsi_sw_tcp_host), 1); if (!shost) return NULL; shost->transportt = iscsi_sw_tcp_scsi_transport; @@ -825,6 +823,8 @@ iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max, if (!cls_session) goto remove_host; session = cls_session->dd_data; + tcp_sw_host = iscsi_host_priv(shost); + tcp_sw_host->session = session; shost->can_queue = session->scsi_cmds_max; if (iscsi_tcp_r2tpool_alloc(session)) @@ -929,7 +929,7 @@ static struct iscsi_transport iscsi_sw_tcp_transport = { .start_conn = iscsi_conn_start, .stop_conn = iscsi_sw_tcp_conn_stop, /* iscsi host params */ - .get_host_param = iscsi_host_get_param, + .get_host_param = iscsi_sw_tcp_host_get_param, .set_host_param = iscsi_host_set_param, /* IO */ .send_pdu = iscsi_conn_send_pdu, diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h index 94644bad0ed7..666fe09378fa 100644 --- a/drivers/scsi/iscsi_tcp.h +++ b/drivers/scsi/iscsi_tcp.h @@ -55,6 +55,10 @@ struct iscsi_sw_tcp_conn { ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int); }; +struct iscsi_sw_tcp_host { + struct iscsi_session *session; +}; + struct iscsi_sw_tcp_hdrbuf { struct iscsi_hdr hdrbuf; char hdrextbuf[ISCSI_MAX_AHS_SIZE + diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index d21367d3305f..28231badd9e6 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -38,7 +38,7 @@ u16 fc_cpu_mask; /* cpu mask for possible cpus */ EXPORT_SYMBOL(fc_cpu_mask); static u16 fc_cpu_order; /* 2's power to represent total possible cpus */ static struct kmem_cache *fc_em_cachep; /* cache for exchanges */ -struct workqueue_struct *fc_exch_workqueue; +static struct workqueue_struct *fc_exch_workqueue; /* * Structure and function definitions for managing Fibre Channel Exchanges @@ 
-558,6 +558,22 @@ static struct fc_seq *fc_seq_start_next(struct fc_seq *sp) return sp; } +/* + * Set the response handler for the exchange associated with a sequence. + */ +static void fc_seq_set_resp(struct fc_seq *sp, + void (*resp)(struct fc_seq *, struct fc_frame *, + void *), + void *arg) +{ + struct fc_exch *ep = fc_seq_exch(sp); + + spin_lock_bh(&ep->ex_lock); + ep->resp = resp; + ep->arg = arg; + spin_unlock_bh(&ep->ex_lock); +} + /** * fc_seq_exch_abort() - Abort an exchange and sequence * @req_sp: The sequence to be aborted @@ -650,13 +666,10 @@ static void fc_exch_timeout(struct work_struct *work) if (e_stat & ESB_ST_ABNORMAL) rc = fc_exch_done_locked(ep); spin_unlock_bh(&ep->ex_lock); + if (!rc) + fc_exch_delete(ep); if (resp) resp(sp, ERR_PTR(-FC_EX_TIMEOUT), arg); - if (!rc) { - /* delete the exchange if it's already being aborted */ - fc_exch_delete(ep); - return; - } fc_seq_exch_abort(sp, 2 * ep->r_a_tov); goto done; } @@ -1266,6 +1279,8 @@ free: * @fp: The request frame * * On success, the sequence pointer will be returned and also in fr_seq(@fp). + * A reference will be held on the exchange/sequence for the caller, which + * must call fc_seq_release(). */ static struct fc_seq *fc_seq_assign(struct fc_lport *lport, struct fc_frame *fp) { @@ -1283,6 +1298,15 @@ static struct fc_seq *fc_seq_assign(struct fc_lport *lport, struct fc_frame *fp) } /** + * fc_seq_release() - Release the hold + * @sp: The sequence. + */ +static void fc_seq_release(struct fc_seq *sp) +{ + fc_exch_release(fc_seq_exch(sp)); +} + +/** * fc_exch_recv_req() - Handler for an incoming request * @lport: The local port that received the request * @mp: The EM that the exchange is on @@ -2151,6 +2175,7 @@ err: fc_exch_mgr_del(ema); return -ENOMEM; } +EXPORT_SYMBOL(fc_exch_mgr_list_clone); /** * fc_exch_mgr_alloc() - Allocate an exchange manager @@ -2254,16 +2279,45 @@ void fc_exch_mgr_free(struct fc_lport *lport) EXPORT_SYMBOL(fc_exch_mgr_free); /** + * fc_find_ema() - Lookup and return appropriate Exchange Manager Anchor depending + * upon 'xid'. + * @f_ctl: f_ctl + * @lport: The local port the frame was received on + * @fh: The received frame header + */ +static struct fc_exch_mgr_anchor *fc_find_ema(u32 f_ctl, + struct fc_lport *lport, + struct fc_frame_header *fh) +{ + struct fc_exch_mgr_anchor *ema; + u16 xid; + + if (f_ctl & FC_FC_EX_CTX) + xid = ntohs(fh->fh_ox_id); + else { + xid = ntohs(fh->fh_rx_id); + if (xid == FC_XID_UNKNOWN) + return list_entry(lport->ema_list.prev, + typeof(*ema), ema_list); + } + + list_for_each_entry(ema, &lport->ema_list, ema_list) { + if ((xid >= ema->mp->min_xid) && + (xid <= ema->mp->max_xid)) + return ema; + } + return NULL; +} +/** * fc_exch_recv() - Handler for received frames * @lport: The local port the frame was received on - * @fp: The received frame + * @fp: The received frame */ void fc_exch_recv(struct fc_lport *lport, struct fc_frame *fp) { struct fc_frame_header *fh = fc_frame_header_get(fp); struct fc_exch_mgr_anchor *ema; - u32 f_ctl, found = 0; - u16 oxid; + u32 f_ctl; /* lport lock ? 
*/ if (!lport || lport->state == LPORT_ST_DISABLED) { @@ -2274,24 +2328,17 @@ void fc_exch_recv(struct fc_lport *lport, struct fc_frame *fp) } f_ctl = ntoh24(fh->fh_f_ctl); - oxid = ntohs(fh->fh_ox_id); - if (f_ctl & FC_FC_EX_CTX) { - list_for_each_entry(ema, &lport->ema_list, ema_list) { - if ((oxid >= ema->mp->min_xid) && - (oxid <= ema->mp->max_xid)) { - found = 1; - break; - } - } - - if (!found) { - FC_LPORT_DBG(lport, "Received response for out " - "of range oxid:%hx\n", oxid); - fc_frame_free(fp); - return; - } - } else - ema = list_entry(lport->ema_list.prev, typeof(*ema), ema_list); + ema = fc_find_ema(f_ctl, lport, fh); + if (!ema) { + FC_LPORT_DBG(lport, "Unable to find Exchange Manager Anchor," + "fc_ctl <0x%x>, xid <0x%x>\n", + f_ctl, + (f_ctl & FC_FC_EX_CTX) ? + ntohs(fh->fh_ox_id) : + ntohs(fh->fh_rx_id)); + fc_frame_free(fp); + return; + } /* * If frame is marked invalid, just drop it. @@ -2329,6 +2376,9 @@ int fc_exch_init(struct fc_lport *lport) if (!lport->tt.seq_start_next) lport->tt.seq_start_next = fc_seq_start_next; + if (!lport->tt.seq_set_resp) + lport->tt.seq_set_resp = fc_seq_set_resp; + if (!lport->tt.exch_seq_send) lport->tt.exch_seq_send = fc_exch_seq_send; @@ -2350,6 +2400,9 @@ int fc_exch_init(struct fc_lport *lport) if (!lport->tt.seq_assign) lport->tt.seq_assign = fc_seq_assign; + if (!lport->tt.seq_release) + lport->tt.seq_release = fc_seq_release; + return 0; } EXPORT_SYMBOL(fc_exch_init); @@ -2357,7 +2410,7 @@ EXPORT_SYMBOL(fc_exch_init); /** * fc_setup_exch_mgr() - Setup an exchange manager */ -int fc_setup_exch_mgr() +int fc_setup_exch_mgr(void) { fc_em_cachep = kmem_cache_create("libfc_em", sizeof(struct fc_exch), 0, SLAB_HWCACHE_ALIGN, NULL); @@ -2395,7 +2448,7 @@ int fc_setup_exch_mgr() /** * fc_destroy_exch_mgr() - Destroy an exchange manager */ -void fc_destroy_exch_mgr() +void fc_destroy_exch_mgr(void) { destroy_workqueue(fc_exch_workqueue); kmem_cache_destroy(fc_em_cachep); diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index 5962d1a5a674..b1b03af158bf 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c @@ -42,7 +42,7 @@ #include "fc_libfc.h" -struct kmem_cache *scsi_pkt_cachep; +static struct kmem_cache *scsi_pkt_cachep; /* SRB state definitions */ #define FC_SRB_FREE 0 /* cmd is free */ @@ -155,6 +155,7 @@ static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lport, gfp_t gfp) if (fsp) { memset(fsp, 0, sizeof(*fsp)); fsp->lp = lport; + fsp->xfer_ddp = FC_XID_UNKNOWN; atomic_set(&fsp->ref_cnt, 1); init_timer(&fsp->timer); INIT_LIST_HEAD(&fsp->list); @@ -1201,6 +1202,7 @@ unlock: static int fc_fcp_pkt_abort(struct fc_fcp_pkt *fsp) { int rc = FAILED; + unsigned long ticks_left; if (fc_fcp_send_abort(fsp)) return FAILED; @@ -1209,13 +1211,13 @@ static int fc_fcp_pkt_abort(struct fc_fcp_pkt *fsp) fsp->wait_for_comp = 1; spin_unlock_bh(&fsp->scsi_pkt_lock); - rc = wait_for_completion_timeout(&fsp->tm_done, FC_SCSI_TM_TOV); + ticks_left = wait_for_completion_timeout(&fsp->tm_done, + FC_SCSI_TM_TOV); spin_lock_bh(&fsp->scsi_pkt_lock); fsp->wait_for_comp = 0; - if (!rc) { + if (!ticks_left) { FC_FCP_DBG(fsp, "target abort cmd failed\n"); - rc = FAILED; } else if (fsp->state & FC_SRB_ABORTED) { FC_FCP_DBG(fsp, "target abort cmd passed\n"); rc = SUCCESS; @@ -1321,7 +1323,7 @@ static void fc_tm_done(struct fc_seq *seq, struct fc_frame *fp, void *arg) * * scsi-eh will escalate for when either happens. 
*/ - goto out; + return; } if (fc_fcp_lock_pkt(fsp)) @@ -1787,15 +1789,14 @@ static inline int fc_fcp_lport_queue_ready(struct fc_lport *lport) /** * fc_queuecommand() - The queuecommand function of the SCSI template + * @shost: The Scsi_Host that the command was issued to * @cmd: The scsi_cmnd to be executed - * @done: The callback function to be called when the scsi_cmnd is complete * - * This is the i/o strategy routine, called by the SCSI layer. This routine - * is called with the host_lock held. + * This is the i/o strategy routine, called by the SCSI layer. */ -static int fc_queuecommand_lck(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *)) +int fc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc_cmd) { - struct fc_lport *lport; + struct fc_lport *lport = shost_priv(shost); struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); struct fc_fcp_pkt *fsp; struct fc_rport_libfc_priv *rpriv; @@ -1803,15 +1804,12 @@ static int fc_queuecommand_lck(struct scsi_cmnd *sc_cmd, void (*done)(struct scs int rc = 0; struct fcoe_dev_stats *stats; - lport = shost_priv(sc_cmd->device->host); - rval = fc_remote_port_chkready(rport); if (rval) { sc_cmd->result = rval; - done(sc_cmd); + sc_cmd->scsi_done(sc_cmd); return 0; } - spin_unlock_irq(lport->host->host_lock); if (!*(struct fc_remote_port **)rport->dd_data) { /* @@ -1819,7 +1817,7 @@ static int fc_queuecommand_lck(struct scsi_cmnd *sc_cmd, void (*done)(struct scs * online */ sc_cmd->result = DID_IMM_RETRY << 16; - done(sc_cmd); + sc_cmd->scsi_done(sc_cmd); goto out; } @@ -1842,10 +1840,7 @@ static int fc_queuecommand_lck(struct scsi_cmnd *sc_cmd, void (*done)(struct scs * build the libfc request pkt */ fsp->cmd = sc_cmd; /* save the cmd */ - fsp->lp = lport; /* save the softc ptr */ fsp->rport = rport; /* set the remote port ptr */ - fsp->xfer_ddp = FC_XID_UNKNOWN; - sc_cmd->scsi_done = done; /* * set up the transfer length @@ -1886,11 +1881,8 @@ static int fc_queuecommand_lck(struct scsi_cmnd *sc_cmd, void (*done)(struct scs rc = SCSI_MLQUEUE_HOST_BUSY; } out: - spin_lock_irq(lport->host->host_lock); return rc; } - -DEF_SCSI_QCMD(fc_queuecommand) EXPORT_SYMBOL(fc_queuecommand); /** @@ -2112,7 +2104,6 @@ int fc_eh_device_reset(struct scsi_cmnd *sc_cmd) * the sc passed in is not setup for execution like when sent * through the queuecommand callout. */ - fsp->lp = lport; /* save the softc ptr */ fsp->rport = rport; /* set the remote port ptr */ /* @@ -2245,7 +2236,7 @@ void fc_fcp_destroy(struct fc_lport *lport) } EXPORT_SYMBOL(fc_fcp_destroy); -int fc_setup_fcp() +int fc_setup_fcp(void) { int rc = 0; @@ -2261,7 +2252,7 @@ int fc_setup_fcp() return rc; } -void fc_destroy_fcp() +void fc_destroy_fcp(void) { if (scsi_pkt_cachep) kmem_cache_destroy(scsi_pkt_cachep); diff --git a/drivers/scsi/libfc/fc_libfc.c b/drivers/scsi/libfc/fc_libfc.c index 6a48c28e4420..b7735129f1f3 100644 --- a/drivers/scsi/libfc/fc_libfc.c +++ b/drivers/scsi/libfc/fc_libfc.c @@ -35,6 +35,27 @@ unsigned int fc_debug_logging; module_param_named(debug_logging, fc_debug_logging, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels"); +DEFINE_MUTEX(fc_prov_mutex); +static LIST_HEAD(fc_local_ports); +struct blocking_notifier_head fc_lport_notifier_head = + BLOCKING_NOTIFIER_INIT(fc_lport_notifier_head); +EXPORT_SYMBOL(fc_lport_notifier_head); + +/* + * Providers which primarily send requests and PRLIs. 
+ */ +struct fc4_prov *fc_active_prov[FC_FC4_PROV_SIZE] = { + [0] = &fc_rport_t0_prov, + [FC_TYPE_FCP] = &fc_rport_fcp_init, +}; + +/* + * Providers which receive requests. + */ +struct fc4_prov *fc_passive_prov[FC_FC4_PROV_SIZE] = { + [FC_TYPE_ELS] = &fc_lport_els_prov, +}; + /** * libfc_init() - Initialize libfc.ko */ @@ -210,3 +231,102 @@ void fc_fill_reply_hdr(struct fc_frame *fp, const struct fc_frame *in_fp, fc_fill_hdr(fp, in_fp, r_ctl, FC_FCTL_RESP, 0, parm_offset); } EXPORT_SYMBOL(fc_fill_reply_hdr); + +/** + * fc_fc4_conf_lport_params() - Modify "service_params" of specified lport + * if there is service provider (target provider) registered with libfc + * for specified "fc_ft_type" + * @lport: Local port which service_params needs to be modified + * @type: FC-4 type, such as FC_TYPE_FCP + */ +void fc_fc4_conf_lport_params(struct fc_lport *lport, enum fc_fh_type type) +{ + struct fc4_prov *prov_entry; + BUG_ON(type >= FC_FC4_PROV_SIZE); + BUG_ON(!lport); + prov_entry = fc_passive_prov[type]; + if (type == FC_TYPE_FCP) { + if (prov_entry && prov_entry->recv) + lport->service_params |= FCP_SPPF_TARG_FCN; + } +} + +void fc_lport_iterate(void (*notify)(struct fc_lport *, void *), void *arg) +{ + struct fc_lport *lport; + + mutex_lock(&fc_prov_mutex); + list_for_each_entry(lport, &fc_local_ports, lport_list) + notify(lport, arg); + mutex_unlock(&fc_prov_mutex); +} +EXPORT_SYMBOL(fc_lport_iterate); + +/** + * fc_fc4_register_provider() - register FC-4 upper-level provider. + * @type: FC-4 type, such as FC_TYPE_FCP + * @prov: structure describing provider including ops vector. + * + * Returns 0 on success, negative error otherwise. + */ +int fc_fc4_register_provider(enum fc_fh_type type, struct fc4_prov *prov) +{ + struct fc4_prov **prov_entry; + int ret = 0; + + if (type >= FC_FC4_PROV_SIZE) + return -EINVAL; + mutex_lock(&fc_prov_mutex); + prov_entry = (prov->recv ? fc_passive_prov : fc_active_prov) + type; + if (*prov_entry) + ret = -EBUSY; + else + *prov_entry = prov; + mutex_unlock(&fc_prov_mutex); + return ret; +} +EXPORT_SYMBOL(fc_fc4_register_provider); + +/** + * fc_fc4_deregister_provider() - deregister FC-4 upper-level provider. + * @type: FC-4 type, such as FC_TYPE_FCP + * @prov: structure describing provider including ops vector. + */ +void fc_fc4_deregister_provider(enum fc_fh_type type, struct fc4_prov *prov) +{ + BUG_ON(type >= FC_FC4_PROV_SIZE); + mutex_lock(&fc_prov_mutex); + if (prov->recv) + rcu_assign_pointer(fc_passive_prov[type], NULL); + else + rcu_assign_pointer(fc_active_prov[type], NULL); + mutex_unlock(&fc_prov_mutex); + synchronize_rcu(); +} +EXPORT_SYMBOL(fc_fc4_deregister_provider); + +/** + * fc_fc4_add_lport() - add new local port to list and run notifiers. + * @lport: The new local port. + */ +void fc_fc4_add_lport(struct fc_lport *lport) +{ + mutex_lock(&fc_prov_mutex); + list_add_tail(&lport->lport_list, &fc_local_ports); + blocking_notifier_call_chain(&fc_lport_notifier_head, + FC_LPORT_EV_ADD, lport); + mutex_unlock(&fc_prov_mutex); +} + +/** + * fc_fc4_del_lport() - remove local port from list and run notifiers. + * @lport: The new local port. 
+ */ +void fc_fc4_del_lport(struct fc_lport *lport) +{ + mutex_lock(&fc_prov_mutex); + list_del(&lport->lport_list); + blocking_notifier_call_chain(&fc_lport_notifier_head, + FC_LPORT_EV_DEL, lport); + mutex_unlock(&fc_prov_mutex); +} diff --git a/drivers/scsi/libfc/fc_libfc.h b/drivers/scsi/libfc/fc_libfc.h index eea0c3541b71..fedc819d70c0 100644 --- a/drivers/scsi/libfc/fc_libfc.h +++ b/drivers/scsi/libfc/fc_libfc.h @@ -94,6 +94,17 @@ extern unsigned int fc_debug_logging; (lport)->host->host_no, ##args)) /* + * FC-4 Providers. + */ +extern struct fc4_prov *fc_active_prov[]; /* providers without recv */ +extern struct fc4_prov *fc_passive_prov[]; /* providers with recv */ +extern struct mutex fc_prov_mutex; /* lock over table changes */ + +extern struct fc4_prov fc_rport_t0_prov; /* type 0 provider */ +extern struct fc4_prov fc_lport_els_prov; /* ELS provider */ +extern struct fc4_prov fc_rport_fcp_init; /* FCP initiator provider */ + +/* * Set up direct-data placement for this I/O request */ void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid); @@ -112,6 +123,9 @@ void fc_destroy_fcp(void); * Internal libfc functions */ const char *fc_els_resp_type(struct fc_frame *); +extern void fc_fc4_add_lport(struct fc_lport *); +extern void fc_fc4_del_lport(struct fc_lport *); +extern void fc_fc4_conf_lport_params(struct fc_lport *, enum fc_fh_type); /* * Copies a buffer into an sg list diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index c5a10f94f845..8c08b210001d 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -633,6 +633,7 @@ int fc_lport_destroy(struct fc_lport *lport) lport->tt.fcp_abort_io(lport); lport->tt.disc_stop_final(lport); lport->tt.exch_mgr_reset(lport, 0, 0); + fc_fc4_del_lport(lport); return 0; } EXPORT_SYMBOL(fc_lport_destroy); @@ -849,7 +850,7 @@ out: } /** - * fc_lport_recv_req() - The generic lport request handler + * fc_lport_recv_els_req() - The generic lport ELS request handler * @lport: The local port that received the request * @fp: The request frame * @@ -859,9 +860,9 @@ out: * Locking Note: This function should not be called with the lport * lock held becuase it will grab the lock. */ -static void fc_lport_recv_req(struct fc_lport *lport, struct fc_frame *fp) +static void fc_lport_recv_els_req(struct fc_lport *lport, + struct fc_frame *fp) { - struct fc_frame_header *fh = fc_frame_header_get(fp); void (*recv)(struct fc_lport *, struct fc_frame *); mutex_lock(&lport->lp_mutex); @@ -873,8 +874,7 @@ static void fc_lport_recv_req(struct fc_lport *lport, struct fc_frame *fp) */ if (!lport->link_up) fc_frame_free(fp); - else if (fh->fh_type == FC_TYPE_ELS && - fh->fh_r_ctl == FC_RCTL_ELS_REQ) { + else { /* * Check opcode. */ @@ -903,14 +903,62 @@ static void fc_lport_recv_req(struct fc_lport *lport, struct fc_frame *fp) } recv(lport, fp); - } else { - FC_LPORT_DBG(lport, "dropping invalid frame (eof %x)\n", - fr_eof(fp)); - fc_frame_free(fp); } mutex_unlock(&lport->lp_mutex); } +static int fc_lport_els_prli(struct fc_rport_priv *rdata, u32 spp_len, + const struct fc_els_spp *spp_in, + struct fc_els_spp *spp_out) +{ + return FC_SPP_RESP_INVL; +} + +struct fc4_prov fc_lport_els_prov = { + .prli = fc_lport_els_prli, + .recv = fc_lport_recv_els_req, +}; + +/** + * fc_lport_recv_req() - The generic lport request handler + * @lport: The lport that received the request + * @fp: The frame the request is in + * + * Locking Note: This function should not be called with the lport + * lock held becuase it may grab the lock. 
+ */ +static void fc_lport_recv_req(struct fc_lport *lport, + struct fc_frame *fp) +{ + struct fc_frame_header *fh = fc_frame_header_get(fp); + struct fc_seq *sp = fr_seq(fp); + struct fc4_prov *prov; + + /* + * Use RCU read lock and module_lock to be sure module doesn't + * deregister and get unloaded while we're calling it. + * try_module_get() is inlined and accepts a NULL parameter. + * Only ELSes and FCP target ops should come through here. + * The locking is unfortunate, and a better scheme is being sought. + */ + + rcu_read_lock(); + if (fh->fh_type >= FC_FC4_PROV_SIZE) + goto drop; + prov = rcu_dereference(fc_passive_prov[fh->fh_type]); + if (!prov || !try_module_get(prov->module)) + goto drop; + rcu_read_unlock(); + prov->recv(lport, fp); + module_put(prov->module); + return; +drop: + rcu_read_unlock(); + FC_LPORT_DBG(lport, "dropping unexpected frame type %x\n", fh->fh_type); + fc_frame_free(fp); + lport->tt.exch_done(sp); +} + /** * fc_lport_reset() - Reset a local port * @lport: The local port which should be reset @@ -1542,6 +1590,7 @@ void fc_lport_enter_flogi(struct fc_lport *lport) */ int fc_lport_config(struct fc_lport *lport) { + INIT_LIST_HEAD(&lport->ema_list); INIT_DELAYED_WORK(&lport->retry_work, fc_lport_timeout); mutex_init(&lport->lp_mutex); @@ -1549,6 +1598,7 @@ int fc_lport_config(struct fc_lport *lport) fc_lport_add_fc4_type(lport, FC_TYPE_FCP); fc_lport_add_fc4_type(lport, FC_TYPE_CT); + fc_fc4_conf_lport_params(lport, FC_TYPE_FCP); return 0; } @@ -1586,6 +1636,7 @@ int fc_lport_init(struct fc_lport *lport) fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_1GBIT; if (lport->link_supported_speeds & FC_PORTSPEED_10GBIT) fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_10GBIT; + fc_fc4_add_lport(lport); return 0; } diff --git a/drivers/scsi/libfc/fc_npiv.c b/drivers/scsi/libfc/fc_npiv.c index dd2b43bb1c70..f33b897e4784 100644 --- a/drivers/scsi/libfc/fc_npiv.c +++ b/drivers/scsi/libfc/fc_npiv.c @@ -37,9 +37,7 @@ struct fc_lport *libfc_vport_create(struct fc_vport *vport, int privsize) vn_port = libfc_host_alloc(shost->hostt, privsize); if (!vn_port) - goto err_out; - if (fc_exch_mgr_list_clone(n_port, vn_port)) - goto err_put; + return vn_port; vn_port->vport = vport; vport->dd_data = vn_port; @@ -49,11 +47,6 @@ struct fc_lport *libfc_vport_create(struct fc_vport *vport, int privsize) mutex_unlock(&n_port->lp_mutex); return vn_port; - -err_put: - scsi_host_put(vn_port->host); -err_out: - return NULL; } EXPORT_SYMBOL(libfc_vport_create); @@ -86,6 +79,7 @@ struct fc_lport *fc_vport_id_lookup(struct fc_lport *n_port, u32 port_id) return lport; } +EXPORT_SYMBOL(fc_vport_id_lookup); /* * When setting the link state of vports during an lport state change, it's diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index a7175adab32d..49e1ccca09d5 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -58,7 +58,7 @@ #include "fc_libfc.h" -struct workqueue_struct *rport_event_queue; +static struct workqueue_struct *rport_event_queue; static void fc_rport_enter_flogi(struct fc_rport_priv *); static void fc_rport_enter_plogi(struct fc_rport_priv *); @@ -145,8 +145,10 @@ static struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, rdata->maxframe_size = FC_MIN_MAX_PAYLOAD; INIT_DELAYED_WORK(&rdata->retry_work, fc_rport_timeout); INIT_WORK(&rdata->event_work, fc_rport_work); - if (port_id != FC_FID_DIR_SERV) + if (port_id != FC_FID_DIR_SERV) { + rdata->lld_event_callback = lport->tt.rport_event_callback; 
list_add_rcu(&rdata->peers, &lport->disc.rports); + } return rdata; } @@ -257,6 +259,8 @@ static void fc_rport_work(struct work_struct *work) struct fc_rport_operations *rport_ops; struct fc_rport_identifiers ids; struct fc_rport *rport; + struct fc4_prov *prov; + u8 type; mutex_lock(&rdata->rp_mutex); event = rdata->event; @@ -300,12 +304,25 @@ static void fc_rport_work(struct work_struct *work) FC_RPORT_DBG(rdata, "callback ev %d\n", event); rport_ops->event_callback(lport, rdata, event); } + if (rdata->lld_event_callback) { + FC_RPORT_DBG(rdata, "lld callback ev %d\n", event); + rdata->lld_event_callback(lport, rdata, event); + } kref_put(&rdata->kref, lport->tt.rport_destroy); break; case RPORT_EV_FAILED: case RPORT_EV_LOGO: case RPORT_EV_STOP: + if (rdata->prli_count) { + mutex_lock(&fc_prov_mutex); + for (type = 1; type < FC_FC4_PROV_SIZE; type++) { + prov = fc_passive_prov[type]; + if (prov && prov->prlo) + prov->prlo(rdata); + } + mutex_unlock(&fc_prov_mutex); + } port_id = rdata->ids.port_id; mutex_unlock(&rdata->rp_mutex); @@ -313,6 +330,10 @@ static void fc_rport_work(struct work_struct *work) FC_RPORT_DBG(rdata, "callback ev %d\n", event); rport_ops->event_callback(lport, rdata, event); } + if (rdata->lld_event_callback) { + FC_RPORT_DBG(rdata, "lld callback ev %d\n", event); + rdata->lld_event_callback(lport, rdata, event); + } cancel_delayed_work_sync(&rdata->retry_work); /* @@ -336,6 +357,7 @@ static void fc_rport_work(struct work_struct *work) if (port_id == FC_FID_DIR_SERV) { rdata->event = RPORT_EV_NONE; mutex_unlock(&rdata->rp_mutex); + kref_put(&rdata->kref, lport->tt.rport_destroy); } else if ((rdata->flags & FC_RP_STARTED) && rdata->major_retries < lport->max_rport_retry_count) { @@ -575,7 +597,7 @@ static void fc_rport_error_retry(struct fc_rport_priv *rdata, /* make sure this isn't an FC_EX_CLOSED error, never retry those */ if (PTR_ERR(fp) == -FC_EX_CLOSED) - return fc_rport_error(rdata, fp); + goto out; if (rdata->retries < rdata->local_port->max_rport_retry_count) { FC_RPORT_DBG(rdata, "Error %ld in state %s, retrying\n", @@ -588,7 +610,8 @@ static void fc_rport_error_retry(struct fc_rport_priv *rdata, return; } - return fc_rport_error(rdata, fp); +out: + fc_rport_error(rdata, fp); } /** @@ -878,6 +901,9 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp, rdata->ids.port_name = get_unaligned_be64(&plp->fl_wwpn); rdata->ids.node_name = get_unaligned_be64(&plp->fl_wwnn); + /* save plogi response sp_features for further reference */ + rdata->sp_features = ntohs(plp->fl_csp.sp_features); + if (lport->point_to_multipoint) fc_rport_login_complete(rdata, fp); csp_seq = ntohs(plp->fl_csp.sp_tot_seq); @@ -949,6 +975,8 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp, struct fc_els_prli prli; struct fc_els_spp spp; } *pp; + struct fc_els_spp temp_spp; + struct fc4_prov *prov; u32 roles = FC_RPORT_ROLE_UNKNOWN; u32 fcp_parm = 0; u8 op; @@ -983,6 +1011,7 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp, resp_code = (pp->spp.spp_flags & FC_SPP_RESP_MASK); FC_RPORT_DBG(rdata, "PRLI spp_flags = 0x%x\n", pp->spp.spp_flags); + rdata->spp_type = pp->spp.spp_type; if (resp_code != FC_SPP_RESP_ACK) { if (resp_code == FC_SPP_RESP_CONF) fc_rport_error(rdata, fp); @@ -996,6 +1025,15 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp, fcp_parm = ntohl(pp->spp.spp_params); if (fcp_parm & FCP_SPPF_RETRY) rdata->flags |= FC_RP_FLAGS_RETRY; + if (fcp_parm & FCP_SPPF_CONF_COMPL) + rdata->flags |= 
FC_RP_FLAGS_CONF_REQ; + + prov = fc_passive_prov[FC_TYPE_FCP]; + if (prov) { + memset(&temp_spp, 0, sizeof(temp_spp)); + prov->prli(rdata, pp->prli.prli_spp_len, + &pp->spp, &temp_spp); + } rdata->supported_classes = FC_COS_CLASS3; if (fcp_parm & FCP_SPPF_INIT_FCN) @@ -1033,6 +1071,7 @@ static void fc_rport_enter_prli(struct fc_rport_priv *rdata) struct fc_els_spp spp; } *pp; struct fc_frame *fp; + struct fc4_prov *prov; /* * If the rport is one of the well known addresses @@ -1054,9 +1093,20 @@ static void fc_rport_enter_prli(struct fc_rport_priv *rdata) return; } - if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PRLI, - fc_rport_prli_resp, rdata, - 2 * lport->r_a_tov)) + fc_prli_fill(lport, fp); + + prov = fc_passive_prov[FC_TYPE_FCP]; + if (prov) { + pp = fc_frame_payload_get(fp, sizeof(*pp)); + prov->prli(rdata, sizeof(pp->spp), NULL, &pp->spp); + } + + fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, rdata->ids.port_id, + fc_host_port_id(lport->host), FC_TYPE_ELS, + FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); + + if (!lport->tt.exch_seq_send(lport, fp, fc_rport_prli_resp, + NULL, rdata, 2 * lport->r_a_tov)) fc_rport_error_retry(rdata, NULL); else kref_get(&rdata->kref); @@ -1642,9 +1692,9 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata, unsigned int len; unsigned int plen; enum fc_els_spp_resp resp; + enum fc_els_spp_resp passive; struct fc_seq_els_data rjt_data; - u32 fcp_parm; - u32 roles = FC_RPORT_ROLE_UNKNOWN; + struct fc4_prov *prov; FC_RPORT_DBG(rdata, "Received PRLI request while in state %s\n", fc_rport_state(rdata)); @@ -1678,46 +1728,42 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata, pp->prli.prli_len = htons(len); len -= sizeof(struct fc_els_prli); - /* reinitialize remote port roles */ - rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN; - /* * Go through all the service parameter pages and build * response. If plen indicates longer SPP than standard, * use that. The entire response has been pre-cleared above. */ spp = &pp->spp; + mutex_lock(&fc_prov_mutex); while (len >= plen) { + rdata->spp_type = rspp->spp_type; spp->spp_type = rspp->spp_type; spp->spp_type_ext = rspp->spp_type_ext; - spp->spp_flags = rspp->spp_flags & FC_SPP_EST_IMG_PAIR; - resp = FC_SPP_RESP_ACK; - - switch (rspp->spp_type) { - case 0: /* common to all FC-4 types */ - break; - case FC_TYPE_FCP: - fcp_parm = ntohl(rspp->spp_params); - if (fcp_parm & FCP_SPPF_RETRY) - rdata->flags |= FC_RP_FLAGS_RETRY; - rdata->supported_classes = FC_COS_CLASS3; - if (fcp_parm & FCP_SPPF_INIT_FCN) - roles |= FC_RPORT_ROLE_FCP_INITIATOR; - if (fcp_parm & FCP_SPPF_TARG_FCN) - roles |= FC_RPORT_ROLE_FCP_TARGET; - rdata->ids.roles = roles; - - spp->spp_params = htonl(lport->service_params); - break; - default: - resp = FC_SPP_RESP_INVL; - break; + resp = 0; + + if (rspp->spp_type < FC_FC4_PROV_SIZE) { + prov = fc_active_prov[rspp->spp_type]; + if (prov) + resp = prov->prli(rdata, plen, rspp, spp); + prov = fc_passive_prov[rspp->spp_type]; + if (prov) { + passive = prov->prli(rdata, plen, rspp, spp); + if (!resp || passive == FC_SPP_RESP_ACK) + resp = passive; + } + } + if (!resp) { + if (spp->spp_flags & FC_SPP_EST_IMG_PAIR) + resp |= FC_SPP_RESP_CONF; + else + resp |= FC_SPP_RESP_INVL; } spp->spp_flags |= resp; len -= plen; rspp = (struct fc_els_spp *)((char *)rspp + plen); spp = (struct fc_els_spp *)((char *)spp + plen); } + mutex_unlock(&fc_prov_mutex); /* * Send LS_ACC. If this fails, the originator should retry. 
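
Illustration, not part of the patch: the PRLI rework above routes each service parameter page through the registered FC-4 providers, and the next hunk adds the two built-in ones (the FCP initiator and the type-0 provider). A minimal sketch of the provider contract this creates, assuming only the symbols this patch introduces (struct fc4_prov, fc_fc4_register_provider(), fc_fc4_deregister_provider(), FC_SPP_RESP_ACK) plus the standard module macros; all my_* names are hypothetical:

    #include <linux/module.h>
    #include <scsi/fc/fc_els.h>
    #include <scsi/libfc.h>

    static int my_prli(struct fc_rport_priv *rdata, u32 spp_len,
                       const struct fc_els_spp *rspp, struct fc_els_spp *spp)
    {
            /* rspp is NULL when libfc builds an outgoing PRLI (see
             * fc_rport_enter_prli() above), so guard before echoing flags. */
            if (rspp)
                    spp->spp_flags |= rspp->spp_flags & FC_SPP_EST_IMG_PAIR;
            return FC_SPP_RESP_ACK;
    }

    static void my_recv(struct fc_lport *lport, struct fc_frame *fp)
    {
            /* Incoming frames of the registered FC-4 type land here; the
             * dispatch path pins us via try_module_get(prov->module). */
            fc_frame_free(fp);
    }

    static struct fc4_prov my_prov = {
            .module = THIS_MODULE,
            .prli   = my_prli,
            .recv   = my_recv,  /* non-NULL recv => passive provider table */
    };

    static int __init my_init(void)
    {
            /* Returns -EBUSY if another provider already owns the slot. */
            return fc_fc4_register_provider(FC_TYPE_FCP, &my_prov);
    }

    static void __exit my_exit(void)
    {
            fc_fc4_deregister_provider(FC_TYPE_FCP, &my_prov);
    }

    module_init(my_init);
    module_exit(my_exit);
    MODULE_LICENSE("GPL");

Note the split tables: a provider with a recv handler goes in fc_passive_prov[], one without goes in fc_active_prov[], which is why this passive provider can coexist with the built-in fc_rport_fcp_init entry for FC_TYPE_FCP.
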
@@ -1887,9 +1933,82 @@ int fc_rport_init(struct fc_lport *lport) EXPORT_SYMBOL(fc_rport_init); /** + * fc_rport_fcp_prli() - Handle incoming PRLI for the FCP initiator. + * @rdata: remote port private + * @spp_len: service parameter page length + * @rspp: received service parameter page + * @spp: response service parameter page + * + * Returns the value for the response code to be placed in spp_flags; + * Returns 0 if not an initiator. + */ +static int fc_rport_fcp_prli(struct fc_rport_priv *rdata, u32 spp_len, + const struct fc_els_spp *rspp, + struct fc_els_spp *spp) +{ + struct fc_lport *lport = rdata->local_port; + u32 fcp_parm; + + fcp_parm = ntohl(rspp->spp_params); + rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN; + if (fcp_parm & FCP_SPPF_INIT_FCN) + rdata->ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR; + if (fcp_parm & FCP_SPPF_TARG_FCN) + rdata->ids.roles |= FC_RPORT_ROLE_FCP_TARGET; + if (fcp_parm & FCP_SPPF_RETRY) + rdata->flags |= FC_RP_FLAGS_RETRY; + rdata->supported_classes = FC_COS_CLASS3; + + if (!(lport->service_params & FC_RPORT_ROLE_FCP_INITIATOR)) + return 0; + + spp->spp_flags |= rspp->spp_flags & FC_SPP_EST_IMG_PAIR; + + /* + * OR in our service parameters with other providers (target), if any. + */ + fcp_parm = ntohl(spp->spp_params); + spp->spp_params = htonl(fcp_parm | lport->service_params); + return FC_SPP_RESP_ACK; +} + +/* + * FC-4 provider ops for FCP initiator. + */ +struct fc4_prov fc_rport_fcp_init = { + .prli = fc_rport_fcp_prli, +}; + +/** + * fc_rport_t0_prli() - Handle incoming PRLI parameters for type 0 + * @rdata: remote port private + * @spp_len: service parameter page length + * @rspp: received service parameter page + * @spp: response service parameter page + */ +static int fc_rport_t0_prli(struct fc_rport_priv *rdata, u32 spp_len, + const struct fc_els_spp *rspp, + struct fc_els_spp *spp) +{ + if (rspp->spp_flags & FC_SPP_EST_IMG_PAIR) + return FC_SPP_RESP_INVL; + return FC_SPP_RESP_ACK; +} + +/* + * FC-4 provider ops for type 0 service parameters. + * + * This handles the special case of type 0 which is always successful + * but doesn't do anything otherwise. 
+ */ +struct fc4_prov fc_rport_t0_prov = { + .prli = fc_rport_t0_prli, +}; + +/** * fc_setup_rport() - Initialize the rport_event_queue */ -int fc_setup_rport() +int fc_setup_rport(void) { rport_event_queue = create_singlethread_workqueue("fc_rport_eq"); if (!rport_event_queue) @@ -1900,7 +2019,7 @@ int fc_setup_rport() /** * fc_destroy_rport() - Destroy the rport_event_queue */ -void fc_destroy_rport() +void fc_destroy_rport(void) { destroy_workqueue(rport_event_queue); } diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index da8b61543ee4..0c550d5b9133 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -3352,6 +3352,47 @@ int iscsi_session_get_param(struct iscsi_cls_session *cls_session, } EXPORT_SYMBOL_GPL(iscsi_session_get_param); +int iscsi_conn_get_addr_param(struct sockaddr_storage *addr, + enum iscsi_param param, char *buf) +{ + struct sockaddr_in6 *sin6 = NULL; + struct sockaddr_in *sin = NULL; + int len; + + switch (addr->ss_family) { + case AF_INET: + sin = (struct sockaddr_in *)addr; + break; + case AF_INET6: + sin6 = (struct sockaddr_in6 *)addr; + break; + default: + return -EINVAL; + } + + switch (param) { + case ISCSI_PARAM_CONN_ADDRESS: + case ISCSI_HOST_PARAM_IPADDRESS: + if (sin) + len = sprintf(buf, "%pI4\n", &sin->sin_addr.s_addr); + else + len = sprintf(buf, "%pI6\n", &sin6->sin6_addr); + break; + case ISCSI_PARAM_CONN_PORT: + if (sin) + len = sprintf(buf, "%hu\n", be16_to_cpu(sin->sin_port)); + else + len = sprintf(buf, "%hu\n", + be16_to_cpu(sin6->sin6_port)); + break; + default: + return -EINVAL; + } + + return len; +} +EXPORT_SYMBOL_GPL(iscsi_conn_get_addr_param); + int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param, char *buf) { @@ -3416,9 +3457,6 @@ int iscsi_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param, case ISCSI_HOST_PARAM_INITIATOR_NAME: len = sprintf(buf, "%s\n", ihost->initiatorname); break; - case ISCSI_HOST_PARAM_IPADDRESS: - len = sprintf(buf, "%s\n", ihost->local_address); - break; default: return -ENOSYS; } diff --git a/drivers/scsi/libsas/Kconfig b/drivers/scsi/libsas/Kconfig index 18f33cd54411..9dafe64e7c7a 100644 --- a/drivers/scsi/libsas/Kconfig +++ b/drivers/scsi/libsas/Kconfig @@ -46,11 +46,3 @@ config SCSI_SAS_HOST_SMP Allows sas hosts to receive SMP frames. Selecting this option builds an SMP interpreter into libsas. Say N here if you want to save the few kb this consumes. - -config SCSI_SAS_LIBSAS_DEBUG - bool "Compile the SAS Domain Transport Attributes in debug mode" - default y - depends on SCSI_SAS_LIBSAS - help - Compiles the SAS Layer in debug mode. In debug mode, the - SAS Layer prints diagnostic and debug messages. 
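
Illustration, not part of the patch: the libiscsi hunk above adds iscsi_conn_get_addr_param() so transports can format a sockaddr_storage on demand instead of caching portal/local address strings at bind time (the fields dropped from iscsi_tcp earlier in this diff). A hedged sketch of the resulting get_param pattern; my_tcp_get_param(), struct my_tcp_conn and conn_sock() are illustrative stand-ins for transport-private plumbing, and the session->lock the real patch takes around the socket pointer is elided for brevity:

    #include <linux/net.h>
    #include <scsi/libiscsi.h>

    struct my_tcp_conn {            /* hypothetical per-connection data */
            struct socket *sock;
    };

    static struct socket *conn_sock(struct iscsi_conn *conn)
    {
            return ((struct my_tcp_conn *)conn->dd_data)->sock;
    }

    static int my_tcp_get_param(struct iscsi_cls_conn *cls_conn,
                                enum iscsi_param param, char *buf)
    {
            struct iscsi_conn *conn = cls_conn->dd_data;
            struct sockaddr_in6 addr;       /* large enough for v4 and v6 */
            int rc, len;

            switch (param) {
            case ISCSI_PARAM_CONN_PORT:
            case ISCSI_PARAM_CONN_ADDRESS:
                    /* Resolve the peer endpoint at query time. */
                    rc = kernel_getpeername(conn_sock(conn),
                                            (struct sockaddr *)&addr, &len);
                    if (rc)
                            return rc;
                    return iscsi_conn_get_addr_param(
                            (struct sockaddr_storage *)&addr, param, buf);
            default:
                    return iscsi_conn_get_param(cls_conn, param, buf);
            }
    }

The payoff is visible in the iscsi_host_get_param hunk above: with addresses computed from the live socket, the ISCSI_HOST_PARAM_IPADDRESS case and the cached ihost->local_address string can be removed.
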
diff --git a/drivers/scsi/libsas/Makefile b/drivers/scsi/libsas/Makefile index 1ad1323c60fa..566a10024598 100644 --- a/drivers/scsi/libsas/Makefile +++ b/drivers/scsi/libsas/Makefile @@ -21,10 +21,6 @@ # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 # USA -ifeq ($(CONFIG_SCSI_SAS_LIBSAS_DEBUG),y) - EXTRA_CFLAGS += -DSAS_DEBUG -endif - obj-$(CONFIG_SCSI_SAS_LIBSAS) += libsas.o libsas-y += sas_init.o \ sas_phy.o \ diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c index 4d3b704ede1c..31fc21f4d831 100644 --- a/drivers/scsi/libsas/sas_ata.c +++ b/drivers/scsi/libsas/sas_ata.c @@ -71,13 +71,13 @@ static enum ata_completion_errors sas_to_ata_err(struct task_status_struct *ts) case SAS_SG_ERR: return AC_ERR_INVALID; - case SAM_STAT_CHECK_CONDITION: case SAS_OPEN_TO: case SAS_OPEN_REJECT: SAS_DPRINTK("%s: Saw error %d. What to do?\n", __func__, ts->stat); return AC_ERR_OTHER; + case SAM_STAT_CHECK_CONDITION: case SAS_ABORTED_TASK: return AC_ERR_DEV; @@ -107,13 +107,15 @@ static void sas_ata_task_done(struct sas_task *task) sas_ha = dev->port->ha; spin_lock_irqsave(dev->sata_dev.ap->lock, flags); - if (stat->stat == SAS_PROTO_RESPONSE || stat->stat == SAM_STAT_GOOD) { + if (stat->stat == SAS_PROTO_RESPONSE || stat->stat == SAM_STAT_GOOD || + ((stat->stat == SAM_STAT_CHECK_CONDITION && + dev->sata_dev.command_set == ATAPI_COMMAND_SET))) { ata_tf_from_fis(resp->ending_fis, &dev->sata_dev.tf); qc->err_mask |= ac_err_mask(dev->sata_dev.tf.command); dev->sata_dev.sstatus = resp->sstatus; dev->sata_dev.serror = resp->serror; dev->sata_dev.scontrol = resp->scontrol; - } else if (stat->stat != SAM_STAT_GOOD) { + } else { ac = sas_to_ata_err(stat); if (ac) { SAS_DPRINTK("%s: SAS error %x\n", __func__, @@ -305,55 +307,6 @@ static void sas_ata_post_internal(struct ata_queued_cmd *qc) } } -static int sas_ata_scr_write(struct ata_link *link, unsigned int sc_reg_in, - u32 val) -{ - struct domain_device *dev = link->ap->private_data; - - SAS_DPRINTK("STUB %s\n", __func__); - switch (sc_reg_in) { - case SCR_STATUS: - dev->sata_dev.sstatus = val; - break; - case SCR_CONTROL: - dev->sata_dev.scontrol = val; - break; - case SCR_ERROR: - dev->sata_dev.serror = val; - break; - case SCR_ACTIVE: - dev->sata_dev.ap->link.sactive = val; - break; - default: - return -EINVAL; - } - return 0; -} - -static int sas_ata_scr_read(struct ata_link *link, unsigned int sc_reg_in, - u32 *val) -{ - struct domain_device *dev = link->ap->private_data; - - SAS_DPRINTK("STUB %s\n", __func__); - switch (sc_reg_in) { - case SCR_STATUS: - *val = dev->sata_dev.sstatus; - return 0; - case SCR_CONTROL: - *val = dev->sata_dev.scontrol; - return 0; - case SCR_ERROR: - *val = dev->sata_dev.serror; - return 0; - case SCR_ACTIVE: - *val = dev->sata_dev.ap->link.sactive; - return 0; - default: - return -EINVAL; - } -} - static struct ata_port_operations sas_sata_ops = { .prereset = ata_std_prereset, .softreset = NULL, @@ -367,8 +320,6 @@ static struct ata_port_operations sas_sata_ops = { .qc_fill_rtf = sas_ata_qc_fill_rtf, .port_start = ata_sas_port_start, .port_stop = ata_sas_port_stop, - .scr_read = sas_ata_scr_read, - .scr_write = sas_ata_scr_write }; static struct ata_port_info sata_port_info = { @@ -801,7 +752,7 @@ void sas_ata_strategy_handler(struct Scsi_Host *shost) if (!dev_is_sata(ddev)) continue; - + ata_port_printk(ap, KERN_DEBUG, "sas eh calling libata port error handler"); ata_scsi_port_error_handler(shost, ap); } @@ -834,13 +785,13 @@ int sas_ata_eh(struct Scsi_Host *shost, struct list_head 
*work_q, LIST_HEAD(sata_q); ap = NULL; - + list_for_each_entry_safe(cmd, n, work_q, eh_entry) { struct domain_device *ddev = cmd_to_domain_dev(cmd); if (!dev_is_sata(ddev) || TO_SAS_TASK(cmd)) continue; - if(ap && ap != ddev->sata_dev.ap) + if (ap && ap != ddev->sata_dev.ap) continue; ap = ddev->sata_dev.ap; rtn = 1; @@ -848,8 +799,21 @@ int sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q, } if (!list_empty(&sata_q)) { - ata_port_printk(ap, KERN_DEBUG,"sas eh calling libata cmd error handler\n"); + ata_port_printk(ap, KERN_DEBUG, "sas eh calling libata cmd error handler\n"); ata_scsi_cmd_error_handler(shost, ap, &sata_q); + /* + * ata's error handler may leave the cmd on the list + * so make sure they don't remain on a stack list + * about to go out of scope. + * + * This looks strange, since the commands are + * now part of no list, but the next error + * action will be ata_port_error_handler() + * which takes no list and sweeps them up + * anyway from the ata tag array. + */ + while (!list_empty(&sata_q)) + list_del_init(sata_q.next); } } while (ap); diff --git a/drivers/scsi/libsas/sas_dump.c b/drivers/scsi/libsas/sas_dump.c index c17c25030f1c..fc460933575c 100644 --- a/drivers/scsi/libsas/sas_dump.c +++ b/drivers/scsi/libsas/sas_dump.c @@ -24,8 +24,6 @@ #include "sas_dump.h" -#ifdef SAS_DEBUG - static const char *sas_hae_str[] = { [0] = "HAE_RESET", }; @@ -72,5 +70,3 @@ void sas_dump_port(struct asd_sas_port *port) SAS_DPRINTK("port%d: oob_mode:0x%x\n", port->id, port->oob_mode); SAS_DPRINTK("port%d: num_phys:%d\n", port->id, port->num_phys); } - -#endif /* SAS_DEBUG */ diff --git a/drivers/scsi/libsas/sas_dump.h b/drivers/scsi/libsas/sas_dump.h index 47b45d4f5258..800e4c69093f 100644 --- a/drivers/scsi/libsas/sas_dump.h +++ b/drivers/scsi/libsas/sas_dump.h @@ -24,19 +24,7 @@ #include "sas_internal.h" -#ifdef SAS_DEBUG - void sas_dprint_porte(int phyid, enum port_event pe); void sas_dprint_phye(int phyid, enum phy_event pe); void sas_dprint_hae(struct sas_ha_struct *sas_ha, enum ha_event he); void sas_dump_port(struct asd_sas_port *port); - -#else /* SAS_DEBUG */ - -static inline void sas_dprint_porte(int phyid, enum port_event pe) { } -static inline void sas_dprint_phye(int phyid, enum phy_event pe) { } -static inline void sas_dprint_hae(struct sas_ha_struct *sas_ha, - enum ha_event he) { } -static inline void sas_dump_port(struct asd_sas_port *port) { } - -#endif /* SAS_DEBUG */ diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index 505ffe358293..f3f693b772ac 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c @@ -244,6 +244,11 @@ static int sas_ex_phy_discover_helper(struct domain_device *dev, u8 *disc_req, * dev to host FIS as described in section G.5 of * sas-2 r 04b */ dr = &((struct smp_resp *)disc_resp)->disc; + if (memcmp(dev->sas_addr, dr->attached_sas_addr, + SAS_ADDR_SIZE) == 0) { + sas_printk("Found loopback topology, just ignore it!\n"); + return 0; + } if (!(dr->attached_dev_type == 0 && dr->attached_sata_dev)) break; diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h index 0001374bd6b2..8b538bd1ff2b 100644 --- a/drivers/scsi/libsas/sas_internal.h +++ b/drivers/scsi/libsas/sas_internal.h @@ -33,11 +33,7 @@ #define sas_printk(fmt, ...) printk(KERN_NOTICE "sas: " fmt, ## __VA_ARGS__) -#ifdef SAS_DEBUG -#define SAS_DPRINTK(fmt, ...) printk(KERN_NOTICE "sas: " fmt, ## __VA_ARGS__) -#else -#define SAS_DPRINTK(fmt, ...) -#endif +#define SAS_DPRINTK(fmt, ...) 
printk(KERN_DEBUG "sas: " fmt, ## __VA_ARGS__) #define TO_SAS_TASK(_scsi_cmd) ((void *)(_scsi_cmd)->host_scribble) #define ASSIGN_SAS_TASK(_sc, _t) do { (_sc)->host_scribble = (void *) _t; } while (0) diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c index 67758ea8eb7f..f6e189f40917 100644 --- a/drivers/scsi/libsas/sas_scsi_host.c +++ b/drivers/scsi/libsas/sas_scsi_host.c @@ -681,11 +681,10 @@ enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd) { struct sas_task *task = TO_SAS_TASK(cmd); unsigned long flags; - enum blk_eh_timer_return rtn; + enum blk_eh_timer_return rtn; if (sas_ata_timed_out(cmd, task, &rtn)) return rtn; - if (!task) { cmd->request->timeout /= 2; diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 746dd3d7a092..b64c6da870d3 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2010 Emulex. All rights reserved. * + * Copyright (C) 2004-2011 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -325,6 +325,7 @@ struct lpfc_vport { #define FC_VPORT_CVL_RCVD 0x400000 /* VLink failed due to CVL */ #define FC_VFI_REGISTERED 0x800000 /* VFI is registered */ #define FC_FDISC_COMPLETED 0x1000000/* FDISC completed */ +#define FC_DISC_DELAYED 0x2000000/* Delay NPort discovery */ uint32_t ct_flags; #define FC_CT_RFF_ID 0x1 /* RFF_ID accepted by switch */ @@ -348,6 +349,8 @@ struct lpfc_vport { uint32_t fc_myDID; /* fibre channel S_ID */ uint32_t fc_prevDID; /* previous fibre channel S_ID */ + struct lpfc_name fabric_portname; + struct lpfc_name fabric_nodename; int32_t stopped; /* HBA has not been restarted since last ERATT */ uint8_t fc_linkspeed; /* Link speed after last READ_LA */ @@ -372,6 +375,7 @@ struct lpfc_vport { #define WORKER_DISC_TMO 0x1 /* vport: Discovery timeout */ #define WORKER_ELS_TMO 0x2 /* vport: ELS timeout */ #define WORKER_FDMI_TMO 0x4 /* vport: FDMI timeout */ +#define WORKER_DELAYED_DISC_TMO 0x8 /* vport: delayed discovery */ #define WORKER_MBOX_TMO 0x100 /* hba: MBOX timeout */ #define WORKER_HB_TMO 0x200 /* hba: Heart beat timeout */ @@ -382,6 +386,7 @@ struct lpfc_vport { struct timer_list fc_fdmitmo; struct timer_list els_tmofunc; + struct timer_list delayed_disc_tmo; int unreg_vpi_cmpl; @@ -548,6 +553,8 @@ struct lpfc_hba { #define LPFC_SLI3_CRP_ENABLED 0x08 #define LPFC_SLI3_BG_ENABLED 0x20 #define LPFC_SLI3_DSS_ENABLED 0x40 +#define LPFC_SLI4_PERFH_ENABLED 0x80 +#define LPFC_SLI4_PHWQ_ENABLED 0x100 uint32_t iocb_cmd_size; uint32_t iocb_rsp_size; @@ -655,7 +662,7 @@ struct lpfc_hba { #define LPFC_INITIALIZE_LINK 0 /* do normal init_link mbox */ #define LPFC_DELAY_INIT_LINK 1 /* layered driver hold off */ #define LPFC_DELAY_INIT_LINK_INDEFINITELY 2 /* wait, manual intervention */ - + uint32_t cfg_enable_dss; lpfc_vpd_t vpd; /* vital product data */ struct pci_dev *pcidev; @@ -792,6 +799,10 @@ struct lpfc_hba { struct dentry *debug_slow_ring_trc; struct lpfc_debugfs_trc *slow_ring_trc; atomic_t slow_ring_trc_cnt; + /* iDiag debugfs sub-directory */ + struct dentry *idiag_root; + struct dentry *idiag_pci_cfg; + struct dentry *idiag_que_info; #endif /* Used for deferred freeing of ELS data buffers */ diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c 
index 3512abb8a587..e7c020df12fa 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2009 Emulex. All rights reserved. * + * Copyright (C) 2004-2011 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -623,10 +623,14 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type) int status = 0; int cnt = 0; int i; + int rc; init_completion(&online_compl); - lpfc_workq_post_event(phba, &status, &online_compl, + rc = lpfc_workq_post_event(phba, &status, &online_compl, LPFC_EVT_OFFLINE_PREP); + if (rc == 0) + return -ENOMEM; + wait_for_completion(&online_compl); if (status != 0) @@ -652,7 +656,10 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type) } init_completion(&online_compl); - lpfc_workq_post_event(phba, &status, &online_compl, type); + rc = lpfc_workq_post_event(phba, &status, &online_compl, type); + if (rc == 0) + return -ENOMEM; + wait_for_completion(&online_compl); if (status != 0) @@ -671,6 +678,7 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type) * * Notes: * Assumes any error from lpfc_do_offline() will be negative. + * Do not make this function static. * * Returns: * lpfc_do_offline() return code if not zero @@ -682,6 +690,7 @@ lpfc_selective_reset(struct lpfc_hba *phba) { struct completion online_compl; int status = 0; + int rc; if (!phba->cfg_enable_hba_reset) return -EIO; @@ -692,8 +701,11 @@ lpfc_selective_reset(struct lpfc_hba *phba) return status; init_completion(&online_compl); - lpfc_workq_post_event(phba, &status, &online_compl, + rc = lpfc_workq_post_event(phba, &status, &online_compl, LPFC_EVT_ONLINE); + if (rc == 0) + return -ENOMEM; + wait_for_completion(&online_compl); if (status != 0) @@ -812,14 +824,17 @@ lpfc_board_mode_store(struct device *dev, struct device_attribute *attr, struct lpfc_hba *phba = vport->phba; struct completion online_compl; int status=0; + int rc; if (!phba->cfg_enable_hba_reset) return -EACCES; init_completion(&online_compl); if(strncmp(buf, "online", sizeof("online") - 1) == 0) { - lpfc_workq_post_event(phba, &status, &online_compl, + rc = lpfc_workq_post_event(phba, &status, &online_compl, LPFC_EVT_ONLINE); + if (rc == 0) + return -ENOMEM; wait_for_completion(&online_compl); } else if (strncmp(buf, "offline", sizeof("offline") - 1) == 0) status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE); @@ -1279,6 +1294,28 @@ lpfc_fips_rev_show(struct device *dev, struct device_attribute *attr, } /** + * lpfc_dss_show - Return the current state of dss and the configured state + * @dev: class converted to a Scsi_host structure. + * @attr: device attribute, not used. + * @buf: on return contains the formatted text. + * + * Returns: size of formatted string. + **/ +static ssize_t +lpfc_dss_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct lpfc_hba *phba = vport->phba; + + return snprintf(buf, PAGE_SIZE, "%s - %sOperational\n", + (phba->cfg_enable_dss) ? "Enabled" : "Disabled", + (phba->sli3_options & LPFC_SLI3_DSS_ENABLED) ? 
+ "" : "Not "); +} + +/** * lpfc_param_show - Return a cfg attribute value in decimal * * Description: @@ -1597,13 +1634,13 @@ lpfc_##attr##_store(struct device *dev, struct device_attribute *attr, \ #define LPFC_ATTR(name, defval, minval, maxval, desc) \ static uint lpfc_##name = defval;\ -module_param(lpfc_##name, uint, 0);\ +module_param(lpfc_##name, uint, S_IRUGO);\ MODULE_PARM_DESC(lpfc_##name, desc);\ lpfc_param_init(name, defval, minval, maxval) #define LPFC_ATTR_R(name, defval, minval, maxval, desc) \ static uint lpfc_##name = defval;\ -module_param(lpfc_##name, uint, 0);\ +module_param(lpfc_##name, uint, S_IRUGO);\ MODULE_PARM_DESC(lpfc_##name, desc);\ lpfc_param_show(name)\ lpfc_param_init(name, defval, minval, maxval)\ @@ -1611,7 +1648,7 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL) #define LPFC_ATTR_RW(name, defval, minval, maxval, desc) \ static uint lpfc_##name = defval;\ -module_param(lpfc_##name, uint, 0);\ +module_param(lpfc_##name, uint, S_IRUGO);\ MODULE_PARM_DESC(lpfc_##name, desc);\ lpfc_param_show(name)\ lpfc_param_init(name, defval, minval, maxval)\ @@ -1622,7 +1659,7 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\ #define LPFC_ATTR_HEX_R(name, defval, minval, maxval, desc) \ static uint lpfc_##name = defval;\ -module_param(lpfc_##name, uint, 0);\ +module_param(lpfc_##name, uint, S_IRUGO);\ MODULE_PARM_DESC(lpfc_##name, desc);\ lpfc_param_hex_show(name)\ lpfc_param_init(name, defval, minval, maxval)\ @@ -1630,7 +1667,7 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL) #define LPFC_ATTR_HEX_RW(name, defval, minval, maxval, desc) \ static uint lpfc_##name = defval;\ -module_param(lpfc_##name, uint, 0);\ +module_param(lpfc_##name, uint, S_IRUGO);\ MODULE_PARM_DESC(lpfc_##name, desc);\ lpfc_param_hex_show(name)\ lpfc_param_init(name, defval, minval, maxval)\ @@ -1641,13 +1678,13 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\ #define LPFC_VPORT_ATTR(name, defval, minval, maxval, desc) \ static uint lpfc_##name = defval;\ -module_param(lpfc_##name, uint, 0);\ +module_param(lpfc_##name, uint, S_IRUGO);\ MODULE_PARM_DESC(lpfc_##name, desc);\ lpfc_vport_param_init(name, defval, minval, maxval) #define LPFC_VPORT_ATTR_R(name, defval, minval, maxval, desc) \ static uint lpfc_##name = defval;\ -module_param(lpfc_##name, uint, 0);\ +module_param(lpfc_##name, uint, S_IRUGO);\ MODULE_PARM_DESC(lpfc_##name, desc);\ lpfc_vport_param_show(name)\ lpfc_vport_param_init(name, defval, minval, maxval)\ @@ -1655,7 +1692,7 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL) #define LPFC_VPORT_ATTR_RW(name, defval, minval, maxval, desc) \ static uint lpfc_##name = defval;\ -module_param(lpfc_##name, uint, 0);\ +module_param(lpfc_##name, uint, S_IRUGO);\ MODULE_PARM_DESC(lpfc_##name, desc);\ lpfc_vport_param_show(name)\ lpfc_vport_param_init(name, defval, minval, maxval)\ @@ -1666,7 +1703,7 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO | S_IWUSR,\ #define LPFC_VPORT_ATTR_HEX_R(name, defval, minval, maxval, desc) \ static uint lpfc_##name = defval;\ -module_param(lpfc_##name, uint, 0);\ +module_param(lpfc_##name, uint, S_IRUGO);\ MODULE_PARM_DESC(lpfc_##name, desc);\ lpfc_vport_param_hex_show(name)\ lpfc_vport_param_init(name, defval, minval, maxval)\ @@ -1674,7 +1711,7 @@ static DEVICE_ATTR(lpfc_##name, S_IRUGO , lpfc_##name##_show, NULL) #define LPFC_VPORT_ATTR_HEX_RW(name, defval, minval, maxval, desc) \ static uint lpfc_##name = defval;\ -module_param(lpfc_##name, uint, 0);\ +module_param(lpfc_##name, uint, 
S_IRUGO);\ MODULE_PARM_DESC(lpfc_##name, desc);\ lpfc_vport_param_hex_show(name)\ lpfc_vport_param_init(name, defval, minval, maxval)\ @@ -1718,7 +1755,7 @@ static DEVICE_ATTR(npiv_info, S_IRUGO, lpfc_npiv_info_show, NULL); static DEVICE_ATTR(lpfc_temp_sensor, S_IRUGO, lpfc_temp_sensor_show, NULL); static DEVICE_ATTR(lpfc_fips_level, S_IRUGO, lpfc_fips_level_show, NULL); static DEVICE_ATTR(lpfc_fips_rev, S_IRUGO, lpfc_fips_rev_show, NULL); - +static DEVICE_ATTR(lpfc_dss, S_IRUGO, lpfc_dss_show, NULL); static char *lpfc_soft_wwn_key = "C99G71SL8032A"; @@ -1813,6 +1850,7 @@ lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr, int stat1=0, stat2=0; unsigned int i, j, cnt=count; u8 wwpn[8]; + int rc; if (!phba->cfg_enable_hba_reset) return -EACCES; @@ -1863,7 +1901,11 @@ lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr, "0463 lpfc_soft_wwpn attribute set failed to " "reinit adapter - %d\n", stat1); init_completion(&online_compl); - lpfc_workq_post_event(phba, &stat2, &online_compl, LPFC_EVT_ONLINE); + rc = lpfc_workq_post_event(phba, &stat2, &online_compl, + LPFC_EVT_ONLINE); + if (rc == 0) + return -ENOMEM; + wait_for_completion(&online_compl); if (stat2) lpfc_printf_log(phba, KERN_ERR, LOG_INIT, @@ -1954,7 +1996,7 @@ static DEVICE_ATTR(lpfc_soft_wwnn, S_IRUGO | S_IWUSR,\ static int lpfc_poll = 0; -module_param(lpfc_poll, int, 0); +module_param(lpfc_poll, int, S_IRUGO); MODULE_PARM_DESC(lpfc_poll, "FCP ring polling mode control:" " 0 - none," " 1 - poll with interrupts enabled" @@ -1964,21 +2006,21 @@ static DEVICE_ATTR(lpfc_poll, S_IRUGO | S_IWUSR, lpfc_poll_show, lpfc_poll_store); int lpfc_sli_mode = 0; -module_param(lpfc_sli_mode, int, 0); +module_param(lpfc_sli_mode, int, S_IRUGO); MODULE_PARM_DESC(lpfc_sli_mode, "SLI mode selector:" " 0 - auto (SLI-3 if supported)," " 2 - select SLI-2 even on SLI-3 capable HBAs," " 3 - select SLI-3"); int lpfc_enable_npiv = 1; -module_param(lpfc_enable_npiv, int, 0); +module_param(lpfc_enable_npiv, int, S_IRUGO); MODULE_PARM_DESC(lpfc_enable_npiv, "Enable NPIV functionality"); lpfc_param_show(enable_npiv); lpfc_param_init(enable_npiv, 1, 0, 1); static DEVICE_ATTR(lpfc_enable_npiv, S_IRUGO, lpfc_enable_npiv_show, NULL); int lpfc_enable_rrq; -module_param(lpfc_enable_rrq, int, 0); +module_param(lpfc_enable_rrq, int, S_IRUGO); MODULE_PARM_DESC(lpfc_enable_rrq, "Enable RRQ functionality"); lpfc_param_show(enable_rrq); lpfc_param_init(enable_rrq, 0, 0, 1); @@ -2040,7 +2082,7 @@ static DEVICE_ATTR(txcmplq_hw, S_IRUGO, lpfc_txcmplq_hw_show, NULL); int lpfc_iocb_cnt = 2; -module_param(lpfc_iocb_cnt, int, 1); +module_param(lpfc_iocb_cnt, int, S_IRUGO); MODULE_PARM_DESC(lpfc_iocb_cnt, "Number of IOCBs alloc for ELS, CT, and ABTS: 1k to 5k IOCBs"); lpfc_param_show(iocb_cnt); @@ -2192,7 +2234,7 @@ static DEVICE_ATTR(lpfc_nodev_tmo, S_IRUGO | S_IWUSR, # disappear until the timer expires. Value range is [0,255]. Default # value is 30. */ -module_param(lpfc_devloss_tmo, int, 0); +module_param(lpfc_devloss_tmo, int, S_IRUGO); MODULE_PARM_DESC(lpfc_devloss_tmo, "Seconds driver will hold I/O waiting " "for a device to come back"); @@ -2302,7 +2344,7 @@ LPFC_VPORT_ATTR_R(peer_port_login, 0, 0, 1, # Default value of this parameter is 1. 
*/ static int lpfc_restrict_login = 1; -module_param(lpfc_restrict_login, int, 0); +module_param(lpfc_restrict_login, int, S_IRUGO); MODULE_PARM_DESC(lpfc_restrict_login, "Restrict virtual ports login to remote initiators."); lpfc_vport_param_show(restrict_login); @@ -2473,7 +2515,7 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr, return -EINVAL; } static int lpfc_topology = 0; -module_param(lpfc_topology, int, 0); +module_param(lpfc_topology, int, S_IRUGO); MODULE_PARM_DESC(lpfc_topology, "Select Fibre Channel topology"); lpfc_param_show(topology) lpfc_param_init(topology, 0, 0, 6) @@ -2915,7 +2957,7 @@ lpfc_link_speed_store(struct device *dev, struct device_attribute *attr, } static int lpfc_link_speed = 0; -module_param(lpfc_link_speed, int, 0); +module_param(lpfc_link_speed, int, S_IRUGO); MODULE_PARM_DESC(lpfc_link_speed, "Select link speed"); lpfc_param_show(link_speed) @@ -3043,7 +3085,7 @@ lpfc_aer_support_store(struct device *dev, struct device_attribute *attr, } static int lpfc_aer_support = 1; -module_param(lpfc_aer_support, int, 1); +module_param(lpfc_aer_support, int, S_IRUGO); MODULE_PARM_DESC(lpfc_aer_support, "Enable PCIe device AER support"); lpfc_param_show(aer_support) @@ -3155,7 +3197,7 @@ LPFC_VPORT_ATTR_RW(use_adisc, 0, 0, 1, # The value is set in milliseconds. */ static int lpfc_max_scsicmpl_time; -module_param(lpfc_max_scsicmpl_time, int, 0); +module_param(lpfc_max_scsicmpl_time, int, S_IRUGO); MODULE_PARM_DESC(lpfc_max_scsicmpl_time, "Use command completion time to control queue depth"); lpfc_vport_param_show(max_scsicmpl_time); @@ -3331,7 +3373,7 @@ LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support"); */ unsigned int lpfc_prot_mask = SHOST_DIF_TYPE1_PROTECTION; -module_param(lpfc_prot_mask, uint, 0); +module_param(lpfc_prot_mask, uint, S_IRUGO); MODULE_PARM_DESC(lpfc_prot_mask, "host protection mask"); /* @@ -3343,9 +3385,28 @@ MODULE_PARM_DESC(lpfc_prot_mask, "host protection mask"); # */ unsigned char lpfc_prot_guard = SHOST_DIX_GUARD_IP; -module_param(lpfc_prot_guard, byte, 0); +module_param(lpfc_prot_guard, byte, S_IRUGO); MODULE_PARM_DESC(lpfc_prot_guard, "host protection guard type"); +/* + * Delay initial NPort discovery when Clean Address bit is cleared in + * FLOGI/FDISC accept and FCID/Fabric name/Fabric portname is changed. + * This parameter can have value 0 or 1. + * When this parameter is set to 0, no delay is added to the initial + * discovery. + * When this parameter is set to a non-zero value, initial Nport discovery is + * delayed by ra_tov seconds when Clean Address bit is cleared in FLOGI/FDISC + * accept and FCID/Fabric name/Fabric portname is changed. + * The driver always delays Nport discovery for subsequent FLOGI/FDISC + * completion when Clean Address bit is cleared in FLOGI/FDISC + * accept and FCID/Fabric name/Fabric portname is changed. + * Default value is 0. + */ +int lpfc_delay_discovery; +module_param(lpfc_delay_discovery, int, S_IRUGO); +MODULE_PARM_DESC(lpfc_delay_discovery, + "Delay NPort discovery when Clean Address bit is cleared. 
" + "Allowed values: 0,1."); /* * lpfc_sg_seg_cnt - Initial Maximum DMA Segment Count @@ -3437,6 +3498,7 @@ struct device_attribute *lpfc_hba_attrs[] = { &dev_attr_txcmplq_hw, &dev_attr_lpfc_fips_level, &dev_attr_lpfc_fips_rev, + &dev_attr_lpfc_dss, NULL, }; @@ -4639,6 +4701,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) lpfc_aer_support_init(phba, lpfc_aer_support); lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up); lpfc_iocb_cnt_init(phba, lpfc_iocb_cnt); + phba->cfg_enable_dss = 1; return; } diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index 17fde522c84a..3d40023f4804 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -53,9 +53,9 @@ void lpfc_unreg_vpi(struct lpfc_hba *, uint16_t, LPFC_MBOXQ_t *); void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t); void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *); void lpfc_supported_pages(struct lpfcMboxq *); -void lpfc_sli4_params(struct lpfcMboxq *); +void lpfc_pc_sli4_params(struct lpfcMboxq *); int lpfc_pc_sli4_params_get(struct lpfc_hba *, LPFC_MBOXQ_t *); - +int lpfc_get_sli4_parameters(struct lpfc_hba *, LPFC_MBOXQ_t *); struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t); void lpfc_cleanup_rcv_buffers(struct lpfc_vport *); void lpfc_rcv_seq_check_edtov(struct lpfc_vport *); @@ -167,6 +167,8 @@ int lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t); int lpfc_fdmi_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int); void lpfc_fdmi_tmo(unsigned long); void lpfc_fdmi_timeout_handler(struct lpfc_vport *); +void lpfc_delayed_disc_tmo(unsigned long); +void lpfc_delayed_disc_timeout_handler(struct lpfc_vport *); int lpfc_config_port_prep(struct lpfc_hba *); int lpfc_config_port_post(struct lpfc_hba *); @@ -341,6 +343,7 @@ extern struct fc_function_template lpfc_transport_functions; extern struct fc_function_template lpfc_vport_transport_functions; extern int lpfc_sli_mode; extern int lpfc_enable_npiv; +extern int lpfc_delay_discovery; int lpfc_vport_symbolic_node_name(struct lpfc_vport *, char *, size_t); int lpfc_vport_symbolic_port_name(struct lpfc_vport *, char *, size_t); @@ -423,6 +426,6 @@ int lpfc_send_rrq(struct lpfc_hba *, struct lpfc_node_rrq *); int lpfc_set_rrq_active(struct lpfc_hba *, struct lpfc_nodelist *, uint16_t, uint16_t, uint16_t); void lpfc_cleanup_wt_rrqs(struct lpfc_hba *); -void lpfc_cleanup_vports_rrqs(struct lpfc_vport *); +void lpfc_cleanup_vports_rrqs(struct lpfc_vport *, struct lpfc_nodelist *); struct lpfc_node_rrq *lpfc_get_active_rrq(struct lpfc_vport *, uint16_t, uint32_t); diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index c004fa9a681e..d9edfd90d7ff 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -1738,6 +1738,55 @@ fdmi_cmd_exit: return 1; } +/** + * lpfc_delayed_disc_tmo - Timeout handler for delayed discovery timer. + * @ptr - Context object of the timer. + * + * This function set the WORKER_DELAYED_DISC_TMO flag and wake up + * the worker thread. 
+ **/ +void +lpfc_delayed_disc_tmo(unsigned long ptr) +{ + struct lpfc_vport *vport = (struct lpfc_vport *)ptr; + struct lpfc_hba *phba = vport->phba; + uint32_t tmo_posted; + unsigned long iflag; + + spin_lock_irqsave(&vport->work_port_lock, iflag); + tmo_posted = vport->work_port_events & WORKER_DELAYED_DISC_TMO; + if (!tmo_posted) + vport->work_port_events |= WORKER_DELAYED_DISC_TMO; + spin_unlock_irqrestore(&vport->work_port_lock, iflag); + + if (!tmo_posted) + lpfc_worker_wake_up(phba); + return; +} + +/** + * lpfc_delayed_disc_timeout_handler - Function called by worker thread to + * handle delayed discovery. + * @vport: pointer to a host virtual N_Port data structure. + * + * This function starts nport discovery of the vport. + **/ +void +lpfc_delayed_disc_timeout_handler(struct lpfc_vport *vport) +{ + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + + spin_lock_irq(shost->host_lock); + if (!(vport->fc_flag & FC_DISC_DELAYED)) { + spin_unlock_irq(shost->host_lock); + return; + } + vport->fc_flag &= ~FC_DISC_DELAYED; + spin_unlock_irq(shost->host_lock); + + lpfc_do_scr_ns_plogi(vport->phba, vport); +} + void lpfc_fdmi_tmo(unsigned long ptr) { diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index a80d938fafc9..a753581509d6 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2007-2009 Emulex. All rights reserved. * + * Copyright (C) 2007-2011 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * * @@ -57,8 +57,8 @@ * # mount -t debugfs none /sys/kernel/debug * * The lpfc debugfs directory hierarchy is: - * lpfc/lpfcX/vportY - * where X is the lpfc hba unique_id + * /sys/kernel/debug/lpfc/fnX/vportY + * where X is the lpfc hba function unique_id * where Y is the vport VPI on that hba * * Debugging services available per vport: @@ -82,52 +82,34 @@ * the HBA. X MUST also be a power of 2. 
*/ static int lpfc_debugfs_enable = 1; -module_param(lpfc_debugfs_enable, int, 0); +module_param(lpfc_debugfs_enable, int, S_IRUGO); MODULE_PARM_DESC(lpfc_debugfs_enable, "Enable debugfs services"); /* This MUST be a power of 2 */ static int lpfc_debugfs_max_disc_trc; -module_param(lpfc_debugfs_max_disc_trc, int, 0); +module_param(lpfc_debugfs_max_disc_trc, int, S_IRUGO); MODULE_PARM_DESC(lpfc_debugfs_max_disc_trc, "Set debugfs discovery trace depth"); /* This MUST be a power of 2 */ static int lpfc_debugfs_max_slow_ring_trc; -module_param(lpfc_debugfs_max_slow_ring_trc, int, 0); +module_param(lpfc_debugfs_max_slow_ring_trc, int, S_IRUGO); MODULE_PARM_DESC(lpfc_debugfs_max_slow_ring_trc, "Set debugfs slow ring trace depth"); static int lpfc_debugfs_mask_disc_trc; -module_param(lpfc_debugfs_mask_disc_trc, int, 0); +module_param(lpfc_debugfs_mask_disc_trc, int, S_IRUGO); MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc, "Set debugfs discovery trace mask"); #include <linux/debugfs.h> -/* size of output line, for discovery_trace and slow_ring_trace */ -#define LPFC_DEBUG_TRC_ENTRY_SIZE 100 - -/* nodelist output buffer size */ -#define LPFC_NODELIST_SIZE 8192 -#define LPFC_NODELIST_ENTRY_SIZE 120 - -/* dumpHBASlim output buffer size */ -#define LPFC_DUMPHBASLIM_SIZE 4096 - -/* dumpHostSlim output buffer size */ -#define LPFC_DUMPHOSTSLIM_SIZE 4096 - -/* hbqinfo output buffer size */ -#define LPFC_HBQINFO_SIZE 8192 - -struct lpfc_debug { - char *buffer; - int len; -}; - static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0); static unsigned long lpfc_debugfs_start_time = 0L; +/* iDiag */ +static struct lpfc_idiag idiag; + /** * lpfc_debugfs_disc_trc_data - Dump discovery logging to a buffer * @vport: The vport to gather the log info from. @@ -996,8 +978,6 @@ lpfc_debugfs_dumpDataDif_write(struct file *file, const char __user *buf, return nbytes; } - - /** * lpfc_debugfs_nodelist_open - Open the nodelist debugfs file * @inode: The inode pointer that contains a vport pointer. @@ -1099,6 +1079,7 @@ lpfc_debugfs_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) { struct lpfc_debug *debug = file->private_data; + return simple_read_from_buffer(buf, nbytes, ppos, debug->buffer, debug->len); } @@ -1137,6 +1118,776 @@ lpfc_debugfs_dumpDataDif_release(struct inode *inode, struct file *file) return 0; } +/* + * iDiag debugfs file access methods + */ + +/* + * iDiag PCI config space register access methods: + * + * The PCI config space register accesses of read, write, read-modify-write + * for set bits, and read-modify-write for clear bits to SLI4 PCI functions + * are provided. In the proper SLI4 PCI function's debugfs iDiag directory, + * + * /sys/kernel/debug/lpfc/fn<#>/iDiag + * + * the access is through the debugfs entry pciCfg: + * + * 1. For PCI config space register read access, there are two read methods: + * A) read a single PCI config space register in the size of a byte + * (8 bits), a word (16 bits), or a dword (32 bits); or B) browse through + * the 4K extended PCI config space. 
+ * + * A) Read a single PCI config space register consists of two steps: + * + * Step-1: Set up PCI config space register read command, the command + * syntax is, + * + * echo 1 <where> <count> > pciCfg + * + * where, 1 is the iDiag command for PCI config space read, <where> is the + * offset from the beginning of the device's PCI config space to read from, + * and <count> is the size of PCI config space register data to read back, + * it will be 1 for reading a byte (8 bits), 2 for reading a word (16 bits + * or 2 bytes), or 4 for reading a dword (32 bits or 4 bytes). + * + * Step-2: Perform the debugfs read operation to execute the idiag command + * set up in Step-1, + * + * cat pciCfg + * + * Examples: + * To read PCI device's vendor-id and device-id from PCI config space, + * + * echo 1 0 4 > pciCfg + * cat pciCfg + * + * To read PCI device's current command from config space, + * + * echo 1 4 2 > pciCfg + * cat pciCfg + * + * B) Browse through the entire 4K extended PCI config space also consists + * of two steps: + * + * Step-1: Set up PCI config space register browsing command, the command + * syntax is, + * + * echo 1 0 4096 > pciCfg + * + * where, 1 is the iDiag command for PCI config space read, 0 must be used + * as the offset for PCI config space register browse, and 4096 must be + * used as the count for PCI config space register browse. + * + * Step-2: Repeatedly issue the debugfs read operation to browse through + * the entire PCI config space registers: + * + * cat pciCfg + * cat pciCfg + * cat pciCfg + * ... + * + * When browsing to the end of the 4K PCI config space, the browse method + * shall wrap around to start reading from the beginning again, and again... + * + * 2. For PCI config space register write access, it supports a single PCI + * config space register write in the size of a byte (8 bits), a word + * (16 bits), or a dword (32 bits). The command syntax is, + * + * echo 2 <where> <count> <value> > pciCfg + * + * where, 2 is the iDiag command for PCI config space write, <where> is + * the offset from the beginning of the device's PCI config space to write + * into, <count> is the size of data to write into the PCI config space, + * it will be 1 for writing a byte (8 bits), 2 for writing a word (16 bits + * or 2 bytes), or 4 for writing a dword (32 bits or 4 bytes), and <value> + * is the data to be written into the PCI config space register at the + * offset. + * + * Examples: + * To disable PCI device's interrupt assertion, + * + * 1) Read in device's PCI config space register command field <cmd>: + * + * echo 1 4 2 > pciCfg + * cat pciCfg + * + * 2) Set bit 10 (Interrupt Disable bit) in the <cmd>: + * + * <cmd> = <cmd> | (1 << 10) + * + * 3) Write the modified command back: + * + * echo 2 4 2 <cmd> > pciCfg + * + * 3. For PCI config space register set bits access, it supports a single PCI + * config space register set bits in the size of a byte (8 bits), a word + * (16 bits), or a dword (32 bits). The command syntax is, + * + * echo 3 <where> <count> <bitmask> > pciCfg + * + * where, 3 is the iDiag command for PCI config space set bits, <where> is + * the offset from the beginning of the device's PCI config space to set + * bits into, <count> is the size of the bitmask to set into the PCI config + * space, it will be 1 for setting a byte (8 bits), 2 for setting a word + * (16 bits or 2 bytes), or 4 for setting a dword (32 bits or 4 bytes), and + * <bitmask> is the bitmask, indicating the bits to be set into the PCI + * config space register at the offset. 
The logic performed to the content + * of the PCI config space register, regval, is, + * + * regval |= <bitmask> + * + * 4. For PCI config space register clear bits access, it supports a single + * PCI config space register clear bits in the size of a byte (8 bits), + * a word (16 bits), or a dword (32 bits). The command syntax is, + * + * echo 4 <where> <count> <bitmask> > pciCfg + * + * where, 4 is the iDiag command for PCI config space clear bits, <where> + * is the offset from the beginning of the device's PCI config space to + * clear bits from, <count> is the size of the bitmask to apply, it will + * be 1 for clearing a byte (8 bits), 2 for clearing a word (16 bits or + * 2 bytes), or 4 for clearing a dword (32 bits or 4 + * bytes), and <bitmask> is the bitmask, indicating the bits to be cleared + * from the PCI config space register at the offset. The logic performed + * to the content of the PCI config space register, regval, is, + * + * regval &= ~<bitmask> + * + * Note, for all single register read, write, set bits, or clear bits access, + * the offset (<where>) must be aligned with the size of the data: + * + * For data size of byte (8 bits), the offset must be aligned to the byte + * boundary; for data size of word (16 bits), the offset must be aligned + * to the word boundary; while for data size of dword (32 bits), the offset + * must be aligned to the dword boundary. Otherwise, the interface will + * return the error: + * + * "-bash: echo: write error: Invalid argument". + * + * For example: + * + * echo 1 2 4 > pciCfg + * -bash: echo: write error: Invalid argument + * + * Note also, all of the numbers in the command fields for all read, write, + * set bits, and clear bits PCI config space register command fields can be + * either decimal or hex. + * + * For example, + * echo 1 0 4096 > pciCfg + * + * will be the same as + * echo 1 0 0x1000 > pciCfg + * + * And, + * echo 2 155 1 10 > pciCfg + * + * will be + * echo 2 0x9b 1 0xa > pciCfg + */ + +/** + * lpfc_idiag_cmd_get - Get and parse idiag debugfs commands from user space + * @buf: The pointer to the user space buffer. + * @nbytes: The number of bytes in the user space buffer. + * @idiag_cmd: pointer to the idiag command struct. + * + * This routine reads data from the debugfs user space buffer and parses the + * buffer for getting the idiag command and arguments. The white space in + * between the data fields is used as the parsing separator. + * + * This routine returns 0 when successful; in error conditions it returns + * the proper error code back to user space. 
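+ * + * For example, with the pciCfg write syntax documented above, the user + * input "2 0x9b 1 0xa" parses to idiag_cmd->opcode = 2 and + * idiag_cmd->data[0..2] = {0x9b, 0x1, 0xa}; data words not supplied are + * left zero by the initial memset. 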
+ */ +static int lpfc_idiag_cmd_get(const char __user *buf, size_t nbytes, + struct lpfc_idiag_cmd *idiag_cmd) +{ + char mybuf[64]; + char *pbuf, *step_str; + int bsize, i; + + /* Protect copy from user */ + if (!access_ok(VERIFY_READ, buf, nbytes)) + return -EFAULT; + + memset(mybuf, 0, sizeof(mybuf)); + memset(idiag_cmd, 0, sizeof(*idiag_cmd)); + bsize = min(nbytes, (sizeof(mybuf)-1)); + + if (copy_from_user(mybuf, buf, bsize)) + return -EFAULT; + pbuf = &mybuf[0]; + step_str = strsep(&pbuf, "\t "); + + /* The opcode must be present */ + if (!step_str) + return -EINVAL; + + idiag_cmd->opcode = simple_strtol(step_str, NULL, 0); + if (idiag_cmd->opcode == 0) + return -EINVAL; + + for (i = 0; i < LPFC_IDIAG_CMD_DATA_SIZE; i++) { + step_str = strsep(&pbuf, "\t "); + if (!step_str) + return 0; + idiag_cmd->data[i] = simple_strtol(step_str, NULL, 0); + } + return 0; +} + +/** + * lpfc_idiag_open - idiag open debugfs + * @inode: The inode pointer that contains a pointer to phba. + * @file: The file pointer to attach the file operation. + * + * Description: + * This routine is the entry point for the debugfs open file operation. It + * gets the reference to phba from the i_private field in @inode, allocates + * the file operation context, and stores a pointer to it in the private_data + * field in @file; the data buffer itself is allocated later by the read + * method according to the idiag user command setup. + * + * Returns: + * This function returns zero if successful. On error it will return a + * negative error value. + **/ +static int +lpfc_idiag_open(struct inode *inode, struct file *file) +{ + struct lpfc_debug *debug; + + debug = kmalloc(sizeof(*debug), GFP_KERNEL); + if (!debug) + return -ENOMEM; + + debug->i_private = inode->i_private; + debug->buffer = NULL; + file->private_data = debug; + + return 0; +} + +/** + * lpfc_idiag_release - Release idiag access file operation + * @inode: The inode pointer that contains a vport pointer. (unused) + * @file: The file pointer that contains the buffer to release. + * + * Description: + * This routine is the generic release routine for the idiag access file + * operation, it frees the buffer that was allocated when the debugfs file + * was opened. + * + * Returns: + * This function returns zero. + **/ +static int +lpfc_idiag_release(struct inode *inode, struct file *file) +{ + struct lpfc_debug *debug = file->private_data; + + /* Free the buffers to the file operation */ + kfree(debug->buffer); + kfree(debug); + + return 0; +} + +/** + * lpfc_idiag_cmd_release - Release idiag cmd access file operation + * @inode: The inode pointer that contains a vport pointer. (unused) + * @file: The file pointer that contains the buffer to release. + * + * Description: + * This routine frees the buffer that was allocated when the debugfs file + * was opened. It also resets the fields in the idiag command struct in the + * case the command is not continuous browsing of the data structure. + * + * Returns: + * This function returns zero. 
+ **/ +static int +lpfc_idiag_cmd_release(struct inode *inode, struct file *file) +{ + struct lpfc_debug *debug = file->private_data; + + /* Read PCI config register, if not read all, clear command fields */ + if ((debug->op == LPFC_IDIAG_OP_RD) && + (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_RD)) + if ((idiag.cmd.data[1] == sizeof(uint8_t)) || + (idiag.cmd.data[1] == sizeof(uint16_t)) || + (idiag.cmd.data[1] == sizeof(uint32_t))) + memset(&idiag, 0, sizeof(idiag)); + + /* Write PCI config register, clear command fields */ + if ((debug->op == LPFC_IDIAG_OP_WR) && + (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_WR)) + memset(&idiag, 0, sizeof(idiag)); + + /* Free the buffers to the file operation */ + kfree(debug->buffer); + kfree(debug); + + return 0; +} + +/** + * lpfc_idiag_pcicfg_read - idiag debugfs read pcicfg + * @file: The file pointer to read from. + * @buf: The buffer to copy the data to. + * @nbytes: The number of bytes to read. + * @ppos: The position in the file to start reading from. + * + * Description: + * This routine reads data from the @phba pci config space according to the + * idiag command, and copies to user @buf. Depending on the PCI config space + * read command setup, it does either a single register read of a byte + * (8 bits), a word (16 bits), or a dword (32 bits), or browses through all + * registers of the 4K extended PCI config space. + * + * Returns: + * This function returns the amount of data that was read (this could be less + * than @nbytes if the end of the file was reached) or a negative error value. + **/ +static ssize_t +lpfc_idiag_pcicfg_read(struct file *file, char __user *buf, size_t nbytes, + loff_t *ppos) +{ + struct lpfc_debug *debug = file->private_data; + struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; + int offset_label, offset, len = 0, index = LPFC_PCI_CFG_RD_SIZE; + int where, count; + char *pbuffer; + struct pci_dev *pdev; + uint32_t u32val; + uint16_t u16val; + uint8_t u8val; + + pdev = phba->pcidev; + if (!pdev) + return 0; + + /* This is a user read operation */ + debug->op = LPFC_IDIAG_OP_RD; + + if (!debug->buffer) + debug->buffer = kmalloc(LPFC_PCI_CFG_SIZE, GFP_KERNEL); + if (!debug->buffer) + return 0; + pbuffer = debug->buffer; + + if (*ppos) + return 0; + + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_RD) { + where = idiag.cmd.data[0]; + count = idiag.cmd.data[1]; + } else + return 0; + + /* Read single PCI config space register */ + switch (count) { + case SIZE_U8: /* byte (8 bits) */ + pci_read_config_byte(pdev, where, &u8val); + len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, + "%03x: %02x\n", where, u8val); + break; + case SIZE_U16: /* word (16 bits) */ + pci_read_config_word(pdev, where, &u16val); + len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, + "%03x: %04x\n", where, u16val); + break; + case SIZE_U32: /* double word (32 bits) */ + pci_read_config_dword(pdev, where, &u32val); + len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, + "%03x: %08x\n", where, u32val); + break; + case LPFC_PCI_CFG_SIZE: /* browse all */ + goto pcicfg_browse; + break; + default: + /* illegal count */ + len = 0; + break; + } + return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len); + +pcicfg_browse: + + /* Browse all PCI config space registers */ + offset_label = idiag.offset.last_rd; + offset = offset_label; + + /* Read PCI config space */ + len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, + "%03x: ", offset_label); + while (index > 0) { + pci_read_config_dword(pdev, offset, &u32val); + len += 
snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, + "%08x ", u32val); + offset += sizeof(uint32_t); + index -= sizeof(uint32_t); + if (!index) + len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, + "\n"); + else if (!(index % (8 * sizeof(uint32_t)))) { + offset_label += (8 * sizeof(uint32_t)); + len += snprintf(pbuffer+len, LPFC_PCI_CFG_SIZE-len, + "\n%03x: ", offset_label); + } + } + + /* Set up the offset for next portion of pci cfg read */ + idiag.offset.last_rd += LPFC_PCI_CFG_RD_SIZE; + if (idiag.offset.last_rd >= LPFC_PCI_CFG_SIZE) + idiag.offset.last_rd = 0; + + return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len); +} + +/** + * lpfc_idiag_pcicfg_write - Syntax check and set up idiag pcicfg commands + * @file: The file pointer to read from. + * @buf: The buffer to copy the user data from. + * @nbytes: The number of bytes to get. + * @ppos: The position in the file to start reading from. + * + * This routine gets the debugfs idiag command struct from user space and + * then performs the syntax check for PCI config space read or write command + * accordingly. In the case of PCI config space read command, it sets up + * the command in the idiag command struct for the debugfs read operation. + * In the case of PCI config space write operation, it executes the write + * operation into the PCI config space accordingly. + * + * It returns the @nbytes passed in from debugfs user space when successful. + * In case of error conditions, it returns proper error code back to the user + * space. + */ +static ssize_t +lpfc_idiag_pcicfg_write(struct file *file, const char __user *buf, + size_t nbytes, loff_t *ppos) +{ + struct lpfc_debug *debug = file->private_data; + struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; + uint32_t where, value, count; + uint32_t u32val; + uint16_t u16val; + uint8_t u8val; + struct pci_dev *pdev; + int rc; + + pdev = phba->pcidev; + if (!pdev) + return -EFAULT; + + /* This is a user write operation */ + debug->op = LPFC_IDIAG_OP_WR; + + rc = lpfc_idiag_cmd_get(buf, nbytes, &idiag.cmd); + if (rc) + return rc; + + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_RD) { + /* Read command from PCI config space, set up command fields */ + where = idiag.cmd.data[0]; + count = idiag.cmd.data[1]; + if (count == LPFC_PCI_CFG_SIZE) { + if (where != 0) + goto error_out; + } else if ((count != sizeof(uint8_t)) && + (count != sizeof(uint16_t)) && + (count != sizeof(uint32_t))) + goto error_out; + if (count == sizeof(uint8_t)) { + if (where > LPFC_PCI_CFG_SIZE - sizeof(uint8_t)) + goto error_out; + if (where % sizeof(uint8_t)) + goto error_out; + } + if (count == sizeof(uint16_t)) { + if (where > LPFC_PCI_CFG_SIZE - sizeof(uint16_t)) + goto error_out; + if (where % sizeof(uint16_t)) + goto error_out; + } + if (count == sizeof(uint32_t)) { + if (where > LPFC_PCI_CFG_SIZE - sizeof(uint32_t)) + goto error_out; + if (where % sizeof(uint32_t)) + goto error_out; + } + } else if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_WR || + idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_ST || + idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_CL) { + /* Write command to PCI config space, read-modify-write */ + where = idiag.cmd.data[0]; + count = idiag.cmd.data[1]; + value = idiag.cmd.data[2]; + /* Sanity checks */ + if ((count != sizeof(uint8_t)) && + (count != sizeof(uint16_t)) && + (count != sizeof(uint32_t))) + goto error_out; + if (count == sizeof(uint8_t)) { + if (where > LPFC_PCI_CFG_SIZE - sizeof(uint8_t)) + goto error_out; + if (where % sizeof(uint8_t)) + goto error_out; + if 
(idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_WR) + pci_write_config_byte(pdev, where, + (uint8_t)value); + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_ST) { + rc = pci_read_config_byte(pdev, where, &u8val); + if (!rc) { + u8val |= (uint8_t)value; + pci_write_config_byte(pdev, where, + u8val); + } + } + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_CL) { + rc = pci_read_config_byte(pdev, where, &u8val); + if (!rc) { + u8val &= (uint8_t)(~value); + pci_write_config_byte(pdev, where, + u8val); + } + } + } + if (count == sizeof(uint16_t)) { + if (where > LPFC_PCI_CFG_SIZE - sizeof(uint16_t)) + goto error_out; + if (where % sizeof(uint16_t)) + goto error_out; + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_WR) + pci_write_config_word(pdev, where, + (uint16_t)value); + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_ST) { + rc = pci_read_config_word(pdev, where, &u16val); + if (!rc) { + u16val |= (uint16_t)value; + pci_write_config_word(pdev, where, + u16val); + } + } + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_CL) { + rc = pci_read_config_word(pdev, where, &u16val); + if (!rc) { + u16val &= (uint16_t)(~value); + pci_write_config_word(pdev, where, + u16val); + } + } + } + if (count == sizeof(uint32_t)) { + if (where > LPFC_PCI_CFG_SIZE - sizeof(uint32_t)) + goto error_out; + if (where % sizeof(uint32_t)) + goto error_out; + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_WR) + pci_write_config_dword(pdev, where, value); + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_ST) { + rc = pci_read_config_dword(pdev, where, + &u32val); + if (!rc) { + u32val |= value; + pci_write_config_dword(pdev, where, + u32val); + } + } + if (idiag.cmd.opcode == LPFC_IDIAG_CMD_PCICFG_CL) { + rc = pci_read_config_dword(pdev, where, + &u32val); + if (!rc) { + u32val &= ~value; + pci_write_config_dword(pdev, where, + u32val); + } + } + } + } else + /* All other opcodes are illegal for now */ + goto error_out; + + return nbytes; +error_out: + memset(&idiag, 0, sizeof(idiag)); + return -EINVAL; +} + +/** + * lpfc_idiag_queinfo_read - idiag debugfs read queue information + * @file: The file pointer to read from. + * @buf: The buffer to copy the data to. + * @nbytes: The number of bytes to read. + * @ppos: The position in the file to start reading from. + * + * Description: + * This routine reads data from the @phba SLI4 PCI function queue information, + * and copies to user @buf. + * + * Returns: + * This function returns the amount of data that was read (this could be less + * than @nbytes if the end of the file was reached) or a negative error value. 
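+ * + * A fragment of the output, with illustrative values, looks like: + * + * Slow-path EQ information: + *	ID [00], EQE-COUNT [0256], HOST-INDEX [0000], PORT-INDEX [0000] 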
+ **/ +static ssize_t +lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes, + loff_t *ppos) +{ + struct lpfc_debug *debug = file->private_data; + struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private; + int len = 0, fcp_qidx; + char *pbuffer; + + if (!debug->buffer) + debug->buffer = kmalloc(LPFC_QUE_INFO_GET_BUF_SIZE, GFP_KERNEL); + if (!debug->buffer) + return 0; + pbuffer = debug->buffer; + + if (*ppos) + return 0; + + /* Get slow-path event queue information */ + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "Slow-path EQ information:\n"); + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\tID [%02d], EQE-COUNT [%04d], " + "HOST-INDEX [%04x], PORT-INDEX [%04x]\n\n", + phba->sli4_hba.sp_eq->queue_id, + phba->sli4_hba.sp_eq->entry_count, + phba->sli4_hba.sp_eq->host_index, + phba->sli4_hba.sp_eq->hba_index); + + /* Get fast-path event queue information */ + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "Fast-path EQ information:\n"); + for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) { + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\tID [%02d], EQE-COUNT [%04d], " + "HOST-INDEX [%04x], PORT-INDEX [%04x]\n", + phba->sli4_hba.fp_eq[fcp_qidx]->queue_id, + phba->sli4_hba.fp_eq[fcp_qidx]->entry_count, + phba->sli4_hba.fp_eq[fcp_qidx]->host_index, + phba->sli4_hba.fp_eq[fcp_qidx]->hba_index); + } + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n"); + + /* Get mailbox complete queue information */ + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "Mailbox CQ information:\n"); + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\t\tAssociated EQ-ID [%02d]:\n", + phba->sli4_hba.mbx_cq->assoc_qid); + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\tID [%02d], CQE-COUNT [%04d], " + "HOST-INDEX [%04x], PORT-INDEX [%04x]\n\n", + phba->sli4_hba.mbx_cq->queue_id, + phba->sli4_hba.mbx_cq->entry_count, + phba->sli4_hba.mbx_cq->host_index, + phba->sli4_hba.mbx_cq->hba_index); + + /* Get slow-path complete queue information */ + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "Slow-path CQ information:\n"); + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\t\tAssociated EQ-ID [%02d]:\n", + phba->sli4_hba.els_cq->assoc_qid); + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\tID [%02d], CQE-COUNT [%04d], " + "HOST-INDEX [%04x], PORT-INDEX [%04x]\n\n", + phba->sli4_hba.els_cq->queue_id, + phba->sli4_hba.els_cq->entry_count, + phba->sli4_hba.els_cq->host_index, + phba->sli4_hba.els_cq->hba_index); + + /* Get fast-path complete queue information */ + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "Fast-path CQ information:\n"); + for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) { + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\t\tAssociated EQ-ID [%02d]:\n", + phba->sli4_hba.fcp_cq[fcp_qidx]->assoc_qid); + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\tID [%02d], CQE-COUNT [%04d], " + "HOST-INDEX [%04x], PORT-INDEX [%04x]\n", + phba->sli4_hba.fcp_cq[fcp_qidx]->queue_id, + phba->sli4_hba.fcp_cq[fcp_qidx]->entry_count, + phba->sli4_hba.fcp_cq[fcp_qidx]->host_index, + phba->sli4_hba.fcp_cq[fcp_qidx]->hba_index); + } + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n"); + + /* Get mailbox queue information */ + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "Mailbox MQ information:\n"); + 
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\t\tAssociated CQ-ID [%02d]:\n", + phba->sli4_hba.mbx_wq->assoc_qid); + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\tID [%02d], MQE-COUNT [%04d], " + "HOST-INDEX [%04x], PORT-INDEX [%04x]\n\n", + phba->sli4_hba.mbx_wq->queue_id, + phba->sli4_hba.mbx_wq->entry_count, + phba->sli4_hba.mbx_wq->host_index, + phba->sli4_hba.mbx_wq->hba_index); + + /* Get slow-path work queue information */ + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "Slow-path WQ information:\n"); + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\t\tAssociated CQ-ID [%02d]:\n", + phba->sli4_hba.els_wq->assoc_qid); + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\tID [%02d], WQE-COUNT [%04d], " + "HOST-INDEX [%04x], PORT-INDEX [%04x]\n\n", + phba->sli4_hba.els_wq->queue_id, + phba->sli4_hba.els_wq->entry_count, + phba->sli4_hba.els_wq->host_index, + phba->sli4_hba.els_wq->hba_index); + + /* Get fast-path work queue information */ + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "Fast-path WQ information:\n"); + for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++) { + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\t\tAssociated CQ-ID [%02d]:\n", + phba->sli4_hba.fcp_wq[fcp_qidx]->assoc_qid); + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\tID [%02d], WQE-COUNT [%04d], " + "HOST-INDEX [%04x], PORT-INDEX [%04x]\n", + phba->sli4_hba.fcp_wq[fcp_qidx]->queue_id, + phba->sli4_hba.fcp_wq[fcp_qidx]->entry_count, + phba->sli4_hba.fcp_wq[fcp_qidx]->host_index, + phba->sli4_hba.fcp_wq[fcp_qidx]->hba_index); + } + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n"); + + /* Get receive queue information */ + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "Slow-path RQ information:\n"); + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\t\tAssociated CQ-ID [%02d]:\n", + phba->sli4_hba.hdr_rq->assoc_qid); + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\tID [%02d], RHQE-COUNT [%04d], " + "HOST-INDEX [%04x], PORT-INDEX [%04x]\n", + phba->sli4_hba.hdr_rq->queue_id, + phba->sli4_hba.hdr_rq->entry_count, + phba->sli4_hba.hdr_rq->host_index, + phba->sli4_hba.hdr_rq->hba_index); + len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, + "\tID [%02d], RDQE-COUNT [%04d], " + "HOST-INDEX [%04x], PORT-INDEX [%04x]\n", + phba->sli4_hba.dat_rq->queue_id, + phba->sli4_hba.dat_rq->entry_count, + phba->sli4_hba.dat_rq->host_index, + phba->sli4_hba.dat_rq->hba_index); + + return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len); +} + #undef lpfc_debugfs_op_disc_trc static const struct file_operations lpfc_debugfs_op_disc_trc = { .owner = THIS_MODULE, @@ -1213,6 +1964,28 @@ static const struct file_operations lpfc_debugfs_op_slow_ring_trc = { static struct dentry *lpfc_debugfs_root = NULL; static atomic_t lpfc_debugfs_hba_count; + +/* + * File operations for the iDiag debugfs + */ +#undef lpfc_idiag_op_pciCfg +static const struct file_operations lpfc_idiag_op_pciCfg = { + .owner = THIS_MODULE, + .open = lpfc_idiag_open, + .llseek = lpfc_debugfs_lseek, + .read = lpfc_idiag_pcicfg_read, + .write = lpfc_idiag_pcicfg_write, + .release = lpfc_idiag_cmd_release, +}; + +#undef lpfc_idiag_op_queInfo +static const struct file_operations lpfc_idiag_op_queInfo = { + .owner = THIS_MODULE, + .open = lpfc_idiag_open, + .read = lpfc_idiag_queinfo_read, + .release = lpfc_idiag_release, +}; + 
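For illustration of the pciCfg command syntax documented above (not part of the patch itself), a minimal user-space sketch that disables interrupt assertion through the iDiag set-bits opcode might look as follows; the fn0 path component is an assumption that depends on the HBA's brd_no, and debugfs must already be mounted:

	/* Hypothetical user-space sketch: set bit 10 (Interrupt Disable) in
	 * the PCI command register through the iDiag pciCfg debugfs file,
	 * following the "echo 3 <where> <count> <bitmask>" set-bits syntax
	 * documented above. The fn0 directory name is an assumption for the
	 * first HBA function.
	 */
	#include <stdio.h>

	int main(void)
	{
		const char *path = "/sys/kernel/debug/lpfc/fn0/iDiag/pciCfg";
		FILE *f = fopen(path, "w");

		if (!f) {
			perror("fopen");
			return 1;
		}
		/* opcode 3 = set bits, offset 4 = PCI command register,
		 * count 2 = word access, bitmask 0x400 = bit 10 */
		fprintf(f, "3 4 2 0x400\n");
		fclose(f);
		return 0;
	}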
#endif /** @@ -1249,8 +2022,8 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) if (!lpfc_debugfs_start_time) lpfc_debugfs_start_time = jiffies; - /* Setup lpfcX directory for specific HBA */ - snprintf(name, sizeof(name), "lpfc%d", phba->brd_no); + /* Setup funcX directory for specific HBA PCI function */ + snprintf(name, sizeof(name), "fn%d", phba->brd_no); if (!phba->hba_debugfs_root) { phba->hba_debugfs_root = debugfs_create_dir(name, lpfc_debugfs_root); @@ -1275,28 +2048,38 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) } /* Setup dumpHBASlim */ - snprintf(name, sizeof(name), "dumpHBASlim"); - phba->debug_dumpHBASlim = - debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, - phba->hba_debugfs_root, - phba, &lpfc_debugfs_op_dumpHBASlim); - if (!phba->debug_dumpHBASlim) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, - "0413 Cannot create debugfs dumpHBASlim\n"); - goto debug_failed; - } + if (phba->sli_rev < LPFC_SLI_REV4) { + snprintf(name, sizeof(name), "dumpHBASlim"); + phba->debug_dumpHBASlim = + debugfs_create_file(name, + S_IFREG|S_IRUGO|S_IWUSR, + phba->hba_debugfs_root, + phba, &lpfc_debugfs_op_dumpHBASlim); + if (!phba->debug_dumpHBASlim) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "0413 Cannot create debugfs " + "dumpHBASlim\n"); + goto debug_failed; + } + } else + phba->debug_dumpHBASlim = NULL; /* Setup dumpHostSlim */ + if (phba->sli_rev < LPFC_SLI_REV4) { + snprintf(name, sizeof(name), "dumpHostSlim"); + phba->debug_dumpHostSlim = + debugfs_create_file(name, + S_IFREG|S_IRUGO|S_IWUSR, + phba->hba_debugfs_root, + phba, &lpfc_debugfs_op_dumpHostSlim); + if (!phba->debug_dumpHostSlim) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "0414 Cannot create debugfs " + "dumpHostSlim\n"); + goto debug_failed; + } + } else + phba->debug_dumpHostSlim = NULL; - snprintf(name, sizeof(name), "dumpHostSlim"); - phba->debug_dumpHostSlim = - debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, - phba->hba_debugfs_root, - phba, &lpfc_debugfs_op_dumpHostSlim); - if (!phba->debug_dumpHostSlim) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, - "0414 Cannot create debugfs dumpHostSlim\n"); - goto debug_failed; - } /* Setup dumpData */ snprintf(name, sizeof(name), "dumpData"); @@ -1322,8 +2105,6 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) goto debug_failed; } - - /* Setup slow ring trace */ if (lpfc_debugfs_max_slow_ring_trc) { num = lpfc_debugfs_max_slow_ring_trc - 1; @@ -1342,7 +2123,6 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) } } - snprintf(name, sizeof(name), "slow_ring_trace"); phba->debug_slow_ring_trc = debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, @@ -1434,6 +2214,53 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) "0409 Cant create debugfs nodelist\n"); goto debug_failed; } + + /* + * iDiag debugfs root entry points for SLI4 device only + */ + if (phba->sli_rev < LPFC_SLI_REV4) + goto debug_failed; + + snprintf(name, sizeof(name), "iDiag"); + if (!phba->idiag_root) { + phba->idiag_root = + debugfs_create_dir(name, phba->hba_debugfs_root); + if (!phba->idiag_root) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "2922 Can't create idiag debugfs\n"); + goto debug_failed; + } + /* Initialize iDiag data structure */ + memset(&idiag, 0, sizeof(idiag)); + } + + /* iDiag read PCI config space */ + snprintf(name, sizeof(name), "pciCfg"); + if (!phba->idiag_pci_cfg) { + phba->idiag_pci_cfg = + debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, + phba->idiag_root, phba, &lpfc_idiag_op_pciCfg); + if (!phba->idiag_pci_cfg) { + lpfc_printf_vlog(vport, 
KERN_ERR, LOG_INIT, + "2923 Can't create idiag debugfs\n"); + goto debug_failed; + } + idiag.offset.last_rd = 0; + } + + /* iDiag get PCI function queue information */ + snprintf(name, sizeof(name), "queInfo"); + if (!phba->idiag_que_info) { + phba->idiag_que_info = + debugfs_create_file(name, S_IFREG|S_IRUGO, + phba->idiag_root, phba, &lpfc_idiag_op_queInfo); + if (!phba->idiag_que_info) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "2924 Can't create idiag debugfs\n"); + goto debug_failed; + } + } + debug_failed: return; #endif @@ -1508,8 +2335,31 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport) phba->debug_slow_ring_trc = NULL; } + /* + * iDiag release + */ + if (phba->sli_rev == LPFC_SLI_REV4) { + if (phba->idiag_que_info) { + /* iDiag queInfo */ + debugfs_remove(phba->idiag_que_info); + phba->idiag_que_info = NULL; + } + if (phba->idiag_pci_cfg) { + /* iDiag pciCfg */ + debugfs_remove(phba->idiag_pci_cfg); + phba->idiag_pci_cfg = NULL; + } + + /* Finally remove the iDiag debugfs root */ + if (phba->idiag_root) { + /* iDiag root */ + debugfs_remove(phba->idiag_root); + phba->idiag_root = NULL; + } + } + if (phba->hba_debugfs_root) { - debugfs_remove(phba->hba_debugfs_root); /* lpfcX */ + debugfs_remove(phba->hba_debugfs_root); /* fnX */ phba->hba_debugfs_root = NULL; atomic_dec(&lpfc_debugfs_hba_count); } diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h index 03c7313a1012..91b9a9427cda 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.h +++ b/drivers/scsi/lpfc/lpfc_debugfs.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2007 Emulex. All rights reserved. * + * Copyright (C) 2007-2011 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* * www.emulex.com * * * @@ -22,6 +22,44 @@ #define _H_LPFC_DEBUG_FS #ifdef CONFIG_SCSI_LPFC_DEBUG_FS + +/* size of output line, for discovery_trace and slow_ring_trace */ +#define LPFC_DEBUG_TRC_ENTRY_SIZE 100 + +/* nodelist output buffer size */ +#define LPFC_NODELIST_SIZE 8192 +#define LPFC_NODELIST_ENTRY_SIZE 120 + +/* dumpHBASlim output buffer size */ +#define LPFC_DUMPHBASLIM_SIZE 4096 + +/* dumpHostSlim output buffer size */ +#define LPFC_DUMPHOSTSLIM_SIZE 4096 + +/* hbqinfo output buffer size */ +#define LPFC_HBQINFO_SIZE 8192 + +/* rdPciConf output buffer size */ +#define LPFC_PCI_CFG_SIZE 4096 +#define LPFC_PCI_CFG_RD_BUF_SIZE (LPFC_PCI_CFG_SIZE/2) +#define LPFC_PCI_CFG_RD_SIZE (LPFC_PCI_CFG_SIZE/4) + +/* queue info output buffer size */ +#define LPFC_QUE_INFO_GET_BUF_SIZE 2048 + +#define SIZE_U8 sizeof(uint8_t) +#define SIZE_U16 sizeof(uint16_t) +#define SIZE_U32 sizeof(uint32_t) + +struct lpfc_debug { + char *i_private; + char op; +#define LPFC_IDIAG_OP_RD 1 +#define LPFC_IDIAG_OP_WR 2 + char *buffer; + int len; +}; + struct lpfc_debugfs_trc { char *fmt; uint32_t data1; @@ -30,6 +68,26 @@ struct lpfc_debugfs_trc { uint32_t seq_cnt; unsigned long jif; }; + +struct lpfc_idiag_offset { + uint32_t last_rd; +}; + +#define LPFC_IDIAG_CMD_DATA_SIZE 4 +struct lpfc_idiag_cmd { + uint32_t opcode; +#define LPFC_IDIAG_CMD_PCICFG_RD 0x00000001 +#define LPFC_IDIAG_CMD_PCICFG_WR 0x00000002 +#define LPFC_IDIAG_CMD_PCICFG_ST 0x00000003 +#define LPFC_IDIAG_CMD_PCICFG_CL 0x00000004 + uint32_t data[LPFC_IDIAG_CMD_DATA_SIZE]; +}; + +struct lpfc_idiag { + uint32_t active; + struct lpfc_idiag_cmd cmd; + struct lpfc_idiag_offset offset; +}; #endif /* Mask for discovery_trace */ diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index c62d567cc845..8e28edf9801e 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2009 Emulex. All rights reserved. * + * Copyright (C) 2004-2011 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -485,6 +485,59 @@ fail: } /** + * lpfc_check_clean_addr_bit - Check whether assigned FCID is clean. + * @vport: pointer to a host virtual N_Port data structure. + * @sp: pointer to service parameter data structure. + * + * This routine is called from FLOGI/FDISC completion handler functions. + * lpfc_check_clean_addr_bit returns 1 when the FCID/Fabric portname/Fabric + * nodename is changed in the completion service parameters; otherwise it + * returns 0. This function also sets a flag in the vport data structure to + * delay N_Port discovery after the FLOGI/FDISC completion if the Clean + * Address bit in the FLOGI/FDISC response is cleared and the FCID/Fabric + * portname/Fabric nodename is changed in the completion service parameters. + * + * Return code + * 0 - FCID and Fabric Nodename and Fabric portname are not changed. + * 1 - FCID or Fabric Nodename or Fabric portname is changed. 
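+ * + * For example, if a fabric reconfiguration assigns the port a new FCID + * while the FLOGI LS_ACC carries a cleared clean address bit, this + * routine returns 1 and, when vport->fc_prevDID is non-zero or the + * lpfc_delay_discovery module parameter is set, marks the vport + * FC_DISC_DELAYED so that NPort discovery is deferred. 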
+ * + **/ +static uint8_t +lpfc_check_clean_addr_bit(struct lpfc_vport *vport, + struct serv_parm *sp) +{ + uint8_t fabric_param_changed = 0; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + + if ((vport->fc_prevDID != vport->fc_myDID) || + memcmp(&vport->fabric_portname, &sp->portName, + sizeof(struct lpfc_name)) || + memcmp(&vport->fabric_nodename, &sp->nodeName, + sizeof(struct lpfc_name))) + fabric_param_changed = 1; + + /* + * Word 1 Bit 31 in common service parameter is overloaded. + * Word 1 Bit 31 in FLOGI request is multiple NPort request + * Word 1 Bit 31 in FLOGI response is clean address bit + * + * If fabric parameter is changed and clean address bit is + * cleared, delay nport discovery if + * - vport->fc_prevDID != 0 (not initial discovery) OR + * - lpfc_delay_discovery module parameter is set. + */ + if (fabric_param_changed && !sp->cmn.clean_address_bit && + (vport->fc_prevDID || lpfc_delay_discovery)) { + spin_lock_irq(shost->host_lock); + vport->fc_flag |= FC_DISC_DELAYED; + spin_unlock_irq(shost->host_lock); + } + + return fabric_param_changed; +} + + +/** * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port * @vport: pointer to a host virtual N_Port data structure. * @ndlp: pointer to a node-list data structure. @@ -512,6 +565,7 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, struct lpfc_hba *phba = vport->phba; struct lpfc_nodelist *np; struct lpfc_nodelist *next_np; + uint8_t fabric_param_changed; spin_lock_irq(shost->host_lock); vport->fc_flag |= FC_FABRIC; @@ -544,6 +598,12 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->nlp_class_sup |= FC_COS_CLASS4; ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb; + + fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp); + memcpy(&vport->fabric_portname, &sp->portName, + sizeof(struct lpfc_name)); + memcpy(&vport->fabric_nodename, &sp->nodeName, + sizeof(struct lpfc_name)); memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm)); if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { @@ -565,7 +625,7 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, } } - if ((vport->fc_prevDID != vport->fc_myDID) && + if (fabric_param_changed && !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { /* If our NportID changed, we need to ensure all @@ -2203,6 +2263,7 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct Scsi_Host *shost = lpfc_shost_from_vport(vport); IOCB_t *irsp; struct lpfc_sli *psli; + struct lpfcMboxq *mbox; psli = &phba->sli; /* we pass cmdiocb to state machine which needs rspiocb as well */ @@ -2260,6 +2321,21 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, NLP_EVT_CMPL_LOGO); out: lpfc_els_free_iocb(phba, cmdiocb); + /* If we are in pt2pt mode, we could rcv new S_ID on PLOGI */ + if ((vport->fc_flag & FC_PT2PT) && + !(vport->fc_flag & FC_PT2PT_PLOGI)) { + phba->pport->fc_myDID = 0; + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (mbox) { + lpfc_config_link(phba, mbox); + mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + mbox->vport = vport; + if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == + MBX_NOT_FINISHED) { + mempool_free(mbox, phba->mbox_mem_pool); + } + } + } return; } @@ -2745,7 +2821,8 @@ lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp) } break; case ELS_CMD_FDISC: - lpfc_issue_els_fdisc(vport, ndlp, retry); + if (!(vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI)) + 
lpfc_issue_els_fdisc(vport, ndlp, retry); break; } return; @@ -2815,9 +2892,17 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, switch (irsp->ulpStatus) { case IOSTAT_FCP_RSP_ERROR: + break; case IOSTAT_REMOTE_STOP: + if (phba->sli_rev == LPFC_SLI_REV4) { + /* This IO was aborted by the target, we don't + * know the rxid and, because we did not send the + * ABTS, we cannot generate an RRQ. + */ + lpfc_set_rrq_active(phba, ndlp, + cmdiocb->sli4_xritag, 0, 0); + } break; - case IOSTAT_LOCAL_REJECT: switch ((irsp->un.ulpWord[4] & 0xff)) { case IOERR_LOOP_OPEN_FAILURE: @@ -4013,28 +4098,34 @@ lpfc_els_clear_rrq(struct lpfc_vport *vport, uint8_t *pcmd; struct RRQ *rrq; uint16_t rxid; + uint16_t xri; struct lpfc_node_rrq *prrq; pcmd = (uint8_t *) (((struct lpfc_dmabuf *) iocb->context2)->virt); pcmd += sizeof(uint32_t); rrq = (struct RRQ *)pcmd; - rxid = bf_get(rrq_oxid, rrq); + rrq->rrq_exchg = be32_to_cpu(rrq->rrq_exchg); + rxid = be16_to_cpu(bf_get(rrq_rxid, rrq)); lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "2883 Clear RRQ for SID:x%x OXID:x%x RXID:x%x" " x%x x%x\n", - bf_get(rrq_did, rrq), - bf_get(rrq_oxid, rrq), + be32_to_cpu(bf_get(rrq_did, rrq)), + be16_to_cpu(bf_get(rrq_oxid, rrq)), rxid, iocb->iotag, iocb->iocb.ulpContext); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, "Clear RRQ: did:x%x flg:x%x exchg:x%.08x", ndlp->nlp_DID, ndlp->nlp_flag, rrq->rrq_exchg); - prrq = lpfc_get_active_rrq(vport, rxid, ndlp->nlp_DID); + if (vport->fc_myDID == be32_to_cpu(bf_get(rrq_did, rrq))) + xri = be16_to_cpu(bf_get(rrq_oxid, rrq)); + else + xri = rxid; + prrq = lpfc_get_active_rrq(vport, xri, ndlp->nlp_DID); if (prrq) - lpfc_clr_rrq_active(phba, rxid, prrq); + lpfc_clr_rrq_active(phba, xri, prrq); return; } @@ -6166,6 +6257,11 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, if (vport->load_flag & FC_UNLOADING) goto dropit; + /* If NPort discovery is delayed, drop incoming ELS */ + if ((vport->fc_flag & FC_DISC_DELAYED) && + (cmd != ELS_CMD_PLOGI)) + goto dropit; + ndlp = lpfc_findnode_did(vport, did); if (!ndlp) { /* Cannot find existing Fabric ndlp, so allocate a new one */ @@ -6218,6 +6314,12 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp); lpfc_send_els_event(vport, ndlp, payload); + + /* If Nport discovery is delayed, reject PLOGIs */ + if (vport->fc_flag & FC_DISC_DELAYED) { + rjt_err = LSRJT_UNABLE_TPC; + break; + } if (vport->port_state < LPFC_DISC_AUTH) { if (!(phba->pport->fc_flag & FC_PT2PT) || (phba->pport->fc_flag & FC_PT2PT_PLOGI)) { @@ -6596,6 +6698,21 @@ void lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport) { struct lpfc_nodelist *ndlp, *ndlp_fdmi; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + + /* + * If the lpfc_delay_discovery parameter is set and the clean address + * bit is cleared and the FC fabric parameters changed, delay FC NPort + * discovery. 
+ */ + spin_lock_irq(shost->host_lock); + if (vport->fc_flag & FC_DISC_DELAYED) { + spin_unlock_irq(shost->host_lock); + mod_timer(&vport->delayed_disc_tmo, + jiffies + HZ * phba->fc_ratov); + return; + } + spin_unlock_irq(shost->host_lock); ndlp = lpfc_findnode_did(vport, NameServer_DID); if (!ndlp) { @@ -6938,6 +7055,9 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_nodelist *next_np; IOCB_t *irsp = &rspiocb->iocb; struct lpfc_iocbq *piocb; + struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp; + struct serv_parm *sp; + uint8_t fabric_param_changed; lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0123 FDISC completes. x%x/x%x prevDID: x%x\n", @@ -6981,7 +7101,14 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID; lpfc_vport_set_state(vport, FC_VPORT_ACTIVE); - if ((vport->fc_prevDID != vport->fc_myDID) && + prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); + sp = prsp->virt + sizeof(uint32_t); + fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp); + memcpy(&vport->fabric_portname, &sp->portName, + sizeof(struct lpfc_name)); + memcpy(&vport->fabric_nodename, &sp->nodeName, + sizeof(struct lpfc_name)); + if (fabric_param_changed && !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) { /* If our NportID changed, we need to ensure all * remaining NPORTs get unreg_login'ed so we can @@ -7582,6 +7709,32 @@ void lpfc_fabric_abort_hba(struct lpfc_hba *phba) } /** + * lpfc_sli4_vport_delete_els_xri_aborted - Remove all ndlp references for vport + * @vport: pointer to lpfc vport data structure. + * + * This routine is invoked by the vport cleanup for deletions and the cleanup + * for an ndlp on removal. + **/ +void +lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *vport) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; + unsigned long iflag = 0; + + spin_lock_irqsave(&phba->hbalock, iflag); + spin_lock(&phba->sli4_hba.abts_sgl_list_lock); + list_for_each_entry_safe(sglq_entry, sglq_next, + &phba->sli4_hba.lpfc_abts_els_sgl_list, list) { + if (sglq_entry->ndlp && sglq_entry->ndlp->vport == vport) + sglq_entry->ndlp = NULL; + } + spin_unlock(&phba->sli4_hba.abts_sgl_list_lock); + spin_unlock_irqrestore(&phba->hbalock, iflag); + return; +} + +/** + * lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort + * @phba: pointer to lpfc hba data structure. + * @axri: pointer to the els xri abort wcqe structure. 
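The delayed-discovery machinery added above follows the driver's usual timer-to-worker pattern: the timer callback only posts a flag under a lock and wakes the worker, and the real work runs later in process context. A minimal self-contained sketch of that pattern, with generic names rather than lpfc symbols (kernel APIs of this era, using a workqueue in place of the lpfc worker thread), is:

	/* Sketch of the timer -> posted-flag -> deferred-handler pattern used
	 * by lpfc_delayed_disc_tmo(); all demo_* names are illustrative and
	 * not lpfc APIs.
	 */
	#include <linux/module.h>
	#include <linux/timer.h>
	#include <linux/jiffies.h>
	#include <linux/workqueue.h>
	#include <linux/spinlock.h>

	#define DEMO_EVT_TMO 0x1

	static unsigned long demo_events;
	static DEFINE_SPINLOCK(demo_lock);
	static struct timer_list demo_timer;
	static struct work_struct demo_work;

	static void demo_handler(struct work_struct *work)
	{
		unsigned long flags, events;

		/* Consume the posted events in process context */
		spin_lock_irqsave(&demo_lock, flags);
		events = demo_events;
		demo_events = 0;
		spin_unlock_irqrestore(&demo_lock, flags);

		if (events & DEMO_EVT_TMO)
			pr_info("demo: delayed event handled\n");
	}

	static void demo_tmo(unsigned long ptr)
	{
		unsigned long flags;
		int posted;

		/* Only post the event once; the worker does the real work */
		spin_lock_irqsave(&demo_lock, flags);
		posted = demo_events & DEMO_EVT_TMO;
		demo_events |= DEMO_EVT_TMO;
		spin_unlock_irqrestore(&demo_lock, flags);

		if (!posted)
			schedule_work(&demo_work);
	}

	static int __init demo_init(void)
	{
		INIT_WORK(&demo_work, demo_handler);
		setup_timer(&demo_timer, demo_tmo, 0);
		mod_timer(&demo_timer, jiffies + HZ);
		return 0;
	}

	static void __exit demo_exit(void)
	{
		del_timer_sync(&demo_timer);
		cancel_work_sync(&demo_work);
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");

Deferring the work this way keeps the timer callback short and interrupt-safe, which is why lpfc_delayed_disc_tmo() only sets WORKER_DELAYED_DISC_TMO and wakes the worker rather than issuing the NameServer PLOGI directly.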
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index bb015960dbc9..154c715fb3af 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -658,6 +658,8 @@ lpfc_work_done(struct lpfc_hba *phba) lpfc_ramp_down_queue_handler(phba); if (work_port_events & WORKER_RAMP_UP_QUEUE) lpfc_ramp_up_queue_handler(phba); + if (work_port_events & WORKER_DELAYED_DISC_TMO) + lpfc_delayed_disc_timeout_handler(vport); } lpfc_destroy_vport_work_array(phba, vports); @@ -838,6 +840,11 @@ lpfc_linkdown_port(struct lpfc_vport *vport) lpfc_port_link_failure(vport); + /* Stop delayed Nport discovery */ + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~FC_DISC_DELAYED; + spin_unlock_irq(shost->host_lock); + del_timer_sync(&vport->delayed_disc_tmo); } int @@ -3160,7 +3167,7 @@ lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) spin_unlock_irq(shost->host_lock); vport->unreg_vpi_cmpl = VPORT_OK; mempool_free(pmb, phba->mbox_mem_pool); - lpfc_cleanup_vports_rrqs(vport); + lpfc_cleanup_vports_rrqs(vport, NULL); /* * This shost reference might have been taken at the beginning of * lpfc_vport_delete() @@ -3900,6 +3907,8 @@ lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) return; lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE); + if (vport->phba->sli_rev == LPFC_SLI_REV4) + lpfc_cleanup_vports_rrqs(vport, ndlp); lpfc_nlp_put(ndlp); return; } @@ -4289,7 +4298,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) list_del_init(&ndlp->els_retry_evt.evt_listp); list_del_init(&ndlp->dev_loss_evt.evt_listp); - + lpfc_cleanup_vports_rrqs(vport, ndlp); lpfc_unreg_rpi(vport, ndlp); return 0; @@ -4426,10 +4435,11 @@ lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_nodelist *ndlp; + unsigned long iflags; - spin_lock_irq(shost->host_lock); + spin_lock_irqsave(shost->host_lock, iflags); ndlp = __lpfc_findnode_did(vport, did); - spin_unlock_irq(shost->host_lock); + spin_unlock_irqrestore(shost->host_lock, iflags); return ndlp; } diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index 96ed3ba6ba95..94ae37c5111a 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h @@ -341,6 +341,12 @@ struct csp { uint8_t bbCreditMsb; uint8_t bbCreditlsb; /* FC Word 0, byte 3 */ +/* + * Word 1 Bit 31 in common service parameter is overloaded. 
+ * Word 1 Bit 31 in FLOGI request is multiple NPort request + * Word 1 Bit 31 in FLOGI response is clean address bit + */ +#define clean_address_bit request_multiple_Nport /* Word 1, bit 31 */ #ifdef __BIG_ENDIAN_BITFIELD uint16_t request_multiple_Nport:1; /* FC Word 1, bit 31 */ uint16_t randomOffset:1; /* FC Word 1, bit 30 */ @@ -3198,7 +3204,10 @@ typedef struct { #define IOERR_SLER_RRQ_RJT_ERR 0x4C #define IOERR_SLER_RRQ_RETRY_ERR 0x4D #define IOERR_SLER_ABTS_ERR 0x4E - +#define IOERR_ELXSEC_KEY_UNWRAP_ERROR 0xF0 +#define IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR 0xF1 +#define IOERR_ELXSEC_CRYPTO_ERROR 0xF2 +#define IOERR_ELXSEC_CRYPTO_COMPARE_ERROR 0xF3 #define IOERR_DRVR_MASK 0x100 #define IOERR_SLI_DOWN 0x101 /* ulpStatus - Driver defined */ #define IOERR_SLI_BRESET 0x102 diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index 94c1aa1136de..c7178d60c7bf 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h @@ -778,6 +778,7 @@ struct mbox_header { #define LPFC_MBOX_OPCODE_QUERY_FW_CFG 0x3A #define LPFC_MBOX_OPCODE_FUNCTION_RESET 0x3D #define LPFC_MBOX_OPCODE_MQ_CREATE_EXT 0x5A +#define LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS 0xB5 /* FCoE Opcodes */ #define LPFC_MBOX_OPCODE_FCOE_WQ_CREATE 0x01 @@ -1852,6 +1853,9 @@ struct lpfc_mbx_request_features { #define lpfc_mbx_rq_ftr_rq_ifip_SHIFT 7 #define lpfc_mbx_rq_ftr_rq_ifip_MASK 0x00000001 #define lpfc_mbx_rq_ftr_rq_ifip_WORD word2 +#define lpfc_mbx_rq_ftr_rq_perfh_SHIFT 11 +#define lpfc_mbx_rq_ftr_rq_perfh_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rq_perfh_WORD word2 uint32_t word3; #define lpfc_mbx_rq_ftr_rsp_iaab_SHIFT 0 #define lpfc_mbx_rq_ftr_rsp_iaab_MASK 0x00000001 @@ -1877,6 +1881,9 @@ struct lpfc_mbx_request_features { #define lpfc_mbx_rq_ftr_rsp_ifip_SHIFT 7 #define lpfc_mbx_rq_ftr_rsp_ifip_MASK 0x00000001 #define lpfc_mbx_rq_ftr_rsp_ifip_WORD word3 +#define lpfc_mbx_rq_ftr_rsp_perfh_SHIFT 11 +#define lpfc_mbx_rq_ftr_rsp_perfh_MASK 0x00000001 +#define lpfc_mbx_rq_ftr_rsp_perfh_WORD word3 }; struct lpfc_mbx_supp_pages { @@ -1935,7 +1942,7 @@ struct lpfc_mbx_supp_pages { #define LPFC_SLI4_PARAMETERS 2 }; -struct lpfc_mbx_sli4_params { +struct lpfc_mbx_pc_sli4_params { uint32_t word1; #define qs_SHIFT 0 #define qs_MASK 0x00000001 @@ -2051,6 +2058,88 @@ struct lpfc_mbx_sli4_params { uint32_t rsvd_13_63[51]; }; +struct lpfc_sli4_parameters { + uint32_t word0; +#define cfg_prot_type_SHIFT 0 +#define cfg_prot_type_MASK 0x000000FF +#define cfg_prot_type_WORD word0 + uint32_t word1; +#define cfg_ft_SHIFT 0 +#define cfg_ft_MASK 0x00000001 +#define cfg_ft_WORD word1 +#define cfg_sli_rev_SHIFT 4 +#define cfg_sli_rev_MASK 0x0000000f +#define cfg_sli_rev_WORD word1 +#define cfg_sli_family_SHIFT 8 +#define cfg_sli_family_MASK 0x0000000f +#define cfg_sli_family_WORD word1 +#define cfg_if_type_SHIFT 12 +#define cfg_if_type_MASK 0x0000000f +#define cfg_if_type_WORD word1 +#define cfg_sli_hint_1_SHIFT 16 +#define cfg_sli_hint_1_MASK 0x000000ff +#define cfg_sli_hint_1_WORD word1 +#define cfg_sli_hint_2_SHIFT 24 +#define cfg_sli_hint_2_MASK 0x0000001f +#define cfg_sli_hint_2_WORD word1 + uint32_t word2; + uint32_t word3; + uint32_t word4; +#define cfg_cqv_SHIFT 14 +#define cfg_cqv_MASK 0x00000003 +#define cfg_cqv_WORD word4 + uint32_t word5; + uint32_t word6; +#define cfg_mqv_SHIFT 14 +#define cfg_mqv_MASK 0x00000003 +#define cfg_mqv_WORD word6 + uint32_t word7; + uint32_t word8; +#define cfg_wqv_SHIFT 14 +#define cfg_wqv_MASK 0x00000003 +#define cfg_wqv_WORD word8 + uint32_t word9; + uint32_t word10; +#define 
cfg_rqv_SHIFT 14 +#define cfg_rqv_MASK 0x00000003 +#define cfg_rqv_WORD word10 + uint32_t word11; +#define cfg_rq_db_window_SHIFT 28 +#define cfg_rq_db_window_MASK 0x0000000f +#define cfg_rq_db_window_WORD word11 + uint32_t word12; +#define cfg_fcoe_SHIFT 0 +#define cfg_fcoe_MASK 0x00000001 +#define cfg_fcoe_WORD word12 +#define cfg_phwq_SHIFT 15 +#define cfg_phwq_MASK 0x00000001 +#define cfg_phwq_WORD word12 +#define cfg_loopbk_scope_SHIFT 28 +#define cfg_loopbk_scope_MASK 0x0000000f +#define cfg_loopbk_scope_WORD word12 + uint32_t sge_supp_len; + uint32_t word14; +#define cfg_sgl_page_cnt_SHIFT 0 +#define cfg_sgl_page_cnt_MASK 0x0000000f +#define cfg_sgl_page_cnt_WORD word14 +#define cfg_sgl_page_size_SHIFT 8 +#define cfg_sgl_page_size_MASK 0x000000ff +#define cfg_sgl_page_size_WORD word14 +#define cfg_sgl_pp_align_SHIFT 16 +#define cfg_sgl_pp_align_MASK 0x000000ff +#define cfg_sgl_pp_align_WORD word14 + uint32_t word15; + uint32_t word16; + uint32_t word17; + uint32_t word18; + uint32_t word19; +}; + +struct lpfc_mbx_get_sli4_parameters { + struct mbox_header header; + struct lpfc_sli4_parameters sli4_parameters; +}; + /* Mailbox Completion Queue Error Messages */ #define MB_CQE_STATUS_SUCCESS 0x0 #define MB_CQE_STATUS_INSUFFICIENT_PRIVILEGES 0x1 @@ -2103,7 +2192,8 @@ struct lpfc_mqe { struct lpfc_mbx_post_hdr_tmpl hdr_tmpl; struct lpfc_mbx_query_fw_cfg query_fw_cfg; struct lpfc_mbx_supp_pages supp_pages; - struct lpfc_mbx_sli4_params sli4_params; + struct lpfc_mbx_pc_sli4_params sli4_params; + struct lpfc_mbx_get_sli4_parameters get_sli4_parameters; struct lpfc_mbx_nop nop; } un; }; @@ -2381,6 +2471,10 @@ struct wqe_common { #define wqe_wqes_SHIFT 15 #define wqe_wqes_MASK 0x00000001 #define wqe_wqes_WORD word10 +/* Note that this field overlaps above fields */ +#define wqe_wqid_SHIFT 1 +#define wqe_wqid_MASK 0x0000007f +#define wqe_wqid_WORD word10 #define wqe_pri_SHIFT 16 #define wqe_pri_MASK 0x00000007 #define wqe_pri_WORD word10 @@ -2599,7 +2693,8 @@ struct fcp_iwrite64_wqe { uint32_t total_xfer_len; uint32_t initial_xfer_len; struct wqe_common wqe_com; /* words 6-11 */ - uint32_t rsvd_12_15[4]; /* word 12-15 */ + uint32_t rsrvd12; + struct ulp_bde64 ph_bde; /* words 13-15 */ }; struct fcp_iread64_wqe { @@ -2608,7 +2703,8 @@ struct fcp_iread64_wqe { uint32_t total_xfer_len; /* word 4 */ uint32_t rsrvd5; /* word 5 */ struct wqe_common wqe_com; /* words 6-11 */ - uint32_t rsvd_12_15[4]; /* word 12-15 */ + uint32_t rsrvd12; + struct ulp_bde64 ph_bde; /* words 13-15 */ }; struct fcp_icmnd64_wqe { diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 6d0b36aa3389..35665cfb5689 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -460,7 +460,7 @@ lpfc_config_port_post(struct lpfc_hba *phba) || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) && !(phba->lmt & LMT_16Gb))) { /* Reset link speed to auto */ - lpfc_printf_log(phba, KERN_WARNING, LOG_LINK_EVENT, + lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, "1302 Invalid speed for this board: " "Reset link speed to auto: x%x\n", phba->cfg_link_speed); @@ -945,17 +945,13 @@ static void lpfc_rrq_timeout(unsigned long ptr) { struct lpfc_hba *phba; - uint32_t tmo_posted; unsigned long iflag; phba = (struct lpfc_hba *)ptr; spin_lock_irqsave(&phba->pport->work_port_lock, iflag); - tmo_posted = phba->hba_flag & HBA_RRQ_ACTIVE; - if (!tmo_posted) - phba->hba_flag |= HBA_RRQ_ACTIVE; + phba->hba_flag |= HBA_RRQ_ACTIVE; spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); - if 
(!tmo_posted) - lpfc_worker_wake_up(phba); + lpfc_worker_wake_up(phba); } /** @@ -2280,6 +2276,7 @@ lpfc_cleanup(struct lpfc_vport *vport) /* Wait for any activity on ndlps to settle */ msleep(10); } + lpfc_cleanup_vports_rrqs(vport, NULL); } /** @@ -2295,6 +2292,7 @@ lpfc_stop_vport_timers(struct lpfc_vport *vport) { del_timer_sync(&vport->els_tmofunc); del_timer_sync(&vport->fc_fdmitmo); + del_timer_sync(&vport->delayed_disc_tmo); lpfc_can_disctmo(vport); return; } @@ -2355,6 +2353,10 @@ lpfc_stop_hba_timers(struct lpfc_hba *phba) del_timer_sync(&phba->fabric_block_timer); del_timer_sync(&phba->eratt_poll); del_timer_sync(&phba->hb_tmofunc); + if (phba->sli_rev == LPFC_SLI_REV4) { + del_timer_sync(&phba->rrq_tmr); + phba->hba_flag &= ~HBA_RRQ_ACTIVE; + } phba->hb_outstanding = 0; switch (phba->pci_dev_grp) { @@ -2732,6 +2734,11 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) init_timer(&vport->els_tmofunc); vport->els_tmofunc.function = lpfc_els_timeout; vport->els_tmofunc.data = (unsigned long)vport; + + init_timer(&vport->delayed_disc_tmo); + vport->delayed_disc_tmo.function = lpfc_delayed_disc_tmo; + vport->delayed_disc_tmo.data = (unsigned long)vport; + error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev); if (error) goto out_put_shost; @@ -4283,36 +4290,37 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) goto out_free_bsmbx; } - /* Get the Supported Pages. It is always available. */ + /* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */ lpfc_supported_pages(mboxq); rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); - if (unlikely(rc)) { - rc = -EIO; - mempool_free(mboxq, phba->mbox_mem_pool); - goto out_free_bsmbx; - } - - mqe = &mboxq->u.mqe; - memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3), - LPFC_MAX_SUPPORTED_PAGES); - for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) { - switch (pn_page[i]) { - case LPFC_SLI4_PARAMETERS: - phba->sli4_hba.pc_sli4_params.supported = 1; - break; - default: - break; + if (!rc) { + mqe = &mboxq->u.mqe; + memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3), + LPFC_MAX_SUPPORTED_PAGES); + for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) { + switch (pn_page[i]) { + case LPFC_SLI4_PARAMETERS: + phba->sli4_hba.pc_sli4_params.supported = 1; + break; + default: + break; + } + } + /* Read the port's SLI4 Parameters capabilities if supported. */ + if (phba->sli4_hba.pc_sli4_params.supported) + rc = lpfc_pc_sli4_params_get(phba, mboxq); + if (rc) { + mempool_free(mboxq, phba->mbox_mem_pool); + rc = -EIO; + goto out_free_bsmbx; } } - - /* Read the port's SLI4 Parameters capabilities if supported. */ - if (phba->sli4_hba.pc_sli4_params.supported) - rc = lpfc_pc_sli4_params_get(phba, mboxq); + /* + * Get sli4 parameters that override parameters from Port capabilities. + * If this call fails it is not a critical error so continue loading. 
+ */ + lpfc_get_sli4_parameters(phba, mboxq); mempool_free(mboxq, phba->mbox_mem_pool); - if (rc) { - rc = -EIO; - goto out_free_bsmbx; - } /* Create all the SLI4 queues */ rc = lpfc_sli4_queue_create(phba); if (rc) @@ -7810,7 +7818,7 @@ lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) mqe = &mboxq->u.mqe; /* Read the port's SLI4 Parameters port capabilities */ - lpfc_sli4_params(mboxq); + lpfc_pc_sli4_params(mboxq); if (!phba->sli4_hba.intr_enable) rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); else { @@ -7854,6 +7862,66 @@ } /** + * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS. + * @phba: Pointer to HBA context object. + * @mboxq: Pointer to the mailboxq memory for the mailbox command response. + * + * This function is called in the SLI4 code path to read the port's + * sli4 capabilities. + * + * This function may be called from any context that can block-wait + * for the completion. The expectation is that this routine is called + * typically from probe_one or from the online routine. + **/ +int +lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) +{ + int rc; + struct lpfc_mqe *mqe = &mboxq->u.mqe; + struct lpfc_pc_sli4_params *sli4_params; + int length; + struct lpfc_sli4_parameters *mbx_sli4_parameters; + + /* Read the port's SLI4 Config Parameters */ + length = (sizeof(struct lpfc_mbx_get_sli4_parameters) - + sizeof(struct lpfc_sli4_cfg_mhdr)); + lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, + LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS, + length, LPFC_SLI4_MBX_EMBED); + if (!phba->sli4_hba.intr_enable) + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + else + rc = lpfc_sli_issue_mbox_wait(phba, mboxq, + lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG)); + if (unlikely(rc)) + return rc; + sli4_params = &phba->sli4_hba.pc_sli4_params; + mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters; + sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters); + sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters); + sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters); + sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1, + mbx_sli4_parameters); + sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2, + mbx_sli4_parameters); + if (bf_get(cfg_phwq, mbx_sli4_parameters)) + phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED; + else + phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED; + sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len; + sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters); + sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters); + sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters); + sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters); + sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters); + sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt, + mbx_sli4_parameters); + sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align, + mbx_sli4_parameters); + return 0; +} + +/** + * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
* @pdev: pointer to PCI device * @pid: pointer to PCI device identifier diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c index 23403c650207..dba32dfdb59b 100644 --- a/drivers/scsi/lpfc/lpfc_mbox.c +++ b/drivers/scsi/lpfc/lpfc_mbox.c @@ -1263,7 +1263,8 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) if (phba->sli_rev == LPFC_SLI_REV3 && phba->vpd.sli3Feat.cerbm) { if (phba->cfg_enable_bg) mb->un.varCfgPort.cbg = 1; /* configure BlockGuard */ - mb->un.varCfgPort.cdss = 1; /* Configure Security */ + if (phba->cfg_enable_dss) + mb->un.varCfgPort.cdss = 1; /* Configure Security */ mb->un.varCfgPort.cerbm = 1; /* Request HBQs */ mb->un.varCfgPort.ccrp = 1; /* Command Ring Polling */ mb->un.varCfgPort.max_hbq = lpfc_sli_hbq_count(); @@ -1692,7 +1693,7 @@ lpfc_sli4_mbox_cmd_free(struct lpfc_hba *phba, struct lpfcMboxq *mbox) * @mbox: pointer to lpfc mbox command. * @subsystem: The sli4 config sub mailbox subsystem. * @opcode: The sli4 config sub mailbox command opcode. - * @length: Length of the sli4 config mailbox command. + * @length: Length of the sli4 config mailbox command (including sub-header). * * This routine sets up the header fields of SLI4 specific mailbox command * for sending IOCTL command. @@ -1723,14 +1724,14 @@ lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox, if (emb) { /* Set up main header fields */ bf_set(lpfc_mbox_hdr_emb, &sli4_config->header.cfg_mhdr, 1); - sli4_config->header.cfg_mhdr.payload_length = - LPFC_MBX_CMD_HDR_LENGTH + length; + sli4_config->header.cfg_mhdr.payload_length = length; /* Set up sub-header fields following main header */ bf_set(lpfc_mbox_hdr_opcode, &sli4_config->header.cfg_shdr.request, opcode); bf_set(lpfc_mbox_hdr_subsystem, &sli4_config->header.cfg_shdr.request, subsystem); - sli4_config->header.cfg_shdr.request.request_length = length; + sli4_config->header.cfg_shdr.request.request_length = + length - LPFC_MBX_CMD_HDR_LENGTH; return length; } @@ -1902,6 +1903,7 @@ lpfc_request_features(struct lpfc_hba *phba, struct lpfcMboxq *mboxq) /* Set up host requested features. */ bf_set(lpfc_mbx_rq_ftr_rq_fcpi, &mboxq->u.mqe.un.req_ftrs, 1); + bf_set(lpfc_mbx_rq_ftr_rq_perfh, &mboxq->u.mqe.un.req_ftrs, 1); /* Enable DIF (block guard) only if configured to do so. */ if (phba->cfg_enable_bg) @@ -2159,17 +2161,16 @@ lpfc_supported_pages(struct lpfcMboxq *mbox) } /** - * lpfc_sli4_params - Initialize the PORT_CAPABILITIES SLI4 Params - * mailbox command. + * lpfc_pc_sli4_params - Initialize the PORT_CAPABILITIES SLI4 Params mbox cmd. * @mbox: pointer to lpfc mbox command to initialize. * * The PORT_CAPABILITIES SLI4 parameters mailbox command is issued to * retrieve the particular SLI4 features supported by the port. 
**/ void -lpfc_sli4_params(struct lpfcMboxq *mbox) +lpfc_pc_sli4_params(struct lpfcMboxq *mbox) { - struct lpfc_mbx_sli4_params *sli4_params; + struct lpfc_mbx_pc_sli4_params *sli4_params; memset(mbox, 0, sizeof(*mbox)); sli4_params = &mbox->u.mqe.un.sli4_params; diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index d85a7423a694..52b35159fc35 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -350,7 +350,11 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb; - /* no need to reg_login if we are already in one of these states */ + /* + * Need to unreg_login if we are already in one of these states and + * change to NPR state. This will block the port until after the ACC + * completes and the reg_login is issued and completed. + */ switch (ndlp->nlp_state) { case NLP_STE_NPR_NODE: if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) @@ -359,8 +363,9 @@ case NLP_STE_PRLI_ISSUE: case NLP_STE_UNMAPPED_NODE: case NLP_STE_MAPPED_NODE: - lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL); - return 1; + lpfc_unreg_rpi(vport, ndlp); + ndlp->nlp_prev_state = ndlp->nlp_state; + lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); } if ((vport->fc_flag & FC_PT2PT) && diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index c97751c95d77..bf34178b80bf 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -609,6 +609,32 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc) } /** + * lpfc_sli4_vport_delete_fcp_xri_aborted - Remove all ndlp references for vport + * @vport: pointer to lpfc vport data structure. + * + * This routine is invoked by the vport cleanup for deletions and the cleanup + * for an ndlp on removal. + **/ +void +lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_scsi_buf *psb, *next_psb; + unsigned long iflag = 0; + + spin_lock_irqsave(&phba->hbalock, iflag); + spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock); + list_for_each_entry_safe(psb, next_psb, + &phba->sli4_hba.lpfc_abts_scsi_buf_list, list) { + if (psb->rdata && psb->rdata->pnode + && psb->rdata->pnode->vport == vport) + psb->rdata = NULL; + } + spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock); + spin_unlock_irqrestore(&phba->hbalock, iflag); +} + +/** * lpfc_sli4_fcp_xri_aborted - Fast-path process of fcp xri abort * @phba: pointer to lpfc hba data structure. * @axri: pointer to the fcp xri abort wcqe structure.
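lpfc_sli4_vport_delete_fcp_xri_aborted() above (like its ELS counterpart earlier) clears the rdata back-pointers of buffers parked on the aborted-XRI list rather than unlinking or freeing the buffers, because the hardware still owes an abort completion for each XRI; the next hunk adds the matching NULL check to lpfc_sli4_fcp_xri_aborted(). Here is a minimal stand-alone model of that invalidate-then-tolerate pairing, with invented toy_* names and a plain mutex in place of the driver's nested hbalock/list locks:

#include <pthread.h>
#include <stddef.h>

struct toy_node { int vport_id; };

struct toy_buf {
	struct toy_buf *next;
	struct toy_node *rdata;   /* back-pointer; may be invalidated */
};

static pthread_mutex_t toy_lock = PTHREAD_MUTEX_INITIALIZER;
static struct toy_buf *toy_abts_list;   /* buffers awaiting abort events */

/* vport teardown: invalidate node references but leave the buffers
 * parked, since a completion for each one is still expected. */
void toy_delete_vport_refs(int vport_id)
{
	struct toy_buf *b;

	pthread_mutex_lock(&toy_lock);
	for (b = toy_abts_list; b; b = b->next)
		if (b->rdata && b->rdata->vport_id == vport_id)
			b->rdata = NULL;
	pthread_mutex_unlock(&toy_lock);
}

/* abort completion: tolerate a reference cleared by teardown */
struct toy_node *toy_complete_aborted(struct toy_buf *b)
{
	return b->rdata ? b->rdata : NULL;  /* caller must handle NULL */
}

Invalidating the reference instead of dequeuing the buffer keeps the XRI quarantined until the port reports it aborted, while guaranteeing the completion path can never chase a pointer into a deleted vport.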
@@ -640,7 +666,11 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba, psb->status = IOSTAT_SUCCESS; spin_unlock( &phba->sli4_hba.abts_scsi_buf_list_lock); - ndlp = psb->rdata->pnode; + if (psb->rdata && psb->rdata->pnode) + ndlp = psb->rdata->pnode; + else + ndlp = NULL; + rrq_empty = list_empty(&phba->active_rrq_list); spin_unlock_irqrestore(&phba->hbalock, iflag); if (ndlp) @@ -964,36 +994,29 @@ lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) static struct lpfc_scsi_buf* lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) { - struct lpfc_scsi_buf *lpfc_cmd = NULL; - struct lpfc_scsi_buf *start_lpfc_cmd = NULL; - struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list; + struct lpfc_scsi_buf *lpfc_cmd ; unsigned long iflag = 0; int found = 0; spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); - list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list); - spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); - while (!found && lpfc_cmd) { + list_for_each_entry(lpfc_cmd, &phba->lpfc_scsi_buf_list, + list) { if (lpfc_test_rrq_active(phba, ndlp, - lpfc_cmd->cur_iocbq.sli4_xritag)) { - lpfc_release_scsi_buf_s4(phba, lpfc_cmd); - spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); - list_remove_head(scsi_buf_list, lpfc_cmd, - struct lpfc_scsi_buf, list); - spin_unlock_irqrestore(&phba->scsi_buf_list_lock, - iflag); - if (lpfc_cmd == start_lpfc_cmd) { - lpfc_cmd = NULL; - break; - } else - continue; - } + lpfc_cmd->cur_iocbq.sli4_xritag)) + continue; + list_del(&lpfc_cmd->list); found = 1; lpfc_cmd->seg_cnt = 0; lpfc_cmd->nonsg_phys = 0; lpfc_cmd->prot_seg_cnt = 0; + break; } - return lpfc_cmd; + spin_unlock_irqrestore(&phba->scsi_buf_list_lock, + iflag); + if (!found) + return NULL; + else + return lpfc_cmd; } /** * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA @@ -1981,12 +2004,14 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) struct scatterlist *sgel = NULL; struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl; + struct sli4_sge *first_data_sgl; IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; dma_addr_t physaddr; uint32_t num_bde = 0; uint32_t dma_len; uint32_t dma_offset = 0; int nseg; + struct ulp_bde64 *bde; /* * There are three possibilities here - use scatter-gather segment, use @@ -2011,7 +2036,7 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) bf_set(lpfc_sli4_sge_last, sgl, 0); sgl->word2 = cpu_to_le32(sgl->word2); sgl += 1; - + first_data_sgl = sgl; lpfc_cmd->seg_cnt = nseg; if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9074 BLKGRD:" @@ -2047,6 +2072,17 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) dma_offset += dma_len; sgl++; } + /* setup the performance hint (first data BDE) if enabled */ + if (phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) { + bde = (struct ulp_bde64 *) + &(iocb_cmd->unsli3.sli3Words[5]); + bde->addrLow = first_data_sgl->addr_lo; + bde->addrHigh = first_data_sgl->addr_hi; + bde->tus.f.bdeSize = + le32_to_cpu(first_data_sgl->sge_len); + bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64; + bde->tus.w = cpu_to_le32(bde->tus.w); + } } else { sgl += 1; /* clear the last flag in the fcp_rsp map entry */ @@ -2471,6 +2507,16 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, lpfc_worker_wake_up(phba); break; case IOSTAT_LOCAL_REJECT: + case IOSTAT_REMOTE_STOP: + 
if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR || + lpfc_cmd->result == + IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR || + lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR || + lpfc_cmd->result == + IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) { + cmd->result = ScsiResult(DID_NO_CONNECT, 0); + break; + } if (lpfc_cmd->result == IOERR_INVALID_RPI || lpfc_cmd->result == IOERR_NO_RESOURCES || lpfc_cmd->result == IOERR_ABORT_REQUESTED || @@ -2478,7 +2524,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, cmd->result = ScsiResult(DID_REQUEUE, 0); break; } - if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED || lpfc_cmd->result == IOERR_TX_DMA_FAILED) && pIocbOut->iocb.unsli3.sli3_bg.bgstat) { @@ -2497,7 +2542,17 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, "on unprotected cmd\n"); } } - + if ((lpfc_cmd->status == IOSTAT_REMOTE_STOP) + && (phba->sli_rev == LPFC_SLI_REV4) + && (pnode && NLP_CHK_NODE_ACT(pnode))) { + /* This IO was aborted by the target, we don't + * know the rxid and because we did not send the + * ABTS we cannot generate an RRQ. + */ + lpfc_set_rrq_active(phba, pnode, + lpfc_cmd->cur_iocbq.sli4_xritag, + 0, 0); + } /* else: fall through */ default: cmd->result = ScsiResult(DID_ERROR, 0); @@ -2508,9 +2563,8 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, || (pnode->nlp_state != NLP_STE_MAPPED_NODE)) cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, SAM_STAT_BUSY); - } else { + } else cmd->result = ScsiResult(DID_OK, 0); - } if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) { uint32_t *lp = (uint32_t *)cmd->sense_buffer; @@ -3004,11 +3058,11 @@ lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) * transport is still transitioning. */ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { - cmnd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0); + cmnd->result = ScsiResult(DID_IMM_RETRY, 0); goto out_fail_command; } if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) - goto out_host_busy; + goto out_tgt_busy; lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp); if (lpfc_cmd == NULL) { @@ -3125,6 +3179,9 @@ lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) out_host_busy: return SCSI_MLQUEUE_HOST_BUSY; + out_tgt_busy: + return SCSI_MLQUEUE_TARGET_BUSY; + out_fail_command: done(cmnd); return 0; diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index a359d2b873ce..2ee0374a9908 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -96,7 +96,8 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe) /* set consumption flag every once in a while */ if (!((q->host_index + 1) % LPFC_RELEASE_NOTIFICATION_INTERVAL)) bf_set(wqe_wqec, &wqe->generic.wqe_com, 1); - + if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED) + bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id); lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size); /* Update the host index before invoking device */ @@ -534,15 +535,35 @@ __lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, uint16_t adj_xri; struct lpfc_node_rrq *rrq; int empty; + uint32_t did = 0; + + + if (!ndlp) + return -EINVAL; + + if (!phba->cfg_enable_rrq) + return -EINVAL; + + if (phba->pport->load_flag & FC_UNLOADING) { + phba->hba_flag &= ~HBA_RRQ_ACTIVE; + goto out; + } + did = ndlp->nlp_DID; /* * set the active bit even if there is no mem available.
*/ adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base; - if (!ndlp) - return -EINVAL; + + if (NLP_CHK_FREE_REQ(ndlp)) + goto out; + + if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING)) + goto out; + if (test_and_set_bit(adj_xri, ndlp->active_rrqs.xri_bitmap)) - return -EINVAL; + goto out; + rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL); if (rrq) { rrq->send_rrq = send_rrq; @@ -553,14 +574,7 @@ __lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, rrq->vport = ndlp->vport; rrq->rxid = rxid; empty = list_empty(&phba->active_rrq_list); - if (phba->cfg_enable_rrq && send_rrq) - /* - * We need the xri before we can add this to the - * phba active rrq list. - */ - rrq->send_rrq = send_rrq; - else - rrq->send_rrq = 0; + rrq->send_rrq = send_rrq; list_add_tail(&rrq->list, &phba->active_rrq_list); if (!(phba->hba_flag & HBA_RRQ_ACTIVE)) { phba->hba_flag |= HBA_RRQ_ACTIVE; @@ -569,40 +583,49 @@ __lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, } return 0; } - return -ENOMEM; +out: + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "2921 Can't set rrq active xri:0x%x rxid:0x%x" + " DID:0x%x Send:%d\n", + xritag, rxid, did, send_rrq); + return -EINVAL; } /** - * __lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap. + * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap. * @phba: Pointer to HBA context object. * @xritag: xri used in this exchange. * @rrq: The RRQ to be cleared. * - * This function is called with hbalock held. This function **/ -static void -__lpfc_clr_rrq_active(struct lpfc_hba *phba, - uint16_t xritag, - struct lpfc_node_rrq *rrq) +void +lpfc_clr_rrq_active(struct lpfc_hba *phba, + uint16_t xritag, + struct lpfc_node_rrq *rrq) { uint16_t adj_xri; - struct lpfc_nodelist *ndlp; + struct lpfc_nodelist *ndlp = NULL; - ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID); + if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp)) + ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID); /* The target DID could have been swapped (cable swap) * we should use the ndlp from the findnode if it is * available. */ - if (!ndlp) + if ((!ndlp) && rrq->ndlp) ndlp = rrq->ndlp; + if (!ndlp) + goto out; + adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base; if (test_and_clear_bit(adj_xri, ndlp->active_rrqs.xri_bitmap)) { rrq->send_rrq = 0; rrq->xritag = 0; rrq->rrq_stop_time = 0; } +out: mempool_free(rrq, phba->rrq_pool); } @@ -627,34 +650,34 @@ lpfc_handle_rrq_active(struct lpfc_hba *phba) struct lpfc_node_rrq *nextrrq; unsigned long next_time; unsigned long iflags; + LIST_HEAD(send_rrq); spin_lock_irqsave(&phba->hbalock, iflags); phba->hba_flag &= ~HBA_RRQ_ACTIVE; next_time = jiffies + HZ * (phba->fc_ratov + 1); list_for_each_entry_safe(rrq, nextrrq, - &phba->active_rrq_list, list) { - if (time_after(jiffies, rrq->rrq_stop_time)) { - list_del(&rrq->list); - if (!rrq->send_rrq) - /* this call will free the rrq */ - __lpfc_clr_rrq_active(phba, rrq->xritag, rrq); - else { - /* if we send the rrq then the completion handler - * will clear the bit in the xribitmap. 
- */ - spin_unlock_irqrestore(&phba->hbalock, iflags); - if (lpfc_send_rrq(phba, rrq)) { - lpfc_clr_rrq_active(phba, rrq->xritag, - rrq); - } - spin_lock_irqsave(&phba->hbalock, iflags); - } - } else if (time_before(rrq->rrq_stop_time, next_time)) + &phba->active_rrq_list, list) { + if (time_after(jiffies, rrq->rrq_stop_time)) + list_move(&rrq->list, &send_rrq); + else if (time_before(rrq->rrq_stop_time, next_time)) next_time = rrq->rrq_stop_time; } spin_unlock_irqrestore(&phba->hbalock, iflags); if (!list_empty(&phba->active_rrq_list)) mod_timer(&phba->rrq_tmr, next_time); + list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) { + list_del(&rrq->list); + if (!rrq->send_rrq) + /* this call will free the rrq */ + lpfc_clr_rrq_active(phba, rrq->xritag, rrq); + else if (lpfc_send_rrq(phba, rrq)) { + /* if we send the rrq then the completion handler + * will clear the bit in the xribitmap. + */ + lpfc_clr_rrq_active(phba, rrq->xritag, + rrq); + } + } } /** @@ -692,29 +715,37 @@ lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did) /** * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport. * @vport: Pointer to vport context object. - * - * Remove all active RRQs for this vport from the phba->active_rrq_list and - * clear the rrq. + * @ndlp: Pointer to the lpfc_nodelist structure. + * If ndlp is NULL, remove all active RRQs for this vport from the + * phba->active_rrq_list and clear the rrq. + * If ndlp is not NULL, then only remove rrqs for this vport and this ndlp. **/ void -lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport) +lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) { struct lpfc_hba *phba = vport->phba; struct lpfc_node_rrq *rrq; struct lpfc_node_rrq *nextrrq; unsigned long iflags; + LIST_HEAD(rrq_list); if (phba->sli_rev != LPFC_SLI_REV4) return; - spin_lock_irqsave(&phba->hbalock, iflags); - list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) { - if (rrq->vport == vport) { - list_del(&rrq->list); - __lpfc_clr_rrq_active(phba, rrq->xritag, rrq); - } + if (!ndlp) { + lpfc_sli4_vport_delete_els_xri_aborted(vport); + lpfc_sli4_vport_delete_fcp_xri_aborted(vport); } + spin_lock_irqsave(&phba->hbalock, iflags); + list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) + if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp)) + list_move(&rrq->list, &rrq_list); spin_unlock_irqrestore(&phba->hbalock, iflags); + + list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) { + list_del(&rrq->list); + lpfc_clr_rrq_active(phba, rrq->xritag, rrq); + } } /** @@ -732,24 +763,27 @@ lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba) struct lpfc_node_rrq *nextrrq; unsigned long next_time; unsigned long iflags; + LIST_HEAD(rrq_list); if (phba->sli_rev != LPFC_SLI_REV4) return; spin_lock_irqsave(&phba->hbalock, iflags); phba->hba_flag &= ~HBA_RRQ_ACTIVE; next_time = jiffies + HZ * (phba->fc_ratov * 2); + list_splice_init(&phba->active_rrq_list, &rrq_list); + spin_unlock_irqrestore(&phba->hbalock, iflags); + + list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) { list_del(&rrq->list); - __lpfc_clr_rrq_active(phba, rrq->xritag, rrq); + lpfc_clr_rrq_active(phba, rrq->xritag, rrq); } - spin_unlock_irqrestore(&phba->hbalock, iflags); if (!list_empty(&phba->active_rrq_list)) mod_timer(&phba->rrq_tmr, next_time); } /** - * __lpfc_test_rrq_active - Test RRQ bit in xri_bitmap. + * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
* @phba: Pointer to HBA context object. * @ndlp: Targets nodelist pointer for this exchange. * @xritag the xri in the bitmap to test. @@ -758,8 +792,8 @@ lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba) * returns 0 = rrq not active for this xri * 1 = rrq is valid for this xri. **/ -static int -__lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, +int +lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, uint16_t xritag) { uint16_t adj_xri; @@ -802,52 +836,6 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, } /** - * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap. - * @phba: Pointer to HBA context object. - * @xritag: xri used in this exchange. - * @rrq: The RRQ to be cleared. - * - * This function is takes the hbalock. - **/ -void -lpfc_clr_rrq_active(struct lpfc_hba *phba, - uint16_t xritag, - struct lpfc_node_rrq *rrq) -{ - unsigned long iflags; - - spin_lock_irqsave(&phba->hbalock, iflags); - __lpfc_clr_rrq_active(phba, xritag, rrq); - spin_unlock_irqrestore(&phba->hbalock, iflags); - return; -} - - - -/** - * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap. - * @phba: Pointer to HBA context object. - * @ndlp: Targets nodelist pointer for this exchange. - * @xritag the xri in the bitmap to test. - * - * This function takes the hbalock. - * returns 0 = rrq not active for this xri - * 1 = rrq is valid for this xri. - **/ -int -lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, - uint16_t xritag) -{ - int ret; - unsigned long iflags; - - spin_lock_irqsave(&phba->hbalock, iflags); - ret = __lpfc_test_rrq_active(phba, ndlp, xritag); - spin_unlock_irqrestore(&phba->hbalock, iflags); - return ret; -} - -/** * __lpfc_sli_get_sglq - Allocates an iocb object from sgl pool * @phba: Pointer to HBA context object. * @piocb: Pointer to the iocbq. @@ -884,7 +872,7 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq) return NULL; adj_xri = sglq->sli4_xritag - phba->sli4_hba.max_cfg_param.xri_base; - if (__lpfc_test_rrq_active(phba, ndlp, sglq->sli4_xritag)) { + if (lpfc_test_rrq_active(phba, ndlp, sglq->sli4_xritag)) { /* This xri has an rrq outstanding for this DID. * put it back in the list and get another xri. 
*/ @@ -969,7 +957,8 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) } else { sglq->state = SGL_FREED; sglq->ndlp = NULL; - list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list); + list_add_tail(&sglq->list, + &phba->sli4_hba.lpfc_sgl_list); /* Check if TXQ queue needs to be serviced */ if (pring->txq_cnt) @@ -4817,7 +4806,10 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) "0378 No support for fcpi mode.\n"); ftr_rsp++; } - + if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs)) + phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED; + else + phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED; /* * If the port cannot support the host's requested features * then turn off the global config parameters to disable the @@ -5004,7 +4996,8 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) spin_lock_irq(&phba->hbalock); phba->link_state = LPFC_LINK_DOWN; spin_unlock_irq(&phba->hbalock); - rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT); + if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) + rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT); out_unset_queue: /* Unset all the queues set up in this routine when error out */ if (rc) @@ -10478,6 +10471,7 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, cq->type = type; cq->subtype = subtype; cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response); + cq->assoc_qid = eq->queue_id; cq->host_index = 0; cq->hba_index = 0; @@ -10672,6 +10666,7 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, goto out; } mq->type = LPFC_MQ; + mq->assoc_qid = cq->queue_id; mq->subtype = subtype; mq->host_index = 0; mq->hba_index = 0; @@ -10759,6 +10754,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, goto out; } wq->type = LPFC_WQ; + wq->assoc_qid = cq->queue_id; wq->subtype = subtype; wq->host_index = 0; wq->hba_index = 0; @@ -10876,6 +10872,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, goto out; } hrq->type = LPFC_HRQ; + hrq->assoc_qid = cq->queue_id; hrq->subtype = subtype; hrq->host_index = 0; hrq->hba_index = 0; @@ -10936,6 +10933,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, goto out; } drq->type = LPFC_DRQ; + drq->assoc_qid = cq->queue_id; drq->subtype = subtype; drq->host_index = 0; drq->hba_index = 0; @@ -11189,7 +11187,7 @@ lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq, if (!mbox) return -ENOMEM; length = (sizeof(struct lpfc_mbx_rq_destroy) - - sizeof(struct mbox_header)); + sizeof(struct lpfc_sli4_cfg_mhdr)); lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY, length, LPFC_SLI4_MBX_EMBED); @@ -11279,7 +11277,7 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba, lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, sizeof(struct lpfc_mbx_post_sgl_pages) - - sizeof(struct mbox_header), LPFC_SLI4_MBX_EMBED); + sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED); post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *) &mbox->u.mqe.un.post_sgl_pages; @@ -12402,7 +12400,8 @@ lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page) lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE, sizeof(struct lpfc_mbx_post_hdr_tmpl) - - sizeof(struct mbox_header), LPFC_SLI4_MBX_EMBED); + sizeof(struct lpfc_sli4_cfg_mhdr), + LPFC_SLI4_MBX_EMBED); bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt, hdr_tmpl, rpi_page->page_count); bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl, diff --git a/drivers/scsi/lpfc/lpfc_sli4.h 
b/drivers/scsi/lpfc/lpfc_sli4.h index c7217d579e0f..595056b89608 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -125,9 +125,9 @@ struct lpfc_queue { uint32_t entry_count; /* Number of entries to support on the queue */ uint32_t entry_size; /* Size of each queue entry. */ uint32_t queue_id; /* Queue ID assigned by the hardware */ + uint32_t assoc_qid; /* Queue ID associated with, for CQ/WQ/MQ */ struct list_head page_list; uint32_t page_count; /* Number of pages allocated for this queue */ - uint32_t host_index; /* The host's index for putting or getting */ uint32_t hba_index; /* The last known hba index for get or put */ union sli4_qe qe[1]; /* array to index entries (must be last) */ @@ -359,6 +359,10 @@ struct lpfc_pc_sli4_params { uint32_t hdr_pp_align; uint32_t sgl_pages_max; uint32_t sgl_pp_align; + uint8_t cqv; + uint8_t mqv; + uint8_t wqv; + uint8_t rqv; }; /* SLI4 HBA data structure entries */ @@ -562,6 +566,8 @@ void lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *, struct sli4_wcqe_xri_aborted *); void lpfc_sli4_els_xri_aborted(struct lpfc_hba *, struct sli4_wcqe_xri_aborted *); +void lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *); +void lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *); int lpfc_sli4_brdreset(struct lpfc_hba *); int lpfc_sli4_add_fcf_record(struct lpfc_hba *, struct fcf_record *); void lpfc_sli_remove_dflt_fcf(struct lpfc_hba *); diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index 386cf92de492..0a4d376dbca5 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2010 Emulex. All rights reserved. * + * Copyright (C) 2004-2011 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * * @@ -18,7 +18,7 @@ * included with this package. * *******************************************************************/ -#define LPFC_DRIVER_VERSION "8.3.20" +#define LPFC_DRIVER_VERSION "8.3.21" #define LPFC_DRIVER_NAME "lpfc" #define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" #define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp" diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c index 6b8d2952e32f..30ba5440c67a 100644 --- a/drivers/scsi/lpfc/lpfc_vport.c +++ b/drivers/scsi/lpfc/lpfc_vport.c @@ -464,6 +464,7 @@ disable_vport(struct fc_vport *fc_vport) struct lpfc_hba *phba = vport->phba; struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL; long timeout; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); ndlp = lpfc_findnode_did(vport, Fabric_DID); if (ndlp && NLP_CHK_NODE_ACT(ndlp) @@ -498,6 +499,9 @@ disable_vport(struct fc_vport *fc_vport) * scsi_host_put() to release the vport. */ lpfc_mbx_unreg_vpi(vport); + spin_lock_irq(shost->host_lock); + vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; + spin_unlock_irq(shost->host_lock); lpfc_vport_set_state(vport, FC_VPORT_DISABLED); lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c index 9aa048525eb2..c212694a9714 100644 --- a/drivers/scsi/megaraid.c +++ b/drivers/scsi/megaraid.c @@ -1412,7 +1412,7 @@ megaraid_isr_memmapped(int irq, void *devp) * @nstatus - number of completed commands * @status - status of the last command completed * - * Complete the comamnds and call the scsi mid-layer callback hooks. 
+ * Complete the commands and call the scsi mid-layer callback hooks. */ static void mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status) @@ -4296,7 +4296,7 @@ mega_support_cluster(adapter_t *adapter) * @adapter - pointer to our soft state * @dma_handle - DMA address of the buffer * - * Issue internal comamnds while interrupts are available. + * Issue internal commands while interrupts are available. * We only issue direct mailbox commands from within the driver. ioctl() * interface using these routines can issue passthru commands. */ diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index 1b5e375732c0..635b228c3ead 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -33,9 +33,9 @@ /* * MegaRAID SAS Driver meta data */ -#define MEGASAS_VERSION "00.00.05.29-rc1" -#define MEGASAS_RELDATE "Dec. 7, 2010" -#define MEGASAS_EXT_VERSION "Tue. Dec. 7 17:00:00 PDT 2010" +#define MEGASAS_VERSION "00.00.05.34-rc1" +#define MEGASAS_RELDATE "Feb. 24, 2011" +#define MEGASAS_EXT_VERSION "Thu. Feb. 24 17:00:00 PDT 2011" /* * Device IDs @@ -723,6 +723,7 @@ struct megasas_ctrl_info { MEGASAS_MAX_DEV_PER_CHANNEL) #define MEGASAS_MAX_SECTORS (2*1024) +#define MEGASAS_MAX_SECTORS_IEEE (2*128) #define MEGASAS_DBG_LVL 1 #define MEGASAS_FW_BUSY 1 @@ -1477,4 +1478,7 @@ struct megasas_mgmt_info { int max_index; }; +#define msi_control_reg(base) (base + PCI_MSI_FLAGS) +#define PCI_MSIX_FLAGS_ENABLE (1 << 15) + #endif /*LSI_MEGARAID_SAS_H */ diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 5d6d07bd1cd0..bbd10c81fd9c 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -18,12 +18,13 @@ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * FILE: megaraid_sas_base.c - * Version : v00.00.05.29-rc1 + * Version : v00.00.05.34-rc1 * * Authors: LSI Corporation * Sreenivas Bagalkote * Sumant Patro * Bo Yang + * Adam Radford <linuxraid@lsi.com> * * Send feedback to: <megaraidlinux@lsi.com> * @@ -134,7 +135,11 @@ spinlock_t poll_aen_lock; void megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, u8 alt_status); - +static u32 +megasas_read_fw_status_reg_gen2(struct megasas_register_set __iomem *regs); +static int +megasas_adp_reset_gen2(struct megasas_instance *instance, + struct megasas_register_set __iomem *reg_set); static irqreturn_t megasas_isr(int irq, void *devp); static u32 megasas_init_adapter_mfi(struct megasas_instance *instance); @@ -554,6 +559,8 @@ static int megasas_clear_intr_skinny(struct megasas_register_set __iomem *regs) { u32 status; + u32 mfiStatus = 0; + /* * Check if it is our interrupt */ @@ -564,6 +571,15 @@ megasas_clear_intr_skinny(struct megasas_register_set __iomem *regs) } /* + * Check if it is our interrupt + */ + if ((megasas_read_fw_status_reg_gen2(regs) & MFI_STATE_MASK) == + MFI_STATE_FAULT) { + mfiStatus = MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE; + } else + mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE; + + /* * Clear the interrupt by writing back the same value */ writel(status, ®s->outbound_intr_status); @@ -573,7 +589,7 @@ megasas_clear_intr_skinny(struct megasas_register_set __iomem *regs) */ readl(®s->outbound_intr_status); - return 1; + return mfiStatus; } /** @@ -597,17 +613,6 @@ megasas_fire_cmd_skinny(struct megasas_instance *instance, } /** - * megasas_adp_reset_skinny - For controller reset - * @regs: MFI register set - */ -static 
int -megasas_adp_reset_skinny(struct megasas_instance *instance, - struct megasas_register_set __iomem *regs) -{ - return 0; -} - -/** * megasas_check_reset_skinny - For controller reset check * @regs: MFI register set */ @@ -625,7 +630,7 @@ static struct megasas_instance_template megasas_instance_template_skinny = { .disable_intr = megasas_disable_intr_skinny, .clear_intr = megasas_clear_intr_skinny, .read_fw_status_reg = megasas_read_fw_status_reg_skinny, - .adp_reset = megasas_adp_reset_skinny, + .adp_reset = megasas_adp_reset_gen2, .check_reset = megasas_check_reset_skinny, .service_isr = megasas_isr, .tasklet = megasas_complete_cmd_dpc, @@ -740,20 +745,28 @@ megasas_adp_reset_gen2(struct megasas_instance *instance, { u32 retry = 0 ; u32 HostDiag; + u32 *seq_offset = ®_set->seq_offset; + u32 *hostdiag_offset = ®_set->host_diag; + + if (instance->instancet == &megasas_instance_template_skinny) { + seq_offset = ®_set->fusion_seq_offset; + hostdiag_offset = ®_set->fusion_host_diag; + } + + writel(0, seq_offset); + writel(4, seq_offset); + writel(0xb, seq_offset); + writel(2, seq_offset); + writel(7, seq_offset); + writel(0xd, seq_offset); - writel(0, ®_set->seq_offset); - writel(4, ®_set->seq_offset); - writel(0xb, ®_set->seq_offset); - writel(2, ®_set->seq_offset); - writel(7, ®_set->seq_offset); - writel(0xd, ®_set->seq_offset); msleep(1000); - HostDiag = (u32)readl(®_set->host_diag); + HostDiag = (u32)readl(hostdiag_offset); while ( !( HostDiag & DIAG_WRITE_ENABLE) ) { msleep(100); - HostDiag = (u32)readl(®_set->host_diag); + HostDiag = (u32)readl(hostdiag_offset); printk(KERN_NOTICE "RESETGEN2: retry=%x, hostdiag=%x\n", retry, HostDiag); @@ -764,14 +777,14 @@ megasas_adp_reset_gen2(struct megasas_instance *instance, printk(KERN_NOTICE "ADP_RESET_GEN2: HostDiag=%x\n", HostDiag); - writel((HostDiag | DIAG_RESET_ADAPTER), ®_set->host_diag); + writel((HostDiag | DIAG_RESET_ADAPTER), hostdiag_offset); ssleep(10); - HostDiag = (u32)readl(®_set->host_diag); + HostDiag = (u32)readl(hostdiag_offset); while ( ( HostDiag & DIAG_RESET_ADAPTER) ) { msleep(100); - HostDiag = (u32)readl(®_set->host_diag); + HostDiag = (u32)readl(hostdiag_offset); printk(KERN_NOTICE "RESET_GEN2: retry=%x, hostdiag=%x\n", retry, HostDiag); @@ -877,7 +890,7 @@ megasas_issue_blocked_cmd(struct megasas_instance *instance, * @instance: Adapter soft state * @cmd_to_abort: Previously issued cmd to be aborted * - * MFI firmware can abort previously issued AEN comamnd (automatic event + * MFI firmware can abort previously issued AEN command (automatic event * notification). The megasas_issue_blocked_abort_cmd() issues such abort * cmd and waits for return status. 
* Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs @@ -2503,7 +2516,9 @@ megasas_deplete_reply_queue(struct megasas_instance *instance, if ((mfiStatus = instance->instancet->clear_intr( instance->reg_set) ) == 0) { - return IRQ_NONE; + /* Hardware may not set outbound_intr_status in MSI-X mode */ + if (!instance->msi_flag) + return IRQ_NONE; } instance->mfiStatus = mfiStatus; @@ -2611,7 +2626,9 @@ megasas_transition_to_ready(struct megasas_instance* instance) case MFI_STATE_FAULT: printk(KERN_DEBUG "megasas: FW in FAULT state!!\n"); - return -ENODEV; + max_wait = MEGASAS_RESET_WAIT_TIME; + cur_state = MFI_STATE_FAULT; + break; case MFI_STATE_WAIT_HANDSHAKE: /* @@ -3424,7 +3441,6 @@ fail_reply_queue: megasas_free_cmds(instance); fail_alloc_cmds: - iounmap(instance->reg_set); return 1; } @@ -3494,7 +3510,7 @@ static int megasas_init_fw(struct megasas_instance *instance) /* Get operational params, sge flags, send init cmd to controller */ if (instance->instancet->init_adapter(instance)) - return -ENODEV; + goto fail_init_adapter; printk(KERN_ERR "megasas: INIT adapter done\n"); @@ -3543,7 +3559,7 @@ static int megasas_init_fw(struct megasas_instance *instance) * Setup tasklet for cmd completion */ - tasklet_init(&instance->isr_tasklet, megasas_complete_cmd_dpc, + tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet, (unsigned long)instance); /* Initialize the cmd completion timer */ @@ -3553,6 +3569,7 @@ static int megasas_init_fw(struct megasas_instance *instance) MEGASAS_COMPLETION_TIMER_INTERVAL); return 0; +fail_init_adapter: fail_ready_state: iounmap(instance->reg_set); @@ -3820,6 +3837,10 @@ static int megasas_io_attach(struct megasas_instance *instance) instance->max_fw_cmds - MEGASAS_INT_CMDS; host->this_id = instance->init_id; host->sg_tablesize = instance->max_num_sge; + + if (instance->fw_support_ieee) + instance->max_sectors_per_req = MEGASAS_MAX_SECTORS_IEEE; + /* * Check if the module parameter value for max_sectors can be used */ @@ -3899,9 +3920,26 @@ fail_set_dma_mask: static int __devinit megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) { - int rval; + int rval, pos; struct Scsi_Host *host; struct megasas_instance *instance; + u16 control = 0; + + /* Reset MSI-X in the kdump kernel */ + if (reset_devices) { + pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX); + if (pos) { + pci_read_config_word(pdev, msi_control_reg(pos), + &control); + if (control & PCI_MSIX_FLAGS_ENABLE) { + dev_info(&pdev->dev, "resetting MSI-X\n"); + pci_write_config_word(pdev, + msi_control_reg(pos), + control & + ~PCI_MSIX_FLAGS_ENABLE); + } + } + } /* * Announce PCI information @@ -4039,12 +4077,6 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) else INIT_WORK(&instance->work_init, process_fw_state_change_wq); - /* - * Initialize MFI Firmware - */ - if (megasas_init_fw(instance)) - goto fail_init_mfi; - /* Try to enable MSI-X */ if ((instance->pdev->device != PCI_DEVICE_ID_LSI_SAS1078R) && (instance->pdev->device != PCI_DEVICE_ID_LSI_SAS1078DE) && @@ -4054,6 +4086,12 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) instance->msi_flag = 1; /* + * Initialize MFI Firmware + */ + if (megasas_init_fw(instance)) + goto fail_init_mfi; + + /* * Register IRQ */ if (request_irq(instance->msi_flag ? instance->msixentry.vector : @@ -4105,24 +4143,23 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) instance->instancet->disable_intr(instance->reg_set); free_irq(instance->msi_flag ? 
instance->msixentry.vector : instance->pdev->irq, instance); +fail_irq: + if (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) + megasas_release_fusion(instance); + else + megasas_release_mfi(instance); + fail_init_mfi: if (instance->msi_flag) pci_disable_msix(instance->pdev); - - fail_irq: - fail_init_mfi: fail_alloc_dma_buf: if (instance->evt_detail) pci_free_consistent(pdev, sizeof(struct megasas_evt_detail), instance->evt_detail, instance->evt_detail_h); - if (instance->producer) { + if (instance->producer) pci_free_consistent(pdev, sizeof(u32), instance->producer, instance->producer_h); - megasas_release_mfi(instance); - } else { - megasas_release_fusion(instance); - } if (instance->consumer) pci_free_consistent(pdev, sizeof(u32), instance->consumer, instance->consumer_h); @@ -4242,9 +4279,8 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state) /* cancel the delayed work if this work still in queue */ if (instance->ev != NULL) { struct megasas_aen_event *ev = instance->ev; - cancel_delayed_work( + cancel_delayed_work_sync( (struct delayed_work *)&ev->hotplug_work); - flush_scheduled_work(); instance->ev = NULL; } @@ -4297,6 +4333,10 @@ megasas_resume(struct pci_dev *pdev) if (megasas_set_dma_mask(pdev)) goto fail_set_dma_mask; + /* Now re-enable MSI-X */ + if (instance->msi_flag) + pci_enable_msix(instance->pdev, &instance->msixentry, 1); + /* * Initialize MFI Firmware */ @@ -4333,10 +4373,6 @@ megasas_resume(struct pci_dev *pdev) tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet, (unsigned long)instance); - /* Now re-enable MSI-X */ - if (instance->msi_flag) - pci_enable_msix(instance->pdev, &instance->msixentry, 1); - /* * Register IRQ */ @@ -4417,9 +4453,8 @@ static void __devexit megasas_detach_one(struct pci_dev *pdev) /* cancel the delayed work if this work still in queue*/ if (instance->ev != NULL) { struct megasas_aen_event *ev = instance->ev; - cancel_delayed_work( + cancel_delayed_work_sync( (struct delayed_work *)&ev->hotplug_work); - flush_scheduled_work(); instance->ev = NULL; } @@ -4611,6 +4646,9 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance, * For each user buffer, create a mirror buffer and copy in */ for (i = 0; i < ioc->sge_count; i++) { + if (!ioc->sgl[i].iov_len) + continue; + kbuff_arr[i] = dma_alloc_coherent(&instance->pdev->dev, ioc->sgl[i].iov_len, &buf_handle, GFP_KERNEL); @@ -5177,6 +5215,7 @@ megasas_aen_polling(struct work_struct *work) break; case MR_EVT_LD_OFFLINE: + case MR_EVT_CFG_CLEARED: case MR_EVT_LD_DELETED: megasas_get_ld_list(instance); for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) { diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index d6e2a663b165..145a8cffb1fa 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -81,6 +81,10 @@ u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map); struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_FW_RAID_MAP_ALL *map); u16 MR_GetLDTgtId(u32 ld, struct MR_FW_RAID_MAP_ALL *map); + +void +megasas_check_and_restore_queue_depth(struct megasas_instance *instance); + u8 MR_ValidateMapInfo(struct MR_FW_RAID_MAP_ALL *map, struct LD_LOAD_BALANCE_INFO *lbInfo); u16 get_updated_dev_handle(struct LD_LOAD_BALANCE_INFO *lbInfo, @@ -983,13 +987,15 @@ megasas_init_adapter_fusion(struct megasas_instance *instance) return 0; -fail_alloc_cmds: -fail_alloc_mfi_cmds: fail_map_info: if (i == 1) dma_free_coherent(&instance->pdev->dev, fusion->map_sz, fusion->ld_map[0], 
fusion->ld_map_phys[0]); fail_ioc_init: + megasas_free_cmds_fusion(instance); +fail_alloc_cmds: + megasas_free_cmds(instance); +fail_alloc_mfi_cmds: return 1; } @@ -1431,8 +1437,7 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance, local_map_ptr = fusion->ld_map[(instance->map_id & 1)]; /* Check if this is a system PD I/O */ - if ((instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) && - (instance->pd_list[pd_index].driveType == TYPE_DISK)) { + if (instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) { io_request->Function = 0; io_request->DevHandle = local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl; @@ -1455,7 +1460,7 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance, MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); } io_request->RaidContext.VirtualDiskTgtId = device_id; - io_request->LUN[0] = scmd->device->lun; + io_request->LUN[1] = scmd->device->lun; io_request->DataLength = scsi_bufflen(scmd); } @@ -1479,7 +1484,7 @@ megasas_build_io_fusion(struct megasas_instance *instance, device_id = MEGASAS_DEV_INDEX(instance, scp); /* Zero out some fields so they don't get reused */ - io_request->LUN[0] = 0; + io_request->LUN[1] = 0; io_request->CDB.EEDP32.PrimaryReferenceTag = 0; io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0; io_request->EEDPFlags = 0; @@ -1743,7 +1748,7 @@ complete_cmd_fusion(struct megasas_instance *instance) wmb(); writel(fusion->last_reply_idx, &instance->reg_set->reply_post_host_index); - + megasas_check_and_restore_queue_depth(instance); return IRQ_HANDLED; } diff --git a/drivers/scsi/mpt2sas/mpi/mpi2.h b/drivers/scsi/mpt2sas/mpi/mpi2.h index 8be75e65f763..a3e60385787f 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2.h +++ b/drivers/scsi/mpt2sas/mpi/mpi2.h @@ -8,7 +8,7 @@ * scatter/gather formats. * Creation Date: June 21, 2006 * - * mpi2.h Version: 02.00.16 + * mpi2.h Version: 02.00.17 * * Version History * --------------- @@ -63,6 +63,7 @@ * function codes, 0xF0 to 0xFF. * 05-12-10 02.00.16 Bumped MPI2_HEADER_VERSION_UNIT. * Added alternative defines for the SGE Direction bit. + * 08-11-10 02.00.17 Bumped MPI2_HEADER_VERSION_UNIT. * -------------------------------------------------------------------------- */ @@ -88,7 +89,7 @@ #define MPI2_VERSION_02_00 (0x0200) /* versioning for this MPI header set */ -#define MPI2_HEADER_VERSION_UNIT (0x10) +#define MPI2_HEADER_VERSION_UNIT (0x11) #define MPI2_HEADER_VERSION_DEV (0x00) #define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00) #define MPI2_HEADER_VERSION_UNIT_SHIFT (8) diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h index d76a65847603..f5b9c766e28f 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h +++ b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h @@ -6,7 +6,7 @@ * Title: MPI Configuration messages and pages * Creation Date: November 10, 2006 * - * mpi2_cnfg.h Version: 02.00.15 + * mpi2_cnfg.h Version: 02.00.16 * * Version History * --------------- @@ -125,6 +125,8 @@ * define. * Added MPI2_PHYSDISK0_INCOMPATIBLE_MEDIA_TYPE define. * Added MPI2_SAS_NEG_LINK_RATE_UNSUPPORTED_PHY define. + * 08-11-10 02.00.16 Removed IO Unit Page 1 device path (multi-pathing) + * defines. 
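The megasas_probe_one() hunk above clears a stale MSI-X enable bit before the kdump kernel initializes an adapter that the crashed kernel left with MSI-X armed. A minimal sketch of that config-space sequence, using the generic PCI_MSIX_FLAGS register offset in place of the driver-private msi_control_reg() helper (that substitution is my assumption):

#include <linux/pci.h>

/* Sketch: disable a leftover MSI-X enable in the kdump kernel.
 * PCI_CAP_ID_MSIX, PCI_MSIX_FLAGS and PCI_MSIX_FLAGS_ENABLE are the
 * standard pci_regs.h names; the driver itself reaches the same
 * control word through its msi_control_reg() macro.
 */
static void example_reset_msix(struct pci_dev *pdev)
{
        int pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
        u16 control;

        if (!pos)
                return;

        pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS, &control);
        if (control & PCI_MSIX_FLAGS_ENABLE)
                pci_write_config_word(pdev, pos + PCI_MSIX_FLAGS,
                                      control & ~PCI_MSIX_FLAGS_ENABLE);
}

Left armed, the stale MSI-X state can swallow the legacy or MSI interrupts the kdump kernel expects during early probe, which is why the reset_devices path above masks it before touching the controller.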
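A subtler fix in the fusion hunks above is the LUN[0] -> LUN[1] change: under SAM-2 single-level (peripheral) addressing, byte 0 of the 8-byte LUN field carries the address-method and bus bits while byte 1 carries the LUN itself, so writing the LUN into byte 0 corrupted the address-method field. A sketch of that encoding, assuming lun < 256 (the kernel's int_to_scsilun() handles the general multi-level case):

#include <linux/string.h>
#include <linux/types.h>

static void example_set_peripheral_lun(u8 lun_field[8], unsigned int lun)
{
        memset(lun_field, 0, 8);        /* byte 0: address method 00b, bus 0 */
        lun_field[1] = lun & 0xff;      /* byte 1: the LUN proper */
}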
* -------------------------------------------------------------------------- */ @@ -745,8 +747,6 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_1 #define MPI2_IOUNITPAGE1_DISABLE_IR (0x00000040) #define MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING (0x00000020) #define MPI2_IOUNITPAGE1_IR_USE_STATIC_VOLUME_ID (0x00000004) -#define MPI2_IOUNITPAGE1_MULTI_PATHING (0x00000002) -#define MPI2_IOUNITPAGE1_SINGLE_PATHING (0x00000000) /* IO Unit Page 3 */ diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_history.txt b/drivers/scsi/mpt2sas/mpi/mpi2_history.txt deleted file mode 100644 index b1e88f26b748..000000000000 --- a/drivers/scsi/mpt2sas/mpi/mpi2_history.txt +++ /dev/null @@ -1,384 +0,0 @@ - ============================== - Fusion-MPT MPI 2.0 Header File Change History - ============================== - - Copyright (c) 2000-2010 LSI Corporation. - - --------------------------------------- - Header Set Release Version: 02.00.14 - Header Set Release Date: 10-28-09 - --------------------------------------- - - Filename Current version Prior version - ---------- --------------- ------------- - mpi2.h 02.00.14 02.00.13 - mpi2_cnfg.h 02.00.13 02.00.12 - mpi2_init.h 02.00.08 02.00.07 - mpi2_ioc.h 02.00.13 02.00.12 - mpi2_raid.h 02.00.04 02.00.04 - mpi2_sas.h 02.00.03 02.00.02 - mpi2_targ.h 02.00.03 02.00.03 - mpi2_tool.h 02.00.04 02.00.04 - mpi2_type.h 02.00.00 02.00.00 - mpi2_ra.h 02.00.00 02.00.00 - mpi2_hbd.h 02.00.00 - mpi2_history.txt 02.00.14 02.00.13 - - - * Date Version Description - * -------- -------- ------------------------------------------------------ - -mpi2.h - * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. - * 06-04-07 02.00.01 Bumped MPI2_HEADER_VERSION_UNIT. - * 06-26-07 02.00.02 Bumped MPI2_HEADER_VERSION_UNIT. - * 08-31-07 02.00.03 Bumped MPI2_HEADER_VERSION_UNIT. - * Moved ReplyPostHostIndex register to offset 0x6C of the - * MPI2_SYSTEM_INTERFACE_REGS and modified the define for - * MPI2_REPLY_POST_HOST_INDEX_OFFSET. - * Added union of request descriptors. - * Added union of reply descriptors. - * 10-31-07 02.00.04 Bumped MPI2_HEADER_VERSION_UNIT. - * Added define for MPI2_VERSION_02_00. - * Fixed the size of the FunctionDependent5 field in the - * MPI2_DEFAULT_REPLY structure. - * 12-18-07 02.00.05 Bumped MPI2_HEADER_VERSION_UNIT. - * Removed the MPI-defined Fault Codes and extended the - * product specific codes up to 0xEFFF. - * Added a sixth key value for the WriteSequence register - * and changed the flush value to 0x0. - * Added message function codes for Diagnostic Buffer Post - * and Diagnsotic Release. - * New IOCStatus define: MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED - * Moved MPI2_VERSION_UNION from mpi2_ioc.h. - * 02-29-08 02.00.06 Bumped MPI2_HEADER_VERSION_UNIT. - * 03-03-08 02.00.07 Bumped MPI2_HEADER_VERSION_UNIT. - * 05-21-08 02.00.08 Bumped MPI2_HEADER_VERSION_UNIT. - * Added #defines for marking a reply descriptor as unused. - * 06-27-08 02.00.09 Bumped MPI2_HEADER_VERSION_UNIT. - * 10-02-08 02.00.10 Bumped MPI2_HEADER_VERSION_UNIT. - * Moved LUN field defines from mpi2_init.h. - * 01-19-09 02.00.11 Bumped MPI2_HEADER_VERSION_UNIT. - * 05-06-09 02.00.12 Bumped MPI2_HEADER_VERSION_UNIT. - * In all request and reply descriptors, replaced VF_ID - * field with MSIxIndex field. - * Removed DevHandle field from - * MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR and made those - * bytes reserved. - * Added RAID Accelerator functionality. - * 07-30-09 02.00.13 Bumped MPI2_HEADER_VERSION_UNIT. - * 10-28-09 02.00.14 Bumped MPI2_HEADER_VERSION_UNIT. 
- * Added MSI-x index mask and shift for Reply Post Host - * Index register. - * Added function code for Host Based Discovery Action. - * -------------------------------------------------------------------------- - -mpi2_cnfg.h - * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. - * 06-04-07 02.00.01 Added defines for SAS IO Unit Page 2 PhyFlags. - * Added Manufacturing Page 11. - * Added MPI2_SAS_EXPANDER0_FLAGS_CONNECTOR_END_DEVICE - * define. - * 06-26-07 02.00.02 Adding generic structure for product-specific - * Manufacturing pages: MPI2_CONFIG_PAGE_MANUFACTURING_PS. - * Rework of BIOS Page 2 configuration page. - * Fixed MPI2_BIOSPAGE2_BOOT_DEVICE to be a union of the - * forms. - * Added configuration pages IOC Page 8 and Driver - * Persistent Mapping Page 0. - * 08-31-07 02.00.03 Modified configuration pages dealing with Integrated - * RAID (Manufacturing Page 4, RAID Volume Pages 0 and 1, - * RAID Physical Disk Pages 0 and 1, RAID Configuration - * Page 0). - * Added new value for AccessStatus field of SAS Device - * Page 0 (_SATA_NEEDS_INITIALIZATION). - * 10-31-07 02.00.04 Added missing SEPDevHandle field to - * MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0. - * 12-18-07 02.00.05 Modified IO Unit Page 0 to use 32-bit version fields for - * NVDATA. - * Modified IOC Page 7 to use masks and added field for - * SASBroadcastPrimitiveMasks. - * Added MPI2_CONFIG_PAGE_BIOS_4. - * Added MPI2_CONFIG_PAGE_LOG_0. - * 02-29-08 02.00.06 Modified various names to make them 32-character unique. - * Added SAS Device IDs. - * Updated Integrated RAID configuration pages including - * Manufacturing Page 4, IOC Page 6, and RAID Configuration - * Page 0. - * 05-21-08 02.00.07 Added define MPI2_MANPAGE4_MIX_SSD_SAS_SATA. - * Added define MPI2_MANPAGE4_PHYSDISK_128MB_COERCION. - * Fixed define MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING. - * Added missing MaxNumRoutedSasAddresses field to - * MPI2_CONFIG_PAGE_EXPANDER_0. - * Added SAS Port Page 0. - * Modified structure layout for - * MPI2_CONFIG_PAGE_DRIVER_MAPPING_0. - * 06-27-08 02.00.08 Changed MPI2_CONFIG_PAGE_RD_PDISK_1 to use - * MPI2_RAID_PHYS_DISK1_PATH_MAX to size the array. - * 10-02-08 02.00.09 Changed MPI2_RAID_PGAD_CONFIGNUM_MASK from 0x0000FFFF - * to 0x000000FF. - * Added two new values for the Physical Disk Coercion Size - * bits in the Flags field of Manufacturing Page 4. - * Added product-specific Manufacturing pages 16 to 31. - * Modified Flags bits for controlling write cache on SATA - * drives in IO Unit Page 1. - * Added new bit to AdditionalControlFlags of SAS IO Unit - * Page 1 to control Invalid Topology Correction. - * Added SupportedPhysDisks field to RAID Volume Page 1 and - * added related defines. - * Added additional defines for RAID Volume Page 0 - * VolumeStatusFlags field. - * Modified meaning of RAID Volume Page 0 VolumeSettings - * define for auto-configure of hot-swap drives. - * Added PhysDiskAttributes field (and related defines) to - * RAID Physical Disk Page 0. - * Added MPI2_SAS_PHYINFO_PHY_VACANT define. - * Added three new DiscoveryStatus bits for SAS IO Unit - * Page 0 and SAS Expander Page 0. - * Removed multiplexing information from SAS IO Unit pages. - * Added BootDeviceWaitTime field to SAS IO Unit Page 4. - * Removed Zone Address Resolved bit from PhyInfo and from - * Expander Page 0 Flags field. - * Added two new AccessStatus values to SAS Device Page 0 - * for indicating routing problems. Added 3 reserved words - * to this page. 
- * 01-19-09 02.00.10 Fixed defines for GPIOVal field of IO Unit Page 3. - * Inserted missing reserved field into structure for IOC - * Page 6. - * Added more pending task bits to RAID Volume Page 0 - * VolumeStatusFlags defines. - * Added MPI2_PHYSDISK0_STATUS_FLAG_NOT_CERTIFIED define. - * Added a new DiscoveryStatus bit for SAS IO Unit Page 0 - * and SAS Expander Page 0 to flag a downstream initiator - * when in simplified routing mode. - * Removed SATA Init Failure defines for DiscoveryStatus - * fields of SAS IO Unit Page 0 and SAS Expander Page 0. - * Added MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED define. - * Added PortGroups, DmaGroup, and ControlGroup fields to - * SAS Device Page 0. - * 05-06-09 02.00.11 Added structures and defines for IO Unit Page 5 and IO - * Unit Page 6. - * Added expander reduced functionality data to SAS - * Expander Page 0. - * Added SAS PHY Page 2 and SAS PHY Page 3. - * 07-30-09 02.00.12 Added IO Unit Page 7. - * Added new device ids. - * Added SAS IO Unit Page 5. - * Added partial and slumber power management capable flags - * to SAS Device Page 0 Flags field. - * Added PhyInfo defines for power condition. - * Added Ethernet configuration pages. - * 10-28-09 02.00.13 Added MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY. - * Added SAS PHY Page 4 structure and defines. - * -------------------------------------------------------------------------- - -mpi2_init.h - * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. - * 10-31-07 02.00.01 Fixed name for pMpi2SCSITaskManagementRequest_t. - * 12-18-07 02.00.02 Modified Task Management Target Reset Method defines. - * 02-29-08 02.00.03 Added Query Task Set and Query Unit Attention. - * 03-03-08 02.00.04 Fixed name of struct _MPI2_SCSI_TASK_MANAGE_REPLY. - * 05-21-08 02.00.05 Fixed typo in name of Mpi2SepRequest_t. - * 10-02-08 02.00.06 Removed Untagged and No Disconnect values from SCSI IO - * Control field Task Attribute flags. - * Moved LUN field defines to mpi2.h becasue they are - * common to many structures. - * 05-06-09 02.00.07 Changed task management type of Query Unit Attention to - * Query Asynchronous Event. - * Defined two new bits in the SlotStatus field of the SCSI - * Enclosure Processor Request and Reply. - * 10-28-09 02.00.08 Added defines for decoding the ResponseInfo bytes for - * both SCSI IO Error Reply and SCSI Task Management Reply. - * Added ResponseInfo field to MPI2_SCSI_TASK_MANAGE_REPLY. - * Added MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG define. - * -------------------------------------------------------------------------- - -mpi2_ioc.h - * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. - * 06-04-07 02.00.01 In IOCFacts Reply structure, renamed MaxDevices to - * MaxTargets. - * Added TotalImageSize field to FWDownload Request. - * Added reserved words to FWUpload Request. - * 06-26-07 02.00.02 Added IR Configuration Change List Event. - * 08-31-07 02.00.03 Removed SystemReplyQueueDepth field from the IOCInit - * request and replaced it with - * ReplyDescriptorPostQueueDepth and ReplyFreeQueueDepth. - * Replaced the MinReplyQueueDepth field of the IOCFacts - * reply with MaxReplyDescriptorPostQueueDepth. - * Added MPI2_RDPQ_DEPTH_MIN define to specify the minimum - * depth for the Reply Descriptor Post Queue. - * Added SASAddress field to Initiator Device Table - * Overflow Event data. - * 10-31-07 02.00.04 Added ReasonCode MPI2_EVENT_SAS_INIT_RC_NOT_RESPONDING - * for SAS Initiator Device Status Change Event data. 
- * Modified Reason Code defines for SAS Topology Change - * List Event data, including adding a bit for PHY Vacant - * status, and adding a mask for the Reason Code. - * Added define for - * MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING. - * Added define for MPI2_EXT_IMAGE_TYPE_MEGARAID. - * 12-18-07 02.00.05 Added Boot Status defines for the IOCExceptions field of - * the IOCFacts Reply. - * Removed MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER define. - * Moved MPI2_VERSION_UNION to mpi2.h. - * Changed MPI2_EVENT_NOTIFICATION_REQUEST to use masks - * instead of enables, and added SASBroadcastPrimitiveMasks - * field. - * Added Log Entry Added Event and related structure. - * 02-29-08 02.00.06 Added define MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID. - * Removed define MPI2_IOCFACTS_PROTOCOL_SMP_TARGET. - * Added MaxVolumes and MaxPersistentEntries fields to - * IOCFacts reply. - * Added ProtocalFlags and IOCCapabilities fields to - * MPI2_FW_IMAGE_HEADER. - * Removed MPI2_PORTENABLE_FLAGS_ENABLE_SINGLE_PORT. - * 03-03-08 02.00.07 Fixed MPI2_FW_IMAGE_HEADER by changing Reserved26 to - * a U16 (from a U32). - * Removed extra 's' from EventMasks name. - * 06-27-08 02.00.08 Fixed an offset in a comment. - * 10-02-08 02.00.09 Removed SystemReplyFrameSize from MPI2_IOC_INIT_REQUEST. - * Removed CurReplyFrameSize from MPI2_IOC_FACTS_REPLY and - * renamed MinReplyFrameSize to ReplyFrameSize. - * Added MPI2_IOCFACTS_EXCEPT_IR_FOREIGN_CONFIG_MAX. - * Added two new RAIDOperation values for Integrated RAID - * Operations Status Event data. - * Added four new IR Configuration Change List Event data - * ReasonCode values. - * Added two new ReasonCode defines for SAS Device Status - * Change Event data. - * Added three new DiscoveryStatus bits for the SAS - * Discovery event data. - * Added Multiplexing Status Change bit to the PhyStatus - * field of the SAS Topology Change List event data. - * Removed define for MPI2_INIT_IMAGE_BOOTFLAGS_XMEMCOPY. - * BootFlags are now product-specific. - * Added defines for the indivdual signature bytes - * for MPI2_INIT_IMAGE_FOOTER. - * 01-19-09 02.00.10 Added MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY define. - * Added MPI2_EVENT_SAS_DISC_DS_DOWNSTREAM_INITIATOR - * define. - * Added MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE - * define. - * Removed MPI2_EVENT_SAS_DISC_DS_SATA_INIT_FAILURE define. - * 05-06-09 02.00.11 Added MPI2_IOCFACTS_CAPABILITY_RAID_ACCELERATOR define. - * Added MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX define. - * Added two new reason codes for SAS Device Status Change - * Event. - * Added new event: SAS PHY Counter. - * 07-30-09 02.00.12 Added GPIO Interrupt event define and structure. - * Added MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER define. - * Added new product id family for 2208. - * 10-28-09 02.00.13 Added HostMSIxVectors field to MPI2_IOC_INIT_REQUEST. - * Added MaxMSIxVectors field to MPI2_IOC_FACTS_REPLY. - * Added MinDevHandle field to MPI2_IOC_FACTS_REPLY. - * Added MPI2_IOCFACTS_CAPABILITY_HOST_BASED_DISCOVERY. - * Added MPI2_EVENT_HOST_BASED_DISCOVERY_PHY define. - * Added MPI2_EVENT_SAS_TOPO_ES_NO_EXPANDER define. - * Added Host Based Discovery Phy Event data. - * Added defines for ProductID Product field - * (MPI2_FW_HEADER_PID_). - * Modified values for SAS ProductID Family - * (MPI2_FW_HEADER_PID_FAMILY_). - * -------------------------------------------------------------------------- - -mpi2_raid.h - * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. 
- * 08-31-07 02.00.01 Modifications to RAID Action request and reply, - * including the Actions and ActionData. - * 02-29-08 02.00.02 Added MPI2_RAID_ACTION_ADATA_DISABL_FULL_REBUILD. - * 05-21-08 02.00.03 Added MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS so that - * the PhysDisk array in MPI2_RAID_VOLUME_CREATION_STRUCT - * can be sized by the build environment. - * 07-30-09 02.00.04 Added proper define for the Use Default Settings bit of - * VolumeCreationFlags and marked the old one as obsolete. - * 05-12-10 02.00.05 Added MPI2_RAID_VOL_FLAGS_OP_MDC define. - * -------------------------------------------------------------------------- - -mpi2_sas.h - * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. - * 06-26-07 02.00.01 Added Clear All Persistent Operation to SAS IO Unit - * Control Request. - * 10-02-08 02.00.02 Added Set IOC Parameter Operation to SAS IO Unit Control - * Request. - * 10-28-09 02.00.03 Changed the type of SGL in MPI2_SATA_PASSTHROUGH_REQUEST - * to MPI2_SGE_IO_UNION since it supports chained SGLs. - * 05-12-10 02.00.04 Modified some comments. - * -------------------------------------------------------------------------- - -mpi2_targ.h - * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. - * 08-31-07 02.00.01 Added Command Buffer Data Location Address Space bits to - * BufferPostFlags field of CommandBufferPostBase Request. - * 02-29-08 02.00.02 Modified various names to make them 32-character unique. - * 10-02-08 02.00.03 Removed NextCmdBufferOffset from - * MPI2_TARGET_CMD_BUF_POST_BASE_REQUEST. - * Target Status Send Request only takes a single SGE for - * response data. - * -------------------------------------------------------------------------- - -mpi2_tool.h - * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. - * 12-18-07 02.00.01 Added Diagnostic Buffer Post and Diagnostic Release - * structures and defines. - * 02-29-08 02.00.02 Modified various names to make them 32-character unique. - * 05-06-09 02.00.03 Added ISTWI Read Write Tool and Diagnostic CLI Tool. - * 07-30-09 02.00.04 Added ExtendedType field to DiagnosticBufferPost request - * and reply messages. - * Added MPI2_DIAG_BUF_TYPE_EXTENDED. - * Incremented MPI2_DIAG_BUF_TYPE_COUNT. - * 05-12-10 02.00.05 Added Diagnostic Data Upload tool. - * -------------------------------------------------------------------------- - -mpi2_type.h - * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. - * -------------------------------------------------------------------------- - -mpi2_ra.h - * 05-06-09 02.00.00 Initial version. - * -------------------------------------------------------------------------- - -mpi2_hbd.h - * 10-28-09 02.00.00 Initial version. 
- * -------------------------------------------------------------------------- - - -mpi2_history.txt Parts list history - -Filename 02.00.14 02.00.13 02.00.12 ----------- -------- -------- -------- -mpi2.h 02.00.14 02.00.13 02.00.12 -mpi2_cnfg.h 02.00.13 02.00.12 02.00.11 -mpi2_init.h 02.00.08 02.00.07 02.00.07 -mpi2_ioc.h 02.00.13 02.00.12 02.00.11 -mpi2_raid.h 02.00.04 02.00.04 02.00.03 -mpi2_sas.h 02.00.03 02.00.02 02.00.02 -mpi2_targ.h 02.00.03 02.00.03 02.00.03 -mpi2_tool.h 02.00.04 02.00.04 02.00.03 -mpi2_type.h 02.00.00 02.00.00 02.00.00 -mpi2_ra.h 02.00.00 02.00.00 02.00.00 -mpi2_hbd.h 02.00.00 - -Filename 02.00.11 02.00.10 02.00.09 02.00.08 02.00.07 02.00.06 ----------- -------- -------- -------- -------- -------- -------- -mpi2.h 02.00.11 02.00.10 02.00.09 02.00.08 02.00.07 02.00.06 -mpi2_cnfg.h 02.00.10 02.00.09 02.00.08 02.00.07 02.00.06 02.00.06 -mpi2_init.h 02.00.06 02.00.06 02.00.05 02.00.05 02.00.04 02.00.03 -mpi2_ioc.h 02.00.10 02.00.09 02.00.08 02.00.07 02.00.07 02.00.06 -mpi2_raid.h 02.00.03 02.00.03 02.00.03 02.00.03 02.00.02 02.00.02 -mpi2_sas.h 02.00.02 02.00.02 02.00.01 02.00.01 02.00.01 02.00.01 -mpi2_targ.h 02.00.03 02.00.03 02.00.02 02.00.02 02.00.02 02.00.02 -mpi2_tool.h 02.00.02 02.00.02 02.00.02 02.00.02 02.00.02 02.00.02 -mpi2_type.h 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 - -Filename 02.00.05 02.00.04 02.00.03 02.00.02 02.00.01 02.00.00 ----------- -------- -------- -------- -------- -------- -------- -mpi2.h 02.00.05 02.00.04 02.00.03 02.00.02 02.00.01 02.00.00 -mpi2_cnfg.h 02.00.05 02.00.04 02.00.03 02.00.02 02.00.01 02.00.00 -mpi2_init.h 02.00.02 02.00.01 02.00.00 02.00.00 02.00.00 02.00.00 -mpi2_ioc.h 02.00.05 02.00.04 02.00.03 02.00.02 02.00.01 02.00.00 -mpi2_raid.h 02.00.01 02.00.01 02.00.01 02.00.00 02.00.00 02.00.00 -mpi2_sas.h 02.00.01 02.00.01 02.00.01 02.00.01 02.00.00 02.00.00 -mpi2_targ.h 02.00.01 02.00.01 02.00.01 02.00.00 02.00.00 02.00.00 -mpi2_tool.h 02.00.01 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 -mpi2_type.h 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 02.00.00 - diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_sas.h b/drivers/scsi/mpt2sas/mpi/mpi2_sas.h index 608f6d6e6fca..fdffde1ebc0f 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2_sas.h +++ b/drivers/scsi/mpt2sas/mpi/mpi2_sas.h @@ -6,7 +6,7 @@ * Title: MPI Serial Attached SCSI structures and definitions * Creation Date: February 9, 2007 * - * mpi2_sas.h Version: 02.00.04 + * mpi2_sas.h Version: 02.00.05 * * Version History * --------------- @@ -21,6 +21,7 @@ * 10-28-09 02.00.03 Changed the type of SGL in MPI2_SATA_PASSTHROUGH_REQUEST * to MPI2_SGE_IO_UNION since it supports chained SGLs. * 05-12-10 02.00.04 Modified some comments. + * 08-11-10 02.00.05 Added NCQ operations to SAS IO Unit Control. 
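Two details worth unpacking in the mpi2_sas.h hunk that follows: the SGL offset comment changes from 0x20 to 0x30 because CommandFIS[20] starts at 0x1C, so the union behind it begins at 0x1C + 20 = 0x30; and the new MPI2_SAS_OP_DEV_ENABLE_NCQ/MPI2_SAS_OP_DEV_DISABLE_NCQ opcodes extend the SAS IO Unit Control request. A build-time assertion makes the layout claim self-verifying; a sketch, assuming the MPI headers are in scope:

#include <linux/bug.h>
#include <linux/stddef.h>

static inline void example_check_sata_pt_layout(void)
{
        /* CommandFIS[20] at 0x1C => SGL at 0x1C + 20 = 0x30, the value
         * the corrected comment records. */
        BUILD_BUG_ON(offsetof(Mpi2SataPassthroughRequest_t, SGL) != 0x30);
}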
* -------------------------------------------------------------------------- */ @@ -163,7 +164,7 @@ typedef struct _MPI2_SATA_PASSTHROUGH_REQUEST U32 Reserved4; /* 0x14 */ U32 DataLength; /* 0x18 */ U8 CommandFIS[20]; /* 0x1C */ - MPI2_SGE_IO_UNION SGL; /* 0x20 */ + MPI2_SGE_IO_UNION SGL; /* 0x30 */ } MPI2_SATA_PASSTHROUGH_REQUEST, MPI2_POINTER PTR_MPI2_SATA_PASSTHROUGH_REQUEST, Mpi2SataPassthroughRequest_t, MPI2_POINTER pMpi2SataPassthroughRequest_t; @@ -246,6 +247,8 @@ typedef struct _MPI2_SAS_IOUNIT_CONTROL_REQUEST #define MPI2_SAS_OP_REMOVE_DEVICE (0x0D) #define MPI2_SAS_OP_LOOKUP_MAPPING (0x0E) #define MPI2_SAS_OP_SET_IOC_PARAMETER (0x0F) +#define MPI2_SAS_OP_DEV_ENABLE_NCQ (0x14) +#define MPI2_SAS_OP_DEV_DISABLE_NCQ (0x15) #define MPI2_SAS_OP_PRODUCT_SPECIFIC_MIN (0x80) /* values for the PrimFlags field */ diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_tool.h b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h index 5c6e3a67bb94..2a4bceda364b 100644 --- a/drivers/scsi/mpt2sas/mpi/mpi2_tool.h +++ b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h @@ -6,7 +6,7 @@ * Title: MPI diagnostic tool structures and definitions * Creation Date: March 26, 2007 * - * mpi2_tool.h Version: 02.00.05 + * mpi2_tool.h Version: 02.00.06 * * Version History * --------------- @@ -23,6 +23,8 @@ * Added MPI2_DIAG_BUF_TYPE_EXTENDED. * Incremented MPI2_DIAG_BUF_TYPE_COUNT. * 05-12-10 02.00.05 Added Diagnostic Data Upload tool. + * 08-11-10 02.00.06 Added defines that were missing for Diagnostic Buffer + * Post Request. * -------------------------------------------------------------------------- */ @@ -354,6 +356,10 @@ typedef struct _MPI2_DIAG_BUFFER_POST_REQUEST /* count of the number of buffer types */ #define MPI2_DIAG_BUF_TYPE_COUNT (0x03) +/* values for the Flags field */ +#define MPI2_DIAG_BUF_FLAG_RELEASE_ON_FULL (0x00000002) +#define MPI2_DIAG_BUF_FLAG_IMMEDIATE_RELEASE (0x00000001) + /**************************************************************************** * Diagnostic Buffer Post reply diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c index 9ead0399808a..e8a6f1cf1e4b 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.c +++ b/drivers/scsi/mpt2sas/mpt2sas_base.c @@ -752,20 +752,19 @@ static u8 _base_get_cb_idx(struct MPT2SAS_ADAPTER *ioc, u16 smid) { int i; - u8 cb_idx = 0xFF; - - if (smid >= ioc->hi_priority_smid) { - if (smid < ioc->internal_smid) { - i = smid - ioc->hi_priority_smid; - cb_idx = ioc->hpr_lookup[i].cb_idx; - } else if (smid <= ioc->hba_queue_depth) { - i = smid - ioc->internal_smid; - cb_idx = ioc->internal_lookup[i].cb_idx; - } - } else { + u8 cb_idx; + + if (smid < ioc->hi_priority_smid) { i = smid - 1; cb_idx = ioc->scsi_lookup[i].cb_idx; - } + } else if (smid < ioc->internal_smid) { + i = smid - ioc->hi_priority_smid; + cb_idx = ioc->hpr_lookup[i].cb_idx; + } else if (smid <= ioc->hba_queue_depth) { + i = smid - ioc->internal_smid; + cb_idx = ioc->internal_lookup[i].cb_idx; + } else + cb_idx = 0xFF; return cb_idx; } @@ -1430,7 +1429,7 @@ mpt2sas_base_get_smid_scsiio(struct MPT2SAS_ADAPTER *ioc, u8 cb_idx, struct scsi_cmnd *scmd) { unsigned long flags; - struct request_tracker *request; + struct scsiio_tracker *request; u16 smid; spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); @@ -1442,7 +1441,7 @@ mpt2sas_base_get_smid_scsiio(struct MPT2SAS_ADAPTER *ioc, u8 cb_idx, } request = list_entry(ioc->free_list.next, - struct request_tracker, tracker_list); + struct scsiio_tracker, tracker_list); request->scmd = scmd; request->cb_idx = cb_idx; smid = request->smid; @@ -1496,48 
+1495,47 @@ mpt2sas_base_free_smid(struct MPT2SAS_ADAPTER *ioc, u16 smid) struct chain_tracker *chain_req, *next; spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); - if (smid >= ioc->hi_priority_smid) { - if (smid < ioc->internal_smid) { - /* hi-priority */ - i = smid - ioc->hi_priority_smid; - ioc->hpr_lookup[i].cb_idx = 0xFF; - list_add_tail(&ioc->hpr_lookup[i].tracker_list, - &ioc->hpr_free_list); - } else { - /* internal queue */ - i = smid - ioc->internal_smid; - ioc->internal_lookup[i].cb_idx = 0xFF; - list_add_tail(&ioc->internal_lookup[i].tracker_list, - &ioc->internal_free_list); + if (smid < ioc->hi_priority_smid) { + /* scsiio queue */ + i = smid - 1; + if (!list_empty(&ioc->scsi_lookup[i].chain_list)) { + list_for_each_entry_safe(chain_req, next, + &ioc->scsi_lookup[i].chain_list, tracker_list) { + list_del_init(&chain_req->tracker_list); + list_add_tail(&chain_req->tracker_list, + &ioc->free_chain_list); + } } + ioc->scsi_lookup[i].cb_idx = 0xFF; + ioc->scsi_lookup[i].scmd = NULL; + list_add_tail(&ioc->scsi_lookup[i].tracker_list, + &ioc->free_list); spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); - return; - } - /* scsiio queue */ - i = smid - 1; - if (!list_empty(&ioc->scsi_lookup[i].chain_list)) { - list_for_each_entry_safe(chain_req, next, - &ioc->scsi_lookup[i].chain_list, tracker_list) { - list_del_init(&chain_req->tracker_list); - list_add_tail(&chain_req->tracker_list, - &ioc->free_chain_list); + /* + * See _wait_for_commands_to_complete() call with regards + * to this code. + */ + if (ioc->shost_recovery && ioc->pending_io_count) { + if (ioc->pending_io_count == 1) + wake_up(&ioc->reset_wq); + ioc->pending_io_count--; } + return; + } else if (smid < ioc->internal_smid) { + /* hi-priority */ + i = smid - ioc->hi_priority_smid; + ioc->hpr_lookup[i].cb_idx = 0xFF; + list_add_tail(&ioc->hpr_lookup[i].tracker_list, + &ioc->hpr_free_list); + } else if (smid <= ioc->hba_queue_depth) { + /* internal queue */ + i = smid - ioc->internal_smid; + ioc->internal_lookup[i].cb_idx = 0xFF; + list_add_tail(&ioc->internal_lookup[i].tracker_list, + &ioc->internal_free_list); } - ioc->scsi_lookup[i].cb_idx = 0xFF; - ioc->scsi_lookup[i].scmd = NULL; - list_add_tail(&ioc->scsi_lookup[i].tracker_list, - &ioc->free_list); spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); - - /* - * See _wait_for_commands_to_complete() call with regards to this code. - */ - if (ioc->shost_recovery && ioc->pending_io_count) { - if (ioc->pending_io_count == 1) - wake_up(&ioc->reset_wq); - ioc->pending_io_count--; - } } /** @@ -1725,6 +1723,31 @@ _base_display_dell_branding(struct MPT2SAS_ADAPTER *ioc) } /** + * _base_display_intel_branding - Display branding string + * @ioc: per adapter object + * + * Return nothing. + */ +static void +_base_display_intel_branding(struct MPT2SAS_ADAPTER *ioc) +{ + if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_INTEL && + ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008) { + + switch (ioc->pdev->subsystem_device) { + case MPT2SAS_INTEL_RMS2LL080_SSDID: + printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, + MPT2SAS_INTEL_RMS2LL080_BRANDING); + break; + case MPT2SAS_INTEL_RMS2LL040_SSDID: + printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, + MPT2SAS_INTEL_RMS2LL040_BRANDING); + break; + } + } +} + +/** * _base_display_ioc_capabilities - Disply IOC's capabilities. 
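The _base_get_cb_idx() and mpt2sas_base_free_smid() rework above flattens nested range checks into one ladder over three contiguous smid windows. A distilled version of that mapping (the function and enum names are mine; the bounds come from the IOC facts at init time):

#include <linux/types.h>

enum smid_pool { POOL_SCSIIO, POOL_HIPRI, POOL_INTERNAL, POOL_INVALID };

/* Sketch: classify a system message id (smid) into its tracker pool,
 * mirroring the ladder the driver now uses in both lookup and free.
 */
static enum smid_pool example_classify_smid(u16 smid, u16 hi_priority_smid,
                                            u16 internal_smid,
                                            u16 hba_queue_depth)
{
        if (smid < hi_priority_smid)
                return POOL_SCSIIO;     /* index: smid - 1 */
        else if (smid < internal_smid)
                return POOL_HIPRI;      /* index: smid - hi_priority_smid */
        else if (smid <= hba_queue_depth)
                return POOL_INTERNAL;   /* index: smid - internal_smid */
        return POOL_INVALID;            /* driver reports cb_idx 0xFF */
}

Putting the common SCSI I/O window first lets the hot path take the first branch, and the free path can return chain buffers and wake the reset waiter without first falling through the hi-priority and internal cases.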
* @ioc: per adapter object * @@ -1754,6 +1777,7 @@ _base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc) ioc->bios_pg3.BiosVersion & 0x000000FF); _base_display_dell_branding(ioc); + _base_display_intel_branding(ioc); printk(MPT2SAS_INFO_FMT "Protocol=(", ioc->name); @@ -2252,9 +2276,9 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) ioc->name, (unsigned long long) ioc->request_dma)); total_sz += sz; - sz = ioc->scsiio_depth * sizeof(struct request_tracker); + sz = ioc->scsiio_depth * sizeof(struct scsiio_tracker); ioc->scsi_lookup_pages = get_order(sz); - ioc->scsi_lookup = (struct request_tracker *)__get_free_pages( + ioc->scsi_lookup = (struct scsiio_tracker *)__get_free_pages( GFP_KERNEL, ioc->scsi_lookup_pages); if (!ioc->scsi_lookup) { printk(MPT2SAS_ERR_FMT "scsi_lookup: get_free_pages failed, " diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h index 283568c6fb04..a3f8aa9baea4 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.h +++ b/drivers/scsi/mpt2sas/mpt2sas_base.h @@ -69,8 +69,8 @@ #define MPT2SAS_DRIVER_NAME "mpt2sas" #define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>" #define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver" -#define MPT2SAS_DRIVER_VERSION "07.100.00.00" -#define MPT2SAS_MAJOR_VERSION 07 +#define MPT2SAS_DRIVER_VERSION "08.100.00.00" +#define MPT2SAS_MAJOR_VERSION 08 #define MPT2SAS_MINOR_VERSION 100 #define MPT2SAS_BUILD_VERSION 00 #define MPT2SAS_RELEASE_VERSION 00 @@ -101,7 +101,8 @@ #define MPT_NAME_LENGTH 32 /* generic length of strings */ #define MPT_STRING_LENGTH 64 -#define MPT_MAX_CALLBACKS 16 +#define MPT_MAX_CALLBACKS 16 + #define CAN_SLEEP 1 #define NO_SLEEP 0 @@ -154,6 +155,20 @@ #define MPT2SAS_DELL_6GBPS_SAS_SSDID 0x1F22 /* + * Intel HBA branding + */ +#define MPT2SAS_INTEL_RMS2LL080_BRANDING \ + "Intel Integrated RAID Module RMS2LL080" +#define MPT2SAS_INTEL_RMS2LL040_BRANDING \ + "Intel Integrated RAID Module RMS2LL040" + +/* + * Intel HBA SSDIDs + */ +#define MPT2SAS_INTEL_RMS2LL080_SSDID 0x350E +#define MPT2SAS_INTEL_RMS2LL040_SSDID 0x350F + +/* * per target private data */ #define MPT_TARGET_FLAGS_RAID_COMPONENT 0x01 @@ -431,14 +446,14 @@ struct chain_tracker { }; /** - * struct request_tracker - firmware request tracker + * struct scsiio_tracker - scsi mf request tracker * @smid: system message id * @scmd: scsi request pointer * @cb_idx: callback index * @chain_list: list of chains associated to this IO * @tracker_list: list of free request (ioc->free_list) */ -struct request_tracker { +struct scsiio_tracker { u16 smid; struct scsi_cmnd *scmd; u8 cb_idx; @@ -447,6 +462,19 @@ struct request_tracker { }; /** + * struct request_tracker - misc mf request tracker + * @smid: system message id + * @scmd: scsi request pointer + * @cb_idx: callback index + * @tracker_list: list of free request (ioc->free_list) + */ +struct request_tracker { + u16 smid; + u8 cb_idx; + struct list_head tracker_list; +}; + +/** * struct _tr_list - target reset list * @handle: device handle * @state: state machine @@ -709,7 +737,7 @@ struct MPT2SAS_ADAPTER { u8 *request; dma_addr_t request_dma; u32 request_dma_sz; - struct request_tracker *scsi_lookup; + struct scsiio_tracker *scsi_lookup; ulong scsi_lookup_pages; spinlock_t scsi_lookup_lock; struct list_head free_list; diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c index 5ded3db6e316..6ceb7759bfe5 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ 
b/drivers/scsi/mpt2sas/mpt2sas_scsih.c @@ -6975,7 +6975,6 @@ _scsih_suspend(struct pci_dev *pdev, pm_message_t state) u32 device_state; mpt2sas_base_stop_watchdog(ioc); - flush_scheduled_work(); scsi_block_requests(shost); device_state = pci_choose_state(pdev, state); printk(MPT2SAS_INFO_FMT "pdev=0x%p, slot=%s, entering " diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c index b37c8a3c1bb0..86afb13f1e79 100644 --- a/drivers/scsi/osd/osd_initiator.c +++ b/drivers/scsi/osd/osd_initiator.c @@ -1005,11 +1005,23 @@ int osd_req_read_sg(struct osd_request *or, const struct osd_sg_entry *sglist, unsigned numentries) { u64 len; - int ret = _add_sg_continuation_descriptor(or, sglist, numentries, &len); + u64 off; + int ret; - if (ret) - return ret; - osd_req_read(or, obj, 0, bio, len); + if (numentries > 1) { + off = 0; + ret = _add_sg_continuation_descriptor(or, sglist, numentries, + &len); + if (ret) + return ret; + } else { + /* Optimize the case of single segment, read_sg is a + * bidi operation. + */ + len = sglist->len; + off = sglist->offset; + } + osd_req_read(or, obj, off, bio, len); return 0; } diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c index 54de1d1af1a7..521e2182d45b 100644 --- a/drivers/scsi/osst.c +++ b/drivers/scsi/osst.c @@ -1484,7 +1484,7 @@ static int osst_read_back_buffer_and_rewrite(struct osst_tape * STp, struct osst int dbg = debugging; #endif - if ((buffer = (unsigned char *)vmalloc((nframes + 1) * OS_DATA_SIZE)) == NULL) + if ((buffer = vmalloc((nframes + 1) * OS_DATA_SIZE)) == NULL) return (-EIO); printk(KERN_INFO "%s:I: Reading back %d frames from drive buffer%s\n", @@ -2296,7 +2296,7 @@ static int osst_write_header(struct osst_tape * STp, struct osst_request ** aSRp if (STp->raw) return 0; if (STp->header_cache == NULL) { - if ((STp->header_cache = (os_header_t *)vmalloc(sizeof(os_header_t))) == NULL) { + if ((STp->header_cache = vmalloc(sizeof(os_header_t))) == NULL) { printk(KERN_ERR "%s:E: Failed to allocate header cache\n", name); return (-ENOMEM); } @@ -2484,7 +2484,7 @@ static int __osst_analyze_headers(struct osst_tape * STp, struct osst_request ** name, ppos, update_frame_cntr); #endif if (STp->header_cache == NULL) { - if ((STp->header_cache = (os_header_t *)vmalloc(sizeof(os_header_t))) == NULL) { + if ((STp->header_cache = vmalloc(sizeof(os_header_t))) == NULL) { printk(KERN_ERR "%s:E: Failed to allocate header cache\n", name); return 0; } @@ -5851,9 +5851,7 @@ static int osst_probe(struct device *dev) /* if this is the first attach, build the infrastructure */ write_lock(&os_scsi_tapes_lock); if (os_scsi_tapes == NULL) { - os_scsi_tapes = - (struct osst_tape **)kmalloc(osst_max_dev * sizeof(struct osst_tape *), - GFP_ATOMIC); + os_scsi_tapes = kmalloc(osst_max_dev * sizeof(struct osst_tape *), GFP_ATOMIC); if (os_scsi_tapes == NULL) { write_unlock(&os_scsi_tapes_lock); printk(KERN_ERR "osst :E: Unable to allocate array for OnStream SCSI tapes.\n"); diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c index d8db0137c0c7..18b6c55cd08c 100644 --- a/drivers/scsi/pm8001/pm8001_hwi.c +++ b/drivers/scsi/pm8001/pm8001_hwi.c @@ -1382,53 +1382,50 @@ static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha, return MPI_IO_STATUS_BUSY; } -static void pm8001_work_queue(struct work_struct *work) +static void pm8001_work_fn(struct work_struct *work) { - struct delayed_work *dw = container_of(work, struct delayed_work, work); - struct pm8001_wq *wq = container_of(dw, struct pm8001_wq, work_q); + struct 
pm8001_work *pw = container_of(work, struct pm8001_work, work); struct pm8001_device *pm8001_dev; - struct domain_device *dev; + struct domain_device *dev; - switch (wq->handler) { + switch (pw->handler) { case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: - pm8001_dev = wq->data; + pm8001_dev = pw->data; dev = pm8001_dev->sas_device; pm8001_I_T_nexus_reset(dev); break; case IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY: - pm8001_dev = wq->data; + pm8001_dev = pw->data; dev = pm8001_dev->sas_device; pm8001_I_T_nexus_reset(dev); break; case IO_DS_IN_ERROR: - pm8001_dev = wq->data; + pm8001_dev = pw->data; dev = pm8001_dev->sas_device; pm8001_I_T_nexus_reset(dev); break; case IO_DS_NON_OPERATIONAL: - pm8001_dev = wq->data; + pm8001_dev = pw->data; dev = pm8001_dev->sas_device; pm8001_I_T_nexus_reset(dev); break; } - list_del(&wq->entry); - kfree(wq); + kfree(pw); } static int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha, void *data, int handler) { - struct pm8001_wq *wq; + struct pm8001_work *pw; int ret = 0; - wq = kmalloc(sizeof(struct pm8001_wq), GFP_ATOMIC); - if (wq) { - wq->pm8001_ha = pm8001_ha; - wq->data = data; - wq->handler = handler; - INIT_DELAYED_WORK(&wq->work_q, pm8001_work_queue); - list_add_tail(&wq->entry, &pm8001_ha->wq_list); - schedule_delayed_work(&wq->work_q, 0); + pw = kmalloc(sizeof(struct pm8001_work), GFP_ATOMIC); + if (pw) { + pw->pm8001_ha = pm8001_ha; + pw->data = data; + pw->handler = handler; + INIT_WORK(&pw->work, pm8001_work_fn); + queue_work(pm8001_wq, &pw->work); } else ret = -ENOMEM; diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c index b95285f3383f..002360da01e3 100644 --- a/drivers/scsi/pm8001/pm8001_init.c +++ b/drivers/scsi/pm8001/pm8001_init.c @@ -51,6 +51,8 @@ static int pm8001_id; LIST_HEAD(hba_list); +struct workqueue_struct *pm8001_wq; + /** * The main structure which LLDD must register for scsi core. 
*/ @@ -134,7 +136,6 @@ static void __devinit pm8001_phy_init(struct pm8001_hba_info *pm8001_ha, static void pm8001_free(struct pm8001_hba_info *pm8001_ha) { int i; - struct pm8001_wq *wq; if (!pm8001_ha) return; @@ -150,8 +151,7 @@ static void pm8001_free(struct pm8001_hba_info *pm8001_ha) PM8001_CHIP_DISP->chip_iounmap(pm8001_ha); if (pm8001_ha->shost) scsi_host_put(pm8001_ha->shost); - list_for_each_entry(wq, &pm8001_ha->wq_list, entry) - cancel_delayed_work(&wq->work_q); + flush_workqueue(pm8001_wq); kfree(pm8001_ha->tags); kfree(pm8001_ha); } @@ -381,7 +381,6 @@ pm8001_pci_alloc(struct pci_dev *pdev, u32 chip_id, struct Scsi_Host *shost) pm8001_ha->sas = sha; pm8001_ha->shost = shost; pm8001_ha->id = pm8001_id++; - INIT_LIST_HEAD(&pm8001_ha->wq_list); pm8001_ha->logging_level = 0x01; sprintf(pm8001_ha->name, "%s%d", DRV_NAME, pm8001_ha->id); #ifdef PM8001_USE_TASKLET @@ -758,7 +757,7 @@ static int pm8001_pci_suspend(struct pci_dev *pdev, pm_message_t state) int i , pos; u32 device_state; pm8001_ha = sha->lldd_ha; - flush_scheduled_work(); + flush_workqueue(pm8001_wq); scsi_block_requests(pm8001_ha->shost); pos = pci_find_capability(pdev, PCI_CAP_ID_PM); if (pos == 0) { @@ -870,17 +869,26 @@ static struct pci_driver pm8001_pci_driver = { */ static int __init pm8001_init(void) { - int rc; + int rc = -ENOMEM; + + pm8001_wq = alloc_workqueue("pm8001", 0, 0); + if (!pm8001_wq) + goto err; + pm8001_id = 0; pm8001_stt = sas_domain_attach_transport(&pm8001_transport_ops); if (!pm8001_stt) - return -ENOMEM; + goto err_wq; rc = pci_register_driver(&pm8001_pci_driver); if (rc) - goto err_out; + goto err_tp; return 0; -err_out: + +err_tp: sas_release_transport(pm8001_stt); +err_wq: + destroy_workqueue(pm8001_wq); +err: return rc; } @@ -888,6 +896,7 @@ static void __exit pm8001_exit(void) { pci_unregister_driver(&pm8001_pci_driver); sas_release_transport(pm8001_stt); + destroy_workqueue(pm8001_wq); } module_init(pm8001_init); diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h index 7f064f9ca828..bdb6b27dedd6 100644 --- a/drivers/scsi/pm8001/pm8001_sas.h +++ b/drivers/scsi/pm8001/pm8001_sas.h @@ -50,6 +50,7 @@ #include <linux/dma-mapping.h> #include <linux/pci.h> #include <linux/interrupt.h> +#include <linux/workqueue.h> #include <scsi/libsas.h> #include <scsi/scsi_tcq.h> #include <scsi/sas_ata.h> @@ -379,18 +380,16 @@ struct pm8001_hba_info { #ifdef PM8001_USE_TASKLET struct tasklet_struct tasklet; #endif - struct list_head wq_list; u32 logging_level; u32 fw_status; const struct firmware *fw_image; }; -struct pm8001_wq { - struct delayed_work work_q; +struct pm8001_work { + struct work_struct work; struct pm8001_hba_info *pm8001_ha; void *data; int handler; - struct list_head entry; }; struct pm8001_fw_image_header { @@ -460,6 +459,9 @@ struct fw_control_ex { void *param3; }; +/* pm8001 workqueue */ +extern struct workqueue_struct *pm8001_wq; + /******************** function prototype *********************/ int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out); void pm8001_tag_init(struct pm8001_hba_info *pm8001_ha); diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c index 321cf3ae8630..bcf858e88c64 100644 --- a/drivers/scsi/pmcraid.c +++ b/drivers/scsi/pmcraid.c @@ -5454,7 +5454,7 @@ static void __devexit pmcraid_remove(struct pci_dev *pdev) pmcraid_shutdown(pdev); pmcraid_disable_interrupts(pinstance, ~0); - flush_scheduled_work(); + flush_work_sync(&pinstance->worker_q); pmcraid_kill_tasklets(pinstance); 
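The pm8001 hunks above replace per-event delayed work items chained on a driver-private list (and cancelled one by one at teardown) with plain work items on a dedicated workqueue that can simply be flushed. A sketch of the resulting pattern, with illustrative names:

#include <linux/workqueue.h>
#include <linux/slab.h>

static struct workqueue_struct *example_wq;     /* created in module init */

struct example_work {
        struct work_struct work;
        int handler;            /* which event to service */
};

static void example_work_fn(struct work_struct *work)
{
        struct example_work *ew =
                container_of(work, struct example_work, work);

        /* ... dispatch on ew->handler ... */
        kfree(ew);              /* the handler owns and frees the item */
}

static int example_handle_event(int handler)
{
        struct example_work *ew = kmalloc(sizeof(*ew), GFP_ATOMIC);

        if (!ew)
                return -ENOMEM;
        ew->handler = handler;
        INIT_WORK(&ew->work, example_work_fn);
        queue_work(example_wq, &ew->work);
        return 0;
}

/* module init/exit pair:
 *      example_wq = alloc_workqueue("example", 0, 0);
 *      ...
 *      flush_workqueue(example_wq);
 *      destroy_workqueue(example_wq);
 */

With the work items self-freeing and the queue flushable, the wq_list bookkeeping disappears, and the suspend and remove paths can use flush_workqueue() on the driver's own queue instead of the global flush_scheduled_work().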
pmcraid_unregister_interrupt_handler(pinstance); diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index ccfc8e78be21..6c51c0a35b9e 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -2402,13 +2402,13 @@ struct qla_hw_data { volatile struct { uint32_t mbox_int :1; uint32_t mbox_busy :1; - uint32_t disable_risc_code_load :1; uint32_t enable_64bit_addressing :1; uint32_t enable_lip_reset :1; uint32_t enable_target_reset :1; uint32_t enable_lip_full_login :1; uint32_t enable_led_scheme :1; + uint32_t msi_enabled :1; uint32_t msix_enabled :1; uint32_t disable_serdes :1; @@ -2417,6 +2417,7 @@ struct qla_hw_data { uint32_t pci_channel_io_perm_failure :1; uint32_t fce_enabled :1; uint32_t fac_supported :1; + uint32_t chip_reset_done :1; uint32_t port0 :1; uint32_t running_gold_fw :1; @@ -2424,9 +2425,11 @@ struct qla_hw_data { uint32_t cpu_affinity_enabled :1; uint32_t disable_msix_handshake :1; uint32_t fcp_prio_enabled :1; - uint32_t fw_hung :1; - uint32_t quiesce_owner:1; + uint32_t isp82xx_fw_hung:1; + + uint32_t quiesce_owner:1; uint32_t thermal_supported:1; + uint32_t isp82xx_reset_hdlr_active:1; /* 26 bits */ } flags; diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index 89e900adb679..d48326ee3f61 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -565,6 +565,7 @@ extern int qla82xx_mbx_intr_enable(scsi_qla_host_t *); extern int qla82xx_mbx_intr_disable(scsi_qla_host_t *); extern void qla82xx_start_iocbs(srb_t *); extern int qla82xx_fcoe_ctx_reset(scsi_qla_host_t *); +extern void qla82xx_chip_reset_cleanup(scsi_qla_host_t *); /* BSG related functions */ extern int qla24xx_bsg_request(struct fc_bsg_job *); diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index 4c083928c2fb..74a91b6dfc68 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c @@ -121,8 +121,11 @@ qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt, rval = QLA_FUNCTION_FAILED; if (ms_pkt->entry_status != 0) { - DEBUG2_3(printk("scsi(%ld): %s failed, error status (%x).\n", - vha->host_no, routine, ms_pkt->entry_status)); + DEBUG2_3(printk(KERN_WARNING "scsi(%ld): %s failed, error status " + "(%x) on port_id: %02x%02x%02x.\n", + vha->host_no, routine, ms_pkt->entry_status, + vha->d_id.b.domain, vha->d_id.b.area, + vha->d_id.b.al_pa)); } else { if (IS_FWI2_CAPABLE(ha)) comp_status = le16_to_cpu( @@ -136,8 +139,10 @@ qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt, if (ct_rsp->header.response != __constant_cpu_to_be16(CT_ACCEPT_RESPONSE)) { DEBUG2_3(printk("scsi(%ld): %s failed, " - "rejected request:\n", vha->host_no, - routine)); + "rejected request on port_id: %02x%02x%02x\n", + vha->host_no, routine, + vha->d_id.b.domain, vha->d_id.b.area, + vha->d_id.b.al_pa)); DEBUG2_3(qla2x00_dump_buffer( (uint8_t *)&ct_rsp->header, sizeof(struct ct_rsp_hdr))); @@ -147,8 +152,10 @@ qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt, break; default: DEBUG2_3(printk("scsi(%ld): %s failed, completion " - "status (%x).\n", vha->host_no, routine, - comp_status)); + "status (%x) on port_id: %02x%02x%02x.\n", + vha->host_no, routine, comp_status, + vha->d_id.b.domain, vha->d_id.b.area, + vha->d_id.b.al_pa)); break; } } @@ -1965,7 +1972,7 @@ qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list) "scsi(%ld): GFF_ID issue IOCB failed " "(%d).\n", vha->host_no, rval)); } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, - 
"GPN_ID") != QLA_SUCCESS) { + "GFF_ID") != QLA_SUCCESS) { DEBUG2_3(printk(KERN_INFO "scsi(%ld): GFF_ID IOCB status had a " "failure status code\n", vha->host_no)); diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index d9479c3fe5f8..8575808dbae0 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -1967,7 +1967,7 @@ qla2x00_fw_ready(scsi_qla_host_t *vha) } else { /* Mailbox cmd failed. Timeout on min_wait. */ if (time_after_eq(jiffies, mtime) || - (IS_QLA82XX(ha) && ha->flags.fw_hung)) + ha->flags.isp82xx_fw_hung) break; } @@ -3945,8 +3945,13 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha) struct qla_hw_data *ha = vha->hw; struct scsi_qla_host *vp; unsigned long flags; + fc_port_t *fcport; - vha->flags.online = 0; + /* For ISP82XX, driver waits for completion of the commands. + * online flag should be set. + */ + if (!IS_QLA82XX(ha)) + vha->flags.online = 0; ha->flags.chip_reset_done = 0; clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); ha->qla_stats.total_isp_aborts++; @@ -3954,7 +3959,10 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha) qla_printk(KERN_INFO, ha, "Performing ISP error recovery - ha= %p.\n", ha); - /* Chip reset does not apply to 82XX */ + /* For ISP82XX, reset_chip is just disabling interrupts. + * Driver waits for the completion of the commands. + * the interrupts need to be enabled. + */ if (!IS_QLA82XX(ha)) ha->isp_ops->reset_chip(vha); @@ -3980,14 +3988,31 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha) LOOP_DOWN_TIME); } + /* Clear all async request states across all VPs. */ + list_for_each_entry(fcport, &vha->vp_fcports, list) + fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT); + spin_lock_irqsave(&ha->vport_slock, flags); + list_for_each_entry(vp, &ha->vp_list, list) { + atomic_inc(&vp->vref_count); + spin_unlock_irqrestore(&ha->vport_slock, flags); + + list_for_each_entry(fcport, &vp->vp_fcports, list) + fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT); + + spin_lock_irqsave(&ha->vport_slock, flags); + atomic_dec(&vp->vref_count); + } + spin_unlock_irqrestore(&ha->vport_slock, flags); + if (!ha->flags.eeh_busy) { /* Make sure for ISP 82XX IO DMA is complete */ if (IS_QLA82XX(ha)) { - if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, - WAIT_HOST) == QLA_SUCCESS) { - DEBUG2(qla_printk(KERN_INFO, ha, - "Done wait for pending commands\n")); - } + qla82xx_chip_reset_cleanup(vha); + + /* Done waiting for pending commands. + * Reset the online flag. + */ + vha->flags.online = 0; } /* Requeue all commands in outstanding command list. */ diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index 4c1ba6263eb3..d78d5896fc33 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -328,6 +328,7 @@ qla2x00_start_scsi(srb_t *sp) struct qla_hw_data *ha; struct req_que *req; struct rsp_que *rsp; + char tag[2]; /* Setup device pointers. */ ret = 0; @@ -406,7 +407,22 @@ qla2x00_start_scsi(srb_t *sp) cmd_pkt->lun = cpu_to_le16(sp->cmd->device->lun); /* Update tagged queuing modifier */ - cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG); + if (scsi_populate_tag_msg(cmd, tag)) { + switch (tag[0]) { + case HEAD_OF_QUEUE_TAG: + cmd_pkt->control_flags = + __constant_cpu_to_le16(CF_HEAD_TAG); + break; + case ORDERED_QUEUE_TAG: + cmd_pkt->control_flags = + __constant_cpu_to_le16(CF_ORDERED_TAG); + break; + default: + cmd_pkt->control_flags = + __constant_cpu_to_le16(CF_SIMPLE_TAG); + break; + } + } /* Load SCSI command packet. 
*/ memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len); @@ -971,6 +987,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, uint16_t fcp_cmnd_len; struct fcp_cmnd *fcp_cmnd; dma_addr_t crc_ctx_dma; + char tag[2]; cmd = sp->cmd; @@ -1068,9 +1085,27 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF)); cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32( MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF)); - fcp_cmnd->task_attribute = 0; fcp_cmnd->task_management = 0; + /* + * Update tagged queuing modifier if using command tag queuing + */ + if (scsi_populate_tag_msg(cmd, tag)) { + switch (tag[0]) { + case HEAD_OF_QUEUE_TAG: + fcp_cmnd->task_attribute = TSK_HEAD_OF_QUEUE; + break; + case ORDERED_QUEUE_TAG: + fcp_cmnd->task_attribute = TSK_ORDERED; + break; + default: + fcp_cmnd->task_attribute = 0; + break; + } + } else { + fcp_cmnd->task_attribute = 0; + } + cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */ DEBUG18(printk(KERN_INFO "%s(%ld): Total SG(s) Entries %d, Data" @@ -1177,6 +1212,7 @@ qla24xx_start_scsi(srb_t *sp) struct scsi_cmnd *cmd = sp->cmd; struct scsi_qla_host *vha = sp->fcport->vha; struct qla_hw_data *ha = vha->hw; + char tag[2]; /* Setup device pointers. */ ret = 0; @@ -1260,6 +1296,18 @@ qla24xx_start_scsi(srb_t *sp) int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun); host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); + /* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */ + if (scsi_populate_tag_msg(cmd, tag)) { + switch (tag[0]) { + case HEAD_OF_QUEUE_TAG: + cmd_pkt->task = TSK_HEAD_OF_QUEUE; + break; + case ORDERED_QUEUE_TAG: + cmd_pkt->task = TSK_ORDERED; + break; + } + } + /* Load SCSI command packet. */ memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len); host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb)); diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index e473e9fb363c..7a7c0ecfe7dd 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -71,6 +71,13 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) return QLA_FUNCTION_TIMEOUT; } + if (ha->flags.isp82xx_fw_hung) { + /* Setting Link-Down error */ + mcp->mb[0] = MBS_LINK_DOWN_ERROR; + rval = QLA_FUNCTION_FAILED; + goto premature_exit; + } + /* * Wait for active mailbox commands to finish by waiting at most tov * seconds. 
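Several hunks in this series (qla2x00_start_scsi(), the CRC_2 protection path, and qla24xx_start_scsi() above, plus qla82xx_start_scsi() further below) perform the same translation from the block layer's tag message to an FCP task attribute. A distilled helper, assuming the pre-multiqueue scsi_populate_tag_msg() API of this era (the helper name is mine; TSK_HEAD_OF_QUEUE and TSK_ORDERED are the qla_fw.h values):

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>

/* Sketch: map the SCSI tag message to an FCP task attribute.
 * TSK_SIMPLE (0) is the default when tagging is off or simple. */
static u8 example_fcp_task_attr(struct scsi_cmnd *cmd)
{
        char tag[2];

        if (scsi_populate_tag_msg(cmd, tag)) {
                switch (tag[0]) {
                case HEAD_OF_QUEUE_TAG:
                        return TSK_HEAD_OF_QUEUE;       /* 1 */
                case ORDERED_QUEUE_TAG:
                        return TSK_ORDERED;             /* 2 */
                }
        }
        return 0;       /* TSK_SIMPLE */
}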
This is to serialize actual issuing of mailbox cmds during @@ -83,13 +90,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) return QLA_FUNCTION_TIMEOUT; } - if (IS_QLA82XX(ha) && ha->flags.fw_hung) { - /* Setting Link-Down error */ - mcp->mb[0] = MBS_LINK_DOWN_ERROR; - rval = QLA_FUNCTION_FAILED; - goto premature_exit; - } - ha->flags.mbox_busy = 1; /* Save mailbox command for debug */ ha->mcp = mcp; @@ -223,7 +223,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) ha->flags.mbox_int = 0; clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); - if (IS_QLA82XX(ha) && ha->flags.fw_hung) { + if (ha->flags.isp82xx_fw_hung) { ha->flags.mbox_busy = 0; /* Setting Link-Down error */ mcp->mb[0] = MBS_LINK_DOWN_ERROR; @@ -2462,22 +2462,19 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport, "-- completion status (%x).\n", __func__, vha->host_no, le16_to_cpu(sts->comp_status))); rval = QLA_FUNCTION_FAILED; - } else if (!(le16_to_cpu(sts->scsi_status) & - SS_RESPONSE_INFO_LEN_VALID)) { - DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " - "-- no response info (%x).\n", __func__, vha->host_no, - le16_to_cpu(sts->scsi_status))); - rval = QLA_FUNCTION_FAILED; - } else if (le32_to_cpu(sts->rsp_data_len) < 4) { - DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " - "-- not enough response info (%d).\n", __func__, - vha->host_no, le32_to_cpu(sts->rsp_data_len))); - rval = QLA_FUNCTION_FAILED; - } else if (sts->data[3]) { - DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " - "-- response (%x).\n", __func__, - vha->host_no, sts->data[3])); - rval = QLA_FUNCTION_FAILED; + } else if (le16_to_cpu(sts->scsi_status) & + SS_RESPONSE_INFO_LEN_VALID) { + if (le32_to_cpu(sts->rsp_data_len) < 4) { + DEBUG2_3_11(printk("%s(%ld): ignoring inconsistent " + "data length -- not enough response info (%d).\n", + __func__, vha->host_no, + le32_to_cpu(sts->rsp_data_len))); + } else if (sts->data[3]) { + DEBUG2_3_11(printk("%s(%ld): failed to complete IOCB " + "-- response (%x).\n", __func__, + vha->host_no, sts->data[3])); + rval = QLA_FUNCTION_FAILED; + } } /* Issue marker IOCB. */ diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c index fdb96a3584a5..76ec876e6b21 100644 --- a/drivers/scsi/qla2xxx/qla_nx.c +++ b/drivers/scsi/qla2xxx/qla_nx.c @@ -7,6 +7,7 @@ #include "qla_def.h" #include <linux/delay.h> #include <linux/pci.h> +#include <scsi/scsi_tcq.h> #define MASK(n) ((1ULL<<(n))-1) #define MN_WIN(addr) (((addr & 0x1fc0000) >> 1) | \ @@ -2547,7 +2548,7 @@ qla2xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt, dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address; *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma)); - *dsd_seg++ = dsd_list_len; + cmd_pkt->fcp_data_dseg_len = dsd_list_len; } else { *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma)); @@ -2620,6 +2621,7 @@ qla82xx_start_scsi(srb_t *sp) struct qla_hw_data *ha = vha->hw; struct req_que *req = NULL; struct rsp_que *rsp = NULL; + char tag[2]; /* Setup device pointers. */ ret = 0; @@ -2770,6 +2772,22 @@ sufficient_dsds: int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun); host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); + /* + * Update tagged queuing modifier -- default is TSK_SIMPLE (0). 
+ */ + if (scsi_populate_tag_msg(cmd, tag)) { + switch (tag[0]) { + case HEAD_OF_QUEUE_TAG: + ctx->fcp_cmnd->task_attribute = + TSK_HEAD_OF_QUEUE; + break; + case ORDERED_QUEUE_TAG: + ctx->fcp_cmnd->task_attribute = + TSK_ORDERED; + break; + } + } + /* build FCP_CMND IU */ memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd)); int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun); @@ -2835,6 +2853,20 @@ sufficient_dsds: host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); + /* + * Update tagged queuing modifier -- default is TSK_SIMPLE (0). + */ + if (scsi_populate_tag_msg(cmd, tag)) { + switch (tag[0]) { + case HEAD_OF_QUEUE_TAG: + cmd_pkt->task = TSK_HEAD_OF_QUEUE; + break; + case ORDERED_QUEUE_TAG: + cmd_pkt->task = TSK_ORDERED; + break; + } + } + /* Load SCSI command packet. */ memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len); host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb)); @@ -3457,46 +3489,28 @@ qla82xx_need_reset_handler(scsi_qla_host_t *vha) } } -static void +int qla82xx_check_fw_alive(scsi_qla_host_t *vha) { - uint32_t fw_heartbeat_counter, halt_status; - struct qla_hw_data *ha = vha->hw; + uint32_t fw_heartbeat_counter; + int status = 0; - fw_heartbeat_counter = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER); + fw_heartbeat_counter = qla82xx_rd_32(vha->hw, + QLA82XX_PEG_ALIVE_COUNTER); /* all 0xff, assume AER/EEH in progress, ignore */ if (fw_heartbeat_counter == 0xffffffff) - return; + return status; if (vha->fw_heartbeat_counter == fw_heartbeat_counter) { vha->seconds_since_last_heartbeat++; /* FW not alive after 2 seconds */ if (vha->seconds_since_last_heartbeat == 2) { vha->seconds_since_last_heartbeat = 0; - halt_status = qla82xx_rd_32(ha, - QLA82XX_PEG_HALT_STATUS1); - if (halt_status & HALT_STATUS_UNRECOVERABLE) { - set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags); - } else { - qla_printk(KERN_INFO, ha, - "scsi(%ld): %s - detect abort needed\n", - vha->host_no, __func__); - set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); - } - qla2xxx_wake_dpc(vha); - ha->flags.fw_hung = 1; - if (ha->flags.mbox_busy) { - ha->flags.mbox_int = 1; - DEBUG2(qla_printk(KERN_ERR, ha, - "Due to fw hung, doing premature " - "completion of mbx command\n")); - if (test_bit(MBX_INTR_WAIT, - &ha->mbx_cmd_flags)) - complete(&ha->mbx_intr_comp); - } + status = 1; } } else vha->seconds_since_last_heartbeat = 0; vha->fw_heartbeat_counter = fw_heartbeat_counter; + return status; } /* @@ -3557,6 +3571,8 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha) break; case QLA82XX_DEV_NEED_RESET: qla82xx_need_reset_handler(vha); + dev_init_timeout = jiffies + + (ha->nx_dev_init_timeout * HZ); break; case QLA82XX_DEV_NEED_QUIESCENT: qla82xx_need_qsnt_handler(vha); @@ -3596,30 +3612,18 @@ exit: void qla82xx_watchdog(scsi_qla_host_t *vha) { - uint32_t dev_state; + uint32_t dev_state, halt_status; struct qla_hw_data *ha = vha->hw; - dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); - /* don't poll if reset is going on */ - if (!(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) || - test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) || - test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))) { - if (dev_state == QLA82XX_DEV_NEED_RESET) { + if (!ha->flags.isp82xx_reset_hdlr_active) { + dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); + if (dev_state == QLA82XX_DEV_NEED_RESET && + !test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) { qla_printk(KERN_WARNING, ha, - "%s(): Adapter reset needed!\n", __func__); + "%s(): Adapter reset needed!\n", __func__); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); - 
ha->flags.fw_hung = 1; - if (ha->flags.mbox_busy) { - ha->flags.mbox_int = 1; - DEBUG2(qla_printk(KERN_ERR, ha, - "Need reset, doing premature " - "completion of mbx command\n")); - if (test_bit(MBX_INTR_WAIT, - &ha->mbx_cmd_flags)) - complete(&ha->mbx_intr_comp); - } } else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT && !test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) { DEBUG(qla_printk(KERN_INFO, ha, @@ -3629,6 +3633,31 @@ void qla82xx_watchdog(scsi_qla_host_t *vha) qla2xxx_wake_dpc(vha); } else { qla82xx_check_fw_alive(vha); + if (qla82xx_check_fw_alive(vha)) { + halt_status = qla82xx_rd_32(ha, + QLA82XX_PEG_HALT_STATUS1); + if (halt_status & HALT_STATUS_UNRECOVERABLE) { + set_bit(ISP_UNRECOVERABLE, + &vha->dpc_flags); + } else { + qla_printk(KERN_INFO, ha, + "scsi(%ld): %s - detect abort needed\n", + vha->host_no, __func__); + set_bit(ISP_ABORT_NEEDED, + &vha->dpc_flags); + } + qla2xxx_wake_dpc(vha); + ha->flags.isp82xx_fw_hung = 1; + if (ha->flags.mbox_busy) { + ha->flags.mbox_int = 1; + DEBUG2(qla_printk(KERN_ERR, ha, + "Due to fw hung, doing premature " + "completion of mbx command\n")); + if (test_bit(MBX_INTR_WAIT, + &ha->mbx_cmd_flags)) + complete(&ha->mbx_intr_comp); + } + } } } } @@ -3663,6 +3692,7 @@ qla82xx_abort_isp(scsi_qla_host_t *vha) "Exiting.\n", __func__, vha->host_no); return QLA_SUCCESS; } + ha->flags.isp82xx_reset_hdlr_active = 1; qla82xx_idc_lock(ha); dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); @@ -3683,7 +3713,8 @@ qla82xx_abort_isp(scsi_qla_host_t *vha) qla82xx_idc_unlock(ha); if (rval == QLA_SUCCESS) { - ha->flags.fw_hung = 0; + ha->flags.isp82xx_fw_hung = 0; + ha->flags.isp82xx_reset_hdlr_active = 0; qla82xx_restart_isp(vha); } @@ -3791,3 +3822,71 @@ int qla2x00_wait_for_fcoe_ctx_reset(scsi_qla_host_t *vha) return status; } + +void +qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha) +{ + int i; + unsigned long flags; + struct qla_hw_data *ha = vha->hw; + + /* Check if 82XX firmware is alive or not + * We may have arrived here from NEED_RESET + * detection only + */ + if (!ha->flags.isp82xx_fw_hung) { + for (i = 0; i < 2; i++) { + msleep(1000); + if (qla82xx_check_fw_alive(vha)) { + ha->flags.isp82xx_fw_hung = 1; + if (ha->flags.mbox_busy) { + ha->flags.mbox_int = 1; + complete(&ha->mbx_intr_comp); + } + break; + } + } + } + + /* Abort all commands gracefully if fw NOT hung */ + if (!ha->flags.isp82xx_fw_hung) { + int cnt, que; + srb_t *sp; + struct req_que *req; + + spin_lock_irqsave(&ha->hardware_lock, flags); + for (que = 0; que < ha->max_req_queues; que++) { + req = ha->req_q_map[que]; + if (!req) + continue; + for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) { + sp = req->outstanding_cmds[cnt]; + if (sp) { + if (!sp->ctx || + (sp->flags & SRB_FCP_CMND_DMA_VALID)) { + spin_unlock_irqrestore( + &ha->hardware_lock, flags); + if (ha->isp_ops->abort_command(sp)) { + qla_printk(KERN_INFO, ha, + "scsi(%ld): mbx abort command failed in %s\n", + vha->host_no, __func__); + } else { + qla_printk(KERN_INFO, ha, + "scsi(%ld): mbx abort command success in %s\n", + vha->host_no, __func__); + } + spin_lock_irqsave(&ha->hardware_lock, flags); + } + } + } + } + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + /* Wait for pending cmds (physical and virtual) to complete */ + if (!qla2x00_eh_wait_for_pending_commands(vha, 0, 0, + WAIT_HOST) == QLA_SUCCESS) { + DEBUG2(qla_printk(KERN_INFO, ha, + "Done wait for pending commands\n")); + } + } +} diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index e90f7c16b956..75a966c94860 100644 --- 
a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -506,7 +506,7 @@ qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str) static inline srb_t * qla2x00_get_new_sp(scsi_qla_host_t *vha, fc_port_t *fcport, - struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) + struct scsi_cmnd *cmd) { srb_t *sp; struct qla_hw_data *ha = vha->hw; @@ -520,14 +520,13 @@ qla2x00_get_new_sp(scsi_qla_host_t *vha, fc_port_t *fcport, sp->cmd = cmd; sp->flags = 0; CMD_SP(cmd) = (void *)sp; - cmd->scsi_done = done; sp->ctx = NULL; return sp; } static int -qla2xxx_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) +qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) { scsi_qla_host_t *vha = shost_priv(cmd->device->host); fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; @@ -537,7 +536,6 @@ qla2xxx_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *) srb_t *sp; int rval; - spin_unlock_irq(vha->host->host_lock); if (ha->flags.eeh_busy) { if (ha->flags.pci_channel_io_perm_failure) cmd->result = DID_NO_CONNECT << 16; @@ -569,40 +567,32 @@ qla2xxx_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *) goto qc24_target_busy; } - sp = qla2x00_get_new_sp(base_vha, fcport, cmd, done); + sp = qla2x00_get_new_sp(base_vha, fcport, cmd); if (!sp) - goto qc24_host_busy_lock; + goto qc24_host_busy; rval = ha->isp_ops->start_scsi(sp); if (rval != QLA_SUCCESS) goto qc24_host_busy_free_sp; - spin_lock_irq(vha->host->host_lock); - return 0; qc24_host_busy_free_sp: qla2x00_sp_free_dma(sp); mempool_free(sp, ha->srb_mempool); -qc24_host_busy_lock: - spin_lock_irq(vha->host->host_lock); +qc24_host_busy: return SCSI_MLQUEUE_HOST_BUSY; qc24_target_busy: - spin_lock_irq(vha->host->host_lock); return SCSI_MLQUEUE_TARGET_BUSY; qc24_fail_command: - spin_lock_irq(vha->host->host_lock); - done(cmd); + cmd->scsi_done(cmd); return 0; } -static DEF_SCSI_QCMD(qla2xxx_queuecommand) - - /* * qla2x00_eh_wait_on_command * Waits for the command to be returned by the Firmware for some @@ -821,17 +811,20 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd) { scsi_qla_host_t *vha = shost_priv(cmd->device->host); srb_t *sp; - int ret = SUCCESS; + int ret; unsigned int id, lun; unsigned long flags; int wait = 0; struct qla_hw_data *ha = vha->hw; - fc_block_scsi_eh(cmd); - if (!CMD_SP(cmd)) return SUCCESS; + ret = fc_block_scsi_eh(cmd); + if (ret != 0) + return ret; + ret = SUCCESS; + id = cmd->device->id; lun = cmd->device->lun; @@ -940,11 +933,13 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type, fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; int err; - fc_block_scsi_eh(cmd); - if (!fcport) return FAILED; + err = fc_block_scsi_eh(cmd); + if (err != 0) + return err; + qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): %s RESET ISSUED.\n", vha->host_no, cmd->device->id, cmd->device->lun, name); @@ -1018,14 +1013,17 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd) int ret = FAILED; unsigned int id, lun; - fc_block_scsi_eh(cmd); - id = cmd->device->id; lun = cmd->device->lun; if (!fcport) return ret; + ret = fc_block_scsi_eh(cmd); + if (ret != 0) + return ret; + ret = FAILED; + qla_printk(KERN_INFO, vha->hw, "scsi(%ld:%d:%d): BUS RESET ISSUED.\n", vha->host_no, id, lun); @@ -1078,14 +1076,17 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd) unsigned int id, lun; scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); - fc_block_scsi_eh(cmd); - id = cmd->device->id; lun = cmd->device->lun; if (!fcport) return ret; + ret = 
fc_block_scsi_eh(cmd); + if (ret != 0) + return ret; + ret = FAILED; + qla_printk(KERN_INFO, ha, "scsi(%ld:%d:%d): ADAPTER RESET ISSUED.\n", vha->host_no, id, lun); @@ -3805,7 +3806,7 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) ha->flags.eeh_busy = 1; /* For ISP82XX complete any pending mailbox cmd */ if (IS_QLA82XX(ha)) { - ha->flags.fw_hung = 1; + ha->flags.isp82xx_fw_hung = 1; if (ha->flags.mbox_busy) { ha->flags.mbox_int = 1; DEBUG2(qla_printk(KERN_ERR, ha, @@ -3945,7 +3946,7 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha) qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_READY); qla82xx_idc_unlock(ha); - ha->flags.fw_hung = 0; + ha->flags.isp82xx_fw_hung = 0; rval = qla82xx_restart_isp(base_vha); qla82xx_idc_lock(ha); /* Clear driver state register */ @@ -3958,7 +3959,7 @@ uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha) "This devfn is not reset owner = 0x%x\n", ha->pdev->devfn)); if ((qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE) == QLA82XX_DEV_READY)) { - ha->flags.fw_hung = 0; + ha->flags.isp82xx_fw_hung = 0; rval = qla82xx_restart_isp(base_vha); qla82xx_idc_lock(ha); qla82xx_set_drv_active(base_vha); diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index cf0075a2d0c2..3a260c3f055a 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h @@ -7,9 +7,9 @@ /* * Driver version */ -#define QLA2XXX_VERSION "8.03.05-k0" +#define QLA2XXX_VERSION "8.03.07.00" #define QLA_DRIVER_MAJOR_VER 8 #define QLA_DRIVER_MINOR_VER 3 -#define QLA_DRIVER_PATCH_VER 5 +#define QLA_DRIVER_PATCH_VER 7 #define QLA_DRIVER_BETA_VER 0 diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c index 6ffbe9727dff..03e028e6e809 100644 --- a/drivers/scsi/qla4xxx/ql4_isr.c +++ b/drivers/scsi/qla4xxx/ql4_isr.c @@ -1027,7 +1027,7 @@ void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen) ((ddb_entry->default_time2wait + 4) * HZ); - DEBUG2(printk("scsi%ld: ddb [%d] initate" + DEBUG2(printk("scsi%ld: ddb [%d] initiate" " RELOGIN after %d seconds\n", ha->host_no, ddb_entry->fw_ddb_index, diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index 3fc1d256636f..967836ef5ab2 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c @@ -812,7 +812,7 @@ static void qla4xxx_timer(struct scsi_qla_host *ha) ); start_dpc++; DEBUG(printk("scsi%ld:%d:%d: ddb [%d] " - "initate relogin after" + "initiate relogin after" " %d seconds\n", ha->host_no, ddb_entry->bus, ddb_entry->target, diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index a6b2d72022fc..fa5758cbdedb 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -89,32 +89,34 @@ static const char * scsi_debug_version_date = "20100324"; /* With these defaults, this driver will make 1 host with 1 target * (id 0) containing 1 logical unit (lun 0). That is 1 device. 
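The qla2xxx error-handler hunks above all make the same conversion: fc_block_scsi_eh() used to be called only for its blocking side effect and its verdict discarded; the handlers now return early on a non-zero result. A minimal stand-alone sketch of the new pattern, assuming only that fc_block_scsi_eh() returns 0 or FAST_IO_FAIL (illustrative, not the driver code):

static int example_eh_handler(struct scsi_cmnd *cmd)
{
	int ret;

	/* Wait out a blocked rport; propagate FAST_IO_FAIL upward. */
	ret = fc_block_scsi_eh(cmd);
	if (ret != 0)
		return ret;

	/* ... issue the actual abort/reset mailbox command here ... */
	return SUCCESS;
}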
*/ +#define DEF_ATO 1 #define DEF_DELAY 1 #define DEF_DEV_SIZE_MB 8 -#define DEF_EVERY_NTH 0 -#define DEF_NUM_PARTS 0 -#define DEF_OPTS 0 -#define DEF_SCSI_LEVEL 5 /* INQUIRY, byte2 [5->SPC-3] */ -#define DEF_PTYPE 0 +#define DEF_DIF 0 +#define DEF_DIX 0 #define DEF_D_SENSE 0 -#define DEF_NO_LUN_0 0 -#define DEF_VIRTUAL_GB 0 +#define DEF_EVERY_NTH 0 #define DEF_FAKE_RW 0 -#define DEF_VPD_USE_HOSTNO 1 -#define DEF_SECTOR_SIZE 512 -#define DEF_DIX 0 -#define DEF_DIF 0 #define DEF_GUARD 0 -#define DEF_ATO 1 -#define DEF_PHYSBLK_EXP 0 +#define DEF_LBPU 0 +#define DEF_LBPWS 0 +#define DEF_LBPWS10 0 #define DEF_LOWEST_ALIGNED 0 +#define DEF_NO_LUN_0 0 +#define DEF_NUM_PARTS 0 +#define DEF_OPTS 0 #define DEF_OPT_BLKS 64 +#define DEF_PHYSBLK_EXP 0 +#define DEF_PTYPE 0 +#define DEF_SCSI_LEVEL 5 /* INQUIRY, byte2 [5->SPC-3] */ +#define DEF_SECTOR_SIZE 512 +#define DEF_UNMAP_ALIGNMENT 0 +#define DEF_UNMAP_GRANULARITY 1 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF #define DEF_UNMAP_MAX_DESC 256 -#define DEF_UNMAP_GRANULARITY 1 -#define DEF_UNMAP_ALIGNMENT 0 -#define DEF_TPWS 0 -#define DEF_TPU 0 +#define DEF_VIRTUAL_GB 0 +#define DEF_VPD_USE_HOSTNO 1 +#define DEF_WRITESAME_LENGTH 0xFFFF /* bit mask values for scsi_debug_opts */ #define SCSI_DEBUG_OPT_NOISE 1 @@ -144,6 +146,7 @@ static const char * scsi_debug_version_date = "20100324"; /* when 1==SCSI_DEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this * sector on read commands: */ #define OPT_MEDIUM_ERR_ADDR 0x1234 /* that's sector 4660 in decimal */ +#define OPT_MEDIUM_ERR_NUM 10 /* number of consecutive medium errs */ /* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1) * or "peripheral device" addressing (value 0) */ @@ -155,36 +158,38 @@ static const char * scsi_debug_version_date = "20100324"; #define SCSI_DEBUG_CANQUEUE 255 static int scsi_debug_add_host = DEF_NUM_HOST; +static int scsi_debug_ato = DEF_ATO; static int scsi_debug_delay = DEF_DELAY; static int scsi_debug_dev_size_mb = DEF_DEV_SIZE_MB; +static int scsi_debug_dif = DEF_DIF; +static int scsi_debug_dix = DEF_DIX; +static int scsi_debug_dsense = DEF_D_SENSE; static int scsi_debug_every_nth = DEF_EVERY_NTH; +static int scsi_debug_fake_rw = DEF_FAKE_RW; +static int scsi_debug_guard = DEF_GUARD; +static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED; static int scsi_debug_max_luns = DEF_MAX_LUNS; static int scsi_debug_max_queue = SCSI_DEBUG_CANQUEUE; -static int scsi_debug_num_parts = DEF_NUM_PARTS; +static int scsi_debug_no_lun_0 = DEF_NO_LUN_0; static int scsi_debug_no_uld = 0; +static int scsi_debug_num_parts = DEF_NUM_PARTS; static int scsi_debug_num_tgts = DEF_NUM_TGTS; /* targets per host */ +static int scsi_debug_opt_blks = DEF_OPT_BLKS; static int scsi_debug_opts = DEF_OPTS; -static int scsi_debug_scsi_level = DEF_SCSI_LEVEL; +static int scsi_debug_physblk_exp = DEF_PHYSBLK_EXP; static int scsi_debug_ptype = DEF_PTYPE; /* SCSI peripheral type (0==disk) */ -static int scsi_debug_dsense = DEF_D_SENSE; -static int scsi_debug_no_lun_0 = DEF_NO_LUN_0; +static int scsi_debug_scsi_level = DEF_SCSI_LEVEL; +static int scsi_debug_sector_size = DEF_SECTOR_SIZE; static int scsi_debug_virtual_gb = DEF_VIRTUAL_GB; -static int scsi_debug_fake_rw = DEF_FAKE_RW; static int scsi_debug_vpd_use_hostno = DEF_VPD_USE_HOSTNO; -static int scsi_debug_sector_size = DEF_SECTOR_SIZE; -static int scsi_debug_dix = DEF_DIX; -static int scsi_debug_dif = DEF_DIF; -static int scsi_debug_guard = DEF_GUARD; -static int scsi_debug_ato = DEF_ATO; -static int scsi_debug_physblk_exp = 
DEF_PHYSBLK_EXP; -static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED; -static int scsi_debug_opt_blks = DEF_OPT_BLKS; -static unsigned int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC; -static unsigned int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS; -static unsigned int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY; +static unsigned int scsi_debug_lbpu = DEF_LBPU; +static unsigned int scsi_debug_lbpws = DEF_LBPWS; +static unsigned int scsi_debug_lbpws10 = DEF_LBPWS10; static unsigned int scsi_debug_unmap_alignment = DEF_UNMAP_ALIGNMENT; -static unsigned int scsi_debug_tpws = DEF_TPWS; -static unsigned int scsi_debug_tpu = DEF_TPU; +static unsigned int scsi_debug_unmap_granularity = DEF_UNMAP_GRANULARITY; +static unsigned int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS; +static unsigned int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC; +static unsigned int scsi_debug_write_same_length = DEF_WRITESAME_LENGTH; static int scsi_debug_cmnd_count = 0; @@ -206,6 +211,11 @@ static int sdebug_sectors_per; /* sectors per cylinder */ #define SCSI_DEBUG_MAX_CMD_LEN 32 +static unsigned int scsi_debug_lbp(void) +{ + return scsi_debug_lbpu | scsi_debug_lbpws | scsi_debug_lbpws10; +} + struct sdebug_dev_info { struct list_head dev_list; unsigned char sense_buff[SDEBUG_SENSE_LEN]; /* weak nexus */ @@ -727,7 +737,7 @@ static int inquiry_evpd_b0(unsigned char * arr) /* Optimal Transfer Length */ put_unaligned_be32(scsi_debug_opt_blks, &arr[8]); - if (scsi_debug_tpu) { + if (scsi_debug_lbpu) { /* Maximum Unmap LBA Count */ put_unaligned_be32(scsi_debug_unmap_max_blocks, &arr[16]); @@ -744,7 +754,10 @@ static int inquiry_evpd_b0(unsigned char * arr) /* Optimal Unmap Granularity */ put_unaligned_be32(scsi_debug_unmap_granularity, &arr[24]); - return 0x3c; /* Mandatory page length for thin provisioning */ + /* Maximum WRITE SAME Length */ + put_unaligned_be64(scsi_debug_write_same_length, &arr[32]); + + return 0x3c; /* Mandatory page length for Logical Block Provisioning */ return sizeof(vpdb0_data); } @@ -767,12 +780,15 @@ static int inquiry_evpd_b2(unsigned char *arr) memset(arr, 0, 0x8); arr[0] = 0; /* threshold exponent */ - if (scsi_debug_tpu) + if (scsi_debug_lbpu) arr[1] = 1 << 7; - if (scsi_debug_tpws) + if (scsi_debug_lbpws) arr[1] |= 1 << 6; + if (scsi_debug_lbpws10) + arr[1] |= 1 << 5; + return 0x8; } @@ -831,7 +847,8 @@ static int resp_inquiry(struct scsi_cmnd * scp, int target, arr[n++] = 0x89; /* ATA information */ arr[n++] = 0xb0; /* Block limits (SBC) */ arr[n++] = 0xb1; /* Block characteristics (SBC) */ - arr[n++] = 0xb2; /* Thin provisioning (SBC) */ + if (scsi_debug_lbp()) /* Logical Block Prov. (SBC) */ + arr[n++] = 0xb2; arr[3] = n - 4; /* number of supported VPD pages */ } else if (0x80 == cmd[2]) { /* unit serial number */ arr[1] = cmd[2]; /*sanity */ @@ -879,7 +896,7 @@ static int resp_inquiry(struct scsi_cmnd * scp, int target, } else if (0xb1 == cmd[2]) { /* Block characteristics (SBC) */ arr[1] = cmd[2]; /*sanity */ arr[3] = inquiry_evpd_b1(&arr[4]); - } else if (0xb2 == cmd[2]) { /* Thin provisioning (SBC) */ + } else if (0xb2 == cmd[2]) { /* Logical Block Prov. 
(SBC) */ arr[1] = cmd[2]; /*sanity */ arr[3] = inquiry_evpd_b2(&arr[4]); } else { @@ -1053,8 +1070,8 @@ static int resp_readcap16(struct scsi_cmnd * scp, arr[13] = scsi_debug_physblk_exp & 0xf; arr[14] = (scsi_debug_lowest_aligned >> 8) & 0x3f; - if (scsi_debug_tpu || scsi_debug_tpws) - arr[14] |= 0x80; /* TPE */ + if (scsi_debug_lbp()) + arr[14] |= 0x80; /* LBPME */ arr[15] = scsi_debug_lowest_aligned & 0xff; @@ -1791,15 +1808,15 @@ static int resp_read(struct scsi_cmnd *SCpnt, unsigned long long lba, return ret; if ((SCSI_DEBUG_OPT_MEDIUM_ERR & scsi_debug_opts) && - (lba <= OPT_MEDIUM_ERR_ADDR) && + (lba <= (OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1)) && ((lba + num) > OPT_MEDIUM_ERR_ADDR)) { /* claim unrecoverable read error */ - mk_sense_buffer(devip, MEDIUM_ERROR, UNRECOVERED_READ_ERR, - 0); + mk_sense_buffer(devip, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0); /* set info field and valid bit for fixed descriptor */ if (0x70 == (devip->sense_buff[0] & 0x7f)) { devip->sense_buff[0] |= 0x80; /* Valid bit */ - ret = OPT_MEDIUM_ERR_ADDR; + ret = (lba < OPT_MEDIUM_ERR_ADDR) + ? OPT_MEDIUM_ERR_ADDR : (int)lba; devip->sense_buff[3] = (ret >> 24) & 0xff; devip->sense_buff[4] = (ret >> 16) & 0xff; devip->sense_buff[5] = (ret >> 8) & 0xff; @@ -2084,6 +2101,12 @@ static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba, if (ret) return ret; + if (num > scsi_debug_write_same_length) { + mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, + 0); + return check_condition_result; + } + write_lock_irqsave(&atomic_rw, iflags); if (unmap && scsi_debug_unmap_granularity) { @@ -2695,37 +2718,40 @@ static int schedule_resp(struct scsi_cmnd * cmnd, /sys/bus/pseudo/drivers/scsi_debug directory is changed. */ module_param_named(add_host, scsi_debug_add_host, int, S_IRUGO | S_IWUSR); +module_param_named(ato, scsi_debug_ato, int, S_IRUGO); module_param_named(delay, scsi_debug_delay, int, S_IRUGO | S_IWUSR); module_param_named(dev_size_mb, scsi_debug_dev_size_mb, int, S_IRUGO); +module_param_named(dif, scsi_debug_dif, int, S_IRUGO); +module_param_named(dix, scsi_debug_dix, int, S_IRUGO); module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR); module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR); module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR); +module_param_named(guard, scsi_debug_guard, int, S_IRUGO); +module_param_named(lbpu, scsi_debug_lbpu, int, S_IRUGO); +module_param_named(lbpws, scsi_debug_lbpws, int, S_IRUGO); +module_param_named(lbpws10, scsi_debug_lbpws10, int, S_IRUGO); +module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO); module_param_named(max_luns, scsi_debug_max_luns, int, S_IRUGO | S_IWUSR); module_param_named(max_queue, scsi_debug_max_queue, int, S_IRUGO | S_IWUSR); module_param_named(no_lun_0, scsi_debug_no_lun_0, int, S_IRUGO | S_IWUSR); module_param_named(no_uld, scsi_debug_no_uld, int, S_IRUGO); module_param_named(num_parts, scsi_debug_num_parts, int, S_IRUGO); module_param_named(num_tgts, scsi_debug_num_tgts, int, S_IRUGO | S_IWUSR); +module_param_named(opt_blks, scsi_debug_opt_blks, int, S_IRUGO); module_param_named(opts, scsi_debug_opts, int, S_IRUGO | S_IWUSR); +module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO); module_param_named(ptype, scsi_debug_ptype, int, S_IRUGO | S_IWUSR); module_param_named(scsi_level, scsi_debug_scsi_level, int, S_IRUGO); -module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR); 
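The resp_read() change above widens the simulated defect from a single LBA to a ten-sector run and reports the first bad sector that falls inside the request. The overlap test, restated as a stand-alone predicate (sketch; constants as defined in this patch):

static int hits_medium_err_window(unsigned long long lba, unsigned int num)
{
	/* Half-open request range [lba, lba + num) intersects the window
	 * [OPT_MEDIUM_ERR_ADDR, OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM). */
	return lba <= (OPT_MEDIUM_ERR_ADDR + OPT_MEDIUM_ERR_NUM - 1) &&
	       (lba + num) > OPT_MEDIUM_ERR_ADDR;
}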
-module_param_named(vpd_use_hostno, scsi_debug_vpd_use_hostno, int, - S_IRUGO | S_IWUSR); module_param_named(sector_size, scsi_debug_sector_size, int, S_IRUGO); -module_param_named(dix, scsi_debug_dix, int, S_IRUGO); -module_param_named(dif, scsi_debug_dif, int, S_IRUGO); -module_param_named(guard, scsi_debug_guard, int, S_IRUGO); -module_param_named(ato, scsi_debug_ato, int, S_IRUGO); -module_param_named(physblk_exp, scsi_debug_physblk_exp, int, S_IRUGO); -module_param_named(opt_blks, scsi_debug_opt_blks, int, S_IRUGO); -module_param_named(lowest_aligned, scsi_debug_lowest_aligned, int, S_IRUGO); +module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO); +module_param_named(unmap_granularity, scsi_debug_unmap_granularity, int, S_IRUGO); module_param_named(unmap_max_blocks, scsi_debug_unmap_max_blocks, int, S_IRUGO); module_param_named(unmap_max_desc, scsi_debug_unmap_max_desc, int, S_IRUGO); -module_param_named(unmap_granularity, scsi_debug_unmap_granularity, int, S_IRUGO); -module_param_named(unmap_alignment, scsi_debug_unmap_alignment, int, S_IRUGO); -module_param_named(tpu, scsi_debug_tpu, int, S_IRUGO); -module_param_named(tpws, scsi_debug_tpws, int, S_IRUGO); +module_param_named(virtual_gb, scsi_debug_virtual_gb, int, S_IRUGO | S_IWUSR); +module_param_named(vpd_use_hostno, scsi_debug_vpd_use_hostno, int, + S_IRUGO | S_IWUSR); +module_param_named(write_same_length, scsi_debug_write_same_length, int, + S_IRUGO | S_IWUSR); MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert"); MODULE_DESCRIPTION("SCSI debug adapter driver"); @@ -2733,36 +2759,38 @@ MODULE_LICENSE("GPL"); MODULE_VERSION(SCSI_DEBUG_VERSION); MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)"); +MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)"); MODULE_PARM_DESC(delay, "# of jiffies to delay response(def=1)"); MODULE_PARM_DESC(dev_size_mb, "size in MB of ram shared by devs(def=8)"); +MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)"); +MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)"); MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)"); MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)"); MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)"); +MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)"); +MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)"); +MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)"); +MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)"); +MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)"); MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)"); MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to 255(def))"); MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)"); MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0))"); MODULE_PARM_DESC(num_parts, "number of partitions(def=0)"); MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)"); +MODULE_PARM_DESC(opt_blks, "optimal transfer length in block (def=64)"); MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... 
(def=0)"); +MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)"); MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])"); MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])"); -MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)"); -MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)"); MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)"); -MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)"); -MODULE_PARM_DESC(opt_blks, "optimal transfer length in block (def=64)"); -MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)"); -MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)"); -MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)"); -MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)"); -MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)"); +MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)"); +MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)"); MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)"); MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)"); -MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)"); -MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)"); -MODULE_PARM_DESC(tpu, "enable TP, support UNMAP command (def=0)"); -MODULE_PARM_DESC(tpws, "enable TP, support WRITE SAME(16) with UNMAP bit (def=0)"); +MODULE_PARM_DESC(virtual_gb, "virtual gigabyte size (def=0 -> use dev_size_mb)"); +MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)"); +MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)"); static char sdebug_info[256]; @@ -3150,7 +3178,7 @@ static ssize_t sdebug_map_show(struct device_driver *ddp, char *buf) { ssize_t count; - if (scsi_debug_tpu == 0 && scsi_debug_tpws == 0) + if (!scsi_debug_lbp()) return scnprintf(buf, PAGE_SIZE, "0-%u\n", sdebug_store_sectors); @@ -3333,8 +3361,8 @@ static int __init scsi_debug_init(void) memset(dif_storep, 0xff, dif_size); } - /* Thin Provisioning */ - if (scsi_debug_tpu || scsi_debug_tpws) { + /* Logical Block Provisioning */ + if (scsi_debug_lbp()) { unsigned int map_bytes; scsi_debug_unmap_max_blocks = @@ -3664,7 +3692,7 @@ int scsi_debug_queuecommand_lck(struct scsi_cmnd *SCpnt, done_funct_t done) errsts = resp_readcap16(SCpnt, devip); else if (cmd[1] == SAI_GET_LBA_STATUS) { - if (scsi_debug_tpu == 0 && scsi_debug_tpws == 0) { + if (scsi_debug_lbp() == 0) { mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_COMMAND_OPCODE, 0); errsts = check_condition_result; @@ -3775,8 +3803,10 @@ write: } break; case WRITE_SAME_16: + case WRITE_SAME: if (cmd[1] & 0x8) { - if (scsi_debug_tpws == 0) { + if ((*cmd == WRITE_SAME_16 && scsi_debug_lbpws == 0) || + (*cmd == WRITE_SAME && scsi_debug_lbpws10 == 0)) { mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0); errsts = check_condition_result; @@ -3785,8 +3815,6 @@ write: } if (errsts) break; - /* fall through */ - case WRITE_SAME: errsts = check_readiness(SCpnt, 0, devip); if (errsts) break; @@ -3798,7 +3826,7 @@ write: if (errsts) break; - if (scsi_debug_unmap_max_desc == 0 || scsi_debug_tpu == 0) { + if (scsi_debug_unmap_max_desc == 0 || scsi_debug_lbpu == 0) { mk_sense_buffer(devip, ILLEGAL_REQUEST, INVALID_COMMAND_OPCODE, 0); 
errsts = check_condition_result; diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c index 43fad4c09beb..82e9e5c0476e 100644 --- a/drivers/scsi/scsi_devinfo.c +++ b/drivers/scsi/scsi_devinfo.c @@ -382,6 +382,91 @@ int scsi_dev_info_list_add_keyed(int compatible, char *vendor, char *model, EXPORT_SYMBOL(scsi_dev_info_list_add_keyed); /** + * scsi_dev_info_list_del_keyed - remove one dev_info list entry. + * @vendor: vendor string + * @model: model (product) string + * @key: specify list to use + * + * Description: + * Remove and destroy one dev_info entry for @vendor, @model + * in list specified by @key. + * + * Returns: 0 OK, -error on failure. + **/ +int scsi_dev_info_list_del_keyed(char *vendor, char *model, int key) +{ + struct scsi_dev_info_list *devinfo, *found = NULL; + struct scsi_dev_info_list_table *devinfo_table = + scsi_devinfo_lookup_by_key(key); + + if (IS_ERR(devinfo_table)) + return PTR_ERR(devinfo_table); + + list_for_each_entry(devinfo, &devinfo_table->scsi_dev_info_list, + dev_info_list) { + if (devinfo->compatible) { + /* + * Behave like the older version of get_device_flags. + */ + size_t max; + /* + * XXX why skip leading spaces? If an odd INQUIRY + * value, that should have been part of the + * scsi_static_device_list[] entry, such as " FOO" + * rather than "FOO". Since this code is already + * here, and we don't know what device it is + * trying to work with, leave it as-is. + */ + max = 8; /* max length of vendor */ + while ((max > 0) && *vendor == ' ') { + max--; + vendor++; + } + /* + * XXX removing the following strlen() would be + * good, using it means that for a an entry not in + * the list, we scan every byte of every vendor + * listed in scsi_static_device_list[], and never match + * a single one (and still have to compare at + * least the first byte of each vendor). + */ + if (memcmp(devinfo->vendor, vendor, + min(max, strlen(devinfo->vendor)))) + continue; + /* + * Skip spaces again. + */ + max = 16; /* max length of model */ + while ((max > 0) && *model == ' ') { + max--; + model++; + } + if (memcmp(devinfo->model, model, + min(max, strlen(devinfo->model)))) + continue; + found = devinfo; + } else { + if (!memcmp(devinfo->vendor, vendor, + sizeof(devinfo->vendor)) && + !memcmp(devinfo->model, model, + sizeof(devinfo->model))) + found = devinfo; + } + if (found) + break; + } + + if (found) { + list_del(&found->dev_info_list); + kfree(found); + return 0; + } + + return -ENOENT; +} +EXPORT_SYMBOL(scsi_dev_info_list_del_keyed); + +/** * scsi_dev_info_list_add_str - parse dev_list and add to the scsi_dev_info_list. * @dev_list: string of device flags to add * diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index 45c75649b9e0..991de3c15cfc 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -223,7 +223,7 @@ static inline void scsi_eh_prt_fail_stats(struct Scsi_Host *shost, * @scmd: Cmd to have sense checked. 
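scsi_dev_info_list_del_keyed() above is the removal counterpart to scsi_dev_info_list_add_keyed(), matching vendor/model with the same compatible-mode quirks as the lookup path. A hypothetical caller (the vendor and model strings are placeholders; SCSI_DEVINFO_DH is the key this series adds to scsi_priv.h for device handlers):

static void example_drop_devinfo(void)
{
	int err;

	err = scsi_dev_info_list_del_keyed("VENDOR", "MODEL",
					   SCSI_DEVINFO_DH);
	if (err)	/* -ENOENT if no entry matched */
		printk(KERN_DEBUG "devinfo entry not removed: %d\n", err);
}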
* * Return value: - * SUCCESS or FAILED or NEEDS_RETRY + * SUCCESS or FAILED or NEEDS_RETRY or TARGET_ERROR * * Notes: * When a deferred error is detected the current command has @@ -326,17 +326,19 @@ static int scsi_check_sense(struct scsi_cmnd *scmd) */ return SUCCESS; - /* these three are not supported */ + /* these are not supported */ case COPY_ABORTED: case VOLUME_OVERFLOW: case MISCOMPARE: - return SUCCESS; + case BLANK_CHECK: + case DATA_PROTECT: + return TARGET_ERROR; case MEDIUM_ERROR: if (sshdr.asc == 0x11 || /* UNRECOVERED READ ERR */ sshdr.asc == 0x13 || /* AMNF DATA FIELD */ sshdr.asc == 0x14) { /* RECORD NOT FOUND */ - return SUCCESS; + return TARGET_ERROR; } return NEEDS_RETRY; @@ -344,11 +346,9 @@ static int scsi_check_sense(struct scsi_cmnd *scmd) if (scmd->device->retry_hwerror) return ADD_TO_MLQUEUE; else - return SUCCESS; + return TARGET_ERROR; case ILLEGAL_REQUEST: - case BLANK_CHECK: - case DATA_PROTECT: default: return SUCCESS; } @@ -787,6 +787,7 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd, case SUCCESS: case NEEDS_RETRY: case FAILED: + case TARGET_ERROR: break; case ADD_TO_MLQUEUE: rtn = NEEDS_RETRY; @@ -1469,6 +1470,14 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd) rtn = scsi_check_sense(scmd); if (rtn == NEEDS_RETRY) goto maybe_retry; + else if (rtn == TARGET_ERROR) { + /* + * Need to modify host byte to signal a + * permanent target failure + */ + scmd->result |= (DID_TARGET_FAILURE << 16); + rtn = SUCCESS; + } /* if rtn == FAILED, we have no sense information; * returning FAILED will wake the error handler thread * to collect the sense and redo the decide @@ -1486,6 +1495,7 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd) case RESERVATION_CONFLICT: sdev_printk(KERN_INFO, scmd->device, "reservation conflict\n"); + scmd->result |= (DID_NEXUS_FAILURE << 16); return SUCCESS; /* causes immediate i/o error */ default: return FAILED; diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index fb2bb35c62cb..2d63c8ad1442 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -667,6 +667,30 @@ void scsi_release_buffers(struct scsi_cmnd *cmd) } EXPORT_SYMBOL(scsi_release_buffers); +static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result) +{ + int error = 0; + + switch(host_byte(result)) { + case DID_TRANSPORT_FAILFAST: + error = -ENOLINK; + break; + case DID_TARGET_FAILURE: + cmd->result |= (DID_OK << 16); + error = -EREMOTEIO; + break; + case DID_NEXUS_FAILURE: + cmd->result |= (DID_OK << 16); + error = -EBADE; + break; + default: + error = -EIO; + break; + } + + return error; +} + /* * Function: scsi_io_completion() * @@ -737,7 +761,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) req->sense_len = len; } if (!sense_deferred) - error = -EIO; + error = __scsi_error_from_host_byte(cmd, result); } req->resid_len = scsi_get_resid(cmd); @@ -796,7 +820,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL) return; - error = -EIO; + error = __scsi_error_from_host_byte(cmd, result); if (host_byte(result) == DID_RESET) { /* Third party bus reset or reset for error recovery @@ -843,6 +867,13 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) description = "Host Data Integrity Failure"; action = ACTION_FAIL; error = -EILSEQ; + /* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */ + } else if ((sshdr.asc == 0x20 || sshdr.asc == 0x24) && + (cmd->cmnd[0] == 
UNMAP || + cmd->cmnd[0] == WRITE_SAME_16 || + cmd->cmnd[0] == WRITE_SAME)) { + description = "Discard failure"; + action = ACTION_FAIL; } else action = ACTION_FAIL; break; @@ -1038,6 +1069,7 @@ static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev, cmd->request = req; cmd->cmnd = req->cmd; + cmd->prot_op = SCSI_PROT_NORMAL; return cmd; } diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h index 342ee1a9c41d..2a588955423a 100644 --- a/drivers/scsi/scsi_priv.h +++ b/drivers/scsi/scsi_priv.h @@ -45,6 +45,7 @@ static inline void scsi_log_completion(struct scsi_cmnd *cmd, int disposition) enum { SCSI_DEVINFO_GLOBAL = 0, SCSI_DEVINFO_SPI, + SCSI_DEVINFO_DH, }; extern int scsi_get_device_flags(struct scsi_device *sdev, @@ -56,6 +57,7 @@ extern int scsi_get_device_flags_keyed(struct scsi_device *sdev, extern int scsi_dev_info_list_add_keyed(int compatible, char *vendor, char *model, char *strflags, int flags, int key); +extern int scsi_dev_info_list_del_keyed(char *vendor, char *model, int key); extern int scsi_dev_info_add_list(int key, const char *name); extern int scsi_dev_info_remove_list(int key); diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index f905ecb5704d..b4218390941e 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -954,6 +954,7 @@ iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid) if (dd_size) conn->dd_data = &conn[1]; + mutex_init(&conn->ep_mutex); INIT_LIST_HEAD(&conn->conn_list); conn->transport = transport; conn->cid = cid; @@ -975,7 +976,6 @@ iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid) spin_lock_irqsave(&connlock, flags); list_add(&conn->conn_list, &connlist); - conn->active = 1; spin_unlock_irqrestore(&connlock, flags); ISCSI_DBG_TRANS_CONN(conn, "Completed conn creation\n"); @@ -1001,7 +1001,6 @@ int iscsi_destroy_conn(struct iscsi_cls_conn *conn) unsigned long flags; spin_lock_irqsave(&connlock, flags); - conn->active = 0; list_del(&conn->conn_list); spin_unlock_irqrestore(&connlock, flags); @@ -1430,6 +1429,29 @@ release_host: return err; } +static int iscsi_if_ep_disconnect(struct iscsi_transport *transport, + u64 ep_handle) +{ + struct iscsi_cls_conn *conn; + struct iscsi_endpoint *ep; + + if (!transport->ep_disconnect) + return -EINVAL; + + ep = iscsi_lookup_endpoint(ep_handle); + if (!ep) + return -EINVAL; + conn = ep->conn; + if (conn) { + mutex_lock(&conn->ep_mutex); + conn->ep = NULL; + mutex_unlock(&conn->ep_mutex); + } + + transport->ep_disconnect(ep); + return 0; +} + static int iscsi_if_transport_ep(struct iscsi_transport *transport, struct iscsi_uevent *ev, int msg_type) @@ -1454,14 +1476,8 @@ iscsi_if_transport_ep(struct iscsi_transport *transport, ev->u.ep_poll.timeout_ms); break; case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT: - if (!transport->ep_disconnect) - return -EINVAL; - - ep = iscsi_lookup_endpoint(ev->u.ep_disconnect.ep_handle); - if (!ep) - return -EINVAL; - - transport->ep_disconnect(ep); + rc = iscsi_if_ep_disconnect(transport, + ev->u.ep_disconnect.ep_handle); break; } return rc; @@ -1609,12 +1625,31 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group) session = iscsi_session_lookup(ev->u.b_conn.sid); conn = iscsi_conn_lookup(ev->u.b_conn.sid, ev->u.b_conn.cid); - if (session && conn) - ev->r.retcode = transport->bind_conn(session, conn, - ev->u.b_conn.transport_eph, - ev->u.b_conn.is_leading); - else + if (conn && conn->ep) + 
iscsi_if_ep_disconnect(transport, conn->ep->id); + + if (!session || !conn) { err = -EINVAL; + break; + } + + ev->r.retcode = transport->bind_conn(session, conn, + ev->u.b_conn.transport_eph, + ev->u.b_conn.is_leading); + if (ev->r.retcode || !transport->ep_connect) + break; + + ep = iscsi_lookup_endpoint(ev->u.b_conn.transport_eph); + if (ep) { + ep->conn = conn; + + mutex_lock(&conn->ep_mutex); + conn->ep = ep; + mutex_unlock(&conn->ep_mutex); + } else + iscsi_cls_conn_printk(KERN_ERR, conn, + "Could not set ep conn " + "binding\n"); break; case ISCSI_UEVENT_SET_PARAM: err = iscsi_set_param(transport, ev); @@ -1747,13 +1782,48 @@ iscsi_conn_attr(data_digest, ISCSI_PARAM_DATADGST_EN); iscsi_conn_attr(ifmarker, ISCSI_PARAM_IFMARKER_EN); iscsi_conn_attr(ofmarker, ISCSI_PARAM_OFMARKER_EN); iscsi_conn_attr(persistent_port, ISCSI_PARAM_PERSISTENT_PORT); -iscsi_conn_attr(port, ISCSI_PARAM_CONN_PORT); iscsi_conn_attr(exp_statsn, ISCSI_PARAM_EXP_STATSN); iscsi_conn_attr(persistent_address, ISCSI_PARAM_PERSISTENT_ADDRESS); -iscsi_conn_attr(address, ISCSI_PARAM_CONN_ADDRESS); iscsi_conn_attr(ping_tmo, ISCSI_PARAM_PING_TMO); iscsi_conn_attr(recv_tmo, ISCSI_PARAM_RECV_TMO); +#define iscsi_conn_ep_attr_show(param) \ +static ssize_t show_conn_ep_param_##param(struct device *dev, \ + struct device_attribute *attr,\ + char *buf) \ +{ \ + struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev->parent); \ + struct iscsi_transport *t = conn->transport; \ + struct iscsi_endpoint *ep; \ + ssize_t rc; \ + \ + /* \ + * Need to make sure ep_disconnect does not free the LLD's \ + * interconnect resources while we are trying to read them. \ + */ \ + mutex_lock(&conn->ep_mutex); \ + ep = conn->ep; \ + if (!ep && t->ep_connect) { \ + mutex_unlock(&conn->ep_mutex); \ + return -ENOTCONN; \ + } \ + \ + if (ep) \ + rc = t->get_ep_param(ep, param, buf); \ + else \ + rc = t->get_conn_param(conn, param, buf); \ + mutex_unlock(&conn->ep_mutex); \ + return rc; \ +} + +#define iscsi_conn_ep_attr(field, param) \ + iscsi_conn_ep_attr_show(param) \ +static ISCSI_CLASS_ATTR(conn, field, S_IRUGO, \ + show_conn_ep_param_##param, NULL); + +iscsi_conn_ep_attr(address, ISCSI_PARAM_CONN_ADDRESS); +iscsi_conn_ep_attr(port, ISCSI_PARAM_CONN_PORT); + /* * iSCSI session attrs */ diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index e56730214c05..3be5db5d6343 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -96,6 +96,7 @@ MODULE_ALIAS_SCSI_DEVICE(TYPE_RBC); #define SD_MINORS 0 #endif +static void sd_config_discard(struct scsi_disk *, unsigned int); static int sd_revalidate_disk(struct gendisk *); static void sd_unlock_native_capacity(struct gendisk *disk); static int sd_probe(struct device *); @@ -294,7 +295,54 @@ sd_show_thin_provisioning(struct device *dev, struct device_attribute *attr, { struct scsi_disk *sdkp = to_scsi_disk(dev); - return snprintf(buf, 20, "%u\n", sdkp->thin_provisioning); + return snprintf(buf, 20, "%u\n", sdkp->lbpme); +} + +static const char *lbp_mode[] = { + [SD_LBP_FULL] = "full", + [SD_LBP_UNMAP] = "unmap", + [SD_LBP_WS16] = "writesame_16", + [SD_LBP_WS10] = "writesame_10", + [SD_LBP_ZERO] = "writesame_zero", + [SD_LBP_DISABLE] = "disabled", +}; + +static ssize_t +sd_show_provisioning_mode(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_disk *sdkp = to_scsi_disk(dev); + + return snprintf(buf, 20, "%s\n", lbp_mode[sdkp->provisioning_mode]); +} + +static ssize_t +sd_store_provisioning_mode(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) 
+{ + struct scsi_disk *sdkp = to_scsi_disk(dev); + struct scsi_device *sdp = sdkp->device; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + if (sdp->type != TYPE_DISK) + return -EINVAL; + + if (!strncmp(buf, lbp_mode[SD_LBP_UNMAP], 20)) + sd_config_discard(sdkp, SD_LBP_UNMAP); + else if (!strncmp(buf, lbp_mode[SD_LBP_WS16], 20)) + sd_config_discard(sdkp, SD_LBP_WS16); + else if (!strncmp(buf, lbp_mode[SD_LBP_WS10], 20)) + sd_config_discard(sdkp, SD_LBP_WS10); + else if (!strncmp(buf, lbp_mode[SD_LBP_ZERO], 20)) + sd_config_discard(sdkp, SD_LBP_ZERO); + else if (!strncmp(buf, lbp_mode[SD_LBP_DISABLE], 20)) + sd_config_discard(sdkp, SD_LBP_DISABLE); + else + return -EINVAL; + + return count; } static struct device_attribute sd_disk_attrs[] = { @@ -309,6 +357,8 @@ static struct device_attribute sd_disk_attrs[] = { __ATTR(protection_mode, S_IRUGO, sd_show_protection_mode, NULL), __ATTR(app_tag_own, S_IRUGO, sd_show_app_tag_own, NULL), __ATTR(thin_provisioning, S_IRUGO, sd_show_thin_provisioning, NULL), + __ATTR(provisioning_mode, S_IRUGO|S_IWUSR, sd_show_provisioning_mode, + sd_store_provisioning_mode), __ATTR_NULL, }; @@ -433,6 +483,49 @@ static void sd_prot_op(struct scsi_cmnd *scmd, unsigned int dif) scsi_set_prot_type(scmd, dif); } +static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode) +{ + struct request_queue *q = sdkp->disk->queue; + unsigned int logical_block_size = sdkp->device->sector_size; + unsigned int max_blocks = 0; + + q->limits.discard_zeroes_data = sdkp->lbprz; + q->limits.discard_alignment = sdkp->unmap_alignment; + q->limits.discard_granularity = + max(sdkp->physical_block_size, + sdkp->unmap_granularity * logical_block_size); + + switch (mode) { + + case SD_LBP_DISABLE: + q->limits.max_discard_sectors = 0; + queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q); + return; + + case SD_LBP_UNMAP: + max_blocks = min_not_zero(sdkp->max_unmap_blocks, 0xffffffff); + break; + + case SD_LBP_WS16: + max_blocks = min_not_zero(sdkp->max_ws_blocks, 0xffffffff); + break; + + case SD_LBP_WS10: + max_blocks = min_not_zero(sdkp->max_ws_blocks, (u32)0xffff); + break; + + case SD_LBP_ZERO: + max_blocks = min_not_zero(sdkp->max_ws_blocks, (u32)0xffff); + q->limits.discard_zeroes_data = 1; + break; + } + + q->limits.max_discard_sectors = max_blocks * (logical_block_size >> 9); + queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q); + + sdkp->provisioning_mode = mode; +} + /** * scsi_setup_discard_cmnd - unmap blocks on thinly provisioned device * @sdp: scsi device to operate one @@ -449,6 +542,7 @@ static int scsi_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq) unsigned int nr_sectors = bio_sectors(bio); unsigned int len; int ret; + char *buf; struct page *page; if (sdkp->device->sector_size == 4096) { @@ -464,8 +558,9 @@ static int scsi_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq) if (!page) return BLKPREP_DEFER; - if (sdkp->unmap) { - char *buf = page_address(page); + switch (sdkp->provisioning_mode) { + case SD_LBP_UNMAP: + buf = page_address(page); rq->cmd_len = 10; rq->cmd[0] = UNMAP; @@ -477,7 +572,9 @@ static int scsi_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq) put_unaligned_be32(nr_sectors, &buf[16]); len = 24; - } else { + break; + + case SD_LBP_WS16: rq->cmd_len = 16; rq->cmd[0] = WRITE_SAME_16; rq->cmd[1] = 0x8; /* UNMAP */ @@ -485,11 +582,29 @@ static int scsi_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq) put_unaligned_be32(nr_sectors, &rq->cmd[10]); len = sdkp->device->sector_size; + break; + + 
case SD_LBP_WS10: + case SD_LBP_ZERO: + rq->cmd_len = 10; + rq->cmd[0] = WRITE_SAME; + if (sdkp->provisioning_mode == SD_LBP_WS10) + rq->cmd[1] = 0x8; /* UNMAP */ + put_unaligned_be32(sector, &rq->cmd[2]); + put_unaligned_be16(nr_sectors, &rq->cmd[7]); + + len = sdkp->device->sector_size; + break; + + default: + goto out; } blk_add_request_payload(rq, page, len); ret = scsi_setup_blk_pc_cmnd(sdp, rq); rq->buffer = page_address(page); + +out: if (ret != BLKPREP_OK) { __free_page(page); rq->buffer = NULL; @@ -1251,12 +1366,10 @@ static int sd_done(struct scsi_cmnd *SCpnt) struct scsi_disk *sdkp = scsi_disk(SCpnt->request->rq_disk); int sense_valid = 0; int sense_deferred = 0; + unsigned char op = SCpnt->cmnd[0]; - if (SCpnt->request->cmd_flags & REQ_DISCARD) { - if (!result) - scsi_set_resid(SCpnt, 0); - return good_bytes; - } + if ((SCpnt->request->cmd_flags & REQ_DISCARD) && !result) + scsi_set_resid(SCpnt, 0); if (result) { sense_valid = scsi_command_normalize_sense(SCpnt, &sshdr); @@ -1295,10 +1408,17 @@ static int sd_done(struct scsi_cmnd *SCpnt) SCpnt->result = 0; memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); break; - case ABORTED_COMMAND: /* DIF: Target detected corruption */ - case ILLEGAL_REQUEST: /* DIX: Host detected corruption */ - if (sshdr.asc == 0x10) + case ABORTED_COMMAND: + if (sshdr.asc == 0x10) /* DIF: Target detected corruption */ + good_bytes = sd_completed_bytes(SCpnt); + break; + case ILLEGAL_REQUEST: + if (sshdr.asc == 0x10) /* DIX: Host detected corruption */ good_bytes = sd_completed_bytes(SCpnt); + /* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */ + if ((sshdr.asc == 0x20 || sshdr.asc == 0x24) && + (op == UNMAP || op == WRITE_SAME_16 || op == WRITE_SAME)) + sd_config_discard(sdkp, SD_LBP_DISABLE); break; default: break; @@ -1596,17 +1716,13 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp, sd_printk(KERN_NOTICE, sdkp, "physical block alignment offset: %u\n", alignment); - if (buffer[14] & 0x80) { /* TPE */ - struct request_queue *q = sdp->request_queue; + if (buffer[14] & 0x80) { /* LBPME */ + sdkp->lbpme = 1; - sdkp->thin_provisioning = 1; - q->limits.discard_granularity = sdkp->physical_block_size; - q->limits.max_discard_sectors = 0xffffffff; + if (buffer[14] & 0x40) /* LBPRZ */ + sdkp->lbprz = 1; - if (buffer[14] & 0x40) /* TPRZ */ - q->limits.discard_zeroes_data = 1; - - queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q); + sd_config_discard(sdkp, SD_LBP_WS16); } sdkp->capacity = lba + 1; @@ -2091,7 +2207,6 @@ static void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer) */ static void sd_read_block_limits(struct scsi_disk *sdkp) { - struct request_queue *q = sdkp->disk->queue; unsigned int sector_sz = sdkp->device->sector_size; const int vpd_len = 64; unsigned char *buffer = kmalloc(vpd_len, GFP_KERNEL); @@ -2106,39 +2221,46 @@ static void sd_read_block_limits(struct scsi_disk *sdkp) blk_queue_io_opt(sdkp->disk->queue, get_unaligned_be32(&buffer[12]) * sector_sz); - /* Thin provisioning enabled and page length indicates TP support */ - if (sdkp->thin_provisioning && buffer[3] == 0x3c) { - unsigned int lba_count, desc_count, granularity; + if (buffer[3] == 0x3c) { + unsigned int lba_count, desc_count; - lba_count = get_unaligned_be32(&buffer[20]); - desc_count = get_unaligned_be32(&buffer[24]); - - if (lba_count && desc_count) { - if (sdkp->tpvpd && !sdkp->tpu) - sdkp->unmap = 0; - else - sdkp->unmap = 1; - } + sdkp->max_ws_blocks = + (u32) min_not_zero(get_unaligned_be64(&buffer[36]), + (u64)0xffffffff); 
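The SD_LBP_WS10/SD_LBP_ZERO path above packs the discard into a 10-byte WRITE SAME CDB, whose 16-bit transfer length is why sd_config_discard() clamps max_ws_blocks to 0xffff for these modes. The CDB layout in isolation (sketch; assumes nr fits the 16-bit count field):

static void build_ws10_discard_cdb(unsigned char *cdb, u32 lba, u16 nr,
				   int unmap)
{
	memset(cdb, 0, 10);
	cdb[0] = WRITE_SAME;		/* opcode 0x41 */
	if (unmap)			/* set for SD_LBP_WS10, not SD_LBP_ZERO */
		cdb[1] = 0x8;
	put_unaligned_be32(lba, &cdb[2]);
	put_unaligned_be16(nr, &cdb[7]);
}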
- if (sdkp->tpvpd && !sdkp->tpu && !sdkp->tpws) { - sd_printk(KERN_ERR, sdkp, "Thin provisioning is " \ - "enabled but neither TPU, nor TPWS are " \ - "set. Disabling discard!\n"); + if (!sdkp->lbpme) goto out; - } - if (lba_count) - q->limits.max_discard_sectors = - lba_count * sector_sz >> 9; + lba_count = get_unaligned_be32(&buffer[20]); + desc_count = get_unaligned_be32(&buffer[24]); - granularity = get_unaligned_be32(&buffer[28]); + if (lba_count && desc_count) + sdkp->max_unmap_blocks = lba_count; - if (granularity) - q->limits.discard_granularity = granularity * sector_sz; + sdkp->unmap_granularity = get_unaligned_be32(&buffer[28]); if (buffer[32] & 0x80) - q->limits.discard_alignment = + sdkp->unmap_alignment = get_unaligned_be32(&buffer[32]) & ~(1 << 31); + + if (!sdkp->lbpvpd) { /* LBP VPD page not provided */ + + if (sdkp->max_unmap_blocks) + sd_config_discard(sdkp, SD_LBP_UNMAP); + else + sd_config_discard(sdkp, SD_LBP_WS16); + + } else { /* LBP VPD page tells us what to use */ + + if (sdkp->lbpu && sdkp->max_unmap_blocks) + sd_config_discard(sdkp, SD_LBP_UNMAP); + else if (sdkp->lbpws) + sd_config_discard(sdkp, SD_LBP_WS16); + else if (sdkp->lbpws10) + sd_config_discard(sdkp, SD_LBP_WS10); + else + sd_config_discard(sdkp, SD_LBP_DISABLE); + } } out: @@ -2172,15 +2294,15 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp) } /** - * sd_read_thin_provisioning - Query thin provisioning VPD page + * sd_read_block_provisioning - Query provisioning VPD page * @disk: disk to query */ -static void sd_read_thin_provisioning(struct scsi_disk *sdkp) +static void sd_read_block_provisioning(struct scsi_disk *sdkp) { unsigned char *buffer; const int vpd_len = 8; - if (sdkp->thin_provisioning == 0) + if (sdkp->lbpme == 0) return; buffer = kmalloc(vpd_len, GFP_KERNEL); @@ -2188,9 +2310,10 @@ static void sd_read_thin_provisioning(struct scsi_disk *sdkp) if (!buffer || scsi_get_vpd_page(sdkp->device, 0xb2, buffer, vpd_len)) goto out; - sdkp->tpvpd = 1; - sdkp->tpu = (buffer[5] >> 7) & 1; /* UNMAP */ - sdkp->tpws = (buffer[5] >> 6) & 1; /* WRITE SAME(16) with UNMAP */ + sdkp->lbpvpd = 1; + sdkp->lbpu = (buffer[5] >> 7) & 1; /* UNMAP */ + sdkp->lbpws = (buffer[5] >> 6) & 1; /* WRITE SAME(16) with UNMAP */ + sdkp->lbpws10 = (buffer[5] >> 5) & 1; /* WRITE SAME(10) with UNMAP */ out: kfree(buffer); @@ -2247,7 +2370,7 @@ static int sd_revalidate_disk(struct gendisk *disk) sd_read_capacity(sdkp, buffer); if (sd_try_extended_inquiry(sdp)) { - sd_read_thin_provisioning(sdkp); + sd_read_block_provisioning(sdkp); sd_read_block_limits(sdkp); sd_read_block_characteristics(sdkp); } diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h index c9d8f6ca49e2..6ad798bfd52a 100644 --- a/drivers/scsi/sd.h +++ b/drivers/scsi/sd.h @@ -43,6 +43,15 @@ enum { SD_MEMPOOL_SIZE = 2, /* CDB pool size */ }; +enum { + SD_LBP_FULL = 0, /* Full logical block provisioning */ + SD_LBP_UNMAP, /* Use UNMAP command */ + SD_LBP_WS16, /* Use WRITE SAME(16) with UNMAP bit */ + SD_LBP_WS10, /* Use WRITE SAME(10) with UNMAP bit */ + SD_LBP_ZERO, /* Use WRITE SAME(10) with zero payload */ + SD_LBP_DISABLE, /* Discard disabled due to failed cmd */ +}; + struct scsi_disk { struct scsi_driver *driver; /* always &sd_template */ struct scsi_device *device; @@ -50,21 +59,27 @@ struct scsi_disk { struct gendisk *disk; atomic_t openers; sector_t capacity; /* size in 512-byte sectors */ + u32 max_ws_blocks; + u32 max_unmap_blocks; + u32 unmap_granularity; + u32 unmap_alignment; u32 index; unsigned int physical_block_size; u8 
media_present; u8 write_prot; u8 protection_type;/* Data Integrity Field */ + u8 provisioning_mode; unsigned ATO : 1; /* state of disk ATO bit */ unsigned WCE : 1; /* state of disk WCE bit */ unsigned RCD : 1; /* state of disk RCD bit, unused */ unsigned DPOFUA : 1; /* state of disk DPOFUA bit */ unsigned first_scan : 1; - unsigned thin_provisioning : 1; - unsigned unmap : 1; - unsigned tpws : 1; - unsigned tpu : 1; - unsigned tpvpd : 1; + unsigned lbpme : 1; + unsigned lbprz : 1; + unsigned lbpu : 1; + unsigned lbpws : 1; + unsigned lbpws10 : 1; + unsigned lbpvpd : 1; }; #define to_scsi_disk(obj) container_of(obj,struct scsi_disk,dev) diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index cdeb01f45bc2..fc14b8dea0d7 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -167,13 +167,13 @@ config SPI_IMX_VER_0_0 def_bool y if SOC_IMX21 || SOC_IMX27 config SPI_IMX_VER_0_4 - def_bool y if ARCH_MX31 + def_bool y if SOC_IMX31 config SPI_IMX_VER_0_7 - def_bool y if ARCH_MX25 || ARCH_MX35 || ARCH_MX51 || ARCH_MX53 + def_bool y if ARCH_MX25 || SOC_IMX35 || SOC_IMX51 || SOC_IMX53 config SPI_IMX_VER_2_3 - def_bool y if ARCH_MX51 || ARCH_MX53 + def_bool y if SOC_IMX51 || SOC_IMX53 config SPI_IMX tristate "Freescale i.MX SPI controllers" @@ -369,6 +369,16 @@ config SPI_TEGRA help SPI driver for NVidia Tegra SoCs +config SPI_TI_SSP + tristate "TI Sequencer Serial Port - SPI Support" + depends on MFD_TI_SSP + help + This selects an SPI master implementation using a TI sequencer + serial port. + + To compile this driver as a module, choose M here: the + module will be called ti-ssp-spi. + config SPI_TOPCLIFF_PCH tristate "Topcliff PCH SPI Controller" depends on PCI diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile index 8b5a315045a6..fd2fc5f6505f 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile @@ -45,6 +45,7 @@ obj-$(CONFIG_SPI_S3C24XX_GPIO) += spi_s3c24xx_gpio.o obj-$(CONFIG_SPI_S3C24XX) += spi_s3c24xx_hw.o obj-$(CONFIG_SPI_S3C64XX) += spi_s3c64xx.o obj-$(CONFIG_SPI_TEGRA) += spi_tegra.o +obj-$(CONFIG_SPI_TI_SSP) += ti-ssp-spi.o obj-$(CONFIG_SPI_TOPCLIFF_PCH) += spi_topcliff_pch.o obj-$(CONFIG_SPI_TXX9) += spi_txx9.o obj-$(CONFIG_SPI_XILINX) += xilinx_spi.o diff --git a/drivers/spi/amba-pl022.c b/drivers/spi/amba-pl022.c index 14a451b2ad63..5c2b092a915e 100644 --- a/drivers/spi/amba-pl022.c +++ b/drivers/spi/amba-pl022.c @@ -2020,7 +2020,7 @@ static void pl022_cleanup(struct spi_device *spi) static int __devinit -pl022_probe(struct amba_device *adev, struct amba_id *id) +pl022_probe(struct amba_device *adev, const struct amba_id *id) { struct device *dev = &adev->dev; struct pl022_ssp_controller *platform_info = adev->dev.platform_data; diff --git a/drivers/spi/davinci_spi.c b/drivers/spi/davinci_spi.c index e90c2d6a84b3..1f0ed8005c91 100644 --- a/drivers/spi/davinci_spi.c +++ b/drivers/spi/davinci_spi.c @@ -807,7 +807,6 @@ static int davinci_spi_probe(struct platform_device *pdev) struct resource *r, *mem; resource_size_t dma_rx_chan = SPI_NO_RESOURCE; resource_size_t dma_tx_chan = SPI_NO_RESOURCE; - resource_size_t dma_eventq = SPI_NO_RESOURCE; int i = 0, ret = 0; u32 spipc0; @@ -895,17 +894,13 @@ static int davinci_spi_probe(struct platform_device *pdev) r = platform_get_resource(pdev, IORESOURCE_DMA, 1); if (r) dma_tx_chan = r->start; - r = platform_get_resource(pdev, IORESOURCE_DMA, 2); - if (r) - dma_eventq = r->start; dspi->bitbang.txrx_bufs = davinci_spi_bufs; if (dma_rx_chan != SPI_NO_RESOURCE && - dma_tx_chan != SPI_NO_RESOURCE && - dma_eventq != 
SPI_NO_RESOURCE) { + dma_tx_chan != SPI_NO_RESOURCE) { dspi->dma.rx_channel = dma_rx_chan; dspi->dma.tx_channel = dma_tx_chan; - dspi->dma.eventq = dma_eventq; + dspi->dma.eventq = pdata->dma_event_q; ret = davinci_spi_request_dma(dspi); if (ret) @@ -914,7 +909,7 @@ static int davinci_spi_probe(struct platform_device *pdev) dev_info(&pdev->dev, "DMA: supported\n"); dev_info(&pdev->dev, "DMA: RX channel: %d, TX channel: %d, " "event queue: %d\n", dma_rx_chan, dma_tx_chan, - dma_eventq); + pdata->dma_event_q); } dspi->get_rx = davinci_spi_rx_buf_u8; diff --git a/drivers/spi/omap2_mcspi.c b/drivers/spi/omap2_mcspi.c index 6a982a25b7c3..3a5ed06d3d2f 100644 --- a/drivers/spi/omap2_mcspi.c +++ b/drivers/spi/omap2_mcspi.c @@ -3,7 +3,7 @@ * * Copyright (C) 2005, 2006 Nokia Corporation * Author: Samuel Ortiz <samuel.ortiz@nokia.com> and - * Juha Yrjl <juha.yrjola@nokia.com> + * Juha Yrjölä <juha.yrjola@nokia.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -33,6 +33,7 @@ #include <linux/clk.h> #include <linux/io.h> #include <linux/slab.h> +#include <linux/pm_runtime.h> #include <linux/spi/spi.h> @@ -46,7 +47,6 @@ #define OMAP2_MCSPI_MAX_CTRL 4 #define OMAP2_MCSPI_REVISION 0x00 -#define OMAP2_MCSPI_SYSCONFIG 0x10 #define OMAP2_MCSPI_SYSSTATUS 0x14 #define OMAP2_MCSPI_IRQSTATUS 0x18 #define OMAP2_MCSPI_IRQENABLE 0x1c @@ -63,13 +63,6 @@ /* per-register bitmasks: */ -#define OMAP2_MCSPI_SYSCONFIG_SMARTIDLE BIT(4) -#define OMAP2_MCSPI_SYSCONFIG_ENAWAKEUP BIT(2) -#define OMAP2_MCSPI_SYSCONFIG_AUTOIDLE BIT(0) -#define OMAP2_MCSPI_SYSCONFIG_SOFTRESET BIT(1) - -#define OMAP2_MCSPI_SYSSTATUS_RESETDONE BIT(0) - #define OMAP2_MCSPI_MODULCTRL_SINGLE BIT(0) #define OMAP2_MCSPI_MODULCTRL_MS BIT(2) #define OMAP2_MCSPI_MODULCTRL_STEST BIT(3) @@ -122,13 +115,12 @@ struct omap2_mcspi { spinlock_t lock; struct list_head msg_queue; struct spi_master *master; - struct clk *ick; - struct clk *fck; /* Virtual base address of the controller */ void __iomem *base; unsigned long phys; /* SPI1 has 4 channels, while SPI2 has 2 */ struct omap2_mcspi_dma *dma_channels; + struct device *dev; }; struct omap2_mcspi_cs { @@ -144,7 +136,6 @@ struct omap2_mcspi_cs { * corresponding registers are modified.
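The omap2_mcspi conversion in these hunks replaces hand-rolled ick/fck clk_enable()/clk_disable() pairs and direct SYSCONFIG idle programming with runtime PM, delegating clock and idle-mode management to the OMAP hwmod core (the probe path also gains a pm_runtime_enable() call, outside this excerpt). The essential pattern, as a sketch:

static int example_io_start(struct device *dev)
{
	/* Powers up the module and its clocks; returns < 0 on failure. */
	return pm_runtime_get_sync(dev);
}

static void example_io_done(struct device *dev)
{
	/* Drops the usage count; the core may idle the module at once. */
	pm_runtime_put_sync(dev);
}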
*/ struct omap2_mcspi_regs { - u32 sysconfig; u32 modulctrl; u32 wakeupenable; struct list_head cs; @@ -268,9 +259,6 @@ static void omap2_mcspi_restore_ctx(struct omap2_mcspi *mcspi) mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_MODULCTRL, omap2_mcspi_ctx[spi_cntrl->bus_num - 1].modulctrl); - mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_SYSCONFIG, - omap2_mcspi_ctx[spi_cntrl->bus_num - 1].sysconfig); - mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_WAKEUPENABLE, omap2_mcspi_ctx[spi_cntrl->bus_num - 1].wakeupenable); @@ -280,20 +268,12 @@ static void omap2_mcspi_restore_ctx(struct omap2_mcspi *mcspi) } static void omap2_mcspi_disable_clocks(struct omap2_mcspi *mcspi) { - clk_disable(mcspi->ick); - clk_disable(mcspi->fck); + pm_runtime_put_sync(mcspi->dev); } static int omap2_mcspi_enable_clocks(struct omap2_mcspi *mcspi) { - if (clk_enable(mcspi->ick)) - return -ENODEV; - if (clk_enable(mcspi->fck)) - return -ENODEV; - - omap2_mcspi_restore_ctx(mcspi); - - return 0; + return pm_runtime_get_sync(mcspi->dev); } static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit) @@ -829,8 +809,9 @@ static int omap2_mcspi_setup(struct spi_device *spi) return ret; } - if (omap2_mcspi_enable_clocks(mcspi)) - return -ENODEV; + ret = omap2_mcspi_enable_clocks(mcspi); + if (ret < 0) + return ret; ret = omap2_mcspi_setup_transfer(spi, NULL); omap2_mcspi_disable_clocks(mcspi); @@ -873,10 +854,11 @@ static void omap2_mcspi_work(struct work_struct *work) struct omap2_mcspi *mcspi; mcspi = container_of(work, struct omap2_mcspi, work); - spin_lock_irq(&mcspi->lock); - if (omap2_mcspi_enable_clocks(mcspi)) - goto out; + if (omap2_mcspi_enable_clocks(mcspi) < 0) + return; + + spin_lock_irq(&mcspi->lock); /* We only enable one channel at a time -- the one whose message is * at the head of the queue -- although this controller would gladly @@ -989,10 +971,9 @@ static void omap2_mcspi_work(struct work_struct *work) spin_lock_irq(&mcspi->lock); } - omap2_mcspi_disable_clocks(mcspi); - -out: spin_unlock_irq(&mcspi->lock); + + omap2_mcspi_disable_clocks(mcspi); } static int omap2_mcspi_transfer(struct spi_device *spi, struct spi_message *m) @@ -1068,25 +1049,15 @@ static int omap2_mcspi_transfer(struct spi_device *spi, struct spi_message *m) return 0; } -static int __init omap2_mcspi_reset(struct omap2_mcspi *mcspi) +static int __init omap2_mcspi_master_setup(struct omap2_mcspi *mcspi) { struct spi_master *master = mcspi->master; u32 tmp; + int ret = 0; - if (omap2_mcspi_enable_clocks(mcspi)) - return -1; - - mcspi_write_reg(master, OMAP2_MCSPI_SYSCONFIG, - OMAP2_MCSPI_SYSCONFIG_SOFTRESET); - do { - tmp = mcspi_read_reg(master, OMAP2_MCSPI_SYSSTATUS); - } while (!(tmp & OMAP2_MCSPI_SYSSTATUS_RESETDONE)); - - tmp = OMAP2_MCSPI_SYSCONFIG_AUTOIDLE | - OMAP2_MCSPI_SYSCONFIG_ENAWAKEUP | - OMAP2_MCSPI_SYSCONFIG_SMARTIDLE; - mcspi_write_reg(master, OMAP2_MCSPI_SYSCONFIG, tmp); - omap2_mcspi_ctx[master->bus_num - 1].sysconfig = tmp; + ret = omap2_mcspi_enable_clocks(mcspi); + if (ret < 0) + return ret; tmp = OMAP2_MCSPI_WAKEUPENABLE_WKEN; mcspi_write_reg(master, OMAP2_MCSPI_WAKEUPENABLE, tmp); @@ -1097,91 +1068,26 @@ static int __init omap2_mcspi_reset(struct omap2_mcspi *mcspi) return 0; } -static u8 __initdata spi1_rxdma_id [] = { - OMAP24XX_DMA_SPI1_RX0, - OMAP24XX_DMA_SPI1_RX1, - OMAP24XX_DMA_SPI1_RX2, - OMAP24XX_DMA_SPI1_RX3, -}; - -static u8 __initdata spi1_txdma_id [] = { - OMAP24XX_DMA_SPI1_TX0, - OMAP24XX_DMA_SPI1_TX1, - OMAP24XX_DMA_SPI1_TX2, - OMAP24XX_DMA_SPI1_TX3, -}; - -static u8 __initdata spi2_rxdma_id[] = { - 
OMAP24XX_DMA_SPI2_RX0, - OMAP24XX_DMA_SPI2_RX1, -}; - -static u8 __initdata spi2_txdma_id[] = { - OMAP24XX_DMA_SPI2_TX0, - OMAP24XX_DMA_SPI2_TX1, -}; - -#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) \ - || defined(CONFIG_ARCH_OMAP4) -static u8 __initdata spi3_rxdma_id[] = { - OMAP24XX_DMA_SPI3_RX0, - OMAP24XX_DMA_SPI3_RX1, -}; +static int omap_mcspi_runtime_resume(struct device *dev) +{ + struct omap2_mcspi *mcspi; + struct spi_master *master; -static u8 __initdata spi3_txdma_id[] = { - OMAP24XX_DMA_SPI3_TX0, - OMAP24XX_DMA_SPI3_TX1, -}; -#endif + master = dev_get_drvdata(dev); + mcspi = spi_master_get_devdata(master); + omap2_mcspi_restore_ctx(mcspi); -#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4) -static u8 __initdata spi4_rxdma_id[] = { - OMAP34XX_DMA_SPI4_RX0, -}; + return 0; +} -static u8 __initdata spi4_txdma_id[] = { - OMAP34XX_DMA_SPI4_TX0, -}; -#endif static int __init omap2_mcspi_probe(struct platform_device *pdev) { struct spi_master *master; + struct omap2_mcspi_platform_config *pdata = pdev->dev.platform_data; struct omap2_mcspi *mcspi; struct resource *r; int status = 0, i; - const u8 *rxdma_id, *txdma_id; - unsigned num_chipselect; - - switch (pdev->id) { - case 1: - rxdma_id = spi1_rxdma_id; - txdma_id = spi1_txdma_id; - num_chipselect = 4; - break; - case 2: - rxdma_id = spi2_rxdma_id; - txdma_id = spi2_txdma_id; - num_chipselect = 2; - break; -#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) \ - || defined(CONFIG_ARCH_OMAP4) - case 3: - rxdma_id = spi3_rxdma_id; - txdma_id = spi3_txdma_id; - num_chipselect = 2; - break; -#endif -#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4) - case 4: - rxdma_id = spi4_rxdma_id; - txdma_id = spi4_txdma_id; - num_chipselect = 1; - break; -#endif - default: - return -EINVAL; - } master = spi_alloc_master(&pdev->dev, sizeof *mcspi); if (master == NULL) { @@ -1198,7 +1104,7 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev) master->setup = omap2_mcspi_setup; master->transfer = omap2_mcspi_transfer; master->cleanup = omap2_mcspi_cleanup; - master->num_chipselect = num_chipselect; + master->num_chipselect = pdata->num_cs; dev_set_drvdata(&pdev->dev, master); @@ -1216,49 +1122,62 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev) goto err1; } + r->start += pdata->regs_offset; + r->end += pdata->regs_offset; mcspi->phys = r->start; mcspi->base = ioremap(r->start, r->end - r->start + 1); if (!mcspi->base) { dev_dbg(&pdev->dev, "can't ioremap MCSPI\n"); status = -ENOMEM; - goto err1aa; + goto err2; } + mcspi->dev = &pdev->dev; INIT_WORK(&mcspi->work, omap2_mcspi_work); spin_lock_init(&mcspi->lock); INIT_LIST_HEAD(&mcspi->msg_queue); INIT_LIST_HEAD(&omap2_mcspi_ctx[master->bus_num - 1].cs); - mcspi->ick = clk_get(&pdev->dev, "ick"); - if (IS_ERR(mcspi->ick)) { - dev_dbg(&pdev->dev, "can't get mcspi_ick\n"); - status = PTR_ERR(mcspi->ick); - goto err1a; - } - mcspi->fck = clk_get(&pdev->dev, "fck"); - if (IS_ERR(mcspi->fck)) { - dev_dbg(&pdev->dev, "can't get mcspi_fck\n"); - status = PTR_ERR(mcspi->fck); - goto err2; - } - mcspi->dma_channels = kcalloc(master->num_chipselect, sizeof(struct omap2_mcspi_dma), GFP_KERNEL); if (mcspi->dma_channels == NULL) - goto err3; + goto err2; + + for (i = 0; i < master->num_chipselect; i++) { + char dma_ch_name[14]; + struct resource *dma_res; + + sprintf(dma_ch_name, "rx%d", i); + dma_res = platform_get_resource_byname(pdev, IORESOURCE_DMA, + dma_ch_name); + if (!dma_res) { + dev_dbg(&pdev->dev, "cannot get DMA RX 
channel\n"); + status = -ENODEV; + break; + } - for (i = 0; i < num_chipselect; i++) { mcspi->dma_channels[i].dma_rx_channel = -1; - mcspi->dma_channels[i].dma_rx_sync_dev = rxdma_id[i]; + mcspi->dma_channels[i].dma_rx_sync_dev = dma_res->start; + sprintf(dma_ch_name, "tx%d", i); + dma_res = platform_get_resource_byname(pdev, IORESOURCE_DMA, + dma_ch_name); + if (!dma_res) { + dev_dbg(&pdev->dev, "cannot get DMA TX channel\n"); + status = -ENODEV; + break; + } + mcspi->dma_channels[i].dma_tx_channel = -1; - mcspi->dma_channels[i].dma_tx_sync_dev = txdma_id[i]; + mcspi->dma_channels[i].dma_tx_sync_dev = dma_res->start; } - if (omap2_mcspi_reset(mcspi) < 0) - goto err4; + pm_runtime_enable(&pdev->dev); + + if (status || omap2_mcspi_master_setup(mcspi) < 0) + goto err3; status = spi_register_master(master); if (status < 0) @@ -1267,17 +1186,13 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev) return status; err4: - kfree(mcspi->dma_channels); + spi_master_put(master); err3: - clk_put(mcspi->fck); + kfree(mcspi->dma_channels); err2: - clk_put(mcspi->ick); -err1a: - iounmap(mcspi->base); -err1aa: release_mem_region(r->start, (r->end - r->start) + 1); + iounmap(mcspi->base); err1: - spi_master_put(master); return status; } @@ -1293,9 +1208,7 @@ static int __exit omap2_mcspi_remove(struct platform_device *pdev) mcspi = spi_master_get_devdata(master); dma_channels = mcspi->dma_channels; - clk_put(mcspi->fck); - clk_put(mcspi->ick); - + omap2_mcspi_disable_clocks(mcspi); r = platform_get_resource(pdev, IORESOURCE_MEM, 0); release_mem_region(r->start, (r->end - r->start) + 1); @@ -1346,6 +1259,7 @@ static int omap2_mcspi_resume(struct device *dev) static const struct dev_pm_ops omap2_mcspi_pm_ops = { .resume = omap2_mcspi_resume, + .runtime_resume = omap_mcspi_runtime_resume, }; static struct platform_driver omap2_mcspi_driver = { diff --git a/drivers/spi/ti-ssp-spi.c b/drivers/spi/ti-ssp-spi.c new file mode 100644 index 000000000000..ee22795c7973 --- /dev/null +++ b/drivers/spi/ti-ssp-spi.c @@ -0,0 +1,402 @@ +/* + * Sequencer Serial Port (SSP) based SPI master driver + * + * Copyright (C) 2010 Texas Instruments Inc + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <linux/kernel.h> +#include <linux/err.h> +#include <linux/completion.h> +#include <linux/delay.h> +#include <linux/platform_device.h> +#include <linux/spi/spi.h> +#include <linux/mfd/ti_ssp.h> + +#define MODE_BITS (SPI_CPHA | SPI_CPOL | SPI_CS_HIGH) + +struct ti_ssp_spi { + struct spi_master *master; + struct device *dev; + spinlock_t lock; + struct list_head msg_queue; + struct completion complete; + bool shutdown; + struct workqueue_struct *workqueue; + struct work_struct work; + u8 mode, bpw; + int cs_active; + u32 pc_en, pc_dis, pc_wr, pc_rd; + void (*select)(int cs); +}; + +static u32 ti_ssp_spi_rx(struct ti_ssp_spi *hw) +{ + u32 ret; + + ti_ssp_run(hw->dev, hw->pc_rd, 0, &ret); + return ret; +} + +static void ti_ssp_spi_tx(struct ti_ssp_spi *hw, u32 data) +{ + ti_ssp_run(hw->dev, hw->pc_wr, data << (32 - hw->bpw), NULL); +} + +static int ti_ssp_spi_txrx(struct ti_ssp_spi *hw, struct spi_message *msg, + struct spi_transfer *t) +{ + int count; + + if (hw->bpw <= 8) { + u8 *rx = t->rx_buf; + const u8 *tx = t->tx_buf; + + for (count = 0; count < t->len; count += 1) { + if (t->tx_buf) + ti_ssp_spi_tx(hw, *tx++); + if (t->rx_buf) + *rx++ = ti_ssp_spi_rx(hw); + } + } else if (hw->bpw <= 16) { + u16 *rx = t->rx_buf; + const u16 *tx = t->tx_buf; + + for (count = 0; count < t->len; count += 2) { + if (t->tx_buf) + ti_ssp_spi_tx(hw, *tx++); + if (t->rx_buf) + *rx++ = ti_ssp_spi_rx(hw); + } + } else { + u32 *rx = t->rx_buf; + const u32 *tx = t->tx_buf; + + for (count = 0; count < t->len; count += 4) { + if (t->tx_buf) + ti_ssp_spi_tx(hw, *tx++); + if (t->rx_buf) + *rx++ = ti_ssp_spi_rx(hw); + } + } + + msg->actual_length += count; /* bytes transferred */ + + dev_dbg(&msg->spi->dev, "xfer %s%s, %d bytes, %d bpw, count %d%s\n", + t->tx_buf ? "tx" : "", t->rx_buf ? "rx" : "", t->len, + hw->bpw, count, (count < t->len) ? " (under)" : ""); + + return (count < t->len) ? -EIO : 0; /* left over data */ +} + +static void ti_ssp_spi_chip_select(struct ti_ssp_spi *hw, int cs_active) +{ + cs_active = !!cs_active; + if (cs_active == hw->cs_active) + return; + ti_ssp_run(hw->dev, cs_active ? hw->pc_en : hw->pc_dis, 0, NULL); + hw->cs_active = cs_active; +} + +#define __SHIFT_OUT(bits) (SSP_OPCODE_SHIFT | SSP_OUT_MODE | \ + cs_en | clk | SSP_COUNT((bits) * 2 - 1)) +#define __SHIFT_IN(bits) (SSP_OPCODE_SHIFT | SSP_IN_MODE | \ + cs_en | clk | SSP_COUNT((bits) * 2 - 1)) + +static int ti_ssp_spi_setup_transfer(struct ti_ssp_spi *hw, u8 bpw, u8 mode) +{ + int error, idx = 0; + u32 seqram[16]; + u32 cs_en, cs_dis, clk; + u32 topbits, botbits; + + mode &= MODE_BITS; + if (mode == hw->mode && bpw == hw->bpw) + return 0; + + cs_en = (mode & SPI_CS_HIGH) ? SSP_CS_HIGH : SSP_CS_LOW; + cs_dis = (mode & SPI_CS_HIGH) ? SSP_CS_LOW : SSP_CS_HIGH; + clk = (mode & SPI_CPOL) ? SSP_CLK_HIGH : SSP_CLK_LOW; + + /* Construct instructions */ + + /* Disable Chip Select */ + hw->pc_dis = idx; + seqram[idx++] = SSP_OPCODE_DIRECT | SSP_OUT_MODE | cs_dis | clk; + seqram[idx++] = SSP_OPCODE_STOP | SSP_OUT_MODE | cs_dis | clk; + + /* Enable Chip Select */ + hw->pc_en = idx; + seqram[idx++] = SSP_OPCODE_DIRECT | SSP_OUT_MODE | cs_en | clk; + seqram[idx++] = SSP_OPCODE_STOP | SSP_OUT_MODE | cs_en | clk; + + /* Reads and writes need to be split for bpw > 16 */ + topbits = (bpw > 16) ? 
16 : bpw; + botbits = bpw - topbits; + + /* Write */ + hw->pc_wr = idx; + seqram[idx++] = __SHIFT_OUT(topbits) | SSP_ADDR_REG; + if (botbits) + seqram[idx++] = __SHIFT_OUT(botbits) | SSP_DATA_REG; + seqram[idx++] = SSP_OPCODE_STOP | SSP_OUT_MODE | cs_en | clk; + + /* Read */ + hw->pc_rd = idx; + if (botbits) + seqram[idx++] = __SHIFT_IN(botbits) | SSP_ADDR_REG; + seqram[idx++] = __SHIFT_IN(topbits) | SSP_DATA_REG; + seqram[idx++] = SSP_OPCODE_STOP | SSP_OUT_MODE | cs_en | clk; + + error = ti_ssp_load(hw->dev, 0, seqram, idx); + if (error < 0) + return error; + + error = ti_ssp_set_mode(hw->dev, ((mode & SPI_CPHA) ? + 0 : SSP_EARLY_DIN)); + if (error < 0) + return error; + + hw->bpw = bpw; + hw->mode = mode; + + return error; +} + +static void ti_ssp_spi_work(struct work_struct *work) +{ + struct ti_ssp_spi *hw = container_of(work, struct ti_ssp_spi, work); + + spin_lock(&hw->lock); + + while (!list_empty(&hw->msg_queue)) { + struct spi_message *m; + struct spi_device *spi; + struct spi_transfer *t = NULL; + int status = 0; + + m = container_of(hw->msg_queue.next, struct spi_message, + queue); + + list_del_init(&m->queue); + + spin_unlock(&hw->lock); + + spi = m->spi; + + if (hw->select) + hw->select(spi->chip_select); + + list_for_each_entry(t, &m->transfers, transfer_list) { + int bpw = spi->bits_per_word; + int xfer_status; + + if (t->bits_per_word) + bpw = t->bits_per_word; + + if (ti_ssp_spi_setup_transfer(hw, bpw, spi->mode) < 0) + break; + + ti_ssp_spi_chip_select(hw, 1); + + xfer_status = ti_ssp_spi_txrx(hw, m, t); + if (xfer_status < 0) + status = xfer_status; + + if (t->delay_usecs) + udelay(t->delay_usecs); + + if (t->cs_change) + ti_ssp_spi_chip_select(hw, 0); + } + + ti_ssp_spi_chip_select(hw, 0); + m->status = status; + m->complete(m->context); + + spin_lock(&hw->lock); + } + + if (hw->shutdown) + complete(&hw->complete); + + spin_unlock(&hw->lock); +} + +static int ti_ssp_spi_setup(struct spi_device *spi) +{ + if (spi->bits_per_word > 32) + return -EINVAL; + + return 0; +} + +static int ti_ssp_spi_transfer(struct spi_device *spi, struct spi_message *m) +{ + struct ti_ssp_spi *hw; + struct spi_transfer *t; + int error = 0; + + m->actual_length = 0; + m->status = -EINPROGRESS; + + hw = spi_master_get_devdata(spi->master); + + if (list_empty(&m->transfers) || !m->complete) + return -EINVAL; + + list_for_each_entry(t, &m->transfers, transfer_list) { + if (t->len && !(t->rx_buf || t->tx_buf)) { + dev_err(&spi->dev, "invalid xfer, no buffer\n"); + return -EINVAL; + } + + if (t->len && t->rx_buf && t->tx_buf) { + dev_err(&spi->dev, "invalid xfer, full duplex\n"); + return -EINVAL; + } + + if (t->bits_per_word > 32) { + dev_err(&spi->dev, "invalid xfer width %d\n", + t->bits_per_word); + return -EINVAL; + } + } + + spin_lock(&hw->lock); + if (hw->shutdown) { + error = -ESHUTDOWN; + goto error_unlock; + } + list_add_tail(&m->queue, &hw->msg_queue); + queue_work(hw->workqueue, &hw->work); +error_unlock: + spin_unlock(&hw->lock); + return error; +} + +static int __devinit ti_ssp_spi_probe(struct platform_device *pdev) +{ + const struct ti_ssp_spi_data *pdata; + struct ti_ssp_spi *hw; + struct spi_master *master; + struct device *dev = &pdev->dev; + int error = 0; + + pdata = dev->platform_data; + if (!pdata) { + dev_err(dev, "platform data not found\n"); + return -EINVAL; + } + + master = spi_alloc_master(dev, sizeof(struct ti_ssp_spi)); + if (!master) { + dev_err(dev, "cannot allocate SPI master\n"); + return -ENOMEM; + } + + hw = spi_master_get_devdata(master); + 
platform_set_drvdata(pdev, hw); + + hw->master = master; + hw->dev = dev; + hw->select = pdata->select; + + spin_lock_init(&hw->lock); + init_completion(&hw->complete); + INIT_LIST_HEAD(&hw->msg_queue); + INIT_WORK(&hw->work, ti_ssp_spi_work); + + hw->workqueue = create_singlethread_workqueue(dev_name(dev)); + if (!hw->workqueue) { + error = -ENOMEM; + dev_err(dev, "work queue creation failed\n"); + goto error_wq; + } + + error = ti_ssp_set_iosel(hw->dev, pdata->iosel); + if (error < 0) { + dev_err(dev, "io setup failed\n"); + goto error_iosel; + } + + master->bus_num = pdev->id; + master->num_chipselect = pdata->num_cs; + master->mode_bits = MODE_BITS; + master->flags = SPI_MASTER_HALF_DUPLEX; + master->setup = ti_ssp_spi_setup; + master->transfer = ti_ssp_spi_transfer; + + error = spi_register_master(master); + if (error) { + dev_err(dev, "master registration failed\n"); + goto error_reg; + } + + return 0; + +error_reg: +error_iosel: + destroy_workqueue(hw->workqueue); +error_wq: + spi_master_put(master); + return error; +} + +static int __devexit ti_ssp_spi_remove(struct platform_device *pdev) +{ + struct ti_ssp_spi *hw = platform_get_drvdata(pdev); + int error; + + hw->shutdown = 1; + while (!list_empty(&hw->msg_queue)) { + error = wait_for_completion_interruptible(&hw->complete); + if (error < 0) { + hw->shutdown = 0; + return error; + } + } + destroy_workqueue(hw->workqueue); + spi_unregister_master(hw->master); + + return 0; +} + +static struct platform_driver ti_ssp_spi_driver = { + .probe = ti_ssp_spi_probe, + .remove = __devexit_p(ti_ssp_spi_remove), + .driver = { + .name = "ti-ssp-spi", + .owner = THIS_MODULE, + }, +}; + +static int __init ti_ssp_spi_init(void) +{ + return platform_driver_register(&ti_ssp_spi_driver); +} +module_init(ti_ssp_spi_init); + +static void __exit ti_ssp_spi_exit(void) +{ + platform_driver_unregister(&ti_ssp_spi_driver); +} +module_exit(ti_ssp_spi_exit); + +MODULE_DESCRIPTION("SSP SPI Master"); +MODULE_AUTHOR("Cyril Chemparathy"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:ti-ssp-spi"); diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c index 366080baf474..7f19c8b7b84c 100644 --- a/drivers/target/target_core_cdb.c +++ b/drivers/target/target_core_cdb.c @@ -667,7 +667,13 @@ target_emulate_readcapacity(struct se_cmd *cmd) { struct se_device *dev = SE_DEV(cmd); unsigned char *buf = cmd->t_task->t_task_buf; - u32 blocks = dev->transport->get_blocks(dev); + unsigned long long blocks_long = dev->transport->get_blocks(dev); + u32 blocks; + + if (blocks_long >= 0x00000000ffffffff) + blocks = 0xffffffff; + else + blocks = (u32)blocks_long; buf[0] = (blocks >> 24) & 0xff; buf[1] = (blocks >> 16) & 0xff; diff --git a/drivers/target/target_core_hba.c b/drivers/target/target_core_hba.c index 73f7d6d81b4c..6ec51cbc018e 100644 --- a/drivers/target/target_core_hba.c +++ b/drivers/target/target_core_hba.c @@ -37,7 +37,6 @@ #include <target/target_core_base.h> #include <target/target_core_device.h> -#include <target/target_core_device.h> #include <target/target_core_tpg.h> #include <target/target_core_transport.h> diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c index 3740e327f180..c35f1a73bc8b 100644 --- a/drivers/tty/hvc/hvc_xen.c +++ b/drivers/tty/hvc/hvc_xen.c @@ -177,6 +177,8 @@ static int __init xen_hvc_init(void) } if (xencons_irq < 0) xencons_irq = 0; /* NO_IRQ */ + else + set_irq_noprobe(xencons_irq); hp = hvc_alloc(HVC_COOKIE, xencons_irq, ops, 256); if (IS_ERR(hp)) diff --git a/drivers/tty/hvc/hvcs.c 
b/drivers/tty/hvc/hvcs.c index bedc6c1b6fa5..bef238f9ffd7 100644 --- a/drivers/tty/hvc/hvcs.c +++ b/drivers/tty/hvc/hvcs.c @@ -292,7 +292,7 @@ struct hvcs_struct { /* * Any variable below the kref is valid before a tty is connected and * stays valid after the tty is disconnected. These shouldn't be - * whacked until the koject refcount reaches zero though some entries + * whacked until the kobject refcount reaches zero though some entries * may be changed via sysfs initiatives. */ struct kref kref; /* ref count & hvcs_struct lifetime */ @@ -309,6 +309,7 @@ struct hvcs_struct { static LIST_HEAD(hvcs_structs); static DEFINE_SPINLOCK(hvcs_structs_lock); +static DEFINE_MUTEX(hvcs_init_mutex); static void hvcs_unthrottle(struct tty_struct *tty); static void hvcs_throttle(struct tty_struct *tty); @@ -340,6 +341,7 @@ static int __devinit hvcs_probe(struct vio_dev *dev, static int __devexit hvcs_remove(struct vio_dev *dev); static int __init hvcs_module_init(void); static void __exit hvcs_module_exit(void); +static int __devinit hvcs_initialize(void); #define HVCS_SCHED_READ 0x00000001 #define HVCS_QUICK_READ 0x00000002 @@ -762,7 +764,7 @@ static int __devinit hvcs_probe( const struct vio_device_id *id) { struct hvcs_struct *hvcsd; - int index; + int index, rc; int retval; if (!dev || !id) { @@ -770,6 +772,13 @@ static int __devinit hvcs_probe( return -EPERM; } + /* Make sure we are properly initialized */ + rc = hvcs_initialize(); + if (rc) { + pr_err("HVCS: Failed to initialize core driver.\n"); + return rc; + } + /* early to avoid cleanup on failure */ index = hvcs_get_index(); if (index < 0) { @@ -1464,12 +1473,15 @@ static void hvcs_free_index_list(void) hvcs_index_count = 0; } -static int __init hvcs_module_init(void) +static int __devinit hvcs_initialize(void) { - int rc; - int num_ttys_to_alloc; + int rc, num_ttys_to_alloc; - printk(KERN_INFO "Initializing %s\n", hvcs_driver_string); + mutex_lock(&hvcs_init_mutex); + if (hvcs_task) { + mutex_unlock(&hvcs_init_mutex); + return 0; + } /* Has the user specified an overload with an insmod param? */ if (hvcs_parm_num_devs <= 0 || @@ -1528,35 +1540,13 @@ static int __init hvcs_module_init(void) hvcs_task = kthread_run(khvcsd, NULL, "khvcsd"); if (IS_ERR(hvcs_task)) { - printk(KERN_ERR "HVCS: khvcsd creation failed. Driver not loaded.\n"); + printk(KERN_ERR "HVCS: khvcsd creation failed.\n"); rc = -EIO; goto kthread_fail; } - - rc = vio_register_driver(&hvcs_vio_driver); - if (rc) { - printk(KERN_ERR "HVCS: can't register vio driver\n"); - goto vio_fail; - } - - /* - * This needs to be done AFTER the vio_register_driver() call or else - * the kobjects won't be initialized properly. 
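Review note on the hvcs rework above: core setup (tty driver, index list, the khvcsd kthread) moves out of module_init into hvcs_initialize(), called from every probe and serialized by hvcs_init_mutex, so vio_register_driver() can happen first. A minimal sketch of that mutex-guarded, call-once-from-probe pattern, using illustrative names (ensure_core_init, core_setup) rather than the driver's own symbols:

#include <linux/mutex.h>
#include <linux/types.h>

static DEFINE_MUTEX(init_mutex);
static bool core_ready;

static int core_setup(void)
{
	return 0;	/* allocate buffers, start the kthread, ... */
}

/* Safe to call from every probe; only the first caller does the work. */
static int ensure_core_init(void)
{
	int rc = 0;

	mutex_lock(&init_mutex);
	if (!core_ready) {
		rc = core_setup();
		if (!rc)
			core_ready = true;
	}
	mutex_unlock(&init_mutex);
	return rc;
}

The real driver keys off hvcs_task instead of a boolean, but the locking shape is the same.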
- */ - rc = driver_create_file(&(hvcs_vio_driver.driver), &driver_attr_rescan); - if (rc) { - printk(KERN_ERR "HVCS: sysfs attr create failed\n"); - goto attr_fail; - } - - printk(KERN_INFO "HVCS: driver module inserted.\n"); - + mutex_unlock(&hvcs_init_mutex); return 0; -attr_fail: - vio_unregister_driver(&hvcs_vio_driver); -vio_fail: - kthread_stop(hvcs_task); kthread_fail: kfree(hvcs_pi_buff); buff_alloc_fail: @@ -1566,15 +1556,39 @@ register_fail: index_fail: put_tty_driver(hvcs_tty_driver); hvcs_tty_driver = NULL; + mutex_unlock(&hvcs_init_mutex); return rc; } +static int __init hvcs_module_init(void) +{ + int rc = vio_register_driver(&hvcs_vio_driver); + if (rc) { + printk(KERN_ERR "HVCS: can't register vio driver\n"); + return rc; + } + + pr_info("HVCS: Driver registered.\n"); + + /* This needs to be done AFTER the vio_register_driver() call or else + * the kobjects won't be initialized properly. + */ + rc = driver_create_file(&(hvcs_vio_driver.driver), &driver_attr_rescan); + if (rc) + pr_warning(KERN_ERR "HVCS: Failed to create rescan file (err %d)\n", rc); + + return 0; +} + static void __exit hvcs_module_exit(void) { /* * This driver receives hvcs_remove callbacks for each device upon * module removal. */ + vio_unregister_driver(&hvcs_vio_driver); + if (!hvcs_task) + return; /* * This synchronous operation will wake the khvcsd kthread if it is @@ -1589,8 +1603,6 @@ static void __exit hvcs_module_exit(void) driver_remove_file(&hvcs_vio_driver.driver, &driver_attr_rescan); - vio_unregister_driver(&hvcs_vio_driver); - tty_unregister_driver(hvcs_tty_driver); hvcs_free_index_list(); diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig index e461be164f67..e1aee37270f5 100644 --- a/drivers/tty/serial/Kconfig +++ b/drivers/tty/serial/Kconfig @@ -537,8 +537,8 @@ config SERIAL_S3C6400 config SERIAL_S5PV210 tristate "Samsung S5PV210 Serial port support" - depends on SERIAL_SAMSUNG && (CPU_S5PV210 || CPU_S5P6442 || CPU_S5PV310) - select SERIAL_SAMSUNG_UARTS_4 if (CPU_S5PV210 || CPU_S5PV310) + depends on SERIAL_SAMSUNG && (CPU_S5PV210 || CPU_S5P6442 || CPU_EXYNOS4210) + select SERIAL_SAMSUNG_UARTS_4 if (CPU_S5PV210 || CPU_EXYNOS4210) default y help Serial port support for Samsung's S5P Family of SoC's @@ -1598,4 +1598,18 @@ config SERIAL_MSM_SMD Enables userspace clients to read and write to some streaming SMD ports via tty device interface for MSM chipset. +config SERIAL_MXS_AUART + depends on ARCH_MXS + tristate "MXS AUART support" + select SERIAL_CORE + help + This driver supports the MXS Application UART (AUART) port. + +config SERIAL_MXS_AUART_CONSOLE + bool "MXS AUART console support" + depends on SERIAL_MXS_AUART=y + select SERIAL_CORE_CONSOLE + help + Enable a MXS AUART port to be the system console. 
+ endmenu diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile index 31e868cb49b2..fee0690ef8e3 100644 --- a/drivers/tty/serial/Makefile +++ b/drivers/tty/serial/Makefile @@ -93,3 +93,4 @@ obj-$(CONFIG_SERIAL_MFD_HSU) += mfd.o obj-$(CONFIG_SERIAL_IFX6X60) += ifx6x60.o obj-$(CONFIG_SERIAL_PCH_UART) += pch_uart.o obj-$(CONFIG_SERIAL_MSM_SMD) += msm_smd_tty.o +obj-$(CONFIG_SERIAL_MXS_AUART) += mxs-auart.o diff --git a/drivers/tty/serial/amba-pl010.c b/drivers/tty/serial/amba-pl010.c index 2904aa044126..d742dd2c525c 100644 --- a/drivers/tty/serial/amba-pl010.c +++ b/drivers/tty/serial/amba-pl010.c @@ -676,7 +676,7 @@ static struct uart_driver amba_reg = { .cons = AMBA_CONSOLE, }; -static int pl010_probe(struct amba_device *dev, struct amba_id *id) +static int pl010_probe(struct amba_device *dev, const struct amba_id *id) { struct uart_amba_port *uap; void __iomem *base; diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c index e76d7d000128..57731e870085 100644 --- a/drivers/tty/serial/amba-pl011.c +++ b/drivers/tty/serial/amba-pl011.c @@ -96,6 +96,22 @@ static struct vendor_data vendor_st = { }; /* Deals with DMA transactions */ + +struct pl011_sgbuf { + struct scatterlist sg; + char *buf; +}; + +struct pl011_dmarx_data { + struct dma_chan *chan; + struct completion complete; + bool use_buf_b; + struct pl011_sgbuf sgbuf_a; + struct pl011_sgbuf sgbuf_b; + dma_cookie_t cookie; + bool running; +}; + struct pl011_dmatx_data { struct dma_chan *chan; struct scatterlist sg; @@ -120,12 +136,70 @@ struct uart_amba_port { char type[12]; #ifdef CONFIG_DMA_ENGINE /* DMA stuff */ - bool using_dma; + bool using_tx_dma; + bool using_rx_dma; + struct pl011_dmarx_data dmarx; struct pl011_dmatx_data dmatx; #endif }; /* + * Reads up to 256 characters from the FIFO or until it's empty and + * inserts them into the TTY layer. Returns the number of characters + * read from the FIFO. + */ +static int pl011_fifo_to_tty(struct uart_amba_port *uap) +{ + u16 status, ch; + unsigned int flag, max_count = 256; + int fifotaken = 0; + + while (max_count--) { + status = readw(uap->port.membase + UART01x_FR); + if (status & UART01x_FR_RXFE) + break; + + /* Take chars from the FIFO and update status */ + ch = readw(uap->port.membase + UART01x_DR) | + UART_DUMMY_DR_RX; + flag = TTY_NORMAL; + uap->port.icount.rx++; + fifotaken++; + + if (unlikely(ch & UART_DR_ERROR)) { + if (ch & UART011_DR_BE) { + ch &= ~(UART011_DR_FE | UART011_DR_PE); + uap->port.icount.brk++; + if (uart_handle_break(&uap->port)) + continue; + } else if (ch & UART011_DR_PE) + uap->port.icount.parity++; + else if (ch & UART011_DR_FE) + uap->port.icount.frame++; + if (ch & UART011_DR_OE) + uap->port.icount.overrun++; + + ch &= uap->port.read_status_mask; + + if (ch & UART011_DR_BE) + flag = TTY_BREAK; + else if (ch & UART011_DR_PE) + flag = TTY_PARITY; + else if (ch & UART011_DR_FE) + flag = TTY_FRAME; + } + + if (uart_handle_sysrq_char(&uap->port, ch & 255)) + continue; + + uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag); + } + + return fifotaken; +} + + +/* * All the DMA operation mode stuff goes inside this ifdef. * This assumes that you have a generic DMA device interface, * no custom DMA interfaces are supported. 
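Review note on pl011_fifo_to_tty() above: it hoists the FIFO drain loop out of pl011_rx_chars() so the interrupt path and the new RX DMA path (after a receive timeout) share one implementation. Its per-character error handling reduces to the decision table below; a condensed, illustrative restatement using the UART011_DR_* bits from the patch context, not a drop-in replacement:

#include <linux/types.h>
#include <linux/tty.h>
#include <linux/amba/serial.h>

static unsigned int pl011_rx_flag(u16 ch)
{
	if (ch & UART011_DR_BE)
		return TTY_BREAK;	/* break wins over parity/framing */
	if (ch & UART011_DR_PE)
		return TTY_PARITY;
	if (ch & UART011_DR_FE)
		return TTY_FRAME;
	return TTY_NORMAL;		/* overrun is reported separately */
}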
@@ -134,6 +208,31 @@ struct uart_amba_port { #define PL011_DMA_BUFFER_SIZE PAGE_SIZE +static int pl011_sgbuf_init(struct dma_chan *chan, struct pl011_sgbuf *sg, + enum dma_data_direction dir) +{ + sg->buf = kmalloc(PL011_DMA_BUFFER_SIZE, GFP_KERNEL); + if (!sg->buf) + return -ENOMEM; + + sg_init_one(&sg->sg, sg->buf, PL011_DMA_BUFFER_SIZE); + + if (dma_map_sg(chan->device->dev, &sg->sg, 1, dir) != 1) { + kfree(sg->buf); + return -EINVAL; + } + return 0; +} + +static void pl011_sgbuf_free(struct dma_chan *chan, struct pl011_sgbuf *sg, + enum dma_data_direction dir) +{ + if (sg->buf) { + dma_unmap_sg(chan->device->dev, &sg->sg, 1, dir); + kfree(sg->buf); + } +} + static void pl011_dma_probe_initcall(struct uart_amba_port *uap) { /* DMA is the sole user of the platform data right now */ @@ -153,7 +252,7 @@ static void pl011_dma_probe_initcall(struct uart_amba_port *uap) return; } - /* Try to acquire a generic DMA engine slave channel */ + /* Try to acquire a generic DMA engine slave TX channel */ dma_cap_zero(mask); dma_cap_set(DMA_SLAVE, mask); @@ -168,6 +267,28 @@ static void pl011_dma_probe_initcall(struct uart_amba_port *uap) dev_info(uap->port.dev, "DMA channel TX %s\n", dma_chan_name(uap->dmatx.chan)); + + /* Optionally make use of an RX channel as well */ + if (plat->dma_rx_param) { + struct dma_slave_config rx_conf = { + .src_addr = uap->port.mapbase + UART01x_DR, + .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE, + .direction = DMA_FROM_DEVICE, + .src_maxburst = uap->fifosize >> 1, + }; + + chan = dma_request_channel(mask, plat->dma_filter, plat->dma_rx_param); + if (!chan) { + dev_err(uap->port.dev, "no RX DMA channel!\n"); + return; + } + + dmaengine_slave_config(chan, &rx_conf); + uap->dmarx.chan = chan; + + dev_info(uap->port.dev, "DMA channel RX %s\n", + dma_chan_name(uap->dmarx.chan)); + } } #ifndef MODULE @@ -219,9 +340,10 @@ static void pl011_dma_remove(struct uart_amba_port *uap) /* TODO: remove the initcall if it has not yet executed */ if (uap->dmatx.chan) dma_release_channel(uap->dmatx.chan); + if (uap->dmarx.chan) + dma_release_channel(uap->dmarx.chan); } - /* Forward declare this for the refill routine */ static int pl011_dma_tx_refill(struct uart_amba_port *uap); @@ -380,7 +502,7 @@ static int pl011_dma_tx_refill(struct uart_amba_port *uap) */ static bool pl011_dma_tx_irq(struct uart_amba_port *uap) { - if (!uap->using_dma) + if (!uap->using_tx_dma) return false; /* @@ -432,7 +554,7 @@ static inline bool pl011_dma_tx_start(struct uart_amba_port *uap) { u16 dmacr; - if (!uap->using_dma) + if (!uap->using_tx_dma) return false; if (!uap->port.x_char) { @@ -492,7 +614,7 @@ static void pl011_dma_flush_buffer(struct uart_port *port) { struct uart_amba_port *uap = (struct uart_amba_port *)port; - if (!uap->using_dma) + if (!uap->using_tx_dma) return; /* Avoid deadlock with the DMA engine callback */ @@ -508,9 +630,219 @@ static void pl011_dma_flush_buffer(struct uart_port *port) } } +static void pl011_dma_rx_callback(void *data); + +static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap) +{ + struct dma_chan *rxchan = uap->dmarx.chan; + struct dma_device *dma_dev; + struct pl011_dmarx_data *dmarx = &uap->dmarx; + struct dma_async_tx_descriptor *desc; + struct pl011_sgbuf *sgbuf; + + if (!rxchan) + return -EIO; + + /* Start the RX DMA job */ + sgbuf = uap->dmarx.use_buf_b ? 
+ &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a; + dma_dev = rxchan->device; + desc = rxchan->device->device_prep_slave_sg(rxchan, &sgbuf->sg, 1, + DMA_FROM_DEVICE, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + /* + * If the DMA engine is busy and cannot prepare a + * channel, no big deal, the driver will fall back + * to interrupt mode as a result of this error code. + */ + if (!desc) { + uap->dmarx.running = false; + dmaengine_terminate_all(rxchan); + return -EBUSY; + } + + /* Some data to go along to the callback */ + desc->callback = pl011_dma_rx_callback; + desc->callback_param = uap; + dmarx->cookie = dmaengine_submit(desc); + dma_async_issue_pending(rxchan); + + uap->dmacr |= UART011_RXDMAE; + writew(uap->dmacr, uap->port.membase + UART011_DMACR); + uap->dmarx.running = true; + + uap->im &= ~UART011_RXIM; + writew(uap->im, uap->port.membase + UART011_IMSC); + + return 0; +} + +/* + * This is called when either the DMA job is complete, or + * the FIFO timeout interrupt occurred. This must be called + * with the port spinlock uap->port.lock held. + */ +static void pl011_dma_rx_chars(struct uart_amba_port *uap, + u32 pending, bool use_buf_b, + bool readfifo) +{ + struct tty_struct *tty = uap->port.state->port.tty; + struct pl011_sgbuf *sgbuf = use_buf_b ? + &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a; + struct device *dev = uap->dmarx.chan->device->dev; + int dma_count = 0; + u32 fifotaken = 0; /* only used for vdbg() */ + + /* Pick everything from the DMA first */ + if (pending) { + /* Sync in buffer */ + dma_sync_sg_for_cpu(dev, &sgbuf->sg, 1, DMA_FROM_DEVICE); + + /* + * First take all chars in the DMA pipe, then look in the FIFO. + * Note that tty_insert_flip_buf() tries to take as many chars + * as it can. + */ + dma_count = tty_insert_flip_string(uap->port.state->port.tty, + sgbuf->buf, pending); + + /* Return buffer to device */ + dma_sync_sg_for_device(dev, &sgbuf->sg, 1, DMA_FROM_DEVICE); + + uap->port.icount.rx += dma_count; + if (dma_count < pending) + dev_warn(uap->port.dev, + "couldn't insert all characters (TTY is full?)\n"); + } + + /* + * Only continue with trying to read the FIFO if all DMA chars have + * been taken first. + */ + if (dma_count == pending && readfifo) { + /* Clear any error flags */ + writew(UART011_OEIS | UART011_BEIS | UART011_PEIS | UART011_FEIS, + uap->port.membase + UART011_ICR); + + /* + * If we read all the DMA'd characters, and we had an + * incomplete buffer, that could be due to an rx error, or + * maybe we just timed out. Read any pending chars and check + * the error status. + * + * Error conditions will only occur in the FIFO, these will + * trigger an immediate interrupt and stop the DMA job, so we + * will always find the error in the FIFO, never in the DMA + * buffer. + */ + fifotaken = pl011_fifo_to_tty(uap); + } + + spin_unlock(&uap->port.lock); + dev_vdbg(uap->port.dev, + "Took %d chars from DMA buffer and %d chars from the FIFO\n", + dma_count, fifotaken); + tty_flip_buffer_push(tty); + spin_lock(&uap->port.lock); +} + +static void pl011_dma_rx_irq(struct uart_amba_port *uap) +{ + struct pl011_dmarx_data *dmarx = &uap->dmarx; + struct dma_chan *rxchan = dmarx->chan; + struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ? + &dmarx->sgbuf_b : &dmarx->sgbuf_a; + size_t pending; + struct dma_tx_state state; + enum dma_status dmastat; + + /* + * Pause the transfer so we can trust the current counter, + * do this before we pause the PL011 block, else we may + * overflow the FIFO. 
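Review note: pl011_dma_rx_trigger_dma() and pl011_dma_rx_chars() above both select the active scatterlist from the use_buf_b flag; the IRQ and completion paths in the next hunks flip that flag each time a buffer is handed to the TTY, a classic ping-pong scheme. A schematic of one cycle, with drain() and arm() as stand-ins for pl011_dma_rx_chars() and pl011_dma_rx_trigger_dma(); the completion callback re-arms before draining to minimise dead time:

#include <stdbool.h>

struct rx_pingpong {
	bool use_buf_b;			/* buffer DMA is currently filling */
};

static void drain(bool buf_b) { /* hand the finished buffer to the TTY */ }
static int arm(bool buf_b) { return 0; /* queue a DMA job into this buffer */ }
static void fall_back_to_irq(void) { /* re-enable UART011_RXIM */ }

static void on_rx_event(struct rx_pingpong *st)
{
	bool finished = st->use_buf_b;	/* the buffer that just completed */

	st->use_buf_b = !finished;	/* flip and re-arm the other buffer */
	if (arm(st->use_buf_b))
		fall_back_to_irq();	/* no channel: interrupt mode */

	drain(finished);		/* then empty the completed buffer */
}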
+ */ + if (dmaengine_pause(rxchan)) + dev_err(uap->port.dev, "unable to pause DMA transfer\n"); + dmastat = rxchan->device->device_tx_status(rxchan, + dmarx->cookie, &state); + if (dmastat != DMA_PAUSED) + dev_err(uap->port.dev, "unable to pause DMA transfer\n"); + + /* Disable RX DMA - incoming data will wait in the FIFO */ + uap->dmacr &= ~UART011_RXDMAE; + writew(uap->dmacr, uap->port.membase + UART011_DMACR); + uap->dmarx.running = false; + + pending = sgbuf->sg.length - state.residue; + BUG_ON(pending > PL011_DMA_BUFFER_SIZE); + /* Then we terminate the transfer - we now know our residue */ + dmaengine_terminate_all(rxchan); + + /* + * This will take the chars we have so far and insert + * into the framework. + */ + pl011_dma_rx_chars(uap, pending, dmarx->use_buf_b, true); + + /* Switch buffer & re-trigger DMA job */ + dmarx->use_buf_b = !dmarx->use_buf_b; + if (pl011_dma_rx_trigger_dma(uap)) { + dev_dbg(uap->port.dev, "could not retrigger RX DMA job " + "fall back to interrupt mode\n"); + uap->im |= UART011_RXIM; + writew(uap->im, uap->port.membase + UART011_IMSC); + } +} + +static void pl011_dma_rx_callback(void *data) +{ + struct uart_amba_port *uap = data; + struct pl011_dmarx_data *dmarx = &uap->dmarx; + bool lastbuf = dmarx->use_buf_b; + int ret; + + /* + * This completion interrupt occurs typically when the + * RX buffer is totally stuffed but no timeout has yet + * occurred. When that happens, we just want the RX + * routine to flush out the secondary DMA buffer while + * we immediately trigger the next DMA job. + */ + spin_lock_irq(&uap->port.lock); + uap->dmarx.running = false; + dmarx->use_buf_b = !lastbuf; + ret = pl011_dma_rx_trigger_dma(uap); + + pl011_dma_rx_chars(uap, PL011_DMA_BUFFER_SIZE, lastbuf, false); + spin_unlock_irq(&uap->port.lock); + /* + * Do this check after we picked the DMA chars so we don't + * get some IRQ immediately from RX. + */ + if (ret) { + dev_dbg(uap->port.dev, "could not retrigger RX DMA job " + "fall back to interrupt mode\n"); + uap->im |= UART011_RXIM; + writew(uap->im, uap->port.membase + UART011_IMSC); + } +} + +/* + * Stop accepting received characters, when we're shutting down or + * suspending this port. + * Locking: called with port lock held and IRQs disabled. + */ +static inline void pl011_dma_rx_stop(struct uart_amba_port *uap) +{ + /* FIXME. 
Just disable the DMA enable */ + uap->dmacr &= ~UART011_RXDMAE; + writew(uap->dmacr, uap->port.membase + UART011_DMACR); +} static void pl011_dma_startup(struct uart_amba_port *uap) { + int ret; + if (!uap->dmatx.chan) return; @@ -525,8 +857,33 @@ static void pl011_dma_startup(struct uart_amba_port *uap) /* The DMA buffer is now the FIFO the TTY subsystem can use */ uap->port.fifosize = PL011_DMA_BUFFER_SIZE; - uap->using_dma = true; + uap->using_tx_dma = true; + + if (!uap->dmarx.chan) + goto skip_rx; + + /* Allocate and map DMA RX buffers */ + ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_a, + DMA_FROM_DEVICE); + if (ret) { + dev_err(uap->port.dev, "failed to init DMA %s: %d\n", + "RX buffer A", ret); + goto skip_rx; + } + + ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_b, + DMA_FROM_DEVICE); + if (ret) { + dev_err(uap->port.dev, "failed to init DMA %s: %d\n", + "RX buffer B", ret); + pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a, + DMA_FROM_DEVICE); + goto skip_rx; + } + uap->using_rx_dma = true; + +skip_rx: /* Turn on DMA error (RX/TX will be enabled on demand) */ uap->dmacr |= UART011_DMAONERR; writew(uap->dmacr, uap->port.membase + UART011_DMACR); @@ -539,11 +896,17 @@ static void pl011_dma_startup(struct uart_amba_port *uap) if (uap->vendor->dma_threshold) writew(ST_UART011_DMAWM_RX_16 | ST_UART011_DMAWM_TX_16, uap->port.membase + ST_UART011_DMAWM); + + if (uap->using_rx_dma) { + if (pl011_dma_rx_trigger_dma(uap)) + dev_dbg(uap->port.dev, "could not trigger initial " + "RX DMA job, fall back to interrupt mode\n"); + } } static void pl011_dma_shutdown(struct uart_amba_port *uap) { - if (!uap->using_dma) + if (!(uap->using_tx_dma || uap->using_rx_dma)) return; /* Disable RX and TX DMA */ @@ -555,19 +918,39 @@ static void pl011_dma_shutdown(struct uart_amba_port *uap) writew(uap->dmacr, uap->port.membase + UART011_DMACR); spin_unlock_irq(&uap->port.lock); - /* In theory, this should already be done by pl011_dma_flush_buffer */ - dmaengine_terminate_all(uap->dmatx.chan); - if (uap->dmatx.queued) { - dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1, - DMA_TO_DEVICE); - uap->dmatx.queued = false; + if (uap->using_tx_dma) { + /* In theory, this should already be done by pl011_dma_flush_buffer */ + dmaengine_terminate_all(uap->dmatx.chan); + if (uap->dmatx.queued) { + dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1, + DMA_TO_DEVICE); + uap->dmatx.queued = false; + } + + kfree(uap->dmatx.buf); + uap->using_tx_dma = false; } - kfree(uap->dmatx.buf); + if (uap->using_rx_dma) { + dmaengine_terminate_all(uap->dmarx.chan); + /* Clean up the RX DMA */ + pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a, DMA_FROM_DEVICE); + pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_b, DMA_FROM_DEVICE); + uap->using_rx_dma = false; + } +} - uap->using_dma = false; +static inline bool pl011_dma_rx_available(struct uart_amba_port *uap) +{ + return uap->using_rx_dma; } +static inline bool pl011_dma_rx_running(struct uart_amba_port *uap) +{ + return uap->using_rx_dma && uap->dmarx.running; +} + + #else /* Blank functions if the DMA engine is not available */ static inline void pl011_dma_probe(struct uart_amba_port *uap) @@ -600,6 +983,29 @@ static inline bool pl011_dma_tx_start(struct uart_amba_port *uap) return false; } +static inline void pl011_dma_rx_irq(struct uart_amba_port *uap) +{ +} + +static inline void pl011_dma_rx_stop(struct uart_amba_port *uap) +{ +} + +static inline int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap) +{ + return 
-EIO; +} + +static inline bool pl011_dma_rx_available(struct uart_amba_port *uap) +{ + return false; +} + +static inline bool pl011_dma_rx_running(struct uart_amba_port *uap) +{ + return false; +} + #define pl011_dma_flush_buffer NULL #endif @@ -630,6 +1036,8 @@ static void pl011_stop_rx(struct uart_port *port) uap->im &= ~(UART011_RXIM|UART011_RTIM|UART011_FEIM| UART011_PEIM|UART011_BEIM|UART011_OEIM); writew(uap->im, uap->port.membase + UART011_IMSC); + + pl011_dma_rx_stop(uap); } static void pl011_enable_ms(struct uart_port *port) @@ -643,51 +1051,24 @@ static void pl011_enable_ms(struct uart_port *port) static void pl011_rx_chars(struct uart_amba_port *uap) { struct tty_struct *tty = uap->port.state->port.tty; - unsigned int status, ch, flag, max_count = 256; - - status = readw(uap->port.membase + UART01x_FR); - while ((status & UART01x_FR_RXFE) == 0 && max_count--) { - ch = readw(uap->port.membase + UART01x_DR) | UART_DUMMY_DR_RX; - flag = TTY_NORMAL; - uap->port.icount.rx++; - - /* - * Note that the error handling code is - * out of the main execution path - */ - if (unlikely(ch & UART_DR_ERROR)) { - if (ch & UART011_DR_BE) { - ch &= ~(UART011_DR_FE | UART011_DR_PE); - uap->port.icount.brk++; - if (uart_handle_break(&uap->port)) - goto ignore_char; - } else if (ch & UART011_DR_PE) - uap->port.icount.parity++; - else if (ch & UART011_DR_FE) - uap->port.icount.frame++; - if (ch & UART011_DR_OE) - uap->port.icount.overrun++; - - ch &= uap->port.read_status_mask; - - if (ch & UART011_DR_BE) - flag = TTY_BREAK; - else if (ch & UART011_DR_PE) - flag = TTY_PARITY; - else if (ch & UART011_DR_FE) - flag = TTY_FRAME; - } - if (uart_handle_sysrq_char(&uap->port, ch & 255)) - goto ignore_char; - - uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag); + pl011_fifo_to_tty(uap); - ignore_char: - status = readw(uap->port.membase + UART01x_FR); - } spin_unlock(&uap->port.lock); tty_flip_buffer_push(tty); + /* + * If we were temporarily out of DMA mode for a while, + * attempt to switch back to DMA mode again. + */ + if (pl011_dma_rx_available(uap)) { + if (pl011_dma_rx_trigger_dma(uap)) { + dev_dbg(uap->port.dev, "could not trigger RX DMA job " + "fall back to interrupt mode again\n"); + uap->im |= UART011_RXIM; + } else + uap->im &= ~UART011_RXIM; + writew(uap->im, uap->port.membase + UART011_IMSC); + } spin_lock(&uap->port.lock); } @@ -767,8 +1148,12 @@ static irqreturn_t pl011_int(int irq, void *dev_id) UART011_RXIS), uap->port.membase + UART011_ICR); - if (status & (UART011_RTIS|UART011_RXIS)) - pl011_rx_chars(uap); + if (status & (UART011_RTIS|UART011_RXIS)) { + if (pl011_dma_rx_running(uap)) + pl011_dma_rx_irq(uap); + else + pl011_rx_chars(uap); + } if (status & (UART011_DSRMIS|UART011_DCDMIS| UART011_CTSMIS|UART011_RIMIS)) pl011_modem_status(uap); @@ -945,10 +1330,14 @@ static int pl011_startup(struct uart_port *port) pl011_dma_startup(uap); /* - * Finally, enable interrupts + * Finally, enable interrupts, only timeouts when using DMA + * if initial RX DMA job failed, start in interrupt mode + * as well. 
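Review note: with RX DMA running, startup now leaves only the receive-timeout interrupt (RTIM) enabled and adds RXIM back whenever DMA is absent or failed to start; the same toggling recurs in pl011_rx_chars() and the DMA fallback paths. The decision, condensed, using the UART011_* mask bits from the patch context:

#include <linux/types.h>
#include <linux/amba/serial.h>

static unsigned int pl011_rx_im(bool dma_running)
{
	unsigned int im = UART011_RTIM;		/* always take FIFO timeouts */

	if (!dma_running)
		im |= UART011_RXIM;		/* per-character IRQs without DMA */
	return im;
}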
*/ spin_lock_irq(&uap->port.lock); - uap->im = UART011_RXIM | UART011_RTIM; + uap->im = UART011_RTIM; + if (!pl011_dma_rx_running(uap)) + uap->im |= UART011_RXIM; writew(uap->im, uap->port.membase + UART011_IMSC); spin_unlock_irq(&uap->port.lock); @@ -1349,7 +1738,7 @@ static struct uart_driver amba_reg = { .cons = AMBA_CONSOLE, }; -static int pl011_probe(struct amba_device *dev, struct amba_id *id) +static int pl011_probe(struct amba_device *dev, const struct amba_id *id) { struct uart_amba_port *uap; struct vendor_data *vendor = id->data; diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c index 8e43a7b69e64..bfee9b4c6661 100644 --- a/drivers/tty/serial/msm_serial.c +++ b/drivers/tty/serial/msm_serial.c @@ -3,6 +3,7 @@ * * Copyright (C) 2007 Google, Inc. * Author: Robert Love <rlove@google.com> + * Copyright (c) 2011, Code Aurora Forum. All rights reserved. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and @@ -31,6 +32,7 @@ #include <linux/serial.h> #include <linux/clk.h> #include <linux/platform_device.h> +#include <linux/delay.h> #include "msm_serial.h" @@ -38,9 +40,20 @@ struct msm_port { struct uart_port uart; char name[16]; struct clk *clk; + struct clk *pclk; unsigned int imr; + unsigned int *gsbi_base; + int is_uartdm; + unsigned int old_snap_state; }; +static inline void wait_for_xmitr(struct uart_port *port, int bits) +{ + if (!(msm_read(port, UART_SR) & UART_SR_TX_EMPTY)) + while ((msm_read(port, UART_ISR) & bits) != bits) + cpu_relax(); +} + static void msm_stop_tx(struct uart_port *port) { struct msm_port *msm_port = UART_TO_MSM(port); @@ -73,6 +86,61 @@ static void msm_enable_ms(struct uart_port *port) msm_write(port, msm_port->imr, UART_IMR); } +static void handle_rx_dm(struct uart_port *port, unsigned int misr) +{ + struct tty_struct *tty = port->state->port.tty; + unsigned int sr; + int count = 0; + struct msm_port *msm_port = UART_TO_MSM(port); + + if ((msm_read(port, UART_SR) & UART_SR_OVERRUN)) { + port->icount.overrun++; + tty_insert_flip_char(tty, 0, TTY_OVERRUN); + msm_write(port, UART_CR_CMD_RESET_ERR, UART_CR); + } + + if (misr & UART_IMR_RXSTALE) { + count = msm_read(port, UARTDM_RX_TOTAL_SNAP) - + msm_port->old_snap_state; + msm_port->old_snap_state = 0; + } else { + count = 4 * (msm_read(port, UART_RFWR)); + msm_port->old_snap_state += count; + } + + /* TODO: Precise error reporting */ + + port->icount.rx += count; + + while (count > 0) { + unsigned int c; + + sr = msm_read(port, UART_SR); + if ((sr & UART_SR_RX_READY) == 0) { + msm_port->old_snap_state -= count; + break; + } + c = msm_read(port, UARTDM_RF); + if (sr & UART_SR_RX_BREAK) { + port->icount.brk++; + if (uart_handle_break(port)) + continue; + } else if (sr & UART_SR_PAR_FRAME_ERR) + port->icount.frame++; + + /* TODO: handle sysrq */ + tty_insert_flip_string(tty, (char *) &c, + (count > 4) ? 
4 : count); + count -= 4; + } + + tty_flip_buffer_push(tty); + if (misr & (UART_IMR_RXSTALE)) + msm_write(port, UART_CR_CMD_RESET_STALE_INT, UART_CR); + msm_write(port, 0xFFFFFF, UARTDM_DMRX); + msm_write(port, UART_CR_CMD_STALE_EVENT_ENABLE, UART_CR); +} + static void handle_rx(struct uart_port *port) { struct tty_struct *tty = port->state->port.tty; @@ -121,6 +189,12 @@ static void handle_rx(struct uart_port *port) tty_flip_buffer_push(tty); } +static void reset_dm_count(struct uart_port *port) +{ + wait_for_xmitr(port, UART_ISR_TX_READY); + msm_write(port, 1, UARTDM_NCF_TX); +} + static void handle_tx(struct uart_port *port) { struct circ_buf *xmit = &port->state->xmit; @@ -128,11 +202,18 @@ static void handle_tx(struct uart_port *port) int sent_tx; if (port->x_char) { - msm_write(port, port->x_char, UART_TF); + if (msm_port->is_uartdm) + reset_dm_count(port); + + msm_write(port, port->x_char, + msm_port->is_uartdm ? UARTDM_TF : UART_TF); port->icount.tx++; port->x_char = 0; } + if (msm_port->is_uartdm) + reset_dm_count(port); + while (msm_read(port, UART_SR) & UART_SR_TX_READY) { if (uart_circ_empty(xmit)) { /* disable tx interrupts */ @@ -140,8 +221,11 @@ static void handle_tx(struct uart_port *port) msm_write(port, msm_port->imr, UART_IMR); break; } + msm_write(port, xmit->buf[xmit->tail], + msm_port->is_uartdm ? UARTDM_TF : UART_TF); - msm_write(port, xmit->buf[xmit->tail], UART_TF); + if (msm_port->is_uartdm) + reset_dm_count(port); xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); port->icount.tx++; @@ -169,8 +253,12 @@ static irqreturn_t msm_irq(int irq, void *dev_id) misr = msm_read(port, UART_MISR); msm_write(port, 0, UART_IMR); /* disable interrupt */ - if (misr & (UART_IMR_RXLEV | UART_IMR_RXSTALE)) - handle_rx(port); + if (misr & (UART_IMR_RXLEV | UART_IMR_RXSTALE)) { + if (msm_port->is_uartdm) + handle_rx_dm(port, misr); + else + handle_rx(port); + } if (misr & UART_IMR_TXLEV) handle_tx(port); if (misr & UART_IMR_DELTA_CTS) @@ -192,10 +280,21 @@ static unsigned int msm_get_mctrl(struct uart_port *port) return TIOCM_CAR | TIOCM_CTS | TIOCM_DSR | TIOCM_RTS; } -static void msm_set_mctrl(struct uart_port *port, unsigned int mctrl) + +static void msm_reset(struct uart_port *port) { - unsigned int mr; + /* reset everything */ + msm_write(port, UART_CR_CMD_RESET_RX, UART_CR); + msm_write(port, UART_CR_CMD_RESET_TX, UART_CR); + msm_write(port, UART_CR_CMD_RESET_ERR, UART_CR); + msm_write(port, UART_CR_CMD_RESET_BREAK_INT, UART_CR); + msm_write(port, UART_CR_CMD_RESET_CTS, UART_CR); + msm_write(port, UART_CR_CMD_SET_RFR, UART_CR); +} +void msm_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ + unsigned int mr; mr = msm_read(port, UART_MR1); if (!(mctrl & TIOCM_RTS)) { @@ -219,6 +318,7 @@ static void msm_break_ctl(struct uart_port *port, int break_ctl) static int msm_set_baud_rate(struct uart_port *port, unsigned int baud) { unsigned int baud_code, rxstale, watermark; + struct msm_port *msm_port = UART_TO_MSM(port); switch (baud) { case 300: @@ -273,6 +373,9 @@ static int msm_set_baud_rate(struct uart_port *port, unsigned int baud) break; } + if (msm_port->is_uartdm) + msm_write(port, UART_CR_CMD_RESET_RX, UART_CR); + msm_write(port, baud_code, UART_CSR); /* RX stale watermark */ @@ -288,25 +391,23 @@ static int msm_set_baud_rate(struct uart_port *port, unsigned int baud) /* set TX watermark */ msm_write(port, 10, UART_TFWR); + if (msm_port->is_uartdm) { + msm_write(port, UART_CR_CMD_RESET_STALE_INT, UART_CR); + msm_write(port, 0xFFFFFF, UARTDM_DMRX); + msm_write(port, 
UART_CR_CMD_STALE_EVENT_ENABLE, UART_CR); + } + return baud; } -static void msm_reset(struct uart_port *port) -{ - /* reset everything */ - msm_write(port, UART_CR_CMD_RESET_RX, UART_CR); - msm_write(port, UART_CR_CMD_RESET_TX, UART_CR); - msm_write(port, UART_CR_CMD_RESET_ERR, UART_CR); - msm_write(port, UART_CR_CMD_RESET_BREAK_INT, UART_CR); - msm_write(port, UART_CR_CMD_RESET_CTS, UART_CR); - msm_write(port, UART_CR_CMD_SET_RFR, UART_CR); -} static void msm_init_clock(struct uart_port *port) { struct msm_port *msm_port = UART_TO_MSM(port); clk_enable(msm_port->clk); + if (!IS_ERR(msm_port->pclk)) + clk_enable(msm_port->pclk); msm_serial_set_mnd_regs(port); } @@ -347,15 +448,31 @@ static int msm_startup(struct uart_port *port) msm_write(port, data, UART_IPR); } - msm_reset(port); + data = 0; + if (!port->cons || (port->cons && !(port->cons->flags & CON_ENABLED))) { + msm_write(port, UART_CR_CMD_PROTECTION_EN, UART_CR); + msm_reset(port); + data = UART_CR_TX_ENABLE; + } + + data |= UART_CR_RX_ENABLE; + msm_write(port, data, UART_CR); /* enable TX & RX */ - msm_write(port, 0x05, UART_CR); /* enable TX & RX */ + /* Make sure IPR is not 0 to start with*/ + if (msm_port->is_uartdm) + msm_write(port, UART_IPR_STALE_LSB, UART_IPR); /* turn on RX and CTS interrupts */ msm_port->imr = UART_IMR_RXLEV | UART_IMR_RXSTALE | UART_IMR_CURRENT_CTS; - msm_write(port, msm_port->imr, UART_IMR); + if (msm_port->is_uartdm) { + msm_write(port, 0xFFFFFF, UARTDM_DMRX); + msm_write(port, UART_CR_CMD_RESET_STALE_INT, UART_CR); + msm_write(port, UART_CR_CMD_STALE_EVENT_ENABLE, UART_CR); + } + + msm_write(port, msm_port->imr, UART_IMR); return 0; } @@ -384,7 +501,7 @@ static void msm_set_termios(struct uart_port *port, struct ktermios *termios, baud = msm_set_baud_rate(port, baud); if (tty_termios_baud_rate(termios)) tty_termios_encode_baud_rate(termios, baud, baud); - + /* calculate parity */ mr = msm_read(port, UART_MR2); mr &= ~UART_MR2_PARITY_MODE; @@ -454,48 +571,105 @@ static const char *msm_type(struct uart_port *port) static void msm_release_port(struct uart_port *port) { struct platform_device *pdev = to_platform_device(port->dev); - struct resource *resource; + struct msm_port *msm_port = UART_TO_MSM(port); + struct resource *uart_resource; + struct resource *gsbi_resource; resource_size_t size; - resource = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (unlikely(!resource)) + uart_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (unlikely(!uart_resource)) return; - size = resource->end - resource->start + 1; + size = resource_size(uart_resource); release_mem_region(port->mapbase, size); iounmap(port->membase); port->membase = NULL; + + if (msm_port->gsbi_base) { + iowrite32(GSBI_PROTOCOL_IDLE, msm_port->gsbi_base + + GSBI_CONTROL); + + gsbi_resource = platform_get_resource_byname(pdev, + IORESOURCE_MEM, + "gsbi_resource"); + + if (unlikely(!gsbi_resource)) + return; + + size = resource_size(gsbi_resource); + release_mem_region(gsbi_resource->start, size); + iounmap(msm_port->gsbi_base); + msm_port->gsbi_base = NULL; + } } static int msm_request_port(struct uart_port *port) { + struct msm_port *msm_port = UART_TO_MSM(port); struct platform_device *pdev = to_platform_device(port->dev); - struct resource *resource; + struct resource *uart_resource; + struct resource *gsbi_resource; resource_size_t size; + int ret; - resource = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (unlikely(!resource)) + uart_resource = platform_get_resource_byname(pdev, IORESOURCE_MEM, + 
"uart_resource"); + if (unlikely(!uart_resource)) return -ENXIO; - size = resource->end - resource->start + 1; - if (unlikely(!request_mem_region(port->mapbase, size, "msm_serial"))) + size = resource_size(uart_resource); + + if (!request_mem_region(port->mapbase, size, "msm_serial")) return -EBUSY; port->membase = ioremap(port->mapbase, size); if (!port->membase) { - release_mem_region(port->mapbase, size); - return -EBUSY; + ret = -EBUSY; + goto fail_release_port; + } + + gsbi_resource = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "gsbi_resource"); + /* Is this a GSBI-based port? */ + if (gsbi_resource) { + size = resource_size(gsbi_resource); + + if (!request_mem_region(gsbi_resource->start, size, + "msm_serial")) { + ret = -EBUSY; + goto fail_release_port; + } + + msm_port->gsbi_base = ioremap(gsbi_resource->start, size); + if (!msm_port->gsbi_base) { + ret = -EBUSY; + goto fail_release_gsbi; + } } return 0; + +fail_release_gsbi: + release_mem_region(gsbi_resource->start, size); +fail_release_port: + release_mem_region(port->mapbase, size); + return ret; } static void msm_config_port(struct uart_port *port, int flags) { + struct msm_port *msm_port = UART_TO_MSM(port); + int ret; if (flags & UART_CONFIG_TYPE) { port->type = PORT_MSM; - msm_request_port(port); + ret = msm_request_port(port); + if (ret) + return; } + + if (msm_port->is_uartdm) + iowrite32(GSBI_PROTOCOL_UART, msm_port->gsbi_base + + GSBI_CONTROL); } static int msm_verify_port(struct uart_port *port, struct serial_struct *ser) @@ -515,9 +689,13 @@ static void msm_power(struct uart_port *port, unsigned int state, switch (state) { case 0: clk_enable(msm_port->clk); + if (!IS_ERR(msm_port->pclk)) + clk_enable(msm_port->pclk); break; case 3: clk_disable(msm_port->clk); + if (!IS_ERR(msm_port->pclk)) + clk_disable(msm_port->pclk); break; default: printk(KERN_ERR "msm_serial: Unknown PM state %d\n", state); @@ -550,7 +728,7 @@ static struct msm_port msm_uart_ports[] = { .iotype = UPIO_MEM, .ops = &msm_uart_pops, .flags = UPF_BOOT_AUTOCONF, - .fifosize = 512, + .fifosize = 64, .line = 0, }, }, @@ -559,7 +737,7 @@ static struct msm_port msm_uart_ports[] = { .iotype = UPIO_MEM, .ops = &msm_uart_pops, .flags = UPF_BOOT_AUTOCONF, - .fifosize = 512, + .fifosize = 64, .line = 1, }, }, @@ -585,9 +763,14 @@ static inline struct uart_port *get_port_from_line(unsigned int line) static void msm_console_putchar(struct uart_port *port, int c) { + struct msm_port *msm_port = UART_TO_MSM(port); + + if (msm_port->is_uartdm) + reset_dm_count(port); + while (!(msm_read(port, UART_SR) & UART_SR_TX_READY)) ; - msm_write(port, c, UART_TF); + msm_write(port, c, msm_port->is_uartdm ? 
UARTDM_TF : UART_TF); } static void msm_console_write(struct console *co, const char *s, @@ -609,12 +792,14 @@ static void msm_console_write(struct console *co, const char *s, static int __init msm_console_setup(struct console *co, char *options) { struct uart_port *port; + struct msm_port *msm_port; int baud, flow, bits, parity; if (unlikely(co->index >= UART_NR || co->index < 0)) return -ENXIO; port = get_port_from_line(co->index); + msm_port = UART_TO_MSM(port); if (unlikely(!port->membase)) return -ENXIO; @@ -638,6 +823,11 @@ static int __init msm_console_setup(struct console *co, char *options) msm_reset(port); + if (msm_port->is_uartdm) { + msm_write(port, UART_CR_CMD_PROTECTION_EN, UART_CR); + msm_write(port, UART_CR_TX_ENABLE, UART_CR); + } + printk(KERN_INFO "msm_serial: console setup on port #%d\n", port->line); return uart_set_options(port, co, baud, parity, bits, flow); @@ -685,14 +875,32 @@ static int __init msm_serial_probe(struct platform_device *pdev) port->dev = &pdev->dev; msm_port = UART_TO_MSM(port); - msm_port->clk = clk_get(&pdev->dev, "uart_clk"); - if (IS_ERR(msm_port->clk)) - return PTR_ERR(msm_port->clk); + if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "gsbi_resource")) + msm_port->is_uartdm = 1; + else + msm_port->is_uartdm = 0; + + if (msm_port->is_uartdm) { + msm_port->clk = clk_get(&pdev->dev, "gsbi_uart_clk"); + msm_port->pclk = clk_get(&pdev->dev, "gsbi_pclk"); + } else { + msm_port->clk = clk_get(&pdev->dev, "uart_clk"); + msm_port->pclk = ERR_PTR(-ENOENT); + } + + if (unlikely(IS_ERR(msm_port->clk) || (IS_ERR(msm_port->pclk) && + msm_port->is_uartdm))) + return PTR_ERR(msm_port->clk); + + if (msm_port->is_uartdm) + clk_set_rate(msm_port->clk, 7372800); + port->uartclk = clk_get_rate(msm_port->clk); printk(KERN_INFO "uartclk = %d\n", port->uartclk); - resource = platform_get_resource(pdev, IORESOURCE_MEM, 0); + resource = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "uart_resource"); if (unlikely(!resource)) return -ENXIO; port->mapbase = resource->start; diff --git a/drivers/tty/serial/msm_serial.h b/drivers/tty/serial/msm_serial.h index f6ca9ca79e98..9b8dc5d0d855 100644 --- a/drivers/tty/serial/msm_serial.h +++ b/drivers/tty/serial/msm_serial.h @@ -3,6 +3,7 @@ * * Copyright (C) 2007 Google, Inc. * Author: Robert Love <rlove@google.com> + * Copyright (c) 2011, Code Aurora Forum. All rights reserved. 
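[Editor's note] The probe rework above keys everything off the presence of a memory resource named "gsbi_resource": if the platform device provides one, the port is a GSBI-hosted UARTDM and needs the GSBI clock pair and a fixed core rate; otherwise the legacy "uart_clk" is used. A condensed sketch of that decision path, using only calls that appear in the hunk (the helper name msm_port_setup_clocks is invented for illustration, and error unwinding is trimmed):

	static int msm_port_setup_clocks(struct platform_device *pdev,
					 struct msm_port *msm_port)
	{
		/* UARTDM ports are the ones that sit behind a GSBI block */
		msm_port->is_uartdm = !!platform_get_resource_byname(pdev,
						IORESOURCE_MEM, "gsbi_resource");

		if (msm_port->is_uartdm) {
			msm_port->clk = clk_get(&pdev->dev, "gsbi_uart_clk");
			msm_port->pclk = clk_get(&pdev->dev, "gsbi_pclk");
		} else {
			msm_port->clk = clk_get(&pdev->dev, "uart_clk");
			msm_port->pclk = ERR_PTR(-ENOENT);	/* no bus clock */
		}
		if (IS_ERR(msm_port->clk))
			return PTR_ERR(msm_port->clk);

		if (msm_port->is_uartdm)	/* UARTDM wants a fixed core rate */
			clk_set_rate(msm_port->clk, 7372800);

		return 0;
	}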
* * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and @@ -54,6 +55,7 @@ #define UART_CSR_300 0x22 #define UART_TF 0x000C +#define UARTDM_TF 0x0070 #define UART_CR 0x0010 #define UART_CR_CMD_NULL (0 << 4) @@ -64,14 +66,17 @@ #define UART_CR_CMD_START_BREAK (5 << 4) #define UART_CR_CMD_STOP_BREAK (6 << 4) #define UART_CR_CMD_RESET_CTS (7 << 4) +#define UART_CR_CMD_RESET_STALE_INT (8 << 4) #define UART_CR_CMD_PACKET_MODE (9 << 4) #define UART_CR_CMD_MODE_RESET (12 << 4) #define UART_CR_CMD_SET_RFR (13 << 4) #define UART_CR_CMD_RESET_RFR (14 << 4) +#define UART_CR_CMD_PROTECTION_EN (16 << 4) +#define UART_CR_CMD_STALE_EVENT_ENABLE (80 << 4) #define UART_CR_TX_DISABLE (1 << 3) -#define UART_CR_TX_ENABLE (1 << 3) -#define UART_CR_RX_DISABLE (1 << 3) -#define UART_CR_RX_ENABLE (1 << 3) +#define UART_CR_TX_ENABLE (1 << 2) +#define UART_CR_RX_DISABLE (1 << 1) +#define UART_CR_RX_ENABLE (1 << 0) #define UART_IMR 0x0014 #define UART_IMR_TXLEV (1 << 0) @@ -110,9 +115,20 @@ #define UART_SR_RX_FULL (1 << 1) #define UART_SR_RX_READY (1 << 0) -#define UART_RF 0x000C -#define UART_MISR 0x0010 -#define UART_ISR 0x0014 +#define UART_RF 0x000C +#define UARTDM_RF 0x0070 +#define UART_MISR 0x0010 +#define UART_ISR 0x0014 +#define UART_ISR_TX_READY (1 << 7) + +#define GSBI_CONTROL 0x0 +#define GSBI_PROTOCOL_CODE 0x30 +#define GSBI_PROTOCOL_UART 0x40 +#define GSBI_PROTOCOL_IDLE 0x0 + +#define UARTDM_DMRX 0x34 +#define UARTDM_NCF_TX 0x40 +#define UARTDM_RX_TOTAL_SNAP 0x38 #define UART_TO_MSM(uart_port) ((struct msm_port *) uart_port) diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c new file mode 100644 index 000000000000..7e02c9c344fe --- /dev/null +++ b/drivers/tty/serial/mxs-auart.c @@ -0,0 +1,798 @@ +/* + * Freescale STMP37XX/STMP378X Application UART driver + * + * Author: dmitry pervushin <dimka@embeddedalley.com> + * + * Copyright 2008-2010 Freescale Semiconductor, Inc. + * Copyright 2008 Embedded Alley Solutions, Inc All Rights Reserved. + * + * The code contained herein is licensed under the GNU General Public + * License. 
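[Editor's note] The UART_CR bit corrections in this header are worth dwelling on: previously UART_CR_TX_ENABLE, UART_CR_RX_DISABLE and UART_CR_RX_ENABLE were all defined as (1 << 3), the same bit as UART_CR_TX_DISABLE, which is why the old startup code wrote the raw magic value 0x05 instead of the symbolic names. With the corrected positions (TX enable in bit 2, RX enable in bit 0) the same write can finally be spelled out:

	/* equivalent of the old hard-coded msm_write(port, 0x05, UART_CR) */
	unsigned int cr = UART_CR_TX_ENABLE | UART_CR_RX_ENABLE; /* (1<<2)|(1<<0) == 0x05 */
	msm_write(port, cr, UART_CR);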
You may obtain a copy of the GNU General Public License + * Version 2 or later at the following locations: + * + * http://www.opensource.org/licenses/gpl-license.html + * http://www.gnu.org/copyleft/gpl.html + */ + +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/console.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/wait.h> +#include <linux/tty.h> +#include <linux/tty_driver.h> +#include <linux/tty_flip.h> +#include <linux/serial.h> +#include <linux/serial_core.h> +#include <linux/platform_device.h> +#include <linux/device.h> +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/io.h> + +#include <asm/cacheflush.h> + +#define MXS_AUART_PORTS 5 + +#define AUART_CTRL0 0x00000000 +#define AUART_CTRL0_SET 0x00000004 +#define AUART_CTRL0_CLR 0x00000008 +#define AUART_CTRL0_TOG 0x0000000c +#define AUART_CTRL1 0x00000010 +#define AUART_CTRL1_SET 0x00000014 +#define AUART_CTRL1_CLR 0x00000018 +#define AUART_CTRL1_TOG 0x0000001c +#define AUART_CTRL2 0x00000020 +#define AUART_CTRL2_SET 0x00000024 +#define AUART_CTRL2_CLR 0x00000028 +#define AUART_CTRL2_TOG 0x0000002c +#define AUART_LINECTRL 0x00000030 +#define AUART_LINECTRL_SET 0x00000034 +#define AUART_LINECTRL_CLR 0x00000038 +#define AUART_LINECTRL_TOG 0x0000003c +#define AUART_LINECTRL2 0x00000040 +#define AUART_LINECTRL2_SET 0x00000044 +#define AUART_LINECTRL2_CLR 0x00000048 +#define AUART_LINECTRL2_TOG 0x0000004c +#define AUART_INTR 0x00000050 +#define AUART_INTR_SET 0x00000054 +#define AUART_INTR_CLR 0x00000058 +#define AUART_INTR_TOG 0x0000005c +#define AUART_DATA 0x00000060 +#define AUART_STAT 0x00000070 +#define AUART_DEBUG 0x00000080 +#define AUART_VERSION 0x00000090 +#define AUART_AUTOBAUD 0x000000a0 + +#define AUART_CTRL0_SFTRST (1 << 31) +#define AUART_CTRL0_CLKGATE (1 << 30) + +#define AUART_CTRL2_CTSEN (1 << 15) +#define AUART_CTRL2_RTS (1 << 11) +#define AUART_CTRL2_RXE (1 << 9) +#define AUART_CTRL2_TXE (1 << 8) +#define AUART_CTRL2_UARTEN (1 << 0) + +#define AUART_LINECTRL_BAUD_DIVINT_SHIFT 16 +#define AUART_LINECTRL_BAUD_DIVINT_MASK 0xffff0000 +#define AUART_LINECTRL_BAUD_DIVINT(v) (((v) & 0xffff) << 16) +#define AUART_LINECTRL_BAUD_DIVFRAC_SHIFT 8 +#define AUART_LINECTRL_BAUD_DIVFRAC_MASK 0x00003f00 +#define AUART_LINECTRL_BAUD_DIVFRAC(v) (((v) & 0x3f) << 8) +#define AUART_LINECTRL_WLEN_MASK 0x00000060 +#define AUART_LINECTRL_WLEN(v) (((v) & 0x3) << 5) +#define AUART_LINECTRL_FEN (1 << 4) +#define AUART_LINECTRL_STP2 (1 << 3) +#define AUART_LINECTRL_EPS (1 << 2) +#define AUART_LINECTRL_PEN (1 << 1) +#define AUART_LINECTRL_BRK (1 << 0) + +#define AUART_INTR_RTIEN (1 << 22) +#define AUART_INTR_TXIEN (1 << 21) +#define AUART_INTR_RXIEN (1 << 20) +#define AUART_INTR_CTSMIEN (1 << 17) +#define AUART_INTR_RTIS (1 << 6) +#define AUART_INTR_TXIS (1 << 5) +#define AUART_INTR_RXIS (1 << 4) +#define AUART_INTR_CTSMIS (1 << 1) + +#define AUART_STAT_BUSY (1 << 29) +#define AUART_STAT_CTS (1 << 28) +#define AUART_STAT_TXFE (1 << 27) +#define AUART_STAT_TXFF (1 << 25) +#define AUART_STAT_RXFE (1 << 24) +#define AUART_STAT_OERR (1 << 19) +#define AUART_STAT_BERR (1 << 18) +#define AUART_STAT_PERR (1 << 17) +#define AUART_STAT_FERR (1 << 16) + +static struct uart_driver auart_driver; + +struct mxs_auart_port { + struct uart_port port; + + unsigned int flags; + unsigned int ctrl; + + unsigned int irq; + + struct clk *clk; + struct device *dev; +}; + +static void mxs_auart_stop_tx(struct uart_port *u); + +#define to_auart_port(u) 
container_of(u, struct mxs_auart_port, port) + +static inline void mxs_auart_tx_chars(struct mxs_auart_port *s) +{ + struct circ_buf *xmit = &s->port.state->xmit; + + while (!(readl(s->port.membase + AUART_STAT) & + AUART_STAT_TXFF)) { + if (s->port.x_char) { + s->port.icount.tx++; + writel(s->port.x_char, + s->port.membase + AUART_DATA); + s->port.x_char = 0; + continue; + } + if (!uart_circ_empty(xmit) && !uart_tx_stopped(&s->port)) { + s->port.icount.tx++; + writel(xmit->buf[xmit->tail], + s->port.membase + AUART_DATA); + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(&s->port); + } else + break; + } + if (uart_circ_empty(&(s->port.state->xmit))) + writel(AUART_INTR_TXIEN, + s->port.membase + AUART_INTR_CLR); + else + writel(AUART_INTR_TXIEN, + s->port.membase + AUART_INTR_SET); + + if (uart_tx_stopped(&s->port)) + mxs_auart_stop_tx(&s->port); +} + +static void mxs_auart_rx_char(struct mxs_auart_port *s) +{ + int flag; + u32 stat; + u8 c; + + c = readl(s->port.membase + AUART_DATA); + stat = readl(s->port.membase + AUART_STAT); + + flag = TTY_NORMAL; + s->port.icount.rx++; + + if (stat & AUART_STAT_BERR) { + s->port.icount.brk++; + if (uart_handle_break(&s->port)) + goto out; + } else if (stat & AUART_STAT_PERR) { + s->port.icount.parity++; + } else if (stat & AUART_STAT_FERR) { + s->port.icount.frame++; + } + + /* + * Mask off conditions which should be ignored. + */ + stat &= s->port.read_status_mask; + + if (stat & AUART_STAT_BERR) { + flag = TTY_BREAK; + } else if (stat & AUART_STAT_PERR) + flag = TTY_PARITY; + else if (stat & AUART_STAT_FERR) + flag = TTY_FRAME; + + if (stat & AUART_STAT_OERR) + s->port.icount.overrun++; + + if (uart_handle_sysrq_char(&s->port, c)) + goto out; + + uart_insert_char(&s->port, stat, AUART_STAT_OERR, c, flag); +out: + writel(stat, s->port.membase + AUART_STAT); +} + +static void mxs_auart_rx_chars(struct mxs_auart_port *s) +{ + struct tty_struct *tty = s->port.state->port.tty; + u32 stat = 0; + + for (;;) { + stat = readl(s->port.membase + AUART_STAT); + if (stat & AUART_STAT_RXFE) + break; + mxs_auart_rx_char(s); + } + + writel(stat, s->port.membase + AUART_STAT); + tty_flip_buffer_push(tty); +} + +static int mxs_auart_request_port(struct uart_port *u) +{ + return 0; +} + +static int mxs_auart_verify_port(struct uart_port *u, + struct serial_struct *ser) +{ + if (u->type != PORT_UNKNOWN && u->type != PORT_IMX) + return -EINVAL; + return 0; +} + +static void mxs_auart_config_port(struct uart_port *u, int flags) +{ +} + +static const char *mxs_auart_type(struct uart_port *u) +{ + struct mxs_auart_port *s = to_auart_port(u); + + return dev_name(s->dev); +} + +static void mxs_auart_release_port(struct uart_port *u) +{ +} + +static void mxs_auart_set_mctrl(struct uart_port *u, unsigned mctrl) +{ + struct mxs_auart_port *s = to_auart_port(u); + + u32 ctrl = readl(u->membase + AUART_CTRL2); + + ctrl &= ~AUART_CTRL2_RTS; + if (mctrl & TIOCM_RTS) + ctrl |= AUART_CTRL2_RTS; + s->ctrl = mctrl; + writel(ctrl, u->membase + AUART_CTRL2); +} + +static u32 mxs_auart_get_mctrl(struct uart_port *u) +{ + struct mxs_auart_port *s = to_auart_port(u); + u32 stat = readl(u->membase + AUART_STAT); + int ctrl2 = readl(u->membase + AUART_CTRL2); + u32 mctrl = s->ctrl; + + mctrl &= ~TIOCM_CTS; + if (stat & AUART_STAT_CTS) + mctrl |= TIOCM_CTS; + + if (ctrl2 & AUART_CTRL2_RTS) + mctrl |= TIOCM_RTS; + + return mctrl; +} + +static void mxs_auart_settermios(struct uart_port *u, + struct ktermios
*termios, + struct ktermios *old) +{ + u32 bm, ctrl, ctrl2, div; + unsigned int cflag, baud; + + cflag = termios->c_cflag; + + ctrl = AUART_LINECTRL_FEN; + ctrl2 = readl(u->membase + AUART_CTRL2); + + /* byte size */ + switch (cflag & CSIZE) { + case CS5: + bm = 0; + break; + case CS6: + bm = 1; + break; + case CS7: + bm = 2; + break; + case CS8: + bm = 3; + break; + default: + return; + } + + ctrl |= AUART_LINECTRL_WLEN(bm); + + /* parity */ + if (cflag & PARENB) { + ctrl |= AUART_LINECTRL_PEN; + if ((cflag & PARODD) == 0) + ctrl |= AUART_LINECTRL_EPS; + } + + u->read_status_mask = 0; + + if (termios->c_iflag & INPCK) + u->read_status_mask |= AUART_STAT_PERR; + if (termios->c_iflag & (BRKINT | PARMRK)) + u->read_status_mask |= AUART_STAT_BERR; + + /* + * Characters to ignore + */ + u->ignore_status_mask = 0; + if (termios->c_iflag & IGNPAR) + u->ignore_status_mask |= AUART_STAT_PERR; + if (termios->c_iflag & IGNBRK) { + u->ignore_status_mask |= AUART_STAT_BERR; + /* + * If we're ignoring parity and break indicators, + * ignore overruns too (for real raw support). + */ + if (termios->c_iflag & IGNPAR) + u->ignore_status_mask |= AUART_STAT_OERR; + } + + /* + * ignore all characters if CREAD is not set + */ + if (cflag & CREAD) + ctrl2 |= AUART_CTRL2_RXE; + else + ctrl2 &= ~AUART_CTRL2_RXE; + + /* figure out the stop bits requested */ + if (cflag & CSTOPB) + ctrl |= AUART_LINECTRL_STP2; + + /* figure out the hardware flow control settings */ + if (cflag & CRTSCTS) + ctrl2 |= AUART_CTRL2_CTSEN; + else + ctrl2 &= ~AUART_CTRL2_CTSEN; + + /* set baud rate */ + baud = uart_get_baud_rate(u, termios, old, 0, u->uartclk); + div = u->uartclk * 32 / baud; + ctrl |= AUART_LINECTRL_BAUD_DIVFRAC(div & 0x3F); + ctrl |= AUART_LINECTRL_BAUD_DIVINT(div >> 6); + + writel(ctrl, u->membase + AUART_LINECTRL); + writel(ctrl2, u->membase + AUART_CTRL2); +} + +static irqreturn_t mxs_auart_irq_handle(int irq, void *context) +{ + u32 istatus, istat; + struct mxs_auart_port *s = context; + u32 stat = readl(s->port.membase + AUART_STAT); + + istatus = istat = readl(s->port.membase + AUART_INTR); + + if (istat & AUART_INTR_CTSMIS) { + uart_handle_cts_change(&s->port, stat & AUART_STAT_CTS); + writel(AUART_INTR_CTSMIS, + s->port.membase + AUART_INTR_CLR); + istat &= ~AUART_INTR_CTSMIS; + } + + if (istat & (AUART_INTR_RTIS | AUART_INTR_RXIS)) { + mxs_auart_rx_chars(s); + istat &= ~(AUART_INTR_RTIS | AUART_INTR_RXIS); + } + + if (istat & AUART_INTR_TXIS) { + mxs_auart_tx_chars(s); + istat &= ~AUART_INTR_TXIS; + } + + writel(istatus & (AUART_INTR_RTIS + | AUART_INTR_TXIS + | AUART_INTR_RXIS + | AUART_INTR_CTSMIS), + s->port.membase + AUART_INTR_CLR); + + return IRQ_HANDLED; +} + +static void mxs_auart_reset(struct uart_port *u) +{ + int i; + unsigned int reg; + + writel(AUART_CTRL0_SFTRST, u->membase + AUART_CTRL0_CLR); + + for (i = 0; i < 10000; i++) { + reg = readl(u->membase + AUART_CTRL0); + if (!(reg & AUART_CTRL0_SFTRST)) + break; + udelay(3); + } + writel(AUART_CTRL0_CLKGATE, u->membase + AUART_CTRL0_CLR); +} + +static int mxs_auart_startup(struct uart_port *u) +{ + struct mxs_auart_port *s = to_auart_port(u); + + clk_enable(s->clk); + + writel(AUART_CTRL0_CLKGATE, u->membase + AUART_CTRL0_CLR); + + writel(AUART_CTRL2_UARTEN, u->membase + AUART_CTRL2_SET); + + writel(AUART_INTR_RXIEN | AUART_INTR_RTIEN | AUART_INTR_CTSMIEN, + u->membase + AUART_INTR); + + /* + * Enable fifo so all four bytes of a DMA word are written to + * output (otherwise, only the LSB is written, ie. 
1 in 4 bytes) + */ + writel(AUART_LINECTRL_FEN, u->membase + AUART_LINECTRL_SET); + + return 0; +} + +static void mxs_auart_shutdown(struct uart_port *u) +{ + struct mxs_auart_port *s = to_auart_port(u); + + writel(AUART_CTRL2_UARTEN, u->membase + AUART_CTRL2_CLR); + + writel(AUART_CTRL0_CLKGATE, u->membase + AUART_CTRL0_SET); + + writel(AUART_INTR_RXIEN | AUART_INTR_RTIEN | AUART_INTR_CTSMIEN, + u->membase + AUART_INTR_CLR); + + clk_disable(s->clk); +} + +static unsigned int mxs_auart_tx_empty(struct uart_port *u) +{ + if (readl(u->membase + AUART_STAT) & AUART_STAT_TXFE) + return TIOCSER_TEMT; + else + return 0; +} + +static void mxs_auart_start_tx(struct uart_port *u) +{ + struct mxs_auart_port *s = to_auart_port(u); + + /* enable transmitter */ + writel(AUART_CTRL2_TXE, u->membase + AUART_CTRL2_SET); + + mxs_auart_tx_chars(s); +} + +static void mxs_auart_stop_tx(struct uart_port *u) +{ + writel(AUART_CTRL2_TXE, u->membase + AUART_CTRL2_CLR); +} + +static void mxs_auart_stop_rx(struct uart_port *u) +{ + writel(AUART_CTRL2_RXE, u->membase + AUART_CTRL2_CLR); +} + +static void mxs_auart_break_ctl(struct uart_port *u, int ctl) +{ + if (ctl) + writel(AUART_LINECTRL_BRK, + u->membase + AUART_LINECTRL_SET); + else + writel(AUART_LINECTRL_BRK, + u->membase + AUART_LINECTRL_CLR); +} + +static void mxs_auart_enable_ms(struct uart_port *port) +{ + /* just empty */ +} + +static struct uart_ops mxs_auart_ops = { + .tx_empty = mxs_auart_tx_empty, + .start_tx = mxs_auart_start_tx, + .stop_tx = mxs_auart_stop_tx, + .stop_rx = mxs_auart_stop_rx, + .enable_ms = mxs_auart_enable_ms, + .break_ctl = mxs_auart_break_ctl, + .set_mctrl = mxs_auart_set_mctrl, + .get_mctrl = mxs_auart_get_mctrl, + .startup = mxs_auart_startup, + .shutdown = mxs_auart_shutdown, + .set_termios = mxs_auart_settermios, + .type = mxs_auart_type, + .release_port = mxs_auart_release_port, + .request_port = mxs_auart_request_port, + .config_port = mxs_auart_config_port, + .verify_port = mxs_auart_verify_port, +}; + +static struct mxs_auart_port *auart_port[MXS_AUART_PORTS]; + +#ifdef CONFIG_SERIAL_MXS_AUART_CONSOLE +static void mxs_auart_console_putchar(struct uart_port *port, int ch) +{ + unsigned int to = 1000; + + while (readl(port->membase + AUART_STAT) & AUART_STAT_TXFF) { + if (!to--) + break; + udelay(1); + } + + writel(ch, port->membase + AUART_DATA); +} + +static void +auart_console_write(struct console *co, const char *str, unsigned int count) +{ + struct mxs_auart_port *s; + struct uart_port *port; + unsigned int old_ctrl0, old_ctrl2; + unsigned int to = 1000; + + if (co->index > MXS_AUART_PORTS || co->index < 0) + return; + + s = auart_port[co->index]; + port = &s->port; + + clk_enable(s->clk); + + /* First save the CR then disable the interrupts */ + old_ctrl2 = readl(port->membase + AUART_CTRL2); + old_ctrl0 = readl(port->membase + AUART_CTRL0); + + writel(AUART_CTRL0_CLKGATE, + port->membase + AUART_CTRL0_CLR); + writel(AUART_CTRL2_UARTEN | AUART_CTRL2_TXE, + port->membase + AUART_CTRL2_SET); + + uart_console_write(port, str, count, mxs_auart_console_putchar); + + /* + * Finally, wait for transmitter to become empty + * and restore the TCR + */ + while (readl(port->membase + AUART_STAT) & AUART_STAT_BUSY) { + if (!to--) + break; + udelay(1); + } + + writel(old_ctrl0, port->membase + AUART_CTRL0); + writel(old_ctrl2, port->membase + AUART_CTRL2); + + clk_disable(s->clk); +} + +static void __init +auart_console_get_options(struct uart_port *port, int *baud, + int *parity, int *bits) +{ + unsigned int lcr_h, quot; + + if 
(!(readl(port->membase + AUART_CTRL2) & AUART_CTRL2_UARTEN)) + return; + + lcr_h = readl(port->membase + AUART_LINECTRL); + + *parity = 'n'; + if (lcr_h & AUART_LINECTRL_PEN) { + if (lcr_h & AUART_LINECTRL_EPS) + *parity = 'e'; + else + *parity = 'o'; + } + + if ((lcr_h & AUART_LINECTRL_WLEN_MASK) == AUART_LINECTRL_WLEN(2)) + *bits = 7; + else + *bits = 8; + + quot = ((readl(port->membase + AUART_LINECTRL) + & AUART_LINECTRL_BAUD_DIVINT_MASK)) + >> (AUART_LINECTRL_BAUD_DIVINT_SHIFT - 6); + quot |= ((readl(port->membase + AUART_LINECTRL) + & AUART_LINECTRL_BAUD_DIVFRAC_MASK)) + >> AUART_LINECTRL_BAUD_DIVFRAC_SHIFT; + if (quot == 0) + quot = 1; + + *baud = (port->uartclk << 2) / quot; +} + +static int __init +auart_console_setup(struct console *co, char *options) +{ + struct mxs_auart_port *s; + int baud = 9600; + int bits = 8; + int parity = 'n'; + int flow = 'n'; + int ret; + + /* + * Check whether an invalid uart number has been specified, and + * if so, search for the first available port that does have + * console support. + */ + if (co->index == -1 || co->index >= ARRAY_SIZE(auart_port)) + co->index = 0; + s = auart_port[co->index]; + if (!s) + return -ENODEV; + + clk_enable(s->clk); + + if (options) + uart_parse_options(options, &baud, &parity, &bits, &flow); + else + auart_console_get_options(&s->port, &baud, &parity, &bits); + + ret = uart_set_options(&s->port, co, baud, parity, bits, flow); + + clk_disable(s->clk); + + return ret; +} + +static struct console auart_console = { + .name = "ttyAPP", + .write = auart_console_write, + .device = uart_console_device, + .setup = auart_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, + .data = &auart_driver, +}; +#endif + +static struct uart_driver auart_driver = { + .owner = THIS_MODULE, + .driver_name = "ttyAPP", + .dev_name = "ttyAPP", + .major = 0, + .minor = 0, + .nr = MXS_AUART_PORTS, +#ifdef CONFIG_SERIAL_MXS_AUART_CONSOLE + .cons = &auart_console, +#endif +}; + +static int __devinit mxs_auart_probe(struct platform_device *pdev) +{ + struct mxs_auart_port *s; + u32 version; + int ret = 0; + struct resource *r; + + s = kzalloc(sizeof(struct mxs_auart_port), GFP_KERNEL); + if (!s) { + ret = -ENOMEM; + goto out; + } + + s->clk = clk_get(&pdev->dev, NULL); + if (IS_ERR(s->clk)) { + ret = PTR_ERR(s->clk); + goto out_free; + } + + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!r) { + ret = -ENXIO; + goto out_free_clk; + } + + s->port.mapbase = r->start; + s->port.membase = ioremap(r->start, resource_size(r)); + s->port.ops = &mxs_auart_ops; + s->port.iotype = UPIO_MEM; + s->port.line = pdev->id < 0 ? 
0 : pdev->id; + s->port.fifosize = 16; + s->port.uartclk = clk_get_rate(s->clk); + s->port.type = PORT_IMX; + s->port.dev = s->dev = get_device(&pdev->dev); + + s->flags = 0; + s->ctrl = 0; + + s->irq = platform_get_irq(pdev, 0); + s->port.irq = s->irq; + ret = request_irq(s->irq, mxs_auart_irq_handle, 0, dev_name(&pdev->dev), s); + if (ret) + goto out_free_clk; + + platform_set_drvdata(pdev, s); + + auart_port[pdev->id] = s; + + mxs_auart_reset(&s->port); + + ret = uart_add_one_port(&auart_driver, &s->port); + if (ret) + goto out_free_irq; + + version = readl(s->port.membase + AUART_VERSION); + dev_info(&pdev->dev, "Found APPUART %d.%d.%d\n", + (version >> 24) & 0xff, + (version >> 16) & 0xff, version & 0xffff); + + return 0; + +out_free_irq: + auart_port[pdev->id] = NULL; + free_irq(s->irq, s); +out_free_clk: + clk_put(s->clk); +out_free: + kfree(s); +out: + return ret; +} + +static int __devexit mxs_auart_remove(struct platform_device *pdev) +{ + struct mxs_auart_port *s = platform_get_drvdata(pdev); + + uart_remove_one_port(&auart_driver, &s->port); + + auart_port[pdev->id] = NULL; + + clk_put(s->clk); + free_irq(s->irq, s); + kfree(s); + + return 0; +} + +static struct platform_driver mxs_auart_driver = { + .probe = mxs_auart_probe, + .remove = __devexit_p(mxs_auart_remove), + .driver = { + .name = "mxs-auart", + .owner = THIS_MODULE, + }, +}; + +static int __init mxs_auart_init(void) +{ + int r; + + r = uart_register_driver(&auart_driver); + if (r) + goto out; + + r = platform_driver_register(&mxs_auart_driver); + if (r) + goto out_err; + + return 0; +out_err: + uart_unregister_driver(&auart_driver); +out: + return r; +} + +static void __exit mxs_auart_exit(void) +{ + platform_driver_unregister(&mxs_auart_driver); + uart_unregister_driver(&auart_driver); +} + +module_init(mxs_auart_init); +module_exit(mxs_auart_exit); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Freescale MXS application uart driver"); diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c index a9ad7f33526d..26403b8e4b9b 100644 --- a/drivers/tty/serial/pch_uart.c +++ b/drivers/tty/serial/pch_uart.c @@ -15,7 +15,6 @@ *Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. 
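[Editor's note] mxs_auart_init() above follows the usual two-step registration: the uart_driver, which owns the tty name space, must exist before any platform probe calls uart_add_one_port(), so it is registered first and torn down in reverse on failure. A trimmed restatement of that ordering, with the reasoning as comments:

	static int __init mxs_auart_init(void)
	{
		int r;

		r = uart_register_driver(&auart_driver);	/* tty layer first */
		if (r)
			return r;

		/* only now may probe() run and call uart_add_one_port() */
		r = platform_driver_register(&mxs_auart_driver);
		if (r)
			uart_unregister_driver(&auart_driver);	/* unwind in reverse */
		return r;
	}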
*/ #include <linux/serial_reg.h> -#include <linux/pci.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/serial_core.h> diff --git a/drivers/tty/serial/ucc_uart.c b/drivers/tty/serial/ucc_uart.c index ff51dae1df0c..c327218cad44 100644 --- a/drivers/tty/serial/ucc_uart.c +++ b/drivers/tty/serial/ucc_uart.c @@ -1269,13 +1269,12 @@ static int ucc_uart_probe(struct platform_device *ofdev) ret = of_address_to_resource(np, 0, &res); if (ret) { dev_err(&ofdev->dev, "missing 'reg' property in device tree\n"); - kfree(qe_port); - return ret; + goto out_free; } if (!res.start) { dev_err(&ofdev->dev, "invalid 'reg' property in device tree\n"); - kfree(qe_port); - return -EINVAL; + ret = -EINVAL; + goto out_free; } qe_port->port.mapbase = res.start; @@ -1285,17 +1284,17 @@ static int ucc_uart_probe(struct platform_device *ofdev) if (!iprop) { iprop = of_get_property(np, "device-id", NULL); if (!iprop) { - kfree(qe_port); dev_err(&ofdev->dev, "UCC is unspecified in " "device tree\n"); - return -EINVAL; + ret = -EINVAL; + goto out_free; } } if ((*iprop < 1) || (*iprop > UCC_MAX_NUM)) { dev_err(&ofdev->dev, "no support for UCC%u\n", *iprop); - kfree(qe_port); - return -ENODEV; + ret = -ENODEV; + goto out_free; } qe_port->ucc_num = *iprop - 1; @@ -1309,16 +1308,16 @@ static int ucc_uart_probe(struct platform_device *ofdev) sprop = of_get_property(np, "rx-clock-name", NULL); if (!sprop) { dev_err(&ofdev->dev, "missing rx-clock-name in device tree\n"); - kfree(qe_port); - return -ENODEV; + ret = -ENODEV; + goto out_free; } qe_port->us_info.rx_clock = qe_clock_source(sprop); if ((qe_port->us_info.rx_clock < QE_BRG1) || (qe_port->us_info.rx_clock > QE_BRG16)) { dev_err(&ofdev->dev, "rx-clock-name must be a BRG for UART\n"); - kfree(qe_port); - return -ENODEV; + ret = -ENODEV; + goto out_free; } #ifdef LOOPBACK @@ -1328,39 +1327,39 @@ static int ucc_uart_probe(struct platform_device *ofdev) sprop = of_get_property(np, "tx-clock-name", NULL); if (!sprop) { dev_err(&ofdev->dev, "missing tx-clock-name in device tree\n"); - kfree(qe_port); - return -ENODEV; + ret = -ENODEV; + goto out_free; } qe_port->us_info.tx_clock = qe_clock_source(sprop); #endif if ((qe_port->us_info.tx_clock < QE_BRG1) || (qe_port->us_info.tx_clock > QE_BRG16)) { dev_err(&ofdev->dev, "tx-clock-name must be a BRG for UART\n"); - kfree(qe_port); - return -ENODEV; + ret = -ENODEV; + goto out_free; } /* Get the port number, numbered 0-3 */ iprop = of_get_property(np, "port-number", NULL); if (!iprop) { dev_err(&ofdev->dev, "missing port-number in device tree\n"); - kfree(qe_port); - return -EINVAL; + ret = -EINVAL; + goto out_free; } qe_port->port.line = *iprop; if (qe_port->port.line >= UCC_MAX_UART) { dev_err(&ofdev->dev, "port-number must be 0-%u\n", UCC_MAX_UART - 1); - kfree(qe_port); - return -EINVAL; + ret = -EINVAL; + goto out_free; } qe_port->port.irq = irq_of_parse_and_map(np, 0); if (qe_port->port.irq == NO_IRQ) { dev_err(&ofdev->dev, "could not map IRQ for UCC%u\n", qe_port->ucc_num + 1); - kfree(qe_port); - return -EINVAL; + ret = -EINVAL; + goto out_free; } /* @@ -1372,8 +1371,8 @@ static int ucc_uart_probe(struct platform_device *ofdev) np = of_find_node_by_type(NULL, "qe"); if (!np) { dev_err(&ofdev->dev, "could not find 'qe' node\n"); - kfree(qe_port); - return -EINVAL; + ret = -EINVAL; + goto out_free; } } @@ -1381,8 +1380,8 @@ static int ucc_uart_probe(struct platform_device *ofdev) if (!iprop) { dev_err(&ofdev->dev, "missing brg-frequency in device tree\n"); - kfree(qe_port); - return -EINVAL; + ret = 
-EINVAL; + goto out_np; } if (*iprop) @@ -1397,16 +1396,16 @@ static int ucc_uart_probe(struct platform_device *ofdev) if (!iprop) { dev_err(&ofdev->dev, "missing QE bus-frequency in device tree\n"); - kfree(qe_port); - return -EINVAL; + ret = -EINVAL; + goto out_np; } if (*iprop) qe_port->port.uartclk = *iprop / 2; else { dev_err(&ofdev->dev, "invalid QE bus-frequency in device tree\n"); - kfree(qe_port); - return -EINVAL; + ret = -EINVAL; + goto out_np; } } @@ -1444,8 +1443,7 @@ static int ucc_uart_probe(struct platform_device *ofdev) if (ret) { dev_err(&ofdev->dev, "could not add /dev/ttyQE%u\n", qe_port->port.line); - kfree(qe_port); - return ret; + goto out_np; } dev_set_drvdata(&ofdev->dev, qe_port); @@ -1459,6 +1457,11 @@ static int ucc_uart_probe(struct platform_device *ofdev) SERIAL_QE_MINOR + qe_port->port.line); return 0; +out_np: + of_node_put(np); +out_free: + kfree(qe_port); + return ret; } static int ucc_uart_remove(struct platform_device *ofdev) diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c index c8e360d7d975..25c8c10bb689 100644 --- a/drivers/usb/host/ehci-mxc.c +++ b/drivers/usb/host/ehci-mxc.c @@ -203,11 +203,6 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev) mdelay(10); } - /* setup specific usb hw */ - ret = mxc_initialize_usb_hw(pdev->id, pdata->flags); - if (ret < 0) - goto err_init; - ehci = hcd_to_ehci(hcd); /* EHCI registers start at offset 0x100 */ diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index a914010d9d12..630ae7f3cd4c 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c @@ -1530,7 +1530,7 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb) /*-------------------------------------------------------------------------*/ -#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430) || \ +#if defined(CONFIG_SOC_OMAP2430) || defined(CONFIG_SOC_OMAP3430) || \ defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_ARCH_U8500) || \ defined(CONFIG_ARCH_U5500) diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h index 4f0dd2ed3964..4bd9e2145ee4 100644 --- a/drivers/usb/musb/musb_core.h +++ b/drivers/usb/musb/musb_core.h @@ -212,8 +212,8 @@ enum musb_g_ep0_state { * directly with the "flat" model, or after setting up an index register. */ -#if defined(CONFIG_ARCH_DAVINCI) || defined(CONFIG_ARCH_OMAP2430) \ - || defined(CONFIG_ARCH_OMAP3430) || defined(CONFIG_BLACKFIN) \ +#if defined(CONFIG_ARCH_DAVINCI) || defined(CONFIG_SOC_OMAP2430) \ + || defined(CONFIG_SOC_OMAP3430) || defined(CONFIG_BLACKFIN) \ || defined(CONFIG_ARCH_OMAP4) /* REVISIT indexed access seemed to * misbehave (on DaVinci) for at least peripheral IN ... 
diff --git a/drivers/usb/musb/musbhsdma.h b/drivers/usb/musb/musbhsdma.h index 21056c924c74..320fd4afb93f 100644 --- a/drivers/usb/musb/musbhsdma.h +++ b/drivers/usb/musb/musbhsdma.h @@ -31,7 +31,7 @@ * */ -#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430) +#if defined(CONFIG_SOC_OMAP2430) || defined(CONFIG_SOC_OMAP3430) #include "omap2430.h" #endif diff --git a/drivers/usb/otg/isp1301_omap.c b/drivers/usb/otg/isp1301_omap.c index e00fa1b22ecd..8c6fdef61d1c 100644 --- a/drivers/usb/otg/isp1301_omap.c +++ b/drivers/usb/otg/isp1301_omap.c @@ -1510,7 +1510,7 @@ isp1301_start_hnp(struct otg_transceiver *dev) /*-------------------------------------------------------------------------*/ -static int __init +static int __devinit isp1301_probe(struct i2c_client *i2c, const struct i2c_device_id *id) { int status; diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig index 2a753f1e9183..e6a8d8c0101d 100644 --- a/drivers/video/Kconfig +++ b/drivers/video/Kconfig @@ -376,6 +376,24 @@ config FB_CYBER2000 Say Y if you have a NetWinder or a graphics card containing this device, otherwise say N. +config FB_CYBER2000_DDC + bool "DDC for CyberPro support" + depends on FB_CYBER2000 + select FB_DDC + default y + help + Say Y here if you want DDC support for your CyberPro graphics + card. This is only I2C bus support, driver does not use EDID. + +config FB_CYBER2000_I2C + bool "CyberPro 2000/2010/5000 I2C support" + depends on FB_CYBER2000 && I2C && ARCH_NETWINDER + select I2C_ALGOBIT + help + Enable support for the I2C video decoder interface on the + Integraphics CyberPro 20x0 and 5000 VGA chips. This is used + on the Netwinder machines for the SAA7111 video capture. + config FB_APOLLO bool depends on (FB = y) && APOLLO @@ -2303,6 +2321,15 @@ config FB_JZ4740 help Framebuffer support for the JZ4740 SoC. +config FB_MXS + tristate "MXS LCD framebuffer support" + depends on FB && ARCH_MXS + select FB_CFB_FILLRECT + select FB_CFB_COPYAREA + select FB_CFB_IMAGEBLIT + help + Framebuffer support for the MXS SoC. 
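[Editor's note] The three select statements in the new FB_MXS entry pull in the generic software drawing helpers. A driver that relies on them simply points its fb_ops at the cfb_* routines; the exact ops table of mxsfb is not quoted in these hunks, so the following is the conventional wiring, not a quote from the patch:

	static struct fb_ops mxsfb_ops = {
		.owner		= THIS_MODULE,
		.fb_check_var	= mxsfb_check_var,
		.fb_set_par	= mxsfb_set_par,
		.fb_fillrect	= cfb_fillrect,		/* from FB_CFB_FILLRECT */
		.fb_copyarea	= cfb_copyarea,		/* from FB_CFB_COPYAREA */
		.fb_imageblit	= cfb_imageblit,	/* from FB_CFB_IMAGEBLIT */
	};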
+ config FB_PUV3_UNIGFX tristate "PKUnity v3 Unigfx framebuffer support" depends on FB && UNICORE32 && ARCH_PUV3 diff --git a/drivers/video/Makefile b/drivers/video/Makefile index b0eb3da24670..2ea44b6625fe 100644 --- a/drivers/video/Makefile +++ b/drivers/video/Makefile @@ -154,6 +154,7 @@ obj-$(CONFIG_FB_BFIN_T350MCQB) += bfin-t350mcqb-fb.o obj-$(CONFIG_FB_BFIN_7393) += bfin_adv7393fb.o obj-$(CONFIG_FB_MX3) += mx3fb.o obj-$(CONFIG_FB_DA8XX) += da8xx-fb.o +obj-$(CONFIG_FB_MXS) += mxsfb.o # the test framebuffer is last obj-$(CONFIG_FB_VIRTUAL) += vfb.o diff --git a/drivers/video/amba-clcd.c b/drivers/video/amba-clcd.c index 1c2c68356ea7..013c8ce57205 100644 --- a/drivers/video/amba-clcd.c +++ b/drivers/video/amba-clcd.c @@ -461,7 +461,7 @@ static int clcdfb_register(struct clcd_fb *fb) return ret; } -static int clcdfb_probe(struct amba_device *dev, struct amba_id *id) +static int clcdfb_probe(struct amba_device *dev, const struct amba_id *id) { struct clcd_board *board = dev->dev.platform_data; struct clcd_fb *fb; diff --git a/drivers/video/cyber2000fb.c b/drivers/video/cyber2000fb.c index 0c1afd13ddd3..850380795b05 100644 --- a/drivers/video/cyber2000fb.c +++ b/drivers/video/cyber2000fb.c @@ -47,6 +47,8 @@ #include <linux/pci.h> #include <linux/init.h> #include <linux/io.h> +#include <linux/i2c.h> +#include <linux/i2c-algo-bit.h> #include <asm/pgtable.h> #include <asm/system.h> @@ -61,10 +63,10 @@ struct cfb_info { struct fb_info fb; struct display_switch *dispsw; struct display *display; - struct pci_dev *dev; unsigned char __iomem *region; unsigned char __iomem *regs; u_int id; + u_int irq; int func_use_count; u_long ref_ps; @@ -88,6 +90,19 @@ struct cfb_info { u_char ramdac_powerdown; u32 pseudo_palette[16]; + + spinlock_t reg_b0_lock; + +#ifdef CONFIG_FB_CYBER2000_DDC + bool ddc_registered; + struct i2c_adapter ddc_adapter; + struct i2c_algo_bit_data ddc_algo; +#endif + +#ifdef CONFIG_FB_CYBER2000_I2C + struct i2c_adapter i2c_adapter; + struct i2c_algo_bit_data i2c_algo; +#endif }; static char *default_font = "Acorn8x8"; @@ -494,6 +509,7 @@ static void cyber2000fb_set_timing(struct cfb_info *cfb, struct par_info *hw) cyber2000_attrw(0x14, 0x00, cfb); /* PLL registers */ + spin_lock(&cfb->reg_b0_lock); cyber2000_grphw(EXT_DCLK_MULT, hw->clock_mult, cfb); cyber2000_grphw(EXT_DCLK_DIV, hw->clock_div, cfb); cyber2000_grphw(EXT_MCLK_MULT, cfb->mclk_mult, cfb); @@ -501,6 +517,7 @@ static void cyber2000fb_set_timing(struct cfb_info *cfb, struct par_info *hw) cyber2000_grphw(0x90, 0x01, cfb); cyber2000_grphw(0xb9, 0x80, cfb); cyber2000_grphw(0xb9, 0x00, cfb); + spin_unlock(&cfb->reg_b0_lock); cfb->ramdac_ctrl = hw->ramdac; cyber2000fb_write_ramdac_ctrl(cfb); @@ -681,9 +698,9 @@ cyber2000fb_decode_clock(struct par_info *hw, struct cfb_info *cfb, * pll_ps_calc = best_div1 / (ref_ps * best_mult) */ best_diff = 0x7fffffff; - best_mult = 32; - best_div1 = 255; - for (t_div1 = 32; t_div1 > 1; t_div1 -= 1) { + best_mult = 2; + best_div1 = 32; + for (t_div1 = 2; t_div1 < 32; t_div1 += 1) { u_int rr, t_mult, t_pll_ps; int diff; @@ -1105,24 +1122,22 @@ void cyber2000fb_disable_extregs(struct cfb_info *cfb) } EXPORT_SYMBOL(cyber2000fb_disable_extregs); -void cyber2000fb_get_fb_var(struct cfb_info *cfb, struct fb_var_screeninfo *var) -{ - memcpy(var, &cfb->fb.var, sizeof(struct fb_var_screeninfo)); -} -EXPORT_SYMBOL(cyber2000fb_get_fb_var); - /* * Attach a capture/tv driver to the core CyberX0X0 driver. 
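[Editor's note] The reg_b0_lock introduced above exists because mode setting and the DDC/I2C bit-banging added further down all funnel through the same banked extended-register window at 0x3ce. Any touch of those registers is now bracketed by the spinlock, as in the PLL hunk of cyber2000fb_set_timing():

	/* sketch: every access to the banked EXT_* registers takes the lock */
	spin_lock(&cfb->reg_b0_lock);
	cyber2000_grphw(EXT_DCLK_MULT, hw->clock_mult, cfb);
	cyber2000_grphw(EXT_DCLK_DIV, hw->clock_div, cfb);
	spin_unlock(&cfb->reg_b0_lock);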
*/ int cyber2000fb_attach(struct cyberpro_info *info, int idx) { if (int_cfb_info != NULL) { - info->dev = int_cfb_info->dev; + info->dev = int_cfb_info->fb.device; +#ifdef CONFIG_FB_CYBER2000_I2C + info->i2c = &int_cfb_info->i2c_adapter; +#else + info->i2c = NULL; +#endif info->regs = int_cfb_info->regs; + info->irq = int_cfb_info->irq; info->fb = int_cfb_info->fb.screen_base; info->fb_size = int_cfb_info->fb.fix.smem_len; - info->enable_extregs = cyber2000fb_enable_extregs; - info->disable_extregs = cyber2000fb_disable_extregs; info->info = int_cfb_info; strlcpy(info->dev_name, int_cfb_info->fb.fix.id, @@ -1141,6 +1156,183 @@ void cyber2000fb_detach(int idx) } EXPORT_SYMBOL(cyber2000fb_detach); +#ifdef CONFIG_FB_CYBER2000_DDC + +#define DDC_REG 0xb0 +#define DDC_SCL_OUT (1 << 0) +#define DDC_SDA_OUT (1 << 4) +#define DDC_SCL_IN (1 << 2) +#define DDC_SDA_IN (1 << 6) + +static void cyber2000fb_enable_ddc(struct cfb_info *cfb) +{ + spin_lock(&cfb->reg_b0_lock); + cyber2000fb_writew(0x1bf, 0x3ce, cfb); +} + +static void cyber2000fb_disable_ddc(struct cfb_info *cfb) +{ + cyber2000fb_writew(0x0bf, 0x3ce, cfb); + spin_unlock(&cfb->reg_b0_lock); +} + + +static void cyber2000fb_ddc_setscl(void *data, int val) +{ + struct cfb_info *cfb = data; + unsigned char reg; + + cyber2000fb_enable_ddc(cfb); + reg = cyber2000_grphr(DDC_REG, cfb); + if (!val) /* bit is inverted */ + reg |= DDC_SCL_OUT; + else + reg &= ~DDC_SCL_OUT; + cyber2000_grphw(DDC_REG, reg, cfb); + cyber2000fb_disable_ddc(cfb); +} + +static void cyber2000fb_ddc_setsda(void *data, int val) +{ + struct cfb_info *cfb = data; + unsigned char reg; + + cyber2000fb_enable_ddc(cfb); + reg = cyber2000_grphr(DDC_REG, cfb); + if (!val) /* bit is inverted */ + reg |= DDC_SDA_OUT; + else + reg &= ~DDC_SDA_OUT; + cyber2000_grphw(DDC_REG, reg, cfb); + cyber2000fb_disable_ddc(cfb); +} + +static int cyber2000fb_ddc_getscl(void *data) +{ + struct cfb_info *cfb = data; + int retval; + + cyber2000fb_enable_ddc(cfb); + retval = !!(cyber2000_grphr(DDC_REG, cfb) & DDC_SCL_IN); + cyber2000fb_disable_ddc(cfb); + + return retval; +} + +static int cyber2000fb_ddc_getsda(void *data) +{ + struct cfb_info *cfb = data; + int retval; + + cyber2000fb_enable_ddc(cfb); + retval = !!(cyber2000_grphr(DDC_REG, cfb) & DDC_SDA_IN); + cyber2000fb_disable_ddc(cfb); + + return retval; +} + +static int __devinit cyber2000fb_setup_ddc_bus(struct cfb_info *cfb) +{ + strlcpy(cfb->ddc_adapter.name, cfb->fb.fix.id, + sizeof(cfb->ddc_adapter.name)); + cfb->ddc_adapter.owner = THIS_MODULE; + cfb->ddc_adapter.class = I2C_CLASS_DDC; + cfb->ddc_adapter.algo_data = &cfb->ddc_algo; + cfb->ddc_adapter.dev.parent = cfb->fb.device; + cfb->ddc_algo.setsda = cyber2000fb_ddc_setsda; + cfb->ddc_algo.setscl = cyber2000fb_ddc_setscl; + cfb->ddc_algo.getsda = cyber2000fb_ddc_getsda; + cfb->ddc_algo.getscl = cyber2000fb_ddc_getscl; + cfb->ddc_algo.udelay = 10; + cfb->ddc_algo.timeout = 20; + cfb->ddc_algo.data = cfb; + + i2c_set_adapdata(&cfb->ddc_adapter, cfb); + + return i2c_bit_add_bus(&cfb->ddc_adapter); +} +#endif /* CONFIG_FB_CYBER2000_DDC */ + +#ifdef CONFIG_FB_CYBER2000_I2C +static void cyber2000fb_i2c_setsda(void *data, int state) +{ + struct cfb_info *cfb = data; + unsigned int latch2; + + spin_lock(&cfb->reg_b0_lock); + latch2 = cyber2000_grphr(EXT_LATCH2, cfb); + latch2 &= EXT_LATCH2_I2C_CLKEN; + if (state) + latch2 |= EXT_LATCH2_I2C_DATEN; + cyber2000_grphw(EXT_LATCH2, latch2, cfb); + spin_unlock(&cfb->reg_b0_lock); +} + +static void cyber2000fb_i2c_setscl(void *data, int state) +{ + struct 
cfb_info *cfb = data; + unsigned int latch2; + + spin_lock(&cfb->reg_b0_lock); + latch2 = cyber2000_grphr(EXT_LATCH2, cfb); + latch2 &= EXT_LATCH2_I2C_DATEN; + if (state) + latch2 |= EXT_LATCH2_I2C_CLKEN; + cyber2000_grphw(EXT_LATCH2, latch2, cfb); + spin_unlock(&cfb->reg_b0_lock); +} + +static int cyber2000fb_i2c_getsda(void *data) +{ + struct cfb_info *cfb = data; + int ret; + + spin_lock(&cfb->reg_b0_lock); + ret = !!(cyber2000_grphr(EXT_LATCH2, cfb) & EXT_LATCH2_I2C_DAT); + spin_unlock(&cfb->reg_b0_lock); + + return ret; +} + +static int cyber2000fb_i2c_getscl(void *data) +{ + struct cfb_info *cfb = data; + int ret; + + spin_lock(&cfb->reg_b0_lock); + ret = !!(cyber2000_grphr(EXT_LATCH2, cfb) & EXT_LATCH2_I2C_CLK); + spin_unlock(&cfb->reg_b0_lock); + + return ret; +} + +static int __devinit cyber2000fb_i2c_register(struct cfb_info *cfb) +{ + strlcpy(cfb->i2c_adapter.name, cfb->fb.fix.id, + sizeof(cfb->i2c_adapter.name)); + cfb->i2c_adapter.owner = THIS_MODULE; + cfb->i2c_adapter.algo_data = &cfb->i2c_algo; + cfb->i2c_adapter.dev.parent = cfb->fb.device; + cfb->i2c_algo.setsda = cyber2000fb_i2c_setsda; + cfb->i2c_algo.setscl = cyber2000fb_i2c_setscl; + cfb->i2c_algo.getsda = cyber2000fb_i2c_getsda; + cfb->i2c_algo.getscl = cyber2000fb_i2c_getscl; + cfb->i2c_algo.udelay = 5; + cfb->i2c_algo.timeout = msecs_to_jiffies(100); + cfb->i2c_algo.data = cfb; + + return i2c_bit_add_bus(&cfb->i2c_adapter); +} + +static void cyber2000fb_i2c_unregister(struct cfb_info *cfb) +{ + i2c_del_adapter(&cfb->i2c_adapter); +} +#else +#define cyber2000fb_i2c_register(cfb) (0) +#define cyber2000fb_i2c_unregister(cfb) do { } while (0) +#endif + /* * These parameters give * 640x480, hsync 31.5kHz, vsync 60Hz @@ -1275,6 +1467,8 @@ static struct cfb_info __devinit *cyberpro_alloc_fb_info(unsigned int id, cfb->fb.flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN; cfb->fb.pseudo_palette = cfb->pseudo_palette; + spin_lock_init(&cfb->reg_b0_lock); + fb_alloc_cmap(&cfb->fb.cmap, NR_PALETTE, 0); return cfb; @@ -1369,6 +1563,11 @@ static int __devinit cyberpro_common_probe(struct cfb_info *cfb) cfb->fb.fix.mmio_len = MMIO_SIZE; cfb->fb.screen_base = cfb->region; +#ifdef CONFIG_FB_CYBER2000_DDC + if (cyber2000fb_setup_ddc_bus(cfb) == 0) + cfb->ddc_registered = true; +#endif + err = -EINVAL; if (!fb_find_mode(&cfb->fb.var, &cfb->fb, NULL, NULL, 0, &cyber2000fb_default_mode, 8)) { @@ -1401,14 +1600,32 @@ static int __devinit cyberpro_common_probe(struct cfb_info *cfb) cfb->fb.var.xres, cfb->fb.var.yres, h_sync / 1000, h_sync % 1000, v_sync); - if (cfb->dev) - cfb->fb.device = &cfb->dev->dev; + err = cyber2000fb_i2c_register(cfb); + if (err) + goto failed; + err = register_framebuffer(&cfb->fb); + if (err) + cyber2000fb_i2c_unregister(cfb); failed: +#ifdef CONFIG_FB_CYBER2000_DDC + if (err && cfb->ddc_registered) + i2c_del_adapter(&cfb->ddc_adapter); +#endif return err; } +static void __devexit cyberpro_common_remove(struct cfb_info *cfb) +{ + unregister_framebuffer(&cfb->fb); +#ifdef CONFIG_FB_CYBER2000_DDC + if (cfb->ddc_registered) + i2c_del_adapter(&cfb->ddc_adapter); +#endif + cyber2000fb_i2c_unregister(cfb); +} + static void cyberpro_common_resume(struct cfb_info *cfb) { cyberpro_init_hw(cfb); @@ -1442,12 +1659,13 @@ static int __devinit cyberpro_vl_probe(void) if (!cfb) goto failed_release; - cfb->dev = NULL; + cfb->irq = -1; cfb->region = ioremap(FB_START, FB_SIZE); if (!cfb->region) goto failed_ioremap; cfb->regs = cfb->region + MMIO_OFFSET; + cfb->fb.device = NULL; cfb->fb.fix.mmio_start = FB_START + MMIO_OFFSET; 
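[Editor's note] Both adapters registered above lean on i2c-algo-bit: the driver supplies only four line-level callbacks plus timing, and i2c_bit_add_bus() layers the whole I2C start/stop/byte engine on top. The generic shape of the two registration helpers, with the my_* callback names standing in for whatever the driver provides:

	static int register_bitbang_bus(struct i2c_adapter *adap,
					struct i2c_algo_bit_data *algo, void *data)
	{
		adap->owner = THIS_MODULE;
		adap->algo_data = algo;
		algo->setsda = my_setsda;	/* drive SDA: 0 pulls the line low */
		algo->setscl = my_setscl;	/* drive SCL */
		algo->getsda = my_getsda;	/* sample SDA */
		algo->getscl = my_getscl;	/* sample SCL */
		algo->udelay = 5;		/* half a clock period, in us */
		algo->timeout = msecs_to_jiffies(100);
		algo->data = data;		/* handed back to every callback */

		return i2c_bit_add_bus(adap);	/* algo-bit supplies the engine */
	}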
cfb->fb.fix.smem_start = FB_START; @@ -1585,12 +1803,13 @@ cyberpro_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) if (err) goto failed_regions; - cfb->dev = dev; + cfb->irq = dev->irq; cfb->region = pci_ioremap_bar(dev, 0); if (!cfb->region) goto failed_ioremap; cfb->regs = cfb->region + MMIO_OFFSET; + cfb->fb.device = &dev->dev; cfb->fb.fix.mmio_start = pci_resource_start(dev, 0) + MMIO_OFFSET; cfb->fb.fix.smem_start = pci_resource_start(dev, 0); @@ -1648,15 +1867,7 @@ static void __devexit cyberpro_pci_remove(struct pci_dev *dev) struct cfb_info *cfb = pci_get_drvdata(dev); if (cfb) { - /* - * If unregister_framebuffer fails, then - * we will be leaving hooks that could cause - * oopsen laying around. - */ - if (unregister_framebuffer(&cfb->fb)) - printk(KERN_WARNING "%s: danger Will Robinson, " - "danger danger! Oopsen imminent!\n", - cfb->fb.fix.id); + cyberpro_common_remove(cfb); iounmap(cfb->region); cyberpro_free_fb_info(cfb); diff --git a/drivers/video/cyber2000fb.h b/drivers/video/cyber2000fb.h index de4fc43e51c1..bad69102e774 100644 --- a/drivers/video/cyber2000fb.h +++ b/drivers/video/cyber2000fb.h @@ -464,12 +464,14 @@ static void debug_printf(char *fmt, ...) struct cfb_info; struct cyberpro_info { - struct pci_dev *dev; + struct device *dev; + struct i2c_adapter *i2c; unsigned char __iomem *regs; char __iomem *fb; char dev_name[32]; unsigned int fb_size; unsigned int chip_id; + unsigned int irq; /* * The following is a pointer to be passed into the @@ -478,15 +480,6 @@ struct cyberpro_info { * is within this structure. */ struct cfb_info *info; - - /* - * Use these to enable the BM or TV registers. In an SMP - * environment, these two function pointers should only be - * called from the module_init() or module_exit() - * functions. - */ - void (*enable_extregs)(struct cfb_info *); - void (*disable_extregs)(struct cfb_info *); }; #define ID_IGA_1682 0 @@ -494,8 +487,6 @@ struct cyberpro_info { #define ID_CYBERPRO_2010 2 #define ID_CYBERPRO_5000 3 -struct fb_var_screeninfo; - /* * Note! Writing to the Cyber20x0 registers from an interrupt * routine is definitely a bad idea atm. 
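[Editor's note] On the consumer side of cyber2000fb_attach(), the reworked cyberpro_info now hands over the I2C adapter and IRQ directly instead of the old enable/disable_extregs hooks. A hypothetical capture driver (all my_* names invented; the SAA7111 decoder and its address are only the NetWinder use case suggested by the Kconfig help) might consume it like this:

	static int my_capture_attach(struct cyberpro_info *info, int idx)
	{
		struct i2c_board_info bi = { I2C_BOARD_INFO("saa7111", 0x24) };
		struct i2c_client *client;

		if (!info->i2c)		/* NULL when FB_CYBER2000_I2C is not built */
			return -ENODEV;

		client = i2c_new_device(info->i2c, &bi);
		if (!client)
			return -ENODEV;

		return request_irq(info->irq, my_capture_isr, IRQF_SHARED,
				   info->dev_name, client);
	}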
@@ -504,4 +495,3 @@ int cyber2000fb_attach(struct cyberpro_info *info, int idx); void cyber2000fb_detach(int idx); void cyber2000fb_enable_extregs(struct cfb_info *cfb); void cyber2000fb_disable_extregs(struct cfb_info *cfb); -void cyber2000fb_get_fb_var(struct cfb_info *cfb, struct fb_var_screeninfo *var); diff --git a/drivers/video/msm/mdp_hw.h b/drivers/video/msm/mdp_hw.h index 4e3deb4e592b..d80477415caa 100644 --- a/drivers/video/msm/mdp_hw.h +++ b/drivers/video/msm/mdp_hw.h @@ -449,6 +449,7 @@ int mdp_ppp_blit(const struct mdp_info *mdp, struct mdp_blit_req *req, #define PPP_CFG_MDP_XRGB_8888(dir) PPP_CFG_MDP_ARGB_8888(dir) #define PPP_CFG_MDP_RGBA_8888(dir) PPP_CFG_MDP_ARGB_8888(dir) #define PPP_CFG_MDP_BGRA_8888(dir) PPP_CFG_MDP_ARGB_8888(dir) +#define PPP_CFG_MDP_RGBX_8888(dir) PPP_CFG_MDP_ARGB_8888(dir) #define PPP_CFG_MDP_Y_CBCR_H2V2(dir) (PPP_##dir##_C2R_8BIT | \ PPP_##dir##_C0G_8BIT | \ @@ -488,12 +489,14 @@ int mdp_ppp_blit(const struct mdp_info *mdp, struct mdp_blit_req *req, MDP_GET_PACK_PATTERN(0, CLR_R, CLR_G, CLR_B, 8) #define PPP_PACK_PATTERN_MDP_RGB_888 PPP_PACK_PATTERN_MDP_RGB_565 #define PPP_PACK_PATTERN_MDP_XRGB_8888 \ - MDP_GET_PACK_PATTERN(CLR_ALPHA, CLR_R, CLR_G, CLR_B, 8) + MDP_GET_PACK_PATTERN(CLR_B, CLR_G, CLR_R, CLR_ALPHA, 8) #define PPP_PACK_PATTERN_MDP_ARGB_8888 PPP_PACK_PATTERN_MDP_XRGB_8888 #define PPP_PACK_PATTERN_MDP_RGBA_8888 \ MDP_GET_PACK_PATTERN(CLR_ALPHA, CLR_B, CLR_G, CLR_R, 8) #define PPP_PACK_PATTERN_MDP_BGRA_8888 \ MDP_GET_PACK_PATTERN(CLR_ALPHA, CLR_R, CLR_G, CLR_B, 8) +#define PPP_PACK_PATTERN_MDP_RGBX_8888 \ + MDP_GET_PACK_PATTERN(CLR_ALPHA, CLR_B, CLR_G, CLR_R, 8) #define PPP_PACK_PATTERN_MDP_Y_CBCR_H2V1 \ MDP_GET_PACK_PATTERN(0, 0, CLR_CB, CLR_CR, 8) #define PPP_PACK_PATTERN_MDP_Y_CBCR_H2V2 PPP_PACK_PATTERN_MDP_Y_CBCR_H2V1 @@ -509,6 +512,7 @@ int mdp_ppp_blit(const struct mdp_info *mdp, struct mdp_blit_req *req, #define PPP_CHROMA_SAMP_MDP_ARGB_8888(dir) PPP_OP_##dir##_CHROMA_RGB #define PPP_CHROMA_SAMP_MDP_RGBA_8888(dir) PPP_OP_##dir##_CHROMA_RGB #define PPP_CHROMA_SAMP_MDP_BGRA_8888(dir) PPP_OP_##dir##_CHROMA_RGB +#define PPP_CHROMA_SAMP_MDP_RGBX_8888(dir) PPP_OP_##dir##_CHROMA_RGB #define PPP_CHROMA_SAMP_MDP_Y_CBCR_H2V1(dir) PPP_OP_##dir##_CHROMA_H2V1 #define PPP_CHROMA_SAMP_MDP_Y_CBCR_H2V2(dir) PPP_OP_##dir##_CHROMA_420 #define PPP_CHROMA_SAMP_MDP_Y_CRCB_H2V1(dir) PPP_OP_##dir##_CHROMA_H2V1 @@ -523,6 +527,7 @@ int mdp_ppp_blit(const struct mdp_info *mdp, struct mdp_blit_req *req, [MDP_ARGB_8888] = PPP_##name##_MDP_ARGB_8888,\ [MDP_RGBA_8888] = PPP_##name##_MDP_RGBA_8888,\ [MDP_BGRA_8888] = PPP_##name##_MDP_BGRA_8888,\ + [MDP_RGBX_8888] = PPP_##name##_MDP_RGBX_8888,\ [MDP_Y_CBCR_H2V1] = PPP_##name##_MDP_Y_CBCR_H2V1,\ [MDP_Y_CBCR_H2V2] = PPP_##name##_MDP_Y_CBCR_H2V2,\ [MDP_Y_CRCB_H2V1] = PPP_##name##_MDP_Y_CRCB_H2V1,\ @@ -536,6 +541,7 @@ int mdp_ppp_blit(const struct mdp_info *mdp, struct mdp_blit_req *req, [MDP_ARGB_8888] = PPP_##name##_MDP_ARGB_8888(dir),\ [MDP_RGBA_8888] = PPP_##name##_MDP_RGBA_8888(dir),\ [MDP_BGRA_8888] = PPP_##name##_MDP_BGRA_8888(dir),\ + [MDP_RGBX_8888] = PPP_##name##_MDP_RGBX_8888(dir),\ [MDP_Y_CBCR_H2V1] = PPP_##name##_MDP_Y_CBCR_H2V1(dir),\ [MDP_Y_CBCR_H2V2] = PPP_##name##_MDP_Y_CBCR_H2V2(dir),\ [MDP_Y_CRCB_H2V1] = PPP_##name##_MDP_Y_CRCB_H2V1(dir),\ @@ -547,7 +553,8 @@ int mdp_ppp_blit(const struct mdp_info *mdp, struct mdp_blit_req *req, (img == MDP_YCRYCB_H2V1)) #define IS_RGB(img) ((img == MDP_RGB_565) | (img == MDP_RGB_888) | \ (img == MDP_ARGB_8888) | (img == MDP_RGBA_8888) | \ - (img == MDP_XRGB_8888) | 
(img == MDP_BGRA_8888)) + (img == MDP_XRGB_8888) | (img == MDP_BGRA_8888) | \ + (img == MDP_RGBX_8888)) #define HAS_ALPHA(img) ((img == MDP_ARGB_8888) | (img == MDP_RGBA_8888) | \ (img == MDP_BGRA_8888)) diff --git a/drivers/video/msm/mdp_ppp.c b/drivers/video/msm/mdp_ppp.c index 4ff001f4cbbd..2b6564e8bfea 100644 --- a/drivers/video/msm/mdp_ppp.c +++ b/drivers/video/msm/mdp_ppp.c @@ -69,6 +69,7 @@ static uint32_t bytes_per_pixel[] = { [MDP_ARGB_8888] = 4, [MDP_RGBA_8888] = 4, [MDP_BGRA_8888] = 4, + [MDP_RGBX_8888] = 4, [MDP_Y_CBCR_H2V1] = 1, [MDP_Y_CBCR_H2V2] = 1, [MDP_Y_CRCB_H2V1] = 1, diff --git a/drivers/video/msm/msm_fb.c b/drivers/video/msm/msm_fb.c index debe5933fd2e..ec351309e607 100644 --- a/drivers/video/msm/msm_fb.c +++ b/drivers/video/msm/msm_fb.c @@ -81,7 +81,6 @@ struct msmfb_info { spinlock_t update_lock; struct mutex panel_init_lock; wait_queue_head_t frame_wq; - struct workqueue_struct *resume_workqueue; struct work_struct resume_work; struct msmfb_callback dma_callback; struct msmfb_callback vsync_callback; @@ -111,7 +110,7 @@ static void msmfb_handle_dma_interrupt(struct msmfb_callback *callback) if (msmfb->sleeping == UPDATING && msmfb->frame_done == msmfb->update_frame) { DLOG(SUSPEND_RESUME, "full update completed\n"); - queue_work(msmfb->resume_workqueue, &msmfb->resume_work); + schedule_work(&msmfb->resume_work); } spin_unlock_irqrestore(&msmfb->update_lock, irq_flags); wake_up(&msmfb->frame_wq); @@ -220,8 +219,8 @@ restart: sleeping = msmfb->sleeping; /* on a full update, if the last frame has not completed, wait for it */ - if (pan_display && (msmfb->frame_requested != msmfb->frame_done || - sleeping == UPDATING)) { + if ((pan_display && msmfb->frame_requested != msmfb->frame_done) || + sleeping == UPDATING) { int ret; spin_unlock_irqrestore(&msmfb->update_lock, irq_flags); ret = wait_event_interruptible_timeout(msmfb->frame_wq, @@ -470,6 +469,18 @@ static void setup_fb_info(struct msmfb_info *msmfb) fb_info->var.yoffset = 0; if (msmfb->panel->caps & MSMFB_CAP_PARTIAL_UPDATES) { + /* + * Set the param in the fixed screen, so userspace can't + * change it. This will be used to check for the + * capability. 
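[Editor's note] Moving the marker into fb_fix_screeninfo, which userspace cannot modify, gives clients a reliable probe for partial-update support, while var.reserved keeps carrying the update rectangle. A user-space check, assuming 0x5444/0x5055 remain the advertised magic values:

	#include <linux/fb.h>
	#include <sys/ioctl.h>

	static int msmfb_has_partial_updates(int fd)
	{
		struct fb_fix_screeninfo fix;

		if (ioctl(fd, FBIOGET_FSCREENINFO, &fix) < 0)
			return 0;
		/* magic written by setup_fb_info() above */
		return fix.reserved[0] == 0x5444 && fix.reserved[1] == 0x5055;
	}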
+ */ + fb_info->fix.reserved[0] = 0x5444; + fb_info->fix.reserved[1] = 0x5055; + + /* + * This preloads the value so that if userspace doesn't + * change it, it will be a full update + */ fb_info->var.reserved[0] = 0x54445055; fb_info->var.reserved[1] = 0; fb_info->var.reserved[2] = (uint16_t)msmfb->xres | @@ -559,12 +570,6 @@ static int msmfb_probe(struct platform_device *pdev) spin_lock_init(&msmfb->update_lock); mutex_init(&msmfb->panel_init_lock); init_waitqueue_head(&msmfb->frame_wq); - msmfb->resume_workqueue = create_workqueue("panel_on"); - if (msmfb->resume_workqueue == NULL) { - printk(KERN_ERR "failed to create panel_on workqueue\n"); - ret = -ENOMEM; - goto error_create_workqueue; - } INIT_WORK(&msmfb->resume_work, power_on_panel); msmfb->black = kzalloc(msmfb->fb->var.bits_per_pixel*msmfb->xres, GFP_KERNEL); @@ -589,8 +594,6 @@ static int msmfb_probe(struct platform_device *pdev) return 0; error_register_framebuffer: - destroy_workqueue(msmfb->resume_workqueue); -error_create_workqueue: iounmap(fb->screen_base); error_setup_fbmem: framebuffer_release(msmfb->fb); diff --git a/drivers/video/mxsfb.c b/drivers/video/mxsfb.c new file mode 100644 index 000000000000..7d0284882984 --- /dev/null +++ b/drivers/video/mxsfb.c @@ -0,0 +1,919 @@ +/* + * Copyright (C) 2010 Juergen Beisert, Pengutronix + * + * This code is based on: + * Author: Vitaly Wool <vital@embeddedalley.com> + * + * Copyright 2008-2009 Freescale Semiconductor, Inc. All Rights Reserved. + * Copyright 2008 Embedded Alley Solutions, Inc All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#define DRIVER_NAME "mxsfb" + +/** + * @file + * @brief LCDIF driver for i.MX23 and i.MX28 + * + * The LCDIF support four modes of operation + * - MPU interface (to drive smart displays) -> not supported yet + * - VSYNC interface (like MPU interface plus Vsync) -> not supported yet + * - Dotclock interface (to drive LC displays with RGB data and sync signals) + * - DVI (to drive ITU-R BT656) -> not supported yet + * + * This driver depends on a correct setup of the pins used for this purpose + * (platform specific). + * + * For the developer: Don't forget to set the data bus width to the display + * in the imx_fb_videomode structure. You will else end up with ugly colours. + * If you fight against jitter you can vary the clock delay. This is a feature + * of the i.MX28 and you can vary it between 2 ns ... 8 ns in 2 ns steps. Give + * the required value in the imx_fb_videomode structure. 
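[Editor's note] The bus-width and clock-delay knobs named in the comment above arrive through platform data rather than being probed. Assuming the mxsfb_platform_data layout from <mach/mxsfb.h> (mode_list, mode_count, default_bpp, ld_intf_width, dotclk_delay; the field names are taken on trust from the probe path, which is not quoted here), a board file would provide something like:

	static struct fb_videomode my_lcd_modes[] = {
		{
			.name = "my-wvga", .xres = 800, .yres = 480,
			.pixclock = 30000,	/* ps, roughly a 33 MHz dot clock */
			/* margins and sync lengths omitted for brevity */
		},
	};

	static struct mxsfb_platform_data my_fb_pdata = {
		.mode_list	= my_lcd_modes,
		.mode_count	= ARRAY_SIZE(my_lcd_modes),
		.default_bpp	= 32,
		.ld_intf_width	= STMLCDIF_18BIT, /* 24 bpp over 18 data lines */
		.dotclk_delay	= 2,		  /* ns; i.MX28 only, 2..8 in steps of 2 */
	};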
+ */ + +#include <linux/kernel.h> +#include <linux/platform_device.h> +#include <linux/clk.h> +#include <linux/dma-mapping.h> +#include <linux/io.h> +#include <mach/mxsfb.h> + +#define REG_SET 4 +#define REG_CLR 8 + +#define LCDC_CTRL 0x00 +#define LCDC_CTRL1 0x10 +#define LCDC_V4_CTRL2 0x20 +#define LCDC_V3_TRANSFER_COUNT 0x20 +#define LCDC_V4_TRANSFER_COUNT 0x30 +#define LCDC_V4_CUR_BUF 0x40 +#define LCDC_V4_NEXT_BUF 0x50 +#define LCDC_V3_CUR_BUF 0x30 +#define LCDC_V3_NEXT_BUF 0x40 +#define LCDC_TIMING 0x60 +#define LCDC_VDCTRL0 0x70 +#define LCDC_VDCTRL1 0x80 +#define LCDC_VDCTRL2 0x90 +#define LCDC_VDCTRL3 0xa0 +#define LCDC_VDCTRL4 0xb0 +#define LCDC_DVICTRL0 0xc0 +#define LCDC_DVICTRL1 0xd0 +#define LCDC_DVICTRL2 0xe0 +#define LCDC_DVICTRL3 0xf0 +#define LCDC_DVICTRL4 0x100 +#define LCDC_V4_DATA 0x180 +#define LCDC_V3_DATA 0x1b0 +#define LCDC_V4_DEBUG0 0x1d0 +#define LCDC_V3_DEBUG0 0x1f0 + +#define CTRL_SFTRST (1 << 31) +#define CTRL_CLKGATE (1 << 30) +#define CTRL_BYPASS_COUNT (1 << 19) +#define CTRL_VSYNC_MODE (1 << 18) +#define CTRL_DOTCLK_MODE (1 << 17) +#define CTRL_DATA_SELECT (1 << 16) +#define CTRL_SET_BUS_WIDTH(x) (((x) & 0x3) << 10) +#define CTRL_GET_BUS_WIDTH(x) (((x) >> 10) & 0x3) +#define CTRL_SET_WORD_LENGTH(x) (((x) & 0x3) << 8) +#define CTRL_GET_WORD_LENGTH(x) (((x) >> 8) & 0x3) +#define CTRL_MASTER (1 << 5) +#define CTRL_DF16 (1 << 3) +#define CTRL_DF18 (1 << 2) +#define CTRL_DF24 (1 << 1) +#define CTRL_RUN (1 << 0) + +#define CTRL1_FIFO_CLEAR (1 << 21) +#define CTRL1_SET_BYTE_PACKAGING(x) (((x) & 0xf) << 16) +#define CTRL1_GET_BYTE_PACKAGING(x) (((x) >> 16) & 0xf) + +#define TRANSFER_COUNT_SET_VCOUNT(x) (((x) & 0xffff) << 16) +#define TRANSFER_COUNT_GET_VCOUNT(x) (((x) >> 16) & 0xffff) +#define TRANSFER_COUNT_SET_HCOUNT(x) ((x) & 0xffff) +#define TRANSFER_COUNT_GET_HCOUNT(x) ((x) & 0xffff) + + +#define VDCTRL0_ENABLE_PRESENT (1 << 28) +#define VDCTRL0_VSYNC_ACT_HIGH (1 << 27) +#define VDCTRL0_HSYNC_ACT_HIGH (1 << 26) +#define VDCTRL0_DOTCLK_ACT_FAILING (1 << 25) +#define VDCTRL0_ENABLE_ACT_HIGH (1 << 24) +#define VDCTRL0_VSYNC_PERIOD_UNIT (1 << 21) +#define VDCTRL0_VSYNC_PULSE_WIDTH_UNIT (1 << 20) +#define VDCTRL0_HALF_LINE (1 << 19) +#define VDCTRL0_HALF_LINE_MODE (1 << 18) +#define VDCTRL0_SET_VSYNC_PULSE_WIDTH(x) ((x) & 0x3ffff) +#define VDCTRL0_GET_VSYNC_PULSE_WIDTH(x) ((x) & 0x3ffff) + +#define VDCTRL2_SET_HSYNC_PERIOD(x) ((x) & 0x3ffff) +#define VDCTRL2_GET_HSYNC_PERIOD(x) ((x) & 0x3ffff) + +#define VDCTRL3_MUX_SYNC_SIGNALS (1 << 29) +#define VDCTRL3_VSYNC_ONLY (1 << 28) +#define SET_HOR_WAIT_CNT(x) (((x) & 0xfff) << 16) +#define GET_HOR_WAIT_CNT(x) (((x) >> 16) & 0xfff) +#define SET_VERT_WAIT_CNT(x) ((x) & 0xffff) +#define GET_VERT_WAIT_CNT(x) ((x) & 0xffff) + +#define VDCTRL4_SET_DOTCLK_DLY(x) (((x) & 0x7) << 29) /* v4 only */ +#define VDCTRL4_GET_DOTCLK_DLY(x) (((x) >> 29) & 0x7) /* v4 only */ +#define VDCTRL4_SYNC_SIGNALS_ON (1 << 18) +#define SET_DOTCLK_H_VALID_DATA_CNT(x) ((x) & 0x3ffff) + +#define DEBUG0_HSYNC (1 << 26) +#define DEBUG0_VSYNC (1 << 25) + +#define MIN_XRES 120 +#define MIN_YRES 120 + +#define RED 0 +#define GREEN 1 +#define BLUE 2 +#define TRANSP 3 + +enum mxsfb_devtype { + MXSFB_V3, + MXSFB_V4, +}; + +/* CPU dependent register offsets */ +struct mxsfb_devdata { + unsigned transfer_count; + unsigned cur_buf; + unsigned next_buf; + unsigned debug0; + unsigned hs_wdth_mask; + unsigned hs_wdth_shift; + unsigned ipversion; +}; + +struct mxsfb_info { + struct fb_info fb_info; + struct platform_device *pdev; + struct clk *clk; + void __iomem
*base; /* registers */ + unsigned allocated_size; + int enabled; + unsigned ld_intf_width; + unsigned dotclk_delay; + const struct mxsfb_devdata *devdata; + int mapped; +}; + +#define mxsfb_is_v3(host) (host->devdata->ipversion == 3) +#define mxsfb_is_v4(host) (host->devdata->ipversion == 4) + +static const struct mxsfb_devdata mxsfb_devdata[] = { + [MXSFB_V3] = { + .transfer_count = LCDC_V3_TRANSFER_COUNT, + .cur_buf = LCDC_V3_CUR_BUF, + .next_buf = LCDC_V3_NEXT_BUF, + .debug0 = LCDC_V3_DEBUG0, + .hs_wdth_mask = 0xff, + .hs_wdth_shift = 24, + .ipversion = 3, + }, + [MXSFB_V4] = { + .transfer_count = LCDC_V4_TRANSFER_COUNT, + .cur_buf = LCDC_V4_CUR_BUF, + .next_buf = LCDC_V4_NEXT_BUF, + .debug0 = LCDC_V4_DEBUG0, + .hs_wdth_mask = 0x3fff, + .hs_wdth_shift = 18, + .ipversion = 4, + }, +}; + +#define to_imxfb_host(x) (container_of(x, struct mxsfb_info, fb_info)) + +/* mask and shift depends on architecture */ +static inline u32 set_hsync_pulse_width(struct mxsfb_info *host, unsigned val) +{ + return (val & host->devdata->hs_wdth_mask) << + host->devdata->hs_wdth_shift; +} + +static inline u32 get_hsync_pulse_width(struct mxsfb_info *host, unsigned val) +{ + return (val >> host->devdata->hs_wdth_shift) & + host->devdata->hs_wdth_mask; +} + +static const struct fb_bitfield def_rgb565[] = { + [RED] = { + .offset = 11, + .length = 5, + }, + [GREEN] = { + .offset = 5, + .length = 6, + }, + [BLUE] = { + .offset = 0, + .length = 5, + }, + [TRANSP] = { /* no support for transparency */ + .length = 0, + } +}; + +static const struct fb_bitfield def_rgb666[] = { + [RED] = { + .offset = 16, + .length = 6, + }, + [GREEN] = { + .offset = 8, + .length = 6, + }, + [BLUE] = { + .offset = 0, + .length = 6, + }, + [TRANSP] = { /* no support for transparency */ + .length = 0, + } +}; + +static const struct fb_bitfield def_rgb888[] = { + [RED] = { + .offset = 16, + .length = 8, + }, + [GREEN] = { + .offset = 8, + .length = 8, + }, + [BLUE] = { + .offset = 0, + .length = 8, + }, + [TRANSP] = { /* no support for transparency */ + .length = 0, + } +}; + +static inline unsigned chan_to_field(unsigned chan, struct fb_bitfield *bf) +{ + chan &= 0xffff; + chan >>= 16 - bf->length; + return chan << bf->offset; +} + +static int mxsfb_check_var(struct fb_var_screeninfo *var, + struct fb_info *fb_info) +{ + struct mxsfb_info *host = to_imxfb_host(fb_info); + const struct fb_bitfield *rgb = NULL; + + if (var->xres < MIN_XRES) + var->xres = MIN_XRES; + if (var->yres < MIN_YRES) + var->yres = MIN_YRES; + + var->xres_virtual = var->xres; + + var->yres_virtual = var->yres; + + switch (var->bits_per_pixel) { + case 16: + /* always expect RGB 565 */ + rgb = def_rgb565; + break; + case 32: + switch (host->ld_intf_width) { + case STMLCDIF_8BIT: + pr_debug("Unsupported LCD bus width mapping\n"); + break; + case STMLCDIF_16BIT: + case STMLCDIF_18BIT: + /* 24 bit to 18 bit mapping */ + rgb = def_rgb666; + break; + case STMLCDIF_24BIT: + /* real 24 bit */ + rgb = def_rgb888; + break; + } + break; + default: + pr_debug("Unsupported colour depth: %u\n", var->bits_per_pixel); + return -EINVAL; + } + + /* + * Copy the RGB parameters for this display + * from the machine specific parameters. 
+	 */
+	var->red = rgb[RED];
+	var->green = rgb[GREEN];
+	var->blue = rgb[BLUE];
+	var->transp = rgb[TRANSP];
+
+	return 0;
+}
+
+static void mxsfb_enable_controller(struct fb_info *fb_info)
+{
+	struct mxsfb_info *host = to_imxfb_host(fb_info);
+	u32 reg;
+
+	dev_dbg(&host->pdev->dev, "%s\n", __func__);
+
+	clk_enable(host->clk);
+	clk_set_rate(host->clk, PICOS2KHZ(fb_info->var.pixclock) * 1000U);
+
+	/* if it was disabled, re-enable the mode again */
+	writel(CTRL_DOTCLK_MODE, host->base + LCDC_CTRL + REG_SET);
+
+	/* enable the SYNC signals first, then the DMA engine */
+	reg = readl(host->base + LCDC_VDCTRL4);
+	reg |= VDCTRL4_SYNC_SIGNALS_ON;
+	writel(reg, host->base + LCDC_VDCTRL4);
+
+	writel(CTRL_RUN, host->base + LCDC_CTRL + REG_SET);
+
+	host->enabled = 1;
+}
+
+static void mxsfb_disable_controller(struct fb_info *fb_info)
+{
+	struct mxsfb_info *host = to_imxfb_host(fb_info);
+	unsigned loop;
+	u32 reg;
+
+	dev_dbg(&host->pdev->dev, "%s\n", __func__);
+
+	/*
+	 * Even if we disable the controller here, it will still continue
+	 * until its FIFOs have run out of data
+	 */
+	writel(CTRL_DOTCLK_MODE, host->base + LCDC_CTRL + REG_CLR);
+
+	loop = 1000;
+	while (loop) {
+		reg = readl(host->base + LCDC_CTRL);
+		if (!(reg & CTRL_RUN))
+			break;
+		loop--;
+	}
+
+	writel(VDCTRL4_SYNC_SIGNALS_ON, host->base + LCDC_VDCTRL4 + REG_CLR);
+
+	clk_disable(host->clk);
+
+	host->enabled = 0;
+}
+
+static int mxsfb_set_par(struct fb_info *fb_info)
+{
+	struct mxsfb_info *host = to_imxfb_host(fb_info);
+	u32 ctrl, vdctrl0, vdctrl4;
+	int line_size, fb_size;
+	int reenable = 0;
+
+	line_size = fb_info->var.xres * (fb_info->var.bits_per_pixel >> 3);
+	fb_size = fb_info->var.yres_virtual * line_size;
+
+	if (fb_size > fb_info->fix.smem_len)
+		return -ENOMEM;
+
+	fb_info->fix.line_length = line_size;
+
+	/*
+	 * It seems you cannot re-program the controller while it is still
+	 * running. This may lead to shifted pictures (FIFO issue?).
+	 * So first stop the controller and drain its FIFOs.
+	 */
+	if (host->enabled) {
+		reenable = 1;
+		mxsfb_disable_controller(fb_info);
+	}
+
+	/* clear the FIFOs */
+	writel(CTRL1_FIFO_CLEAR, host->base + LCDC_CTRL1 + REG_SET);
+
+	ctrl = CTRL_BYPASS_COUNT | CTRL_MASTER |
+		CTRL_SET_BUS_WIDTH(host->ld_intf_width);
+
+	switch (fb_info->var.bits_per_pixel) {
+	case 16:
+		dev_dbg(&host->pdev->dev, "Setting up RGB565 mode\n");
+		ctrl |= CTRL_SET_WORD_LENGTH(0);
+		writel(CTRL1_SET_BYTE_PACKAGING(0xf), host->base + LCDC_CTRL1);
+		break;
+	case 32:
+		dev_dbg(&host->pdev->dev, "Setting up RGB888/666 mode\n");
+		ctrl |= CTRL_SET_WORD_LENGTH(3);
+		switch (host->ld_intf_width) {
+		case STMLCDIF_8BIT:
+			dev_dbg(&host->pdev->dev,
+				"Unsupported LCD bus width mapping\n");
+			return -EINVAL;
+		case STMLCDIF_16BIT:
+		case STMLCDIF_18BIT:
+			/* 24 bit to 18 bit mapping */
+			ctrl |= CTRL_DF24; /* ignore the upper 2 bits in
+					    * each colour component
+					    */
+			break;
+		case STMLCDIF_24BIT:
+			/* real 24 bit */
+			break;
+		}
+		/* do not use packed pixels: one pixel per word instead */
+		writel(CTRL1_SET_BYTE_PACKAGING(0x7), host->base + LCDC_CTRL1);
+		break;
+	default:
+		dev_dbg(&host->pdev->dev, "Unhandled color depth of %u\n",
+			fb_info->var.bits_per_pixel);
+		return -EINVAL;
+	}
+
+	writel(ctrl, host->base + LCDC_CTRL);
+
+	writel(TRANSFER_COUNT_SET_VCOUNT(fb_info->var.yres) |
+		TRANSFER_COUNT_SET_HCOUNT(fb_info->var.xres),
+		host->base + host->devdata->transfer_count);
+
+	vdctrl0 = VDCTRL0_ENABLE_PRESENT | /* always in DOTCLOCK mode */
+		VDCTRL0_VSYNC_PERIOD_UNIT |
+		VDCTRL0_VSYNC_PULSE_WIDTH_UNIT |
+		VDCTRL0_SET_VSYNC_PULSE_WIDTH(fb_info->var.vsync_len);
+	if (fb_info->var.sync & FB_SYNC_HOR_HIGH_ACT)
+		vdctrl0 |= VDCTRL0_HSYNC_ACT_HIGH;
+	if (fb_info->var.sync & FB_SYNC_VERT_HIGH_ACT)
+		vdctrl0 |= VDCTRL0_VSYNC_ACT_HIGH;
+	if (fb_info->var.sync & FB_SYNC_DATA_ENABLE_HIGH_ACT)
+		vdctrl0 |= VDCTRL0_ENABLE_ACT_HIGH;
+	if (fb_info->var.sync & FB_SYNC_DOTCLK_FAILING_ACT)
+		vdctrl0 |= VDCTRL0_DOTCLK_ACT_FAILING;
+
+	writel(vdctrl0, host->base + LCDC_VDCTRL0);
+
+	/* frame length in lines */
+	writel(fb_info->var.upper_margin + fb_info->var.vsync_len +
+		fb_info->var.lower_margin + fb_info->var.yres,
+		host->base + LCDC_VDCTRL1);
+
+	/* line length in units of clocks or pixels */
+	writel(set_hsync_pulse_width(host, fb_info->var.hsync_len) |
+		VDCTRL2_SET_HSYNC_PERIOD(fb_info->var.left_margin +
+		fb_info->var.hsync_len + fb_info->var.right_margin +
+		fb_info->var.xres),
+		host->base + LCDC_VDCTRL2);
+
+	writel(SET_HOR_WAIT_CNT(fb_info->var.left_margin +
+		fb_info->var.hsync_len) |
+		SET_VERT_WAIT_CNT(fb_info->var.upper_margin +
+		fb_info->var.vsync_len),
+		host->base + LCDC_VDCTRL3);
+
+	vdctrl4 = SET_DOTCLK_H_VALID_DATA_CNT(fb_info->var.xres);
+	if (mxsfb_is_v4(host))
+		vdctrl4 |= VDCTRL4_SET_DOTCLK_DLY(host->dotclk_delay);
+	writel(vdctrl4, host->base + LCDC_VDCTRL4);
+
+	writel(fb_info->fix.smem_start +
+		fb_info->fix.line_length * fb_info->var.yoffset,
+		host->base + host->devdata->next_buf);
+
+	if (reenable)
+		mxsfb_enable_controller(fb_info);
+
+	return 0;
+}
+
+static int mxsfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
+		u_int transp, struct fb_info *fb_info)
+{
+	unsigned int val;
+	int ret = -EINVAL;
+
+	/*
+	 * If greyscale is true, then we convert the RGB value
+	 * to greyscale no matter what visual we are using.
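+	 * The weights used below are the ITU-R BT.601 luma coefficients
+	 * (0.299, 0.587 and 0.114), scaled by 2^16.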
+	 */
+	if (fb_info->var.grayscale)
+		red = green = blue = (19595 * red +
+				38470 * green + 7471 * blue) >> 16;
+
+	switch (fb_info->fix.visual) {
+	case FB_VISUAL_TRUECOLOR:
+		/*
+		 * 12 or 16-bit True Colour. We encode the RGB value
+		 * according to the RGB bitfield information.
+		 */
+		if (regno < 16) {
+			u32 *pal = fb_info->pseudo_palette;
+
+			val = chan_to_field(red, &fb_info->var.red);
+			val |= chan_to_field(green, &fb_info->var.green);
+			val |= chan_to_field(blue, &fb_info->var.blue);
+
+			pal[regno] = val;
+			ret = 0;
+		}
+		break;
+
+	case FB_VISUAL_STATIC_PSEUDOCOLOR:
+	case FB_VISUAL_PSEUDOCOLOR:
+		break;
+	}
+
+	return ret;
+}
+
+static int mxsfb_blank(int blank, struct fb_info *fb_info)
+{
+	struct mxsfb_info *host = to_imxfb_host(fb_info);
+
+	switch (blank) {
+	case FB_BLANK_POWERDOWN:
+	case FB_BLANK_VSYNC_SUSPEND:
+	case FB_BLANK_HSYNC_SUSPEND:
+	case FB_BLANK_NORMAL:
+		if (host->enabled)
+			mxsfb_disable_controller(fb_info);
+		break;
+
+	case FB_BLANK_UNBLANK:
+		if (!host->enabled)
+			mxsfb_enable_controller(fb_info);
+		break;
+	}
+	return 0;
+}
+
+static int mxsfb_pan_display(struct fb_var_screeninfo *var,
+		struct fb_info *fb_info)
+{
+	struct mxsfb_info *host = to_imxfb_host(fb_info);
+	unsigned offset;
+
+	if (var->xoffset != 0)
+		return -EINVAL;
+
+	offset = fb_info->fix.line_length * var->yoffset;
+
+	/* update on next VSYNC */
+	writel(fb_info->fix.smem_start + offset,
+			host->base + host->devdata->next_buf);
+
+	return 0;
+}
+
+static struct fb_ops mxsfb_ops = {
+	.owner = THIS_MODULE,
+	.fb_check_var = mxsfb_check_var,
+	.fb_set_par = mxsfb_set_par,
+	.fb_setcolreg = mxsfb_setcolreg,
+	.fb_blank = mxsfb_blank,
+	.fb_pan_display = mxsfb_pan_display,
+	.fb_fillrect = cfb_fillrect,
+	.fb_copyarea = cfb_copyarea,
+	.fb_imageblit = cfb_imageblit,
+};
+
+static int __devinit mxsfb_restore_mode(struct mxsfb_info *host)
+{
+	struct fb_info *fb_info = &host->fb_info;
+	unsigned line_count;
+	unsigned period;
+	unsigned long pa, fbsize;
+	int bits_per_pixel, ofs;
+	u32 transfer_count, vdctrl0, vdctrl2, vdctrl3, vdctrl4, ctrl;
+	struct fb_videomode vmode;
+
+	/* Only restore the mode when the controller is running */
+	ctrl = readl(host->base + LCDC_CTRL);
+	if (!(ctrl & CTRL_RUN))
+		return -EINVAL;
+
+	vdctrl0 = readl(host->base + LCDC_VDCTRL0);
+	vdctrl2 = readl(host->base + LCDC_VDCTRL2);
+	vdctrl3 = readl(host->base + LCDC_VDCTRL3);
+	vdctrl4 = readl(host->base + LCDC_VDCTRL4);
+
+	transfer_count = readl(host->base + host->devdata->transfer_count);
+
+	vmode.xres = TRANSFER_COUNT_GET_HCOUNT(transfer_count);
+	vmode.yres = TRANSFER_COUNT_GET_VCOUNT(transfer_count);
+
+	switch (CTRL_GET_WORD_LENGTH(ctrl)) {
+	case 0:
+		bits_per_pixel = 16;
+		break;
+	case 3:
+		bits_per_pixel = 32;
+		break;
+	case 1:
+	default:
+		return -EINVAL;
+	}
+
+	fb_info->var.bits_per_pixel = bits_per_pixel;
+
+	vmode.pixclock = KHZ2PICOS(clk_get_rate(host->clk) / 1000U);
+	vmode.hsync_len = get_hsync_pulse_width(host, vdctrl2);
+	vmode.left_margin = GET_HOR_WAIT_CNT(vdctrl3) - vmode.hsync_len;
+	vmode.right_margin = VDCTRL2_GET_HSYNC_PERIOD(vdctrl2) - vmode.hsync_len -
+		vmode.left_margin - vmode.xres;
+	vmode.vsync_len = VDCTRL0_GET_VSYNC_PULSE_WIDTH(vdctrl0);
+	period = readl(host->base + LCDC_VDCTRL1);
+	vmode.upper_margin = GET_VERT_WAIT_CNT(vdctrl3) - vmode.vsync_len;
+	vmode.lower_margin = period - vmode.vsync_len - vmode.upper_margin - vmode.yres;
+
+	vmode.vmode = FB_VMODE_NONINTERLACED;
+
+	vmode.sync = 0;
+	if (vdctrl0 & VDCTRL0_HSYNC_ACT_HIGH)
+		vmode.sync |= FB_SYNC_HOR_HIGH_ACT;
+	if (vdctrl0 & VDCTRL0_VSYNC_ACT_HIGH)
+		vmode.sync |= FB_SYNC_VERT_HIGH_ACT;
+
+	pr_debug("Reconstructed video mode:\n");
+	pr_debug("%dx%d, hsync: %u left: %u, right: %u, vsync: %u, upper: %u, lower: %u\n",
+		vmode.xres, vmode.yres,
+		vmode.hsync_len, vmode.left_margin, vmode.right_margin,
+		vmode.vsync_len, vmode.upper_margin, vmode.lower_margin);
+	pr_debug("pixclk: %ldkHz\n", PICOS2KHZ(vmode.pixclock));
+
+	fb_add_videomode(&vmode, &fb_info->modelist);
+
+	host->ld_intf_width = CTRL_GET_BUS_WIDTH(ctrl);
+	host->dotclk_delay = VDCTRL4_GET_DOTCLK_DLY(vdctrl4);
+
+	fb_info->fix.line_length = vmode.xres * (bits_per_pixel >> 3);
+
+	pa = readl(host->base + host->devdata->cur_buf);
+	fbsize = fb_info->fix.line_length * vmode.yres;
+	if (pa < fb_info->fix.smem_start)
+		return -EINVAL;
+	if (pa + fbsize > fb_info->fix.smem_start + fb_info->fix.smem_len)
+		return -EINVAL;
+	ofs = pa - fb_info->fix.smem_start;
+	if (ofs) {
+		memmove(fb_info->screen_base, fb_info->screen_base + ofs, fbsize);
+		writel(fb_info->fix.smem_start, host->base + host->devdata->next_buf);
+	}
+
+	line_count = fb_info->fix.smem_len / fb_info->fix.line_length;
+	fb_info->fix.ypanstep = 1;
+
+	clk_enable(host->clk);
+	host->enabled = 1;
+
+	return 0;
+}
+
+static int __devinit mxsfb_init_fbinfo(struct mxsfb_info *host)
+{
+	struct fb_info *fb_info = &host->fb_info;
+	struct fb_var_screeninfo *var = &fb_info->var;
+	struct mxsfb_platform_data *pdata = host->pdev->dev.platform_data;
+	dma_addr_t fb_phys;
+	void *fb_virt;
+	unsigned fb_size = pdata->fb_size;
+
+	fb_info->fbops = &mxsfb_ops;
+	fb_info->flags = FBINFO_FLAG_DEFAULT | FBINFO_READS_FAST;
+	strlcpy(fb_info->fix.id, "mxs", sizeof(fb_info->fix.id));
+	fb_info->fix.type = FB_TYPE_PACKED_PIXELS;
+	fb_info->fix.ypanstep = 1;
+	fb_info->fix.visual = FB_VISUAL_TRUECOLOR;
+	fb_info->fix.accel = FB_ACCEL_NONE;
+
+	var->bits_per_pixel = pdata->default_bpp ? pdata->default_bpp : 16;
+	var->nonstd = 0;
+	var->activate = FB_ACTIVATE_NOW;
+	var->accel_flags = 0;
+	var->vmode = FB_VMODE_NONINTERLACED;
+
+	host->dotclk_delay = pdata->dotclk_delay;
+	host->ld_intf_width = pdata->ld_intf_width;
+
+	/* Memory allocation for framebuffer */
+	if (pdata->fb_phys) {
+		if (!fb_size)
+			return -EINVAL;
+
+		fb_phys = pdata->fb_phys;
+
+		if (!request_mem_region(fb_phys, fb_size, host->pdev->name))
+			return -ENOMEM;
+
+		fb_virt = ioremap(fb_phys, fb_size);
+		if (!fb_virt) {
+			release_mem_region(fb_phys, fb_size);
+			return -ENOMEM;
+		}
+		host->mapped = 1;
+	} else {
+		if (!fb_size)
+			fb_size = SZ_2M; /* default */
+		fb_virt = alloc_pages_exact(fb_size, GFP_DMA);
+		if (!fb_virt)
+			return -ENOMEM;
+
+		fb_phys = virt_to_phys(fb_virt);
+	}
+
+	fb_info->fix.smem_start = fb_phys;
+	fb_info->screen_base = fb_virt;
+	fb_info->screen_size = fb_info->fix.smem_len = fb_size;
+
+	if (mxsfb_restore_mode(host))
+		memset(fb_virt, 0, fb_size);
+
+	return 0;
+}
+
+static void __devexit mxsfb_free_videomem(struct mxsfb_info *host)
+{
+	struct fb_info *fb_info = &host->fb_info;
+
+	if (host->mapped) {
+		iounmap(fb_info->screen_base);
+		release_mem_region(fb_info->fix.smem_start,
+				fb_info->screen_size);
+	} else {
+		free_pages_exact(fb_info->screen_base, fb_info->fix.smem_len);
+	}
+}
+
+static int __devinit mxsfb_probe(struct platform_device *pdev)
+{
+	struct mxsfb_platform_data *pdata = pdev->dev.platform_data;
+	struct resource *res;
+	struct mxsfb_info *host;
+	struct fb_info *fb_info;
+	struct fb_modelist *modelist;
+	int i, ret;
+
+	if (!pdata) {
+		dev_err(&pdev->dev, "No platform data, giving up\n");
+		return -ENODEV;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(&pdev->dev, "Cannot get memory IO resource\n");
+		return -ENODEV;
+	}
+
+	if (!request_mem_region(res->start, resource_size(res), pdev->name))
+		return -EBUSY;
+
+	fb_info = framebuffer_alloc(sizeof(struct mxsfb_info), &pdev->dev);
+	if (!fb_info) {
+		dev_err(&pdev->dev, "Failed to allocate fbdev\n");
+		ret = -ENOMEM;
+		goto error_alloc_info;
+	}
+
+	host = to_imxfb_host(fb_info);
+
+	host->base = ioremap(res->start, resource_size(res));
+	if (!host->base) {
+		dev_err(&pdev->dev, "ioremap failed\n");
+		ret = -ENOMEM;
+		goto error_ioremap;
+	}
+
+	host->pdev = pdev;
+	platform_set_drvdata(pdev, host);
+
+	host->devdata = &mxsfb_devdata[pdev->id_entry->driver_data];
+
+	host->clk = clk_get(&host->pdev->dev, NULL);
+	if (IS_ERR(host->clk)) {
+		ret = PTR_ERR(host->clk);
+		goto error_getclock;
+	}
+
+	fb_info->pseudo_palette = kmalloc(sizeof(u32) * 16, GFP_KERNEL);
+	if (!fb_info->pseudo_palette) {
+		ret = -ENOMEM;
+		goto error_pseudo_palette;
+	}
+
+	INIT_LIST_HEAD(&fb_info->modelist);
+
+	ret = mxsfb_init_fbinfo(host);
+	if (ret != 0)
+		goto error_init_fb;
+
+	for (i = 0; i < pdata->mode_count; i++)
+		fb_add_videomode(&pdata->mode_list[i], &fb_info->modelist);
+
+	modelist = list_first_entry(&fb_info->modelist,
+			struct fb_modelist, list);
+	fb_videomode_to_var(&fb_info->var, &modelist->mode);
+
+	/* init the color fields */
+	mxsfb_check_var(&fb_info->var, fb_info);
+
+	platform_set_drvdata(pdev, fb_info);
+
+	ret = register_framebuffer(fb_info);
+	if (ret != 0) {
+		dev_err(&pdev->dev, "Failed to register framebuffer\n");
+		goto error_register;
+	}
+
+	if (!host->enabled) {
+		writel(0, host->base + LCDC_CTRL);
+		mxsfb_set_par(fb_info);
+		mxsfb_enable_controller(fb_info);
+	}
+
+	dev_info(&pdev->dev, "initialized\n");
+
+	return 0;
+
+error_register:
+	if (host->enabled)
+		clk_disable(host->clk);
+	fb_destroy_modelist(&fb_info->modelist);
+error_init_fb:
+	kfree(fb_info->pseudo_palette);
+error_pseudo_palette:
+	clk_put(host->clk);
+error_getclock:
+	iounmap(host->base);
+error_ioremap:
+	framebuffer_release(fb_info);
+error_alloc_info:
+	release_mem_region(res->start, resource_size(res));
+
+	return ret;
+}
+
+static int __devexit mxsfb_remove(struct platform_device *pdev)
+{
+	struct fb_info *fb_info = platform_get_drvdata(pdev);
+	struct mxsfb_info *host = to_imxfb_host(fb_info);
+	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+	if (host->enabled)
+		mxsfb_disable_controller(fb_info);
+
+	unregister_framebuffer(fb_info);
+	kfree(fb_info->pseudo_palette);
+	mxsfb_free_videomem(host);
+	iounmap(host->base);
+	clk_put(host->clk);
+
+	framebuffer_release(fb_info);
+	release_mem_region(res->start, resource_size(res));
+
+	platform_set_drvdata(pdev, NULL);
+
+	return 0;
+}
+
+static struct platform_device_id mxsfb_devtype[] = {
+	{
+		.name = "imx23-fb",
+		.driver_data = MXSFB_V3,
+	}, {
+		.name = "imx28-fb",
+		.driver_data = MXSFB_V4,
+	}, {
+		/* sentinel */
+	}
+};
+MODULE_DEVICE_TABLE(platform, mxsfb_devtype);
+
+static struct platform_driver mxsfb_driver = {
+	.probe = mxsfb_probe,
+	.remove = __devexit_p(mxsfb_remove),
+	.id_table = mxsfb_devtype,
+	.driver = {
+		.name = DRIVER_NAME,
+	},
+};
+
+static int __init mxsfb_init(void)
+{
+	return platform_driver_register(&mxsfb_driver);
+}
+
+static void __exit mxsfb_exit(void)
+{
+	platform_driver_unregister(&mxsfb_driver);
+}
+
+module_init(mxsfb_init);
+module_exit(mxsfb_exit);
+
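+/*
+ * For reference: the probe path above expects the board file to supply
+ * a struct mxsfb_platform_data. A minimal sketch follows (the mode name
+ * and timing values are made-up placeholders, not taken from this patch):
+ *
+ *	static struct fb_videomode board_video_mode = {
+ *		.name = "panel",
+ *		.refresh = 60,
+ *		.xres = 480,
+ *		.yres = 272,
+ *		.pixclock = 111111,
+ *	};
+ *
+ *	static struct mxsfb_platform_data board_fb_pdata = {
+ *		.mode_list = &board_video_mode,
+ *		.mode_count = 1,
+ *		.default_bpp = 16,
+ *		.ld_intf_width = STMLCDIF_16BIT,
+ *	};
+ */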
+MODULE_DESCRIPTION("Freescale mxs framebuffer driver");
+MODULE_AUTHOR("Sascha Hauer, Pengutronix");
+MODULE_LICENSE("GPL");
diff --git a/drivers/w1/masters/Kconfig b/drivers/w1/masters/Kconfig
index 80b3b123dd7f..7c608c5ccf84 100644
--- a/drivers/w1/masters/Kconfig
+++ b/drivers/w1/masters/Kconfig
@@ -60,7 +60,7 @@ config W1_MASTER_GPIO
 
 config HDQ_MASTER_OMAP
 	tristate "OMAP HDQ driver"
-	depends on ARCH_OMAP2430 || ARCH_OMAP3
+	depends on SOC_OMAP2430 || ARCH_OMAP3
 	help
 	  Say Y here if you want support for the 1-wire or HDQ Interface
 	  on an OMAP processor.
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 908160e938dc..b69d71482554 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -533,6 +533,16 @@ config I6300ESB_WDT
 	  To compile this driver as a module, choose M here: the
 	  module will be called i6300esb.
 
+config INTEL_SCU_WATCHDOG
+	bool "Intel SCU Watchdog for Mobile Platforms"
+	depends on WATCHDOG
+	depends on INTEL_SCU_IPC
+	---help---
+	  Hardware driver for the watchdog timer built into the Intel SCU
+	  for Intel Mobile Platforms.
+
+	  Note that this driver is built into the kernel and can not be
+	  built as a module.
+
 config ITCO_WDT
 	tristate "Intel TCO Timer/Watchdog"
 	depends on (X86 || IA64) && PCI
@@ -580,7 +590,7 @@ config IT87_WDT
 	depends on X86 && EXPERIMENTAL
 	---help---
 	  This is the driver for the hardware watchdog on the ITE IT8702,
-	  IT8712, IT8716, IT8718, IT8720, IT8726, IT8712 Super I/O chips.
+	  IT8712, IT8716, IT8718, IT8720, IT8721, IT8726 Super I/O chips.
 	  This watchdog simply watches your kernel to make sure it doesn't
 	  freeze, and if it does, it reboots your computer after a certain
 	  amount of time.
@@ -589,18 +599,20 @@ config IT87_WDT
 	  be called it87_wdt.
 
 config HP_WATCHDOG
-	tristate "HP Proliant iLO2+ Hardware Watchdog Timer"
+	tristate "HP ProLiant iLO2+ Hardware Watchdog Timer"
 	depends on X86
+	default m
 	help
 	  A software monitoring watchdog and NMI sourcing driver. This driver
 	  will detect lockups and provide a stack trace. This is a driver that
-	  will only load on a HP ProLiant system with a minimum of iLO2 support.
+	  will only load on an HP ProLiant system with a minimum of iLO2 support.
 	  To compile this driver as a module, choose M here: the module will be
 	  called hpwdt.
 
 config HPWDT_NMI_DECODING
 	bool "NMI decoding support for the HP ProLiant iLO2+ Hardware Watchdog Timer"
 	depends on HP_WATCHDOG
+	default y
 	help
 	  When an NMI occurs this feature will make the necessary BIOS calls to
 	  log the cause of the NMI.
@@ -903,6 +915,12 @@ config INDYDOG
 	  timer expired and no process has written to /dev/watchdog during
 	  that time.
 
+config JZ4740_WDT
+	tristate "Ingenic jz4740 SoC hardware watchdog"
+	depends on MACH_JZ4740
+	help
+	  Hardware driver for the built-in watchdog timer on Ingenic jz4740 SoCs.
+
 config WDT_MTX1
 	tristate "MTX-1 Hardware Watchdog"
 	depends on MIPS_MTX1
@@ -1111,6 +1129,16 @@ config WATCHDOG_RIO
 
 # XTENSA Architecture
 
+# Xen Architecture
+
+config XEN_WDT
+	tristate "Xen Watchdog support"
+	depends on XEN
+	help
+	  Say Y here to support the hypervisor watchdog capability provided
+	  by Xen 4.0 and newer. The watchdog timeout period is normally one
+	  minute but can be changed with a boot-time parameter.
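For context, every driver added or touched below speaks the same
/dev/watchdog character-device ABI. A minimal userspace client, sketched
here purely for illustration (error handling omitted; not part of the patch):

	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/watchdog.h>

	int main(void)
	{
		int timeout = 30;
		int fd = open("/dev/watchdog", O_WRONLY);

		if (fd < 0)
			return 1;
		ioctl(fd, WDIOC_SETTIMEOUT, &timeout);	/* request a 30s timeout */
		for (int i = 0; i < 10; i++) {
			ioctl(fd, WDIOC_KEEPALIVE, 0);	/* pet the watchdog */
			sleep(timeout / 2);
		}
		write(fd, "V", 1);	/* magic close, where the driver supports it */
		close(fd);
		return 0;
	}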
+ # # ISA-based Watchdog Cards # diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile index 20e44c4782b3..d520bf9c3355 100644 --- a/drivers/watchdog/Makefile +++ b/drivers/watchdog/Makefile @@ -102,6 +102,7 @@ obj-$(CONFIG_W83877F_WDT) += w83877f_wdt.o obj-$(CONFIG_W83977F_WDT) += w83977f_wdt.o obj-$(CONFIG_MACHZ_WDT) += machzwd.o obj-$(CONFIG_SBC_EPX_C3_WATCHDOG) += sbc_epx_c3.o +obj-$(CONFIG_INTEL_SCU_WATCHDOG) += intel_scu_watchdog.o # M32R Architecture @@ -114,6 +115,7 @@ obj-$(CONFIG_BCM47XX_WDT) += bcm47xx_wdt.o obj-$(CONFIG_BCM63XX_WDT) += bcm63xx_wdt.o obj-$(CONFIG_RC32434_WDT) += rc32434_wdt.o obj-$(CONFIG_INDYDOG) += indydog.o +obj-$(CONFIG_JZ4740_WDT) += jz4740_wdt.o obj-$(CONFIG_WDT_MTX1) += mtx-1_wdt.o obj-$(CONFIG_PNX833X_WDT) += pnx833x_wdt.o obj-$(CONFIG_SIBYTE_WDOG) += sb_wdog.o @@ -148,6 +150,9 @@ obj-$(CONFIG_WATCHDOG_CP1XXX) += cpwd.o # XTENSA Architecture +# Xen +obj-$(CONFIG_XEN_WDT) += xen_wdt.o + # Architecture Independant obj-$(CONFIG_WM831X_WATCHDOG) += wm831x_wdt.o obj-$(CONFIG_WM8350_WATCHDOG) += wm8350_wdt.o diff --git a/drivers/watchdog/alim1535_wdt.c b/drivers/watchdog/alim1535_wdt.c index fa4d36033552..f16dcbd475fb 100644 --- a/drivers/watchdog/alim1535_wdt.c +++ b/drivers/watchdog/alim1535_wdt.c @@ -301,7 +301,7 @@ static int ali_notify_sys(struct notifier_block *this, * want to register another driver on the same PCI id. */ -static struct pci_device_id ali_pci_tbl[] __used = { +static DEFINE_PCI_DEVICE_TABLE(ali_pci_tbl) __used = { { PCI_VENDOR_ID_AL, 0x1533, PCI_ANY_ID, PCI_ANY_ID,}, { PCI_VENDOR_ID_AL, 0x1535, PCI_ANY_ID, PCI_ANY_ID,}, { 0, }, @@ -362,12 +362,12 @@ static int __init ali_find_watchdog(void) */ static const struct file_operations ali_fops = { - .owner = THIS_MODULE, - .llseek = no_llseek, + .owner = THIS_MODULE, + .llseek = no_llseek, .write = ali_write, .unlocked_ioctl = ali_ioctl, - .open = ali_open, - .release = ali_release, + .open = ali_open, + .release = ali_release, }; static struct miscdevice ali_miscdev = { diff --git a/drivers/watchdog/alim7101_wdt.c b/drivers/watchdog/alim7101_wdt.c index 4b7a2b4138ed..46f4b85b46de 100644 --- a/drivers/watchdog/alim7101_wdt.c +++ b/drivers/watchdog/alim7101_wdt.c @@ -430,7 +430,7 @@ err_out: module_init(alim7101_wdt_init); module_exit(alim7101_wdt_unload); -static struct pci_device_id alim7101_pci_tbl[] __devinitdata __used = { +static DEFINE_PCI_DEVICE_TABLE(alim7101_pci_tbl) __used = { { PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533) }, { PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101) }, { } diff --git a/drivers/watchdog/bcm47xx_wdt.c b/drivers/watchdog/bcm47xx_wdt.c index 5f245522397b..bd44417c84d4 100644 --- a/drivers/watchdog/bcm47xx_wdt.c +++ b/drivers/watchdog/bcm47xx_wdt.c @@ -150,8 +150,8 @@ static ssize_t bcm47xx_wdt_write(struct file *file, const char __user *data, } static const struct watchdog_info bcm47xx_wdt_info = { - .identity = DRV_NAME, - .options = WDIOF_SETTIMEOUT | + .identity = DRV_NAME, + .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE, }; diff --git a/drivers/watchdog/bfin_wdt.c b/drivers/watchdog/bfin_wdt.c index 9042a95fc98c..b9fa9b71583a 100644 --- a/drivers/watchdog/bfin_wdt.c +++ b/drivers/watchdog/bfin_wdt.c @@ -63,7 +63,7 @@ static DEFINE_SPINLOCK(bfin_wdt_spinlock); /** * bfin_wdt_keepalive - Keep the Userspace Watchdog Alive * - * The Userspace watchdog got a KeepAlive: schedule the next timeout. + * The Userspace watchdog got a KeepAlive: schedule the next timeout. 
*/ static int bfin_wdt_keepalive(void) { @@ -337,7 +337,7 @@ static int bfin_wdt_resume(struct platform_device *pdev) static const struct file_operations bfin_wdt_fops = { .owner = THIS_MODULE, .llseek = no_llseek, - .write = bfin_wdt_write, + .write = bfin_wdt_write, .unlocked_ioctl = bfin_wdt_ioctl, .open = bfin_wdt_open, .release = bfin_wdt_release, diff --git a/drivers/watchdog/booke_wdt.c b/drivers/watchdog/booke_wdt.c index 7e7ec9c35b6a..337265b47305 100644 --- a/drivers/watchdog/booke_wdt.c +++ b/drivers/watchdog/booke_wdt.c @@ -4,7 +4,7 @@ * Author: Matthew McClintock * Maintainer: Kumar Gala <galak@kernel.crashing.org> * - * Copyright 2005, 2008, 2010 Freescale Semiconductor Inc. + * Copyright 2005, 2008, 2010-2011 Freescale Semiconductor Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -221,9 +221,8 @@ static int booke_wdt_open(struct inode *inode, struct file *file) if (booke_wdt_enabled == 0) { booke_wdt_enabled = 1; on_each_cpu(__booke_wdt_enable, NULL, 0); - printk(KERN_INFO - "PowerPC Book-E Watchdog Timer Enabled (wdt_period=%d)\n", - booke_wdt_period); + pr_debug("booke_wdt: watchdog enabled (timeout = %llu sec)\n", + period_to_sec(booke_wdt_period)); } spin_unlock(&booke_wdt_lock); @@ -240,6 +239,7 @@ static int booke_wdt_release(struct inode *inode, struct file *file) */ on_each_cpu(__booke_wdt_disable, NULL, 0); booke_wdt_enabled = 0; + pr_debug("booke_wdt: watchdog disabled\n"); #endif clear_bit(0, &wdt_is_active); @@ -271,21 +271,20 @@ static int __init booke_wdt_init(void) { int ret = 0; - printk(KERN_INFO "PowerPC Book-E Watchdog Timer Loaded\n"); + pr_info("booke_wdt: powerpc book-e watchdog driver loaded\n"); ident.firmware_version = cur_cpu_spec->pvr_value; ret = misc_register(&booke_wdt_miscdev); if (ret) { - printk(KERN_CRIT "Cannot register miscdev on minor=%d: %d\n", - WATCHDOG_MINOR, ret); + pr_err("booke_wdt: cannot register device (minor=%u, ret=%i)\n", + WATCHDOG_MINOR, ret); return ret; } spin_lock(&booke_wdt_lock); if (booke_wdt_enabled == 1) { - printk(KERN_INFO - "PowerPC Book-E Watchdog Timer Enabled (wdt_period=%d)\n", - booke_wdt_period); + pr_info("booke_wdt: watchdog enabled (timeout = %llu sec)\n", + period_to_sec(booke_wdt_period)); on_each_cpu(__booke_wdt_enable, NULL, 0); } spin_unlock(&booke_wdt_lock); diff --git a/drivers/watchdog/cpwd.c b/drivers/watchdog/cpwd.c index 65911678453d..1e013e8457b7 100644 --- a/drivers/watchdog/cpwd.c +++ b/drivers/watchdog/cpwd.c @@ -5,10 +5,10 @@ * interface and Solaris-compatible ioctls as best it is * able. * - * NOTE: CP1400 systems appear to have a defective intr_mask - * register on the PLD, preventing the disabling of - * timer interrupts. We use a timer to periodically - * reset 'stopped' watchdogs on affected platforms. + * NOTE: CP1400 systems appear to have a defective intr_mask + * register on the PLD, preventing the disabling of + * timer interrupts. We use a timer to periodically + * reset 'stopped' watchdogs on affected platforms. * * Copyright (c) 2000 Eric Brower (ebrower@usa.net) * Copyright (C) 2008 David S. Miller <davem@davemloft.net> @@ -107,13 +107,13 @@ static struct cpwd *cpwd_device; * ------------------- * |- counter val -| * ------------------- - * dcntr - Current 16-bit downcounter value. - * When downcounter reaches '0' watchdog expires. - * Reading this register resets downcounter with - * 'limit' value. - * limit - 16-bit countdown value in 1/10th second increments. 
- * Writing this register begins countdown with input value. - * Reading from this register does not affect counter. + * dcntr - Current 16-bit downcounter value. + * When downcounter reaches '0' watchdog expires. + * Reading this register resets downcounter with + * 'limit' value. + * limit - 16-bit countdown value in 1/10th second increments. + * Writing this register begins countdown with input value. + * Reading from this register does not affect counter. * NOTES: After watchdog reset, dcntr and limit contain '1' * * status register (byte access): @@ -123,7 +123,7 @@ static struct cpwd *cpwd_device; * |- UNUSED -| EXP | RUN | * --------------------------- * status- Bit 0 - Watchdog is running - * Bit 1 - Watchdog has expired + * Bit 1 - Watchdog has expired * *** PLD register block definition (struct wd_pld_regblk) * @@ -197,7 +197,7 @@ static u8 cpwd_readb(void __iomem *addr) * Because of the CP1400 defect this should only be * called during initialzation or by wd_[start|stop]timer() * - * index - sub-device index, or -1 for 'all' + * index - sub-device index, or -1 for 'all' * enable - non-zero to enable interrupts, zero to disable */ static void cpwd_toggleintr(struct cpwd *p, int index, int enable) @@ -317,13 +317,13 @@ static int cpwd_getstatus(struct cpwd *p, int index) } else { /* Fudge WD_EXPIRED status for defective CP1400-- * IF timer is running - * AND brokenstop is set - * AND an interrupt has been serviced + * AND brokenstop is set + * AND an interrupt has been serviced * we are WD_EXPIRED. * * IF timer is running - * AND brokenstop is set - * AND no interrupt has been serviced + * AND brokenstop is set + * AND no interrupt has been serviced * we are WD_FREERUN. */ if (p->broken && @@ -613,7 +613,7 @@ static int __devinit cpwd_probe(struct platform_device *op) if (p->broken) { init_timer(&cpwd_timer); - cpwd_timer.function = cpwd_brokentimer; + cpwd_timer.function = cpwd_brokentimer; cpwd_timer.data = (unsigned long) p; cpwd_timer.expires = WD_BTIMEOUT; diff --git a/drivers/watchdog/eurotechwdt.c b/drivers/watchdog/eurotechwdt.c index 3f3dc093ad68..f1d1da662fbe 100644 --- a/drivers/watchdog/eurotechwdt.c +++ b/drivers/watchdog/eurotechwdt.c @@ -201,7 +201,7 @@ static void eurwdt_ping(void) static ssize_t eurwdt_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { - if (count) { + if (count) { if (!nowayout) { size_t i; diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c index 204a5603c4ae..8cb26855bfed 100644 --- a/drivers/watchdog/hpwdt.c +++ b/drivers/watchdog/hpwdt.c @@ -52,7 +52,7 @@ static void __iomem *pci_mem_addr; /* the PCI-memory address */ static unsigned long __iomem *hpwdt_timer_reg; static unsigned long __iomem *hpwdt_timer_con; -static struct pci_device_id hpwdt_devices[] = { +static DEFINE_PCI_DEVICE_TABLE(hpwdt_devices) = { { PCI_DEVICE(PCI_VENDOR_ID_COMPAQ, 0xB203) }, /* iLO2 */ { PCI_DEVICE(PCI_VENDOR_ID_HP, 0x3306) }, /* iLO3 */ {0}, /* terminate list */ diff --git a/drivers/watchdog/i6300esb.c b/drivers/watchdog/i6300esb.c index bb9750a03942..db45091ef434 100644 --- a/drivers/watchdog/i6300esb.c +++ b/drivers/watchdog/i6300esb.c @@ -334,7 +334,7 @@ static struct miscdevice esb_miscdev = { /* * Data for PCI driver interface */ -static struct pci_device_id esb_pci_tbl[] = { +static DEFINE_PCI_DEVICE_TABLE(esb_pci_tbl) = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_9), }, { 0, }, /* End of list */ }; diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c index 2c6c2b4ad8bf..35a0d12dad73 
100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -247,7 +247,7 @@ static struct {
 	{NULL, 0}
 };
 
-#define ITCO_PCI_DEVICE(dev, data) \
+#define ITCO_PCI_DEVICE(dev, data)	\
 	.vendor = PCI_VENDOR_ID_INTEL,	\
 	.device = dev,			\
 	.subvendor = PCI_ANY_ID,	\
@@ -262,7 +262,7 @@ static struct {
  * pci_driver, because the I/O Controller Hub has also other
  * functions that probably will be registered by other drivers.
  */
-static struct pci_device_id iTCO_wdt_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(iTCO_wdt_pci_tbl) = {
 	{ ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_82801AA_0, TCO_ICH)},
 	{ ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_82801AB_0, TCO_ICH0)},
 	{ ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_82801BA_0, TCO_ICH2)},
diff --git a/drivers/watchdog/intel_scu_watchdog.c b/drivers/watchdog/intel_scu_watchdog.c
new file mode 100644
index 000000000000..919bdd16136f
--- /dev/null
+++ b/drivers/watchdog/intel_scu_watchdog.c
@@ -0,0 +1,572 @@
+/*
+ * Intel_SCU 0.2: An Intel SCU IOH Based Watchdog Device
+ * for Intel part #(s):
+ * - AF82MP20 PCH
+ *
+ * Copyright (C) 2009-2010 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General
+ * Public License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the Free
+ * Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ * The full GNU General Public License is included in this
+ * distribution in the file called COPYING.
+ *
+ */
+
+#include <linux/compiler.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/miscdevice.h>
+#include <linux/watchdog.h>
+#include <linux/fs.h>
+#include <linux/notifier.h>
+#include <linux/reboot.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/signal.h>
+#include <linux/sfi.h>
+#include <asm/irq.h>
+#include <asm/atomic.h>
+#include <asm/intel_scu_ipc.h>
+#include <asm/apb_timer.h>
+#include <asm/mrst.h>
+
+#include "intel_scu_watchdog.h"
+
+/* Bounds number of times we will retry loading time count */
+/* This retry is a workaround for a silicon bug. */
+#define MAX_RETRY 16
+
+#define IPC_SET_WATCHDOG_TIMER 0xF8
+
+static int timer_margin = DEFAULT_SOFT_TO_HARD_MARGIN;
+module_param(timer_margin, int, 0);
+MODULE_PARM_DESC(timer_margin,
+	"Watchdog timer margin. "
+	"Time between interrupt and resetting the system. "
+	"The range is from 1 to 160. "
+	"This is the time for all keep alives to arrive.");
+
+static int timer_set = DEFAULT_TIME;
+module_param(timer_set, int, 0);
+MODULE_PARM_DESC(timer_set,
+	"Default watchdog timer setting. "
+	"Complete cycle time. "
+	"The range is from 1 to 170. "
+	"This is the time for all keep alives to arrive.");
+
+/* After the watchdog device is closed, check force_boot:
+ * force_boot == 0: force a reboot on the next watchdog interrupt after close,
+ * force_boot == 1: force a reboot immediately when the device is closed.
+ */
+static int force_boot;
+module_param(force_boot, int, 0);
+MODULE_PARM_DESC(force_boot,
+	"A value of 1 means that the driver will reboot "
+	"the system immediately if the /dev/watchdog device is closed. "
+	"A value of 0 means that when the /dev/watchdog device is closed "
+	"the watchdog timer will be refreshed for one more interval "
+	"of length timer_set; at the end of this interval, the "
+	"watchdog timer will reset the system.");
+
+/* there is only one device in the system now; this can be made into
+ * an array in the future if we have more than one device */
+
+static struct intel_scu_watchdog_dev watchdog_device;
+
+/* Forces a restart if force_boot is set */
+static void watchdog_fire(void)
+{
+	if (force_boot) {
+		printk(KERN_CRIT PFX "Initiating system reboot.\n");
+		emergency_restart();
+		printk(KERN_CRIT PFX "Reboot did not happen!\n");
+	} else {
+		printk(KERN_CRIT PFX "Immediate reboot disabled\n");
+		printk(KERN_CRIT PFX
+			"System will reset when watchdog timer times out!\n");
+	}
+}
+
+static int check_timer_margin(int new_margin)
+{
+	if ((new_margin < MIN_TIME_CYCLE) ||
+	    (new_margin > MAX_TIME - timer_set)) {
+		pr_debug("Watchdog timer: value of new_margin %d is out of the range %d to %d\n",
+			 new_margin, MIN_TIME_CYCLE, MAX_TIME - timer_set);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/*
+ * IPC operations
+ */
+static int watchdog_set_ipc(int soft_threshold, int threshold)
+{
+	u32 *ipc_wbuf;
+	u8 cbuf[16] = { '\0' };
+	int ipc_ret = 0;
+
+	ipc_wbuf = (u32 *)&cbuf;
+	ipc_wbuf[0] = soft_threshold;
+	ipc_wbuf[1] = threshold;
+
+	ipc_ret = intel_scu_ipc_command(
+			IPC_SET_WATCHDOG_TIMER,
+			0,
+			ipc_wbuf,
+			2,
+			NULL,
+			0);
+
+	if (ipc_ret != 0)
+		pr_err("Error setting SCU watchdog timer: %x\n", ipc_ret);
+
+	return ipc_ret;
+}
+
+/*
+ * Intel_SCU operations
+ */
+
+/* timer interrupt handler */
+static irqreturn_t watchdog_timer_interrupt(int irq, void *dev_id)
+{
+	int int_status;
+
+	int_status = ioread32(watchdog_device.timer_interrupt_status_addr);
+
+	pr_debug("Watchdog timer: irq, int_status: %x\n", int_status);
+
+	if (int_status != 0)
+		return IRQ_NONE;
+
+	/* has the timer been started? If not, this interrupt is spurious */
+	if (watchdog_device.timer_started == 0) {
+		pr_debug("Watchdog timer: spurious interrupt received\n");
+		return IRQ_HANDLED;
+	}
+
+	/* temporarily disable the timer */
+	iowrite32(0x00000002, watchdog_device.timer_control_addr);
+
+	/* set the timer to the threshold */
+	iowrite32(watchdog_device.threshold,
+		  watchdog_device.timer_load_count_addr);
+
+	/* allow the timer to run */
+	iowrite32(0x00000003, watchdog_device.timer_control_addr);
+
+	return IRQ_HANDLED;
+}
+
+static int intel_scu_keepalive(void)
+{
+	/* read eoi register - clears interrupt */
+	ioread32(watchdog_device.timer_clear_interrupt_addr);
+
+	/* temporarily disable the timer */
+	iowrite32(0x00000002, watchdog_device.timer_control_addr);
+
+	/* set the timer to the soft_threshold */
+	iowrite32(watchdog_device.soft_threshold,
+		  watchdog_device.timer_load_count_addr);
+
+	/* allow the timer to run */
+	iowrite32(0x00000003, watchdog_device.timer_control_addr);
+
+	return 0;
+}
+
+static int intel_scu_stop(void)
+{
+	iowrite32(0, watchdog_device.timer_control_addr);
+	return 0;
+}
+
+static int intel_scu_set_heartbeat(u32 t)
+{
+	int ipc_ret;
+	int retry_count;
+	u32 soft_value;
+	u32 hw_pre_value;
+	u32 hw_value;
+
+	watchdog_device.timer_set = t;
+	watchdog_device.threshold =
+		timer_margin * watchdog_device.timer_tbl_ptr->freq_hz;
+	watchdog_device.soft_threshold =
+		(watchdog_device.timer_set - timer_margin)
+		* watchdog_device.timer_tbl_ptr->freq_hz;
+
+	pr_debug("Watchdog timer: set_heartbeat: timer freq is %d\n",
+		 watchdog_device.timer_tbl_ptr->freq_hz);
+	pr_debug("Watchdog timer: set_heartbeat: timer_set is %x (hex)\n",
+		 watchdog_device.timer_set);
+	pr_debug("Watchdog timer: set_heartbeat: timer_margin is %x (hex)\n",
+		 timer_margin);
+	pr_debug("Watchdog timer: set_heartbeat: threshold is %x (hex)\n",
+		 watchdog_device.threshold);
+	pr_debug("Watchdog timer: set_heartbeat: soft_threshold is %x (hex)\n",
+		 watchdog_device.soft_threshold);
+
+	/* Adjust thresholds by FREQ_ADJUSTMENT factor, to make the */
+	/* watchdog timing come out right. */
+	watchdog_device.threshold =
+		watchdog_device.threshold / FREQ_ADJUSTMENT;
+	watchdog_device.soft_threshold =
+		watchdog_device.soft_threshold / FREQ_ADJUSTMENT;
+
+	/* temporarily disable the timer */
+	iowrite32(0x00000002, watchdog_device.timer_control_addr);
+
+	/* send the threshold and soft_threshold via IPC to the processor */
+	ipc_ret = watchdog_set_ipc(watchdog_device.soft_threshold,
+				   watchdog_device.threshold);
+
+	if (ipc_ret != 0) {
+		/* Make sure the watchdog timer is stopped */
+		intel_scu_stop();
+		return ipc_ret;
+	}
+
+	/* Soft threshold set loop. Early versions of silicon did */
+	/* not always set this count correctly. This loop checks */
+	/* the value and retries if it was not set correctly. */
+
+	retry_count = 0;
+	soft_value = watchdog_device.soft_threshold & 0xFFFF0000;
+	do {
+		/* Make sure timer is stopped */
+		intel_scu_stop();
+
+		if (MAX_RETRY < retry_count++) {
+			/* Unable to set timer value */
+			pr_err("Watchdog timer: Unable to set timer\n");
+			return -ENODEV;
+		}
+
+		/* set the timer to the soft threshold */
+		iowrite32(watchdog_device.soft_threshold,
+			  watchdog_device.timer_load_count_addr);
+
+		/* read count value before starting timer */
+		hw_pre_value = ioread32(watchdog_device.timer_load_count_addr);
+		hw_pre_value = hw_pre_value & 0xFFFF0000;
+
+		/* Start the timer */
+		iowrite32(0x00000003, watchdog_device.timer_control_addr);
+
+		/* read the value the timer loaded into its count reg */
+		hw_value = ioread32(watchdog_device.timer_load_count_addr);
+		hw_value = hw_value & 0xFFFF0000;
+
+	} while (soft_value != hw_value);
+
+	watchdog_device.timer_started = 1;
+
+	return 0;
+}
+
+/*
+ * /dev/watchdog handling
+ */
+
+static int intel_scu_open(struct inode *inode, struct file *file)
+{
+	/* Set flag to indicate that watchdog device is open */
+	if (test_and_set_bit(0, &watchdog_device.driver_open))
+		return -EBUSY;
+
+	/* Check for reopen of driver. Reopens are not allowed */
+	if (watchdog_device.driver_closed)
+		return -EPERM;
+
+	return nonseekable_open(inode, file);
+}
+
+static int intel_scu_release(struct inode *inode, struct file *file)
+{
+	/*
+	 * This watchdog should not be closed after the timer has been
+	 * started with the WDIOC_SETTIMEOUT ioctl.
+	 * If force_boot is set, watchdog_fire() will cause an
+	 * immediate reset. If force_boot is not set, the watchdog
+	 * timer is refreshed for one more interval. At the end
+	 * of that interval, the watchdog timer will reset the system.
+	 */
+
+	if (!test_and_clear_bit(0, &watchdog_device.driver_open)) {
+		pr_debug("Watchdog timer: intel_scu_release, without open\n");
+		return -ENOTTY;
+	}
+
+	if (!watchdog_device.timer_started) {
+		/* Just close, since timer has not been started */
+		pr_debug("Watchdog timer: closed, without starting timer\n");
+		return 0;
+	}
+
+	printk(KERN_CRIT PFX
+	       "Unexpected close of /dev/watchdog!\n");
+
+	/* Since the timer was started, prevent future reopens */
+	watchdog_device.driver_closed = 1;
+
+	/* Refresh the timer for one more interval */
+	intel_scu_keepalive();
+
+	/* Reboot system (if force_boot is set) */
+	watchdog_fire();
+
+	/* We should only reach this point if force_boot is not set */
+	return 0;
+}
+
+static ssize_t intel_scu_write(struct file *file,
+			       const char __user *data,
+			       size_t len,
+			       loff_t *ppos)
+{
+	if (watchdog_device.timer_started)
+		/* Watchdog already started, keep it alive */
+		intel_scu_keepalive();
+	else
+		/* Start watchdog with timer value set by init */
+		intel_scu_set_heartbeat(watchdog_device.timer_set);
+
+	return len;
+}
+
+static long intel_scu_ioctl(struct file *file,
+			    unsigned int cmd,
+			    unsigned long arg)
+{
+	void __user *argp = (void __user *)arg;
+	u32 __user *p = argp;
+	u32 new_margin;
+
+	static const struct watchdog_info ident = {
+		.options = WDIOF_SETTIMEOUT
+			 | WDIOF_KEEPALIVEPING,
+		.firmware_version = 0,	/* @todo Get from SCU via
+					   ipc_get_scu_fw_version()? */
+		.identity = "Intel_SCU IOH Watchdog"	/* len < 32 */
+	};
+
+	switch (cmd) {
+	case WDIOC_GETSUPPORT:
+		return copy_to_user(argp,
+				    &ident,
+				    sizeof(ident)) ? -EFAULT : 0;
+	case WDIOC_GETSTATUS:
+	case WDIOC_GETBOOTSTATUS:
+		return put_user(0, p);
+	case WDIOC_KEEPALIVE:
+		intel_scu_keepalive();
+
+		return 0;
+	case WDIOC_SETTIMEOUT:
+		if (get_user(new_margin, p))
+			return -EFAULT;
+
+		if (check_timer_margin(new_margin))
+			return -EINVAL;
+
+		if (intel_scu_set_heartbeat(new_margin))
+			return -EINVAL;
+		return 0;
+	case WDIOC_GETTIMEOUT:
+		return put_user(watchdog_device.soft_threshold, p);
+
+	default:
+		return -ENOTTY;
+	}
+}
+
+/*
+ * Notifier for system down
+ */
+static int intel_scu_notify_sys(struct notifier_block *this,
+				unsigned long code,
+				void *another_unused)
+{
+	if (code == SYS_DOWN || code == SYS_HALT)
+		/* Turn off the watchdog timer. */
+		intel_scu_stop();
+	return NOTIFY_DONE;
+}
+
+/*
+ * Kernel Interfaces
+ */
+static const struct file_operations intel_scu_fops = {
+	.owner		= THIS_MODULE,
+	.llseek		= no_llseek,
+	.write		= intel_scu_write,
+	.unlocked_ioctl	= intel_scu_ioctl,
+	.open		= intel_scu_open,
+	.release	= intel_scu_release,
+};
+
+static int __init intel_scu_watchdog_init(void)
+{
+	int ret;
+	u32 __iomem *tmp_addr;
+
+	/*
+	 * We don't really need to check this as the SFI timer get will fail,
+	 * but if we do so we can exit with a clearer reason and no noise.
+	 *
+	 * If it isn't an Intel MID device then it doesn't have this watchdog.
+	 */
+	if (!mrst_identify_cpu())
+		return -ENODEV;
+
+	/* Check boot parameters to verify that their initial values */
+	/* are in range. */
+	/* Check value of timer_set boot parameter */
+	if ((timer_set < MIN_TIME_CYCLE) ||
+	    (timer_set > MAX_TIME - MIN_TIME_CYCLE)) {
+		pr_err("Watchdog timer: value of timer_set %x (hex) "
+		       "is out of range from %x to %x (hex)\n",
+		       timer_set, MIN_TIME_CYCLE, MAX_TIME - MIN_TIME_CYCLE);
+		return -EINVAL;
+	}
+
+	/* Check value of timer_margin boot parameter */
+	if (check_timer_margin(timer_margin))
+		return -EINVAL;
+
+	watchdog_device.timer_tbl_ptr = sfi_get_mtmr(sfi_mtimer_num-1);
+
+	if (watchdog_device.timer_tbl_ptr == NULL) {
+		pr_debug("Watchdog timer - Intel SCU watchdog: timer is not available\n");
+		return -ENODEV;
+	}
+	/* make sure the timer exists */
+	if (watchdog_device.timer_tbl_ptr->phys_addr == 0) {
+		pr_debug("Watchdog timer - Intel SCU watchdog - timer %d does not have valid physical memory\n",
+			 sfi_mtimer_num);
+		return -ENODEV;
+	}
+
+	if (watchdog_device.timer_tbl_ptr->irq == 0) {
+		pr_debug("Watchdog timer: timer %d invalid irq\n",
+			 sfi_mtimer_num);
+		return -ENODEV;
+	}
+
+	tmp_addr = ioremap_nocache(watchdog_device.timer_tbl_ptr->phys_addr,
+				   20);
+
+	if (tmp_addr == NULL) {
+		pr_debug("Watchdog timer: timer unable to ioremap\n");
+		return -ENOMEM;
+	}
+
+	watchdog_device.timer_load_count_addr = tmp_addr++;
+	watchdog_device.timer_current_value_addr = tmp_addr++;
+	watchdog_device.timer_control_addr = tmp_addr++;
+	watchdog_device.timer_clear_interrupt_addr = tmp_addr++;
+	watchdog_device.timer_interrupt_status_addr = tmp_addr++;
+
+	/* Set the default time values in device structure */
+	watchdog_device.timer_set = timer_set;
+	watchdog_device.threshold =
+		timer_margin * watchdog_device.timer_tbl_ptr->freq_hz;
+	watchdog_device.soft_threshold =
+		(watchdog_device.timer_set - timer_margin)
+		* watchdog_device.timer_tbl_ptr->freq_hz;
+
+	watchdog_device.intel_scu_notifier.notifier_call =
+		intel_scu_notify_sys;
+
+	ret = register_reboot_notifier(&watchdog_device.intel_scu_notifier);
+	if (ret) {
+		pr_err("Watchdog timer: cannot register reboot notifier (%d)\n", ret);
+		goto register_reboot_error;
+	}
+
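+	/* Expose the timer as the standard /dev/watchdog misc device */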
+	watchdog_device.miscdev.minor = WATCHDOG_MINOR;
+	watchdog_device.miscdev.name = "watchdog";
+	watchdog_device.miscdev.fops = &intel_scu_fops;
+
+	ret = misc_register(&watchdog_device.miscdev);
+	if (ret) {
+		pr_err("Watchdog timer: cannot register miscdev on minor %d (err=%d)\n",
+		       WATCHDOG_MINOR, ret);
+		goto misc_register_error;
+	}
+
+	ret = request_irq((unsigned int)watchdog_device.timer_tbl_ptr->irq,
+			  watchdog_timer_interrupt,
+			  IRQF_SHARED, "watchdog",
+			  &watchdog_device.timer_load_count_addr);
+	if (ret) {
+		pr_err("Watchdog timer: error %d requesting irq\n", ret);
+		goto request_irq_error;
+	}
+	/* Make sure timer is disabled before returning */
+	intel_scu_stop();
+	return 0;
+
+/* error cleanup */
+
+request_irq_error:
+	misc_deregister(&watchdog_device.miscdev);
+misc_register_error:
+	unregister_reboot_notifier(&watchdog_device.intel_scu_notifier);
+register_reboot_error:
+	intel_scu_stop();
+	iounmap(watchdog_device.timer_load_count_addr);
+	return ret;
+}
+
+static void __exit intel_scu_watchdog_exit(void)
+{
+	misc_deregister(&watchdog_device.miscdev);
+	unregister_reboot_notifier(&watchdog_device.intel_scu_notifier);
+	/* disable the timer */
+	iowrite32(0x00000002, watchdog_device.timer_control_addr);
+	iounmap(watchdog_device.timer_load_count_addr);
+}
+
+late_initcall(intel_scu_watchdog_init);
+module_exit(intel_scu_watchdog_exit);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION("Intel SCU Watchdog Device Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
+MODULE_VERSION(WDT_VER);
diff --git a/drivers/watchdog/intel_scu_watchdog.h b/drivers/watchdog/intel_scu_watchdog.h
new file mode 100644
index 000000000000..d2b074a82db6
--- /dev/null
+++ b/drivers/watchdog/intel_scu_watchdog.h
@@ -0,0 +1,66 @@
+/*
+ * Intel_SCU 0.2: An Intel SCU IOH Based Watchdog Device
+ * for Intel part #(s):
+ * - AF82MP20 PCH
+ *
+ * Copyright (C) 2009-2010 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General
+ * Public License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be
+ * useful, but WITHOUT ANY WARRANTY; without even the implied
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the Free
+ * Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ * The full GNU General Public License is included in this
+ * distribution in the file called COPYING.
+ *
+ */
+
+#ifndef __INTEL_SCU_WATCHDOG_H
+#define __INTEL_SCU_WATCHDOG_H
+
+#define PFX "Intel_SCU: "
+#define WDT_VER "0.3"
+
+/* minimum time between interrupts */
+#define MIN_TIME_CYCLE 1
+
+/* Time from warning to reboot is 2 seconds */
+#define DEFAULT_SOFT_TO_HARD_MARGIN 2
+
+#define MAX_TIME 170
+
+#define DEFAULT_TIME 5
+
+#define MAX_SOFT_TO_HARD_MARGIN (MAX_TIME-MIN_TIME_CYCLE)
+
+/* Adjustment to clock tick frequency to make timing come out right */
+#define FREQ_ADJUSTMENT 8
+
+struct intel_scu_watchdog_dev {
+	ulong driver_open;
+	ulong driver_closed;
+	u32 timer_started;
+	u32 timer_set;
+	u32 threshold;
+	u32 soft_threshold;
+	u32 __iomem *timer_load_count_addr;
+	u32 __iomem *timer_current_value_addr;
+	u32 __iomem *timer_control_addr;
+	u32 __iomem *timer_clear_interrupt_addr;
+	u32 __iomem *timer_interrupt_status_addr;
+	struct sfi_timer_table_entry *timer_tbl_ptr;
+	struct notifier_block intel_scu_notifier;
+	struct miscdevice miscdev;
+};
+
+extern int sfi_mtimer_num;
+
+/* extern struct sfi_timer_table_entry *sfi_get_mtmr(int hint); */
+#endif /* __INTEL_SCU_WATCHDOG_H */
diff --git a/drivers/watchdog/it8712f_wdt.c b/drivers/watchdog/it8712f_wdt.c
index b32c6c045b1a..6143f52ba6b8 100644
--- a/drivers/watchdog/it8712f_wdt.c
+++ b/drivers/watchdog/it8712f_wdt.c
@@ -69,7 +69,7 @@ static unsigned short address;
 #define IT8712F_DEVID	0x8712
 
 #define LDN_GPIO	0x07	/* GPIO and Watch Dog Timer */
-#define LDN_GAME 	0x09	/* Game Port */
+#define LDN_GAME	0x09	/* Game Port */
 
 #define WDT_CONTROL	0x71	/* WDT Register: Control */
 #define WDT_CONFIG	0x72	/* WDT Register: Configuration */
diff --git a/drivers/watchdog/it87_wdt.c b/drivers/watchdog/it87_wdt.c
index dad29245a6a7..b1bc72f9a209 100644
--- a/drivers/watchdog/it87_wdt.c
+++ b/drivers/watchdog/it87_wdt.c
@@ -12,7 +12,7 @@
  *	    http://www.ite.com.tw/
  *
  *	Support of the watchdog timers, which are available on
- *	IT8702, IT8712, IT8716, IT8718, IT8720 and IT8726.
+ *	IT8702, IT8712, IT8716, IT8718, IT8720, IT8721 and IT8726.
* * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -45,7 +45,7 @@ #include <asm/system.h> -#define WATCHDOG_VERSION "1.13" +#define WATCHDOG_VERSION "1.14" #define WATCHDOG_NAME "IT87 WDT" #define PFX WATCHDOG_NAME ": " #define DRIVER_VERSION WATCHDOG_NAME " driver, v" WATCHDOG_VERSION "\n" @@ -54,7 +54,7 @@ /* Defaults for Module Parameter */ #define DEFAULT_NOGAMEPORT 0 #define DEFAULT_EXCLUSIVE 1 -#define DEFAULT_TIMEOUT 60 +#define DEFAULT_TIMEOUT 60 #define DEFAULT_TESTMODE 0 #define DEFAULT_NOWAYOUT WATCHDOG_NOWAYOUT @@ -70,9 +70,9 @@ /* Configuration Registers and Functions */ #define LDNREG 0x07 #define CHIPID 0x20 -#define CHIPREV 0x22 +#define CHIPREV 0x22 #define ACTREG 0x30 -#define BASEREG 0x60 +#define BASEREG 0x60 /* Chip Id numbers */ #define NO_DEV_ID 0xffff @@ -82,10 +82,11 @@ #define IT8716_ID 0x8716 #define IT8718_ID 0x8718 #define IT8720_ID 0x8720 +#define IT8721_ID 0x8721 #define IT8726_ID 0x8726 /* the data sheet suggest wrongly 0x8716 */ /* GPIO Configuration Registers LDN=0x07 */ -#define WDTCTRL 0x71 +#define WDTCTRL 0x71 #define WDTCFG 0x72 #define WDTVALLSB 0x73 #define WDTVALMSB 0x74 @@ -94,7 +95,7 @@ #define WDT_CIRINT 0x80 #define WDT_MOUSEINT 0x40 #define WDT_KYBINT 0x20 -#define WDT_GAMEPORT 0x10 /* not in it8718, it8720 */ +#define WDT_GAMEPORT 0x10 /* not in it8718, it8720, it8721 */ #define WDT_FORCE 0x02 #define WDT_ZERO 0x01 @@ -102,11 +103,11 @@ #define WDT_TOV1 0x80 #define WDT_KRST 0x40 #define WDT_TOVE 0x20 -#define WDT_PWROK 0x10 +#define WDT_PWROK 0x10 /* not in it8721 */ #define WDT_INT_MASK 0x0f /* CIR Configuration Register LDN=0x0a */ -#define CIR_ILS 0x70 +#define CIR_ILS 0x70 /* The default Base address is not always available, we use this */ #define CIR_BASE 0x0208 @@ -134,7 +135,7 @@ #define WDTS_USE_GP 4 #define WDTS_EXPECTED 5 -static unsigned int base, gpact, ciract, max_units; +static unsigned int base, gpact, ciract, max_units, chip_type; static unsigned long wdt_status; static DEFINE_SPINLOCK(spinlock); @@ -215,7 +216,7 @@ static inline void superio_outw(int val, int reg) /* Internal function, should be called after superio_select(GPIO) */ static void wdt_update_timeout(void) { - unsigned char cfg = WDT_KRST | WDT_PWROK; + unsigned char cfg = WDT_KRST; int tm = timeout; if (testmode) @@ -226,6 +227,9 @@ static void wdt_update_timeout(void) else tm /= 60; + if (chip_type != IT8721_ID) + cfg |= WDT_PWROK; + superio_outb(cfg, WDTCFG); superio_outb(tm, WDTVALLSB); if (max_units > 255) @@ -555,7 +559,6 @@ static int __init it87_wdt_init(void) { int rc = 0; int try_gameport = !nogameport; - u16 chip_type; u8 chip_rev; unsigned long flags; @@ -581,6 +584,7 @@ static int __init it87_wdt_init(void) break; case IT8718_ID: case IT8720_ID: + case IT8721_ID: max_units = 65535; try_gameport = 0; break; diff --git a/drivers/watchdog/jz4740_wdt.c b/drivers/watchdog/jz4740_wdt.c new file mode 100644 index 000000000000..684ba01fb540 --- /dev/null +++ b/drivers/watchdog/jz4740_wdt.c @@ -0,0 +1,322 @@ +/* + * Copyright (C) 2010, Paul Cercueil <paul@crapouillou.net> + * JZ4740 Watchdog driver + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. + * + */ + +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/fs.h> +#include <linux/miscdevice.h> +#include <linux/watchdog.h> +#include <linux/init.h> +#include <linux/bitops.h> +#include <linux/platform_device.h> +#include <linux/spinlock.h> +#include <linux/uaccess.h> +#include <linux/io.h> +#include <linux/device.h> +#include <linux/clk.h> +#include <linux/slab.h> + +#include <asm/mach-jz4740/timer.h> + +#define JZ_REG_WDT_TIMER_DATA 0x0 +#define JZ_REG_WDT_COUNTER_ENABLE 0x4 +#define JZ_REG_WDT_TIMER_COUNTER 0x8 +#define JZ_REG_WDT_TIMER_CONTROL 0xC + +#define JZ_WDT_CLOCK_PCLK 0x1 +#define JZ_WDT_CLOCK_RTC 0x2 +#define JZ_WDT_CLOCK_EXT 0x4 + +#define WDT_IN_USE 0 +#define WDT_OK_TO_CLOSE 1 + +#define JZ_WDT_CLOCK_DIV_SHIFT 3 + +#define JZ_WDT_CLOCK_DIV_1 (0 << JZ_WDT_CLOCK_DIV_SHIFT) +#define JZ_WDT_CLOCK_DIV_4 (1 << JZ_WDT_CLOCK_DIV_SHIFT) +#define JZ_WDT_CLOCK_DIV_16 (2 << JZ_WDT_CLOCK_DIV_SHIFT) +#define JZ_WDT_CLOCK_DIV_64 (3 << JZ_WDT_CLOCK_DIV_SHIFT) +#define JZ_WDT_CLOCK_DIV_256 (4 << JZ_WDT_CLOCK_DIV_SHIFT) +#define JZ_WDT_CLOCK_DIV_1024 (5 << JZ_WDT_CLOCK_DIV_SHIFT) + +#define DEFAULT_HEARTBEAT 5 +#define MAX_HEARTBEAT 2048 + +static struct { + void __iomem *base; + struct resource *mem; + struct clk *rtc_clk; + unsigned long status; +} jz4740_wdt; + +static int heartbeat = DEFAULT_HEARTBEAT; + + +static void jz4740_wdt_service(void) +{ + writew(0x0, jz4740_wdt.base + JZ_REG_WDT_TIMER_COUNTER); +} + +static void jz4740_wdt_set_heartbeat(int new_heartbeat) +{ + unsigned int rtc_clk_rate; + unsigned int timeout_value; + unsigned short clock_div = JZ_WDT_CLOCK_DIV_1; + + heartbeat = new_heartbeat; + + rtc_clk_rate = clk_get_rate(jz4740_wdt.rtc_clk); + + timeout_value = rtc_clk_rate * heartbeat; + while (timeout_value > 0xffff) { + if (clock_div == JZ_WDT_CLOCK_DIV_1024) { + /* Requested timeout too high; + * use highest possible value. 
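+			 * (the timer data register is only 16 bits
+			 * wide, hence the 0xffff ceiling)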
+			 */
+			timeout_value = 0xffff;
+			break;
+		}
+		timeout_value >>= 2;
+		clock_div += (1 << JZ_WDT_CLOCK_DIV_SHIFT);
+	}
+
+	writeb(0x0, jz4740_wdt.base + JZ_REG_WDT_COUNTER_ENABLE);
+	writew(clock_div, jz4740_wdt.base + JZ_REG_WDT_TIMER_CONTROL);
+
+	writew((u16)timeout_value, jz4740_wdt.base + JZ_REG_WDT_TIMER_DATA);
+	writew(0x0, jz4740_wdt.base + JZ_REG_WDT_TIMER_COUNTER);
+	writew(clock_div | JZ_WDT_CLOCK_RTC,
+	       jz4740_wdt.base + JZ_REG_WDT_TIMER_CONTROL);
+
+	writeb(0x1, jz4740_wdt.base + JZ_REG_WDT_COUNTER_ENABLE);
+}
+
+static void jz4740_wdt_enable(void)
+{
+	jz4740_timer_enable_watchdog();
+	jz4740_wdt_set_heartbeat(heartbeat);
+}
+
+static void jz4740_wdt_disable(void)
+{
+	jz4740_timer_disable_watchdog();
+	writeb(0x0, jz4740_wdt.base + JZ_REG_WDT_COUNTER_ENABLE);
+}
+
+static int jz4740_wdt_open(struct inode *inode, struct file *file)
+{
+	if (test_and_set_bit(WDT_IN_USE, &jz4740_wdt.status))
+		return -EBUSY;
+
+	jz4740_wdt_enable();
+
+	return nonseekable_open(inode, file);
+}
+
+static ssize_t jz4740_wdt_write(struct file *file, const char __user *data,
+		size_t len, loff_t *ppos)
+{
+	if (len) {
+		size_t i;
+
+		clear_bit(WDT_OK_TO_CLOSE, &jz4740_wdt.status);
+		for (i = 0; i != len; i++) {
+			char c;
+
+			if (get_user(c, data + i))
+				return -EFAULT;
+
+			if (c == 'V')
+				set_bit(WDT_OK_TO_CLOSE, &jz4740_wdt.status);
+		}
+		jz4740_wdt_service();
+	}
+
+	return len;
+}
+
+static const struct watchdog_info ident = {
+	.options = WDIOF_KEEPALIVEPING,
+	.identity = "jz4740 Watchdog",
+};
+
+static long jz4740_wdt_ioctl(struct file *file,
+	unsigned int cmd, unsigned long arg)
+{
+	int ret = -ENOTTY;
+	int heartbeat_seconds;
+
+	switch (cmd) {
+	case WDIOC_GETSUPPORT:
+		ret = copy_to_user((struct watchdog_info __user *)arg, &ident,
+				sizeof(ident)) ? -EFAULT : 0;
+		break;
+
+	case WDIOC_GETSTATUS:
+	case WDIOC_GETBOOTSTATUS:
+		ret = put_user(0, (int __user *)arg);
+		break;
+
+	case WDIOC_KEEPALIVE:
+		jz4740_wdt_service();
+		return 0;
+
+	case WDIOC_SETTIMEOUT:
+		if (get_user(heartbeat_seconds, (int __user *)arg))
+			return -EFAULT;
+
+		jz4740_wdt_set_heartbeat(heartbeat_seconds);
+		return 0;
+
+	case WDIOC_GETTIMEOUT:
+		return put_user(heartbeat, (int __user *)arg);
+
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+static int jz4740_wdt_release(struct inode *inode, struct file *file)
+{
+	jz4740_wdt_service();
+
+	if (test_and_clear_bit(WDT_OK_TO_CLOSE, &jz4740_wdt.status))
+		jz4740_wdt_disable();
+
+	clear_bit(WDT_IN_USE, &jz4740_wdt.status);
+	return 0;
+}
+
+static const struct file_operations jz4740_wdt_fops = {
+	.owner = THIS_MODULE,
+	.llseek = no_llseek,
+	.write = jz4740_wdt_write,
+	.unlocked_ioctl = jz4740_wdt_ioctl,
+	.open = jz4740_wdt_open,
+	.release = jz4740_wdt_release,
+};
+
+static struct miscdevice jz4740_wdt_miscdev = {
+	.minor = WATCHDOG_MINOR,
+	.name = "watchdog",
+	.fops = &jz4740_wdt_fops,
+};
+
+static int __devinit jz4740_wdt_probe(struct platform_device *pdev)
+{
+	int ret = 0, size;
+	struct resource *res;
+	struct device *dev = &pdev->dev;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res == NULL) {
+		dev_err(dev, "failed to get memory region resource\n");
+		return -ENXIO;
+	}
+
+	size = resource_size(res);
+	jz4740_wdt.mem = request_mem_region(res->start, size, pdev->name);
+	if (jz4740_wdt.mem == NULL) {
+		dev_err(dev, "failed to get memory region\n");
+		return -EBUSY;
+	}
+
+	jz4740_wdt.base = ioremap_nocache(res->start, size);
+	if (jz4740_wdt.base == NULL) {
+		dev_err(dev, "failed to map memory region\n");
+		ret = -EBUSY;
+		goto err_release_region;
+	}
+
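+	/*
+	 * The watchdog down-counter runs from the RTC clock;
+	 * jz4740_wdt_set_heartbeat() converts seconds into counter
+	 * ticks with clk_get_rate() on this clock.
+	 */
+	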
jz4740_wdt.rtc_clk = clk_get(NULL, "rtc"); + if (IS_ERR(jz4740_wdt.rtc_clk)) { + dev_err(dev, "cannot find RTC clock\n"); + ret = PTR_ERR(jz4740_wdt.rtc_clk); + goto err_iounmap; + } + + ret = misc_register(&jz4740_wdt_miscdev); + if (ret < 0) { + dev_err(dev, "cannot register misc device\n"); + goto err_disable_clk; + } + + return 0; + +err_disable_clk: + clk_put(jz4740_wdt.rtc_clk); +err_iounmap: + iounmap(jz4740_wdt.base); +err_release_region: + release_mem_region(jz4740_wdt.mem->start, + resource_size(jz4740_wdt.mem)); + return ret; +} + + +static int __devexit jz4740_wdt_remove(struct platform_device *pdev) +{ + jz4740_wdt_disable(); + misc_deregister(&jz4740_wdt_miscdev); + clk_put(jz4740_wdt.rtc_clk); + + iounmap(jz4740_wdt.base); + jz4740_wdt.base = NULL; + + release_mem_region(jz4740_wdt.mem->start, + resource_size(jz4740_wdt.mem)); + jz4740_wdt.mem = NULL; + + return 0; +} + + +static struct platform_driver jz4740_wdt_driver = { + .probe = jz4740_wdt_probe, + .remove = __devexit_p(jz4740_wdt_remove), + .driver = { + .name = "jz4740-wdt", + .owner = THIS_MODULE, + }, +}; + + +static int __init jz4740_wdt_init(void) +{ + return platform_driver_register(&jz4740_wdt_driver); +} +module_init(jz4740_wdt_init); + +static void __exit jz4740_wdt_exit(void) +{ + platform_driver_unregister(&jz4740_wdt_driver); +} +module_exit(jz4740_wdt_exit); + +MODULE_AUTHOR("Paul Cercueil <paul@crapouillou.net>"); +MODULE_DESCRIPTION("jz4740 Watchdog Driver"); + +module_param(heartbeat, int, 0); +MODULE_PARM_DESC(heartbeat, + "Watchdog heartbeat period in seconds from 1 to " + __MODULE_STRING(MAX_HEARTBEAT) ", default " + __MODULE_STRING(DEFAULT_HEARTBEAT)); + +MODULE_LICENSE("GPL"); +MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); +MODULE_ALIAS("platform:jz4740-wdt"); diff --git a/drivers/watchdog/machzwd.c b/drivers/watchdog/machzwd.c index 928035069396..1332b838cc58 100644 --- a/drivers/watchdog/machzwd.c +++ b/drivers/watchdog/machzwd.c @@ -54,7 +54,7 @@ /* indexes */ /* size */ #define ZFL_VERSION 0x02 /* 16 */ -#define CONTROL 0x10 /* 16 */ +#define CONTROL 0x10 /* 16 */ #define STATUS 0x12 /* 8 */ #define COUNTER_1 0x0C /* 16 */ #define COUNTER_2 0x0E /* 8 */ diff --git a/drivers/watchdog/max63xx_wdt.c b/drivers/watchdog/max63xx_wdt.c index 3053ff05ca41..7a82ce5a6337 100644 --- a/drivers/watchdog/max63xx_wdt.c +++ b/drivers/watchdog/max63xx_wdt.c @@ -41,7 +41,7 @@ static int nowayout = WATCHDOG_NOWAYOUT; * to ping the watchdog. */ #define MAX6369_WDSET (7 << 0) -#define MAX6369_WDI (1 << 3) +#define MAX6369_WDI (1 << 3) static DEFINE_SPINLOCK(io_lock); diff --git a/drivers/watchdog/mpc8xxx_wdt.c b/drivers/watchdog/mpc8xxx_wdt.c index ea438ad53055..6709d723e017 100644 --- a/drivers/watchdog/mpc8xxx_wdt.c +++ b/drivers/watchdog/mpc8xxx_wdt.c @@ -2,9 +2,9 @@ * mpc8xxx_wdt.c - MPC8xx/MPC83xx/MPC86xx watchdog userspace interface * * Authors: Dave Updegraff <dave@cray.org> - * Kumar Gala <galak@kernel.crashing.org> - * Attribution: from 83xx_wst: Florian Schirmer <jolt@tuxbox.org> - * ..and from sc520_wdt + * Kumar Gala <galak@kernel.crashing.org> + * Attribution: from 83xx_wst: Florian Schirmer <jolt@tuxbox.org> + * ..and from sc520_wdt * Copyright (c) 2008 MontaVista Software, Inc. 
* Anton Vorontsov <avorontsov@ru.mvista.com> * diff --git a/drivers/watchdog/mpcore_wdt.c b/drivers/watchdog/mpcore_wdt.c index b8ec7aca3c8e..2b4af222b5f2 100644 --- a/drivers/watchdog/mpcore_wdt.c +++ b/drivers/watchdog/mpcore_wdt.c @@ -172,7 +172,7 @@ static int mpcore_wdt_release(struct inode *inode, struct file *file) /* * Shut off the timer. - * Lock it in if it's a module and we set nowayout + * Lock it in if it's a module and we set nowayout */ if (wdt->expect_close == 42) mpcore_wdt_stop(wdt); diff --git a/drivers/watchdog/mtx-1_wdt.c b/drivers/watchdog/mtx-1_wdt.c index 08e8a6ab74e1..5ec5ac1f7878 100644 --- a/drivers/watchdog/mtx-1_wdt.c +++ b/drivers/watchdog/mtx-1_wdt.c @@ -190,19 +190,19 @@ static ssize_t mtx1_wdt_write(struct file *file, const char *buf, } static const struct file_operations mtx1_wdt_fops = { - .owner = THIS_MODULE, + .owner = THIS_MODULE, .llseek = no_llseek, .unlocked_ioctl = mtx1_wdt_ioctl, - .open = mtx1_wdt_open, - .write = mtx1_wdt_write, - .release = mtx1_wdt_release, + .open = mtx1_wdt_open, + .write = mtx1_wdt_write, + .release = mtx1_wdt_release, }; static struct miscdevice mtx1_wdt_misc = { - .minor = WATCHDOG_MINOR, - .name = "watchdog", - .fops = &mtx1_wdt_fops, + .minor = WATCHDOG_MINOR, + .name = "watchdog", + .fops = &mtx1_wdt_fops, }; diff --git a/drivers/watchdog/nv_tco.c b/drivers/watchdog/nv_tco.c index 1a50aa7079bf..267377a5a83e 100644 --- a/drivers/watchdog/nv_tco.c +++ b/drivers/watchdog/nv_tco.c @@ -289,7 +289,7 @@ static struct miscdevice nv_tco_miscdev = { * register a pci_driver, because someone else might one day * want to register another driver on the same PCI id. */ -static struct pci_device_id tco_pci_tbl[] = { +static DEFINE_PCI_DEVICE_TABLE(tco_pci_tbl) = { { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SMBUS, PCI_ANY_ID, PCI_ANY_ID, }, { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SMBUS, diff --git a/drivers/watchdog/omap_wdt.c b/drivers/watchdog/omap_wdt.c index 3dd4971160ef..2b4acb86c191 100644 --- a/drivers/watchdog/omap_wdt.c +++ b/drivers/watchdog/omap_wdt.c @@ -124,6 +124,8 @@ static void omap_wdt_set_timeout(struct omap_wdt_dev *wdev) u32 pre_margin = GET_WLDR_VAL(timer_margin); void __iomem *base = wdev->base; + pm_runtime_get_sync(wdev->dev); + /* just count up at 32 KHz */ while (__raw_readl(base + OMAP_WATCHDOG_WPS) & 0x04) cpu_relax(); @@ -131,6 +133,8 @@ static void omap_wdt_set_timeout(struct omap_wdt_dev *wdev) __raw_writel(pre_margin, base + OMAP_WATCHDOG_LDR); while (__raw_readl(base + OMAP_WATCHDOG_WPS) & 0x04) cpu_relax(); + + pm_runtime_put_sync(wdev->dev); } /* @@ -160,6 +164,8 @@ static int omap_wdt_open(struct inode *inode, struct file *file) omap_wdt_ping(wdev); /* trigger loading of new timeout value */ omap_wdt_enable(wdev); + pm_runtime_put_sync(wdev->dev); + return nonseekable_open(inode, file); } @@ -171,6 +177,7 @@ static int omap_wdt_release(struct inode *inode, struct file *file) * Shut off the timer unless NOWAYOUT is defined. */ #ifndef CONFIG_WATCHDOG_NOWAYOUT + pm_runtime_get_sync(wdev->dev); omap_wdt_disable(wdev); @@ -190,9 +197,11 @@ static ssize_t omap_wdt_write(struct file *file, const char __user *data, /* Refresh LOAD_TIME. 
*/ if (len) { + pm_runtime_get_sync(wdev->dev); spin_lock(&wdt_lock); omap_wdt_ping(wdev); spin_unlock(&wdt_lock); + pm_runtime_put_sync(wdev->dev); } return len; } @@ -224,15 +233,18 @@ static long omap_wdt_ioctl(struct file *file, unsigned int cmd, return put_user(omap_prcm_get_reset_sources(), (int __user *)arg); case WDIOC_KEEPALIVE: + pm_runtime_get_sync(wdev->dev); spin_lock(&wdt_lock); omap_wdt_ping(wdev); spin_unlock(&wdt_lock); + pm_runtime_put_sync(wdev->dev); return 0; case WDIOC_SETTIMEOUT: if (get_user(new_margin, (int __user *)arg)) return -EFAULT; omap_wdt_adjust_timeout(new_margin); + pm_runtime_get_sync(wdev->dev); spin_lock(&wdt_lock); omap_wdt_disable(wdev); omap_wdt_set_timeout(wdev); @@ -240,6 +252,7 @@ static long omap_wdt_ioctl(struct file *file, unsigned int cmd, omap_wdt_ping(wdev); spin_unlock(&wdt_lock); + pm_runtime_put_sync(wdev->dev); /* Fall */ case WDIOC_GETTIMEOUT: return put_user(timer_margin, (int __user *)arg); @@ -345,8 +358,11 @@ static void omap_wdt_shutdown(struct platform_device *pdev) { struct omap_wdt_dev *wdev = platform_get_drvdata(pdev); - if (wdev->omap_wdt_users) + if (wdev->omap_wdt_users) { + pm_runtime_get_sync(wdev->dev); omap_wdt_disable(wdev); + pm_runtime_put_sync(wdev->dev); + } } static int __devexit omap_wdt_remove(struct platform_device *pdev) @@ -381,8 +397,11 @@ static int omap_wdt_suspend(struct platform_device *pdev, pm_message_t state) { struct omap_wdt_dev *wdev = platform_get_drvdata(pdev); - if (wdev->omap_wdt_users) + if (wdev->omap_wdt_users) { + pm_runtime_get_sync(wdev->dev); omap_wdt_disable(wdev); + pm_runtime_put_sync(wdev->dev); + } return 0; } @@ -392,8 +411,10 @@ static int omap_wdt_resume(struct platform_device *pdev) struct omap_wdt_dev *wdev = platform_get_drvdata(pdev); if (wdev->omap_wdt_users) { + pm_runtime_get_sync(wdev->dev); omap_wdt_enable(wdev); omap_wdt_ping(wdev); + pm_runtime_put_sync(wdev->dev); } return 0; diff --git a/drivers/watchdog/omap_wdt.h b/drivers/watchdog/omap_wdt.h index fc02ec6a0386..09b774cf75b9 100644 --- a/drivers/watchdog/omap_wdt.h +++ b/drivers/watchdog/omap_wdt.h @@ -44,7 +44,7 @@ * months before firing. These limits work without scaling, * with the 60 second default assumed by most tools and docs. */ -#define TIMER_MARGIN_MAX (24 * 60 * 60) /* 1 day */ +#define TIMER_MARGIN_MAX (24 * 60 * 60) /* 1 day */ #define TIMER_MARGIN_DEFAULT 60 /* 60 secs */ #define TIMER_MARGIN_MIN 1 diff --git a/drivers/watchdog/pc87413_wdt.c b/drivers/watchdog/pc87413_wdt.c index 3a56bc360924..139d773300c6 100644 --- a/drivers/watchdog/pc87413_wdt.c +++ b/drivers/watchdog/pc87413_wdt.c @@ -514,7 +514,7 @@ static struct miscdevice pc87413_miscdev = { /* -- Module init functions -------------------------------------*/ /** - * pc87413_init: module's "constructor" + * pc87413_init: module's "constructor" * * Set up the WDT watchdog board. All we have to do is grab the * resources we require and bitch if anyone beat us to them. 
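The omap_wdt.c hunks above all make the same change: every MMIO access to the watchdog is now bracketed by pm_runtime_get_sync()/pm_runtime_put_sync(), so the device's interface clock is guaranteed to be running for the register accesses and can be released again immediately afterwards. A minimal sketch of that pattern, assuming a hypothetical driver context (struct my_wdt_dev and my_wdt_ping() are illustrative stand-ins, not symbols from the patch):

#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/spinlock.h>

struct my_wdt_dev {
	struct device *dev;
	spinlock_t lock;
};

static void my_wdt_ping(struct my_wdt_dev *wdev)
{
	/* hypothetical register sequence that reloads the counter */
}

static void my_wdt_keepalive(struct my_wdt_dev *wdev)
{
	/* Bring the device (and its interface clock) up before any MMIO. */
	pm_runtime_get_sync(wdev->dev);

	spin_lock(&wdev->lock);
	my_wdt_ping(wdev);
	spin_unlock(&wdev->lock);

	/* Drop the reference so the device may idle between pings. */
	pm_runtime_put_sync(wdev->dev);
}

The same bracketing shows up in the suspend, resume and shutdown paths above, since disabling the watchdog is itself a register access and needs the clock running.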
diff --git a/drivers/watchdog/pcwd_pci.c b/drivers/watchdog/pcwd_pci.c index 64374d636f09..b8d14f88f0b5 100644 --- a/drivers/watchdog/pcwd_pci.c +++ b/drivers/watchdog/pcwd_pci.c @@ -817,7 +817,7 @@ static void __devexit pcipcwd_card_exit(struct pci_dev *pdev) cards_found--; } -static struct pci_device_id pcipcwd_pci_tbl[] = { +static DEFINE_PCI_DEVICE_TABLE(pcipcwd_pci_tbl) = { { PCI_VENDOR_ID_QUICKLOGIC, PCI_DEVICE_ID_WATCHDOG_PCIPCWD, PCI_ANY_ID, PCI_ANY_ID, }, { 0 }, /* End of list */ diff --git a/drivers/watchdog/pnx4008_wdt.c b/drivers/watchdog/pnx4008_wdt.c index bf5b97c546eb..c7cf4cbf8ab3 100644 --- a/drivers/watchdog/pnx4008_wdt.c +++ b/drivers/watchdog/pnx4008_wdt.c @@ -4,7 +4,7 @@ * Watchdog driver for PNX4008 board * * Authors: Dmitry Chigirev <source@mvista.com>, - * Vitaly Wool <vitalywool@gmail.com> + * Vitaly Wool <vitalywool@gmail.com> * Based on sa1100 driver, * Copyright (C) 2000 Oleg Drokin <green@crimea.edu> * diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c index ae53662c29bc..25b39bf35925 100644 --- a/drivers/watchdog/s3c2410_wdt.c +++ b/drivers/watchdog/s3c2410_wdt.c @@ -224,7 +224,7 @@ static int s3c2410wdt_release(struct inode *inode, struct file *file) { /* * Shut off the timer. - * Lock it in if it's a module and we set nowayout + * Lock it in if it's a module and we set nowayout */ if (expect_close == 42) diff --git a/drivers/watchdog/sbc8360.c b/drivers/watchdog/sbc8360.c index 68e2e2d6f73d..514ec23050f7 100644 --- a/drivers/watchdog/sbc8360.c +++ b/drivers/watchdog/sbc8360.c @@ -114,7 +114,7 @@ static char expect_close; * C | 6.5s 65s 650s 1300s * D | 7s 70s 700s 1400s * E | 7.5s 75s 750s 1500s - * F | 8s 80s 800s 1600s + * F | 8s 80s 800s 1600s * * Another way to say the same things is: * For N=1, Timeout = (M+1) * 0.5s diff --git a/drivers/watchdog/sbc_epx_c3.c b/drivers/watchdog/sbc_epx_c3.c index 28f1214457bd..3066a5127ca8 100644 --- a/drivers/watchdog/sbc_epx_c3.c +++ b/drivers/watchdog/sbc_epx_c3.c @@ -220,7 +220,7 @@ module_exit(watchdog_exit); MODULE_AUTHOR("Calin A. Culianu <calin@ajvar.org>"); MODULE_DESCRIPTION("Hardware Watchdog Device for Winsystems EPX-C3 SBC. " "Note that there is no way to probe for this device -- " - "so only use it if you are *sure* you are runnning on this specific " + "so only use it if you are *sure* you are running on this specific " "SBC system from Winsystems! 
It writes to IO ports 0x1ee and 0x1ef!"); MODULE_LICENSE("GPL"); MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/sbc_fitpc2_wdt.c b/drivers/watchdog/sbc_fitpc2_wdt.c index 79906255eeb6..d5d399464599 100644 --- a/drivers/watchdog/sbc_fitpc2_wdt.c +++ b/drivers/watchdog/sbc_fitpc2_wdt.c @@ -41,7 +41,7 @@ static DEFINE_MUTEX(wdt_lock); #define IFACE_ON_COMMAND 1 #define REBOOT_COMMAND 2 -#define WATCHDOG_NAME "SBC-FITPC2 Watchdog" +#define WATCHDOG_NAME "SBC-FITPC2 Watchdog" static void wdt_send_data(unsigned char command, unsigned char data) { diff --git a/drivers/watchdog/smsc37b787_wdt.c b/drivers/watchdog/smsc37b787_wdt.c index 8a1f0bc3e271..df88cfa05f35 100644 --- a/drivers/watchdog/smsc37b787_wdt.c +++ b/drivers/watchdog/smsc37b787_wdt.c @@ -434,11 +434,11 @@ static long wb_smsc_wdt_ioctl(struct file *file, } uarg; static const struct watchdog_info ident = { - .options = WDIOF_KEEPALIVEPING | + .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE, .firmware_version = 0, - .identity = "SMsC 37B787 Watchdog", + .identity = "SMsC 37B787 Watchdog", }; uarg.i = (int __user *)arg; diff --git a/drivers/watchdog/softdog.c b/drivers/watchdog/softdog.c index 833f49f43d43..100b114e3c3c 100644 --- a/drivers/watchdog/softdog.c +++ b/drivers/watchdog/softdog.c @@ -151,7 +151,7 @@ static int softdog_release(struct inode *inode, struct file *file) { /* * Shut off the timer. - * Lock it in if it's a module and we set nowayout + * Lock it in if it's a module and we set nowayout */ if (expect_close == 42) { softdog_stop(); diff --git a/drivers/watchdog/sp5100_tco.c b/drivers/watchdog/sp5100_tco.c index 808372883e88..1bc493848ed4 100644 --- a/drivers/watchdog/sp5100_tco.c +++ b/drivers/watchdog/sp5100_tco.c @@ -259,7 +259,7 @@ static struct miscdevice sp5100_tco_miscdev = { * register a pci_driver, because someone else might * want to register another driver on the same PCI id. 
*/ -static struct pci_device_id sp5100_tco_pci_tbl[] = { +static DEFINE_PCI_DEVICE_TABLE(sp5100_tco_pci_tbl) = { { PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS, PCI_ANY_ID, PCI_ANY_ID, }, { 0, }, /* End of list */ diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c index 9127eda2145b..0a0efe713bc8 100644 --- a/drivers/watchdog/sp805_wdt.c +++ b/drivers/watchdog/sp805_wdt.c @@ -278,7 +278,7 @@ static struct miscdevice sp805_wdt_miscdev = { }; static int __devinit -sp805_wdt_probe(struct amba_device *adev, struct amba_id *id) +sp805_wdt_probe(struct amba_device *adev, const struct amba_id *id) { int ret = 0; diff --git a/drivers/watchdog/ts72xx_wdt.c b/drivers/watchdog/ts72xx_wdt.c index 18cdeb4c4258..5a90a4a871dd 100644 --- a/drivers/watchdog/ts72xx_wdt.c +++ b/drivers/watchdog/ts72xx_wdt.c @@ -68,7 +68,7 @@ struct platform_device *ts72xx_wdt_pdev; * to control register): * value description * ------------------------- - * 0x00 watchdog disabled + * 0x00 watchdog disabled * 0x01 250ms * 0x02 500ms * 0x03 1s diff --git a/drivers/watchdog/w83697ug_wdt.c b/drivers/watchdog/w83697ug_wdt.c index df2a64dc9672..be9c4d839e15 100644 --- a/drivers/watchdog/w83697ug_wdt.c +++ b/drivers/watchdog/w83697ug_wdt.c @@ -87,10 +87,10 @@ static int w83697ug_select_wd_register(void) outb_p(0x87, WDT_EFER); /* Enter extended function mode */ outb_p(0x87, WDT_EFER); /* Again according to manual */ - outb(0x20, WDT_EFER); /* check chip version */ + outb(0x20, WDT_EFER); /* check chip version */ version = inb(WDT_EFDR); - if (version == 0x68) { /* W83697UG */ + if (version == 0x68) { /* W83697UG */ printk(KERN_INFO PFX "Watchdog chip version 0x%02x = " "W83697UG/UF found at 0x%04x\n", version, wdt_io); diff --git a/drivers/watchdog/wdt.c b/drivers/watchdog/wdt.c index 552a4381e78f..bb03e151a1d0 100644 --- a/drivers/watchdog/wdt.c +++ b/drivers/watchdog/wdt.c @@ -581,7 +581,7 @@ static void __exit wdt_exit(void) } /** - * wdt_init: + * wdt_init: * * Set up the WDT watchdog board. All we have to do is grab the * resources we require and bitch if anyone beat us to them. diff --git a/drivers/watchdog/wdt977.c b/drivers/watchdog/wdt977.c index 5c2521fc836c..a2f01c9f5c34 100644 --- a/drivers/watchdog/wdt977.c +++ b/drivers/watchdog/wdt977.c @@ -281,7 +281,7 @@ static int wdt977_release(struct inode *inode, struct file *file) { /* * Shut off the timer. - * Lock it in if it's a module and we set nowayout + * Lock it in if it's a module and we set nowayout */ if (expect_close == 42) { wdt977_stop(); diff --git a/drivers/watchdog/wdt_pci.c b/drivers/watchdog/wdt_pci.c index 6130c88fa5ac..172dad6c7693 100644 --- a/drivers/watchdog/wdt_pci.c +++ b/drivers/watchdog/wdt_pci.c @@ -31,7 +31,7 @@ * Jeff Garzik : PCI cleanups * Tigran Aivazian : Restructured wdtpci_init_one() to handle * failures - * Joel Becker : Added WDIOC_GET/SETTIMEOUT + * Joel Becker : Added WDIOC_GET/SETTIMEOUT * Zwane Mwaikambo : Magic char closing, locking changes, * cleanups * Matt Domsch : nowayout module option @@ -727,7 +727,7 @@ static void __devexit wdtpci_remove_one(struct pci_dev *pdev) } -static struct pci_device_id wdtpci_pci_tbl[] = { +static DEFINE_PCI_DEVICE_TABLE(wdtpci_pci_tbl) = { { .vendor = PCI_VENDOR_ID_ACCESSIO, .device = PCI_DEVICE_ID_ACCESSIO_WDG_CSM, @@ -764,7 +764,7 @@ static void __exit wdtpci_cleanup(void) /** - * wdtpci_init: + * wdtpci_init: * * Set up the WDT watchdog board. All we have to do is grab the * resources we require and bitch if anyone beat us to them. 
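Another recurring change across the watchdog drivers in this series (nv_tco, pcwd_pci, sp5100_tco and wdt_pci above): open-coded static struct pci_device_id tables are converted to DEFINE_PCI_DEVICE_TABLE(), which at this kernel vintage expands to a const struct pci_device_id array annotated __devinitconst. A minimal sketch of the resulting shape, with invented IDs for illustration:

#include <linux/module.h>
#include <linux/pci.h>

#define EXAMPLE_VENDOR_ID	0x1234	/* hypothetical */
#define EXAMPLE_DEVICE_ID	0x5678	/* hypothetical */

static DEFINE_PCI_DEVICE_TABLE(example_pci_tbl) = {
	{ EXAMPLE_VENDOR_ID, EXAMPLE_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, },
	{ 0, },			/* end of list */
};
MODULE_DEVICE_TABLE(pci, example_pci_tbl);

Registration via MODULE_DEVICE_TABLE() and pci_register_driver() is unchanged; only the constness and section placement of the table differ.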
diff --git a/drivers/watchdog/xen_wdt.c b/drivers/watchdog/xen_wdt.c new file mode 100644 index 000000000000..49bd9d395562 --- /dev/null +++ b/drivers/watchdog/xen_wdt.c @@ -0,0 +1,359 @@ +/* + * Xen Watchdog Driver + * + * (c) Copyright 2010 Novell, Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#define DRV_NAME "wdt" +#define DRV_VERSION "0.01" +#define PFX DRV_NAME ": " + +#include <linux/bug.h> +#include <linux/errno.h> +#include <linux/fs.h> +#include <linux/hrtimer.h> +#include <linux/kernel.h> +#include <linux/ktime.h> +#include <linux/init.h> +#include <linux/miscdevice.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/platform_device.h> +#include <linux/spinlock.h> +#include <linux/uaccess.h> +#include <linux/watchdog.h> +#include <xen/xen.h> +#include <asm/xen/hypercall.h> +#include <xen/interface/sched.h> + +static struct platform_device *platform_device; +static DEFINE_SPINLOCK(wdt_lock); +static struct sched_watchdog wdt; +static __kernel_time_t wdt_expires; +static bool is_active, expect_release; + +#define WATCHDOG_TIMEOUT 60 /* in seconds */ +static unsigned int timeout = WATCHDOG_TIMEOUT; +module_param(timeout, uint, S_IRUGO); +MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds " + "(default=" __MODULE_STRING(WATCHDOG_TIMEOUT) ")"); + +static bool nowayout = WATCHDOG_NOWAYOUT; +module_param(nowayout, bool, S_IRUGO); +MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started " + "(default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); + +static inline __kernel_time_t set_timeout(void) +{ + wdt.timeout = timeout; + return ktime_to_timespec(ktime_get()).tv_sec + timeout; +} + +static int xen_wdt_start(void) +{ + __kernel_time_t expires; + int err; + + spin_lock(&wdt_lock); + + expires = set_timeout(); + if (!wdt.id) + err = HYPERVISOR_sched_op(SCHEDOP_watchdog, &wdt); + else + err = -EBUSY; + if (err > 0) { + wdt.id = err; + wdt_expires = expires; + err = 0; + } else + BUG_ON(!err); + + spin_unlock(&wdt_lock); + + return err; +} + +static int xen_wdt_stop(void) +{ + int err = 0; + + spin_lock(&wdt_lock); + + wdt.timeout = 0; + if (wdt.id) + err = HYPERVISOR_sched_op(SCHEDOP_watchdog, &wdt); + if (!err) + wdt.id = 0; + + spin_unlock(&wdt_lock); + + return err; +} + +static int xen_wdt_kick(void) +{ + __kernel_time_t expires; + int err; + + spin_lock(&wdt_lock); + + expires = set_timeout(); + if (wdt.id) + err = HYPERVISOR_sched_op(SCHEDOP_watchdog, &wdt); + else + err = -ENXIO; + if (!err) + wdt_expires = expires; + + spin_unlock(&wdt_lock); + + return err; +} + +static int xen_wdt_open(struct inode *inode, struct file *file) +{ + int err; + + /* /dev/watchdog can only be opened once */ + if (xchg(&is_active, true)) + return -EBUSY; + + err = xen_wdt_start(); + if (err == -EBUSY) + err = xen_wdt_kick(); + return err ?: nonseekable_open(inode, file); +} + +static int xen_wdt_release(struct inode *inode, struct file *file) +{ + if (expect_release) + xen_wdt_stop(); + else { + printk(KERN_CRIT PFX + "unexpected close, not stopping watchdog!\n"); + xen_wdt_kick(); + } + is_active = false; + expect_release = false; + return 0; +} + +static ssize_t xen_wdt_write(struct file *file, const char __user *data, + size_t len, loff_t *ppos) +{ + /* See if we got the magic character 'V' and reload the timer */ + if (len) { + if 
(!nowayout) { + size_t i; + + /* in case it was set long ago */ + expect_release = false; + + /* scan to see whether or not we got the magic + character */ + for (i = 0; i != len; i++) { + char c; + if (get_user(c, data + i)) + return -EFAULT; + if (c == 'V') + expect_release = true; + } + } + + /* someone wrote to us, we should reload the timer */ + xen_wdt_kick(); + } + return len; +} + +static long xen_wdt_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + int new_options, retval = -EINVAL; + int new_timeout; + int __user *argp = (void __user *)arg; + static const struct watchdog_info ident = { + .options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE, + .firmware_version = 0, + .identity = DRV_NAME, + }; + + switch (cmd) { + case WDIOC_GETSUPPORT: + return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0; + + case WDIOC_GETSTATUS: + case WDIOC_GETBOOTSTATUS: + return put_user(0, argp); + + case WDIOC_SETOPTIONS: + if (get_user(new_options, argp)) + return -EFAULT; + + if (new_options & WDIOS_DISABLECARD) + retval = xen_wdt_stop(); + if (new_options & WDIOS_ENABLECARD) { + retval = xen_wdt_start(); + if (retval == -EBUSY) + retval = xen_wdt_kick(); + } + return retval; + + case WDIOC_KEEPALIVE: + xen_wdt_kick(); + return 0; + + case WDIOC_SETTIMEOUT: + if (get_user(new_timeout, argp)) + return -EFAULT; + if (!new_timeout) + return -EINVAL; + timeout = new_timeout; + xen_wdt_kick(); + /* fall through */ + case WDIOC_GETTIMEOUT: + return put_user(timeout, argp); + + case WDIOC_GETTIMELEFT: + retval = wdt_expires - ktime_to_timespec(ktime_get()).tv_sec; + return put_user(retval, argp); + } + + return -ENOTTY; +} + +static const struct file_operations xen_wdt_fops = { + .owner = THIS_MODULE, + .llseek = no_llseek, + .write = xen_wdt_write, + .unlocked_ioctl = xen_wdt_ioctl, + .open = xen_wdt_open, + .release = xen_wdt_release, +}; + +static struct miscdevice xen_wdt_miscdev = { + .minor = WATCHDOG_MINOR, + .name = "watchdog", + .fops = &xen_wdt_fops, +}; + +static int __devinit xen_wdt_probe(struct platform_device *dev) +{ + struct sched_watchdog wd = { .id = ~0 }; + int ret = HYPERVISOR_sched_op(SCHEDOP_watchdog, &wd); + + switch (ret) { + case -EINVAL: + if (!timeout) { + timeout = WATCHDOG_TIMEOUT; + printk(KERN_INFO PFX + "timeout value invalid, using %d\n", timeout); + } + + ret = misc_register(&xen_wdt_miscdev); + if (ret) { + printk(KERN_ERR PFX + "cannot register miscdev on minor=%d (%d)\n", + WATCHDOG_MINOR, ret); + break; + } + + printk(KERN_INFO PFX + "initialized (timeout=%ds, nowayout=%d)\n", + timeout, nowayout); + break; + + case -ENOSYS: + printk(KERN_INFO PFX "not supported\n"); + ret = -ENODEV; + break; + + default: + printk(KERN_INFO PFX "bogus return value %d\n", ret); + break; + } + + return ret; +} + +static int __devexit xen_wdt_remove(struct platform_device *dev) +{ + /* Stop the timer before we leave */ + if (!nowayout) + xen_wdt_stop(); + + misc_deregister(&xen_wdt_miscdev); + + return 0; +} + +static void xen_wdt_shutdown(struct platform_device *dev) +{ + xen_wdt_stop(); +} + +static int xen_wdt_suspend(struct platform_device *dev, pm_message_t state) +{ + return xen_wdt_stop(); +} + +static int xen_wdt_resume(struct platform_device *dev) +{ + return xen_wdt_start(); +} + +static struct platform_driver xen_wdt_driver = { + .probe = xen_wdt_probe, + .remove = __devexit_p(xen_wdt_remove), + .shutdown = xen_wdt_shutdown, + .suspend = xen_wdt_suspend, + .resume = xen_wdt_resume, + .driver = { + .owner = THIS_MODULE, + .name = DRV_NAME, + }, +}; + 
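Worth noting from the xen_wdt.c code above: the whole driver is a thin wrapper around the SCHEDOP_watchdog hypercall. Calling it with .id == 0 creates the domain's watchdog timer and returns its (positive) id; calling it again with that id and a non-zero .timeout re-arms the timer; a .timeout of zero releases it. A minimal sketch of just that state machine, with the misc-device plumbing and the wdt_lock serialization stripped out (dog_arm() and dog_disarm() are illustrative names, not part of the patch):

#include <xen/interface/sched.h>
#include <asm/xen/hypercall.h>

static struct sched_watchdog dog;	/* .id == 0 means "not armed" */

static int dog_arm(unsigned int seconds)
{
	int ret;

	dog.timeout = seconds;
	ret = HYPERVISOR_sched_op(SCHEDOP_watchdog, &dog);
	if (!dog.id && ret > 0) {
		dog.id = ret;	/* first call returns the new timer id */
		ret = 0;
	}
	return ret;		/* 0 on success, negative errno otherwise */
}

static int dog_disarm(void)
{
	int ret;

	dog.timeout = 0;	/* a timeout of zero releases the timer */
	ret = HYPERVISOR_sched_op(SCHEDOP_watchdog, &dog);
	if (!ret)
		dog.id = 0;
	return ret;
}

The hypervisor, not the guest, enforces the timeout, which is why the suspend and shutdown handlers above must stop the timer before the VM is paused.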
+static int __init xen_wdt_init_module(void) +{ + int err; + + if (!xen_domain()) + return -ENODEV; + + printk(KERN_INFO PFX "Xen WatchDog Timer Driver v%s\n", DRV_VERSION); + + err = platform_driver_register(&xen_wdt_driver); + if (err) + return err; + + platform_device = platform_device_register_simple(DRV_NAME, + -1, NULL, 0); + if (IS_ERR(platform_device)) { + err = PTR_ERR(platform_device); + platform_driver_unregister(&xen_wdt_driver); + } + + return err; +} + +static void __exit xen_wdt_cleanup_module(void) +{ + platform_device_unregister(platform_device); + platform_driver_unregister(&xen_wdt_driver); + printk(KERN_INFO PFX "module unloaded\n"); +} + +module_init(xen_wdt_init_module); +module_exit(xen_wdt_cleanup_module); + +MODULE_AUTHOR("Jan Beulich <jbeulich@novell.com>"); +MODULE_DESCRIPTION("Xen WatchDog Timer Driver"); +MODULE_VERSION(DRV_VERSION); +MODULE_LICENSE("GPL"); +MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig index 07bec09d1dad..a59638b37c1a 100644 --- a/drivers/xen/Kconfig +++ b/drivers/xen/Kconfig @@ -76,10 +76,20 @@ config XEN_XENBUS_FRONTEND config XEN_GNTDEV tristate "userspace grant access device driver" depends on XEN + default m select MMU_NOTIFIER help Allows userspace processes to use grants. +config XEN_GRANT_DEV_ALLOC + tristate "User-space grant reference allocator driver" + depends on XEN + default m + help + Allows userspace processes to create pages with access granted + to other domains. This can be used to implement frontend drivers + or as part of an inter-domain shared memory channel. + config XEN_PLATFORM_PCI tristate "xen platform pci device driver" depends on XEN_PVHVM && PCI diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile index 5088cc2e6fe2..f420f1ff7f13 100644 --- a/drivers/xen/Makefile +++ b/drivers/xen/Makefile @@ -1,4 +1,4 @@ -obj-y += grant-table.o features.o events.o manage.o +obj-y += grant-table.o features.o events.o manage.o balloon.o obj-y += xenbus/ nostackp := $(call cc-option, -fno-stack-protector) @@ -7,9 +7,10 @@ CFLAGS_features.o := $(nostackp) obj-$(CONFIG_BLOCK) += biomerge.o obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o obj-$(CONFIG_XEN_XENCOMM) += xencomm.o -obj-$(CONFIG_XEN_BALLOON) += balloon.o +obj-$(CONFIG_XEN_BALLOON) += xen-balloon.o obj-$(CONFIG_XEN_DEV_EVTCHN) += xen-evtchn.o obj-$(CONFIG_XEN_GNTDEV) += xen-gntdev.o +obj-$(CONFIG_XEN_GRANT_DEV_ALLOC) += xen-gntalloc.o obj-$(CONFIG_XENFS) += xenfs/ obj-$(CONFIG_XEN_SYS_HYPERVISOR) += sys-hypervisor.o obj-$(CONFIG_XEN_PLATFORM_PCI) += xen-platform-pci.o @@ -18,5 +19,6 @@ obj-$(CONFIG_XEN_DOM0) += pci.o xen-evtchn-y := evtchn.o xen-gntdev-y := gntdev.o +xen-gntalloc-y := gntalloc.o xen-platform-pci-y := platform-pci.o diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index 718050ace08f..043af8ad6b60 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c @@ -1,6 +1,4 @@ /****************************************************************************** - * balloon.c - * * Xen balloon driver - enables returning/claiming memory to/from Xen. 
* * Copyright (c) 2003, B Dragovic @@ -33,7 +31,6 @@ */ #include <linux/kernel.h> -#include <linux/module.h> #include <linux/sched.h> #include <linux/errno.h> #include <linux/mm.h> @@ -42,13 +39,11 @@ #include <linux/highmem.h> #include <linux/mutex.h> #include <linux/list.h> -#include <linux/sysdev.h> #include <linux/gfp.h> #include <asm/page.h> #include <asm/pgalloc.h> #include <asm/pgtable.h> -#include <asm/uaccess.h> #include <asm/tlb.h> #include <asm/e820.h> @@ -58,35 +53,29 @@ #include <xen/xen.h> #include <xen/interface/xen.h> #include <xen/interface/memory.h> -#include <xen/xenbus.h> +#include <xen/balloon.h> #include <xen/features.h> #include <xen/page.h> -#define PAGES2KB(_p) ((_p)<<(PAGE_SHIFT-10)) - -#define BALLOON_CLASS_NAME "xen_memory" +/* + * balloon_process() state: + * + * BP_DONE: done or nothing to do, + * BP_EAGAIN: error, go to sleep, + * BP_ECANCELED: error, balloon operation canceled. + */ -struct balloon_stats { - /* We aim for 'current allocation' == 'target allocation'. */ - unsigned long current_pages; - unsigned long target_pages; - /* - * Drivers may alter the memory reservation independently, but they - * must inform the balloon driver so we avoid hitting the hard limit. - */ - unsigned long driver_pages; - /* Number of pages in high- and low-memory balloons. */ - unsigned long balloon_low; - unsigned long balloon_high; +enum bp_state { + BP_DONE, + BP_EAGAIN, + BP_ECANCELED }; -static DEFINE_MUTEX(balloon_mutex); - -static struct sys_device balloon_sysdev; -static int register_balloon(struct sys_device *sysdev); +static DEFINE_MUTEX(balloon_mutex); -static struct balloon_stats balloon_stats; +struct balloon_stats balloon_stats; +EXPORT_SYMBOL_GPL(balloon_stats); /* We increase/decrease in batches which fit in a page */ static unsigned long frame_list[PAGE_SIZE / sizeof(unsigned long)]; @@ -104,8 +93,7 @@ static LIST_HEAD(ballooned_pages); /* Main work function, always executed in process context. */ static void balloon_process(struct work_struct *work); -static DECLARE_WORK(balloon_worker, balloon_process); -static struct timer_list balloon_timer; +static DECLARE_DELAYED_WORK(balloon_worker, balloon_process); /* When ballooning out (allocating memory to return to Xen) we don't really want the kernel to try too hard since that can trigger the oom killer. */ @@ -140,14 +128,17 @@ static void balloon_append(struct page *page) } /* balloon_retrieve: rescue a page from the balloon, if it is not empty. 
*/ -static struct page *balloon_retrieve(void) +static struct page *balloon_retrieve(bool prefer_highmem) { struct page *page; if (list_empty(&ballooned_pages)) return NULL; - page = list_entry(ballooned_pages.next, struct page, lru); + if (prefer_highmem) + page = list_entry(ballooned_pages.prev, struct page, lru); + else + page = list_entry(ballooned_pages.next, struct page, lru); list_del(&page->lru); if (PageHighMem(page)) { @@ -177,9 +168,29 @@ static struct page *balloon_next_page(struct page *page) return list_entry(next, struct page, lru); } -static void balloon_alarm(unsigned long unused) +static enum bp_state update_schedule(enum bp_state state) { - schedule_work(&balloon_worker); + if (state == BP_DONE) { + balloon_stats.schedule_delay = 1; + balloon_stats.retry_count = 1; + return BP_DONE; + } + + ++balloon_stats.retry_count; + + if (balloon_stats.max_retry_count != RETRY_UNLIMITED && + balloon_stats.retry_count > balloon_stats.max_retry_count) { + balloon_stats.schedule_delay = 1; + balloon_stats.retry_count = 1; + return BP_ECANCELED; + } + + balloon_stats.schedule_delay <<= 1; + + if (balloon_stats.schedule_delay > balloon_stats.max_schedule_delay) + balloon_stats.schedule_delay = balloon_stats.max_schedule_delay; + + return BP_EAGAIN; } static unsigned long current_target(void) @@ -194,11 +205,11 @@ static unsigned long current_target(void) return target; } -static int increase_reservation(unsigned long nr_pages) +static enum bp_state increase_reservation(unsigned long nr_pages) { + int rc; unsigned long pfn, i; struct page *page; - long rc; struct xen_memory_reservation reservation = { .address_bits = 0, .extent_order = 0, @@ -210,7 +221,10 @@ static int increase_reservation(unsigned long nr_pages) page = balloon_first_page(); for (i = 0; i < nr_pages; i++) { - BUG_ON(page == NULL); + if (!page) { + nr_pages = i; + break; + } frame_list[i] = page_to_pfn(page); page = balloon_next_page(page); } @@ -218,11 +232,11 @@ static int increase_reservation(unsigned long nr_pages) set_xen_guest_handle(reservation.extent_start, frame_list); reservation.nr_extents = nr_pages; rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation); - if (rc < 0) - goto out; + if (rc <= 0) + return BP_EAGAIN; for (i = 0; i < rc; i++) { - page = balloon_retrieve(); + page = balloon_retrieve(false); BUG_ON(page == NULL); pfn = page_to_pfn(page); @@ -249,15 +263,14 @@ static int increase_reservation(unsigned long nr_pages) balloon_stats.current_pages += rc; - out: - return rc < 0 ? 
rc : rc != nr_pages; + return BP_DONE; } -static int decrease_reservation(unsigned long nr_pages) +static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp) { + enum bp_state state = BP_DONE; unsigned long pfn, i; struct page *page; - int need_sleep = 0; int ret; struct xen_memory_reservation reservation = { .address_bits = 0, @@ -269,9 +282,9 @@ static int decrease_reservation(unsigned long nr_pages) nr_pages = ARRAY_SIZE(frame_list); for (i = 0; i < nr_pages; i++) { - if ((page = alloc_page(GFP_BALLOON)) == NULL) { + if ((page = alloc_page(gfp)) == NULL) { nr_pages = i; - need_sleep = 1; + state = BP_EAGAIN; break; } @@ -307,7 +320,7 @@ static int decrease_reservation(unsigned long nr_pages) balloon_stats.current_pages -= nr_pages; - return need_sleep; + return state; } /* @@ -318,77 +331,101 @@ static int decrease_reservation(unsigned long nr_pages) */ static void balloon_process(struct work_struct *work) { - int need_sleep = 0; + enum bp_state state = BP_DONE; long credit; mutex_lock(&balloon_mutex); do { credit = current_target() - balloon_stats.current_pages; + if (credit > 0) - need_sleep = (increase_reservation(credit) != 0); + state = increase_reservation(credit); + if (credit < 0) - need_sleep = (decrease_reservation(-credit) != 0); + state = decrease_reservation(-credit, GFP_BALLOON); + + state = update_schedule(state); #ifndef CONFIG_PREEMPT if (need_resched()) schedule(); #endif - } while ((credit != 0) && !need_sleep); + } while (credit && state == BP_DONE); /* Schedule more work if there is some still to be done. */ - if (current_target() != balloon_stats.current_pages) - mod_timer(&balloon_timer, jiffies + HZ); + if (state == BP_EAGAIN) + schedule_delayed_work(&balloon_worker, balloon_stats.schedule_delay * HZ); mutex_unlock(&balloon_mutex); } /* Resets the Xen limit, sets new target, and kicks off processing. */ -static void balloon_set_new_target(unsigned long target) +void balloon_set_new_target(unsigned long target) { /* No need for lock. Not read-modify-write updates. */ balloon_stats.target_pages = target; - schedule_work(&balloon_worker); + schedule_delayed_work(&balloon_worker, 0); } +EXPORT_SYMBOL_GPL(balloon_set_new_target); -static struct xenbus_watch target_watch = -{ - .node = "memory/target" -}; - -/* React to a change in the target key */ -static void watch_target(struct xenbus_watch *watch, - const char **vec, unsigned int len) +/** + * alloc_xenballooned_pages - get pages that have been ballooned out + * @nr_pages: Number of pages to get + * @pages: pages returned + * @return 0 on success, error otherwise + */ +int alloc_xenballooned_pages(int nr_pages, struct page** pages) { - unsigned long long new_target; - int err; - - err = xenbus_scanf(XBT_NIL, "memory", "target", "%llu", &new_target); - if (err != 1) { - /* This is ok (for domain0 at least) - so just return */ - return; + int pgno = 0; + struct page* page; + mutex_lock(&balloon_mutex); + while (pgno < nr_pages) { + page = balloon_retrieve(true); + if (page) { + pages[pgno++] = page; + } else { + enum bp_state st; + st = decrease_reservation(nr_pages - pgno, GFP_HIGHUSER); + if (st != BP_DONE) + goto out_undo; + } } - - /* The given memory/target value is in KiB, so it needs converting to - * pages. PAGE_SHIFT converts bytes to pages, hence PAGE_SHIFT - 10. 
- */ - balloon_set_new_target(new_target >> (PAGE_SHIFT - 10)); + mutex_unlock(&balloon_mutex); + return 0; + out_undo: + while (pgno) + balloon_append(pages[--pgno]); + /* Free the memory back to the kernel soon */ + schedule_delayed_work(&balloon_worker, 0); + mutex_unlock(&balloon_mutex); + return -ENOMEM; } +EXPORT_SYMBOL(alloc_xenballooned_pages); -static int balloon_init_watcher(struct notifier_block *notifier, - unsigned long event, - void *data) +/** + * free_xenballooned_pages - return pages retrieved with get_ballooned_pages + * @nr_pages: Number of pages + * @pages: pages to return + */ +void free_xenballooned_pages(int nr_pages, struct page** pages) { - int err; + int i; - err = register_xenbus_watch(&target_watch); - if (err) - printk(KERN_ERR "Failed to set balloon watcher\n"); + mutex_lock(&balloon_mutex); - return NOTIFY_DONE; -} + for (i = 0; i < nr_pages; i++) { + if (pages[i]) + balloon_append(pages[i]); + } + + /* The balloon may be too large now. Shrink it if needed. */ + if (current_target() != balloon_stats.current_pages) + schedule_delayed_work(&balloon_worker, 0); -static struct notifier_block xenstore_notifier; + mutex_unlock(&balloon_mutex); +} +EXPORT_SYMBOL(free_xenballooned_pages); static int __init balloon_init(void) { @@ -398,7 +435,7 @@ static int __init balloon_init(void) if (!xen_domain()) return -ENODEV; - pr_info("xen_balloon: Initialising balloon driver.\n"); + pr_info("xen/balloon: Initialising balloon driver.\n"); if (xen_pv_domain()) nr_pages = xen_start_info->nr_pages; @@ -408,13 +445,11 @@ static int __init balloon_init(void) balloon_stats.target_pages = balloon_stats.current_pages; balloon_stats.balloon_low = 0; balloon_stats.balloon_high = 0; - balloon_stats.driver_pages = 0UL; - - init_timer(&balloon_timer); - balloon_timer.data = 0; - balloon_timer.function = balloon_alarm; - register_balloon(&balloon_sysdev); + balloon_stats.schedule_delay = 1; + balloon_stats.max_schedule_delay = 32; + balloon_stats.retry_count = 1; + balloon_stats.max_retry_count = RETRY_UNLIMITED; /* * Initialise the balloon with excess memory space. We need @@ -436,153 +471,9 @@ static int __init balloon_init(void) __balloon_append(page); } - target_watch.callback = watch_target; - xenstore_notifier.notifier_call = balloon_init_watcher; - - register_xenstore_notifier(&xenstore_notifier); - return 0; } subsys_initcall(balloon_init); -static void balloon_exit(void) -{ - /* XXX - release balloon here */ - return; -} - -module_exit(balloon_exit); - -#define BALLOON_SHOW(name, format, args...) 
\ - static ssize_t show_##name(struct sys_device *dev, \ - struct sysdev_attribute *attr, \ - char *buf) \ - { \ - return sprintf(buf, format, ##args); \ - } \ - static SYSDEV_ATTR(name, S_IRUGO, show_##name, NULL) - -BALLOON_SHOW(current_kb, "%lu\n", PAGES2KB(balloon_stats.current_pages)); -BALLOON_SHOW(low_kb, "%lu\n", PAGES2KB(balloon_stats.balloon_low)); -BALLOON_SHOW(high_kb, "%lu\n", PAGES2KB(balloon_stats.balloon_high)); -BALLOON_SHOW(driver_kb, "%lu\n", PAGES2KB(balloon_stats.driver_pages)); - -static ssize_t show_target_kb(struct sys_device *dev, struct sysdev_attribute *attr, - char *buf) -{ - return sprintf(buf, "%lu\n", PAGES2KB(balloon_stats.target_pages)); -} - -static ssize_t store_target_kb(struct sys_device *dev, - struct sysdev_attribute *attr, - const char *buf, - size_t count) -{ - char *endchar; - unsigned long long target_bytes; - - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - - target_bytes = simple_strtoull(buf, &endchar, 0) * 1024; - - balloon_set_new_target(target_bytes >> PAGE_SHIFT); - - return count; -} - -static SYSDEV_ATTR(target_kb, S_IRUGO | S_IWUSR, - show_target_kb, store_target_kb); - - -static ssize_t show_target(struct sys_device *dev, struct sysdev_attribute *attr, - char *buf) -{ - return sprintf(buf, "%llu\n", - (unsigned long long)balloon_stats.target_pages - << PAGE_SHIFT); -} - -static ssize_t store_target(struct sys_device *dev, - struct sysdev_attribute *attr, - const char *buf, - size_t count) -{ - char *endchar; - unsigned long long target_bytes; - - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - - target_bytes = memparse(buf, &endchar); - - balloon_set_new_target(target_bytes >> PAGE_SHIFT); - - return count; -} - -static SYSDEV_ATTR(target, S_IRUGO | S_IWUSR, - show_target, store_target); - - -static struct sysdev_attribute *balloon_attrs[] = { - &attr_target_kb, - &attr_target, -}; - -static struct attribute *balloon_info_attrs[] = { - &attr_current_kb.attr, - &attr_low_kb.attr, - &attr_high_kb.attr, - &attr_driver_kb.attr, - NULL -}; - -static struct attribute_group balloon_info_group = { - .name = "info", - .attrs = balloon_info_attrs, -}; - -static struct sysdev_class balloon_sysdev_class = { - .name = BALLOON_CLASS_NAME, -}; - -static int register_balloon(struct sys_device *sysdev) -{ - int i, error; - - error = sysdev_class_register(&balloon_sysdev_class); - if (error) - return error; - - sysdev->id = 0; - sysdev->cls = &balloon_sysdev_class; - - error = sysdev_register(sysdev); - if (error) { - sysdev_class_unregister(&balloon_sysdev_class); - return error; - } - - for (i = 0; i < ARRAY_SIZE(balloon_attrs); i++) { - error = sysdev_create_file(sysdev, balloon_attrs[i]); - if (error) - goto fail; - } - - error = sysfs_create_group(&sysdev->kobj, &balloon_info_group); - if (error) - goto fail; - - return 0; - - fail: - while (--i >= 0) - sysdev_remove_file(sysdev, balloon_attrs[i]); - sysdev_unregister(sysdev); - sysdev_class_unregister(&balloon_sysdev_class); - return error; -} - MODULE_LICENSE("GPL"); diff --git a/drivers/xen/events.c b/drivers/xen/events.c index 65f5068afd84..02b5a9c05cfa 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c @@ -56,6 +56,8 @@ */ static DEFINE_SPINLOCK(irq_mapping_update_lock); +static LIST_HEAD(xen_irq_list_head); + /* IRQ <-> VIRQ mapping. */ static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... 
NR_VIRQS-1] = -1}; @@ -85,7 +87,9 @@ enum xen_irq_type { */ struct irq_info { + struct list_head list; enum xen_irq_type type; /* type */ + unsigned irq; unsigned short evtchn; /* event channel */ unsigned short cpu; /* cpu bound */ @@ -103,23 +107,10 @@ struct irq_info #define PIRQ_NEEDS_EOI (1 << 0) #define PIRQ_SHAREABLE (1 << 1) -static struct irq_info *irq_info; -static int *pirq_to_irq; - static int *evtchn_to_irq; -struct cpu_evtchn_s { - unsigned long bits[NR_EVENT_CHANNELS/BITS_PER_LONG]; -}; -static __initdata struct cpu_evtchn_s init_evtchn_mask = { - .bits[0 ... (NR_EVENT_CHANNELS/BITS_PER_LONG)-1] = ~0ul, -}; -static struct cpu_evtchn_s __refdata *cpu_evtchn_mask_p = &init_evtchn_mask; - -static inline unsigned long *cpu_evtchn_mask(int cpu) -{ - return cpu_evtchn_mask_p[cpu].bits; -} +static DEFINE_PER_CPU(unsigned long [NR_EVENT_CHANNELS/BITS_PER_LONG], + cpu_evtchn_mask); /* Xen will never allocate port zero for any purpose. */ #define VALID_EVTCHN(chn) ((chn) != 0) @@ -128,46 +119,86 @@ static struct irq_chip xen_dynamic_chip; static struct irq_chip xen_percpu_chip; static struct irq_chip xen_pirq_chip; -/* Constructor for packed IRQ information. */ -static struct irq_info mk_unbound_info(void) +/* Get info for IRQ */ +static struct irq_info *info_for_irq(unsigned irq) +{ + return get_irq_data(irq); +} + +/* Constructors for packed IRQ information. */ +static void xen_irq_info_common_init(struct irq_info *info, + unsigned irq, + enum xen_irq_type type, + unsigned short evtchn, + unsigned short cpu) { - return (struct irq_info) { .type = IRQT_UNBOUND }; + + BUG_ON(info->type != IRQT_UNBOUND && info->type != type); + + info->type = type; + info->irq = irq; + info->evtchn = evtchn; + info->cpu = cpu; + + evtchn_to_irq[evtchn] = irq; } -static struct irq_info mk_evtchn_info(unsigned short evtchn) +static void xen_irq_info_evtchn_init(unsigned irq, + unsigned short evtchn) { - return (struct irq_info) { .type = IRQT_EVTCHN, .evtchn = evtchn, - .cpu = 0 }; + struct irq_info *info = info_for_irq(irq); + + xen_irq_info_common_init(info, irq, IRQT_EVTCHN, evtchn, 0); } -static struct irq_info mk_ipi_info(unsigned short evtchn, enum ipi_vector ipi) +static void xen_irq_info_ipi_init(unsigned cpu, + unsigned irq, + unsigned short evtchn, + enum ipi_vector ipi) { - return (struct irq_info) { .type = IRQT_IPI, .evtchn = evtchn, - .cpu = 0, .u.ipi = ipi }; + struct irq_info *info = info_for_irq(irq); + + xen_irq_info_common_init(info, irq, IRQT_IPI, evtchn, 0); + + info->u.ipi = ipi; + + per_cpu(ipi_to_irq, cpu)[ipi] = irq; } -static struct irq_info mk_virq_info(unsigned short evtchn, unsigned short virq) +static void xen_irq_info_virq_init(unsigned cpu, + unsigned irq, + unsigned short evtchn, + unsigned short virq) { - return (struct irq_info) { .type = IRQT_VIRQ, .evtchn = evtchn, - .cpu = 0, .u.virq = virq }; + struct irq_info *info = info_for_irq(irq); + + xen_irq_info_common_init(info, irq, IRQT_VIRQ, evtchn, 0); + + info->u.virq = virq; + + per_cpu(virq_to_irq, cpu)[virq] = irq; } -static struct irq_info mk_pirq_info(unsigned short evtchn, unsigned short pirq, - unsigned short gsi, unsigned short vector) +static void xen_irq_info_pirq_init(unsigned irq, + unsigned short evtchn, + unsigned short pirq, + unsigned short gsi, + unsigned short vector, + unsigned char flags) { - return (struct irq_info) { .type = IRQT_PIRQ, .evtchn = evtchn, - .cpu = 0, - .u.pirq = { .pirq = pirq, .gsi = gsi, .vector = vector } }; + struct irq_info *info = info_for_irq(irq); + + 
xen_irq_info_common_init(info, irq, IRQT_PIRQ, evtchn, 0); + + info->u.pirq.pirq = pirq; + info->u.pirq.gsi = gsi; + info->u.pirq.vector = vector; + info->u.pirq.flags = flags; } /* * Accessors for packed IRQ information. */ -static struct irq_info *info_for_irq(unsigned irq) -{ - return &irq_info[irq]; -} - static unsigned int evtchn_from_irq(unsigned irq) { if (unlikely(WARN(irq < 0 || irq >= nr_irqs, "Invalid irq %d!\n", irq))) @@ -212,26 +243,6 @@ static unsigned pirq_from_irq(unsigned irq) return info->u.pirq.pirq; } -static unsigned gsi_from_irq(unsigned irq) -{ - struct irq_info *info = info_for_irq(irq); - - BUG_ON(info == NULL); - BUG_ON(info->type != IRQT_PIRQ); - - return info->u.pirq.gsi; -} - -static unsigned vector_from_irq(unsigned irq) -{ - struct irq_info *info = info_for_irq(irq); - - BUG_ON(info == NULL); - BUG_ON(info->type != IRQT_PIRQ); - - return info->u.pirq.vector; -} - static enum xen_irq_type type_from_irq(unsigned irq) { return info_for_irq(irq)->type; @@ -267,7 +278,7 @@ static inline unsigned long active_evtchns(unsigned int cpu, unsigned int idx) { return (sh->evtchn_pending[idx] & - cpu_evtchn_mask(cpu)[idx] & + per_cpu(cpu_evtchn_mask, cpu)[idx] & ~sh->evtchn_mask[idx]); } @@ -280,28 +291,28 @@ static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu) cpumask_copy(irq_to_desc(irq)->irq_data.affinity, cpumask_of(cpu)); #endif - clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq))); - set_bit(chn, cpu_evtchn_mask(cpu)); + clear_bit(chn, per_cpu(cpu_evtchn_mask, cpu_from_irq(irq))); + set_bit(chn, per_cpu(cpu_evtchn_mask, cpu)); - irq_info[irq].cpu = cpu; + info_for_irq(irq)->cpu = cpu; } static void init_evtchn_cpu_bindings(void) { int i; #ifdef CONFIG_SMP - struct irq_desc *desc; + struct irq_info *info; /* By default all event channels notify CPU#0. */ - for_each_irq_desc(i, desc) { + list_for_each_entry(info, &xen_irq_list_head, list) { + struct irq_desc *desc = irq_to_desc(info->irq); cpumask_copy(desc->irq_data.affinity, cpumask_of(0)); } #endif for_each_possible_cpu(i) - memset(cpu_evtchn_mask(i), - (i == 0) ? ~0 : 0, sizeof(struct cpu_evtchn_s)); - + memset(per_cpu(cpu_evtchn_mask, i), + (i == 0) ? ~0 : 0, sizeof(*per_cpu(cpu_evtchn_mask, i))); } static inline void clear_evtchn(int port) @@ -376,7 +387,28 @@ static void unmask_evtchn(int port) put_cpu(); } -static int xen_allocate_irq_dynamic(void) +static void xen_irq_init(unsigned irq) +{ + struct irq_info *info; + struct irq_desc *desc = irq_to_desc(irq); + +#ifdef CONFIG_SMP + /* By default all event channels notify CPU#0. */ + cpumask_copy(desc->irq_data.affinity, cpumask_of(0)); +#endif + + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (info == NULL) + panic("Unable to allocate metadata for IRQ%d\n", irq); + + info->type = IRQT_UNBOUND; + + set_irq_data(irq, info); + + list_add_tail(&info->list, &xen_irq_list_head); +} + +static int __must_check xen_allocate_irq_dynamic(void) { int first = 0; int irq; @@ -393,22 +425,14 @@ static int xen_allocate_irq_dynamic(void) first = get_nr_irqs_gsi(); #endif -retry: irq = irq_alloc_desc_from(first, -1); - if (irq == -ENOMEM && first > NR_IRQS_LEGACY) { - printk(KERN_ERR "Out of dynamic IRQ space and eating into GSI space. 
You should increase nr_irqs\n"); - first = max(NR_IRQS_LEGACY, first - NR_IRQS_LEGACY); - goto retry; - } - - if (irq < 0) - panic("No available IRQ to bind to: increase nr_irqs!\n"); + xen_irq_init(irq); return irq; } -static int xen_allocate_irq_gsi(unsigned gsi) +static int __must_check xen_allocate_irq_gsi(unsigned gsi) { int irq; @@ -423,17 +447,25 @@ static int xen_allocate_irq_gsi(unsigned gsi) /* Legacy IRQ descriptors are already allocated by the arch. */ if (gsi < NR_IRQS_LEGACY) - return gsi; + irq = gsi; + else + irq = irq_alloc_desc_at(gsi, -1); - irq = irq_alloc_desc_at(gsi, -1); - if (irq < 0) - panic("Unable to allocate to IRQ%d (%d)\n", gsi, irq); + xen_irq_init(irq); return irq; } static void xen_free_irq(unsigned irq) { + struct irq_info *info = get_irq_data(irq); + + list_del(&info->list); + + set_irq_data(irq, NULL); + + kfree(info); + /* Legacy IRQ descriptors are managed by the arch. */ if (irq < NR_IRQS_LEGACY) return; @@ -563,51 +595,39 @@ static void ack_pirq(struct irq_data *data) static int find_irq_by_gsi(unsigned gsi) { - int irq; + struct irq_info *info; - for (irq = 0; irq < nr_irqs; irq++) { - struct irq_info *info = info_for_irq(irq); - - if (info == NULL || info->type != IRQT_PIRQ) + list_for_each_entry(info, &xen_irq_list_head, list) { + if (info->type != IRQT_PIRQ) continue; - if (gsi_from_irq(irq) == gsi) - return irq; + if (info->u.pirq.gsi == gsi) + return info->irq; } return -1; } -int xen_allocate_pirq(unsigned gsi, int shareable, char *name) +int xen_allocate_pirq_gsi(unsigned gsi) { - return xen_map_pirq_gsi(gsi, gsi, shareable, name); + return gsi; } -/* xen_map_pirq_gsi might allocate irqs from the top down, as a - * consequence don't assume that the irq number returned has a low value - * or can be used as a pirq number unless you know otherwise. - * - * One notable exception is when xen_map_pirq_gsi is called passing an - * hardware gsi as argument, in that case the irq number returned - * matches the gsi number passed as second argument. +/* + * Do not make any assumptions regarding the relationship between the + * IRQ number returned here and the Xen pirq argument. * * Note: We don't assign an event channel until the irq actually started * up. Return an existing irq if we've already got one for the gsi. */ -int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name) +int xen_bind_pirq_gsi_to_irq(unsigned gsi, + unsigned pirq, int shareable, char *name) { - int irq = 0; + int irq = -1; struct physdev_irq irq_op; spin_lock(&irq_mapping_update_lock); - if ((pirq > nr_irqs) || (gsi > nr_irqs)) { - printk(KERN_WARNING "xen_map_pirq_gsi: %s %s is incorrect!\n", - pirq > nr_irqs ? "pirq" :"", - gsi > nr_irqs ? "gsi" : ""); - goto out; - } - irq = find_irq_by_gsi(gsi); if (irq != -1) { printk(KERN_INFO "xen_map_pirq_gsi: returning irq %d for gsi %u\n", @@ -616,6 +636,8 @@ int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name) } irq = xen_allocate_irq_gsi(gsi); + if (irq < 0) + goto out; set_irq_chip_and_handler_name(irq, &xen_pirq_chip, handle_level_irq, name); @@ -633,9 +655,8 @@ int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name) goto out; } - irq_info[irq] = mk_pirq_info(0, pirq, gsi, irq_op.vector); - irq_info[irq].u.pirq.flags |= shareable ? PIRQ_SHAREABLE : 0; - pirq_to_irq[pirq] = irq; + xen_irq_info_pirq_init(irq, 0, pirq, gsi, irq_op.vector, + shareable ? 
PIRQ_SHAREABLE : 0); out: spin_unlock(&irq_mapping_update_lock); @@ -672,8 +693,7 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc, set_irq_chip_and_handler_name(irq, &xen_pirq_chip, handle_level_irq, name); - irq_info[irq] = mk_pirq_info(0, pirq, 0, vector); - pirq_to_irq[pirq] = irq; + xen_irq_info_pirq_init(irq, 0, pirq, 0, vector, 0); ret = irq_set_msi_desc(irq, msidesc); if (ret < 0) goto error_irq; @@ -709,9 +729,6 @@ int xen_destroy_irq(int irq) goto out; } } - pirq_to_irq[info->u.pirq.pirq] = -1; - - irq_info[irq] = mk_unbound_info(); xen_free_irq(irq); @@ -720,19 +737,26 @@ out: return rc; } -int xen_vector_from_irq(unsigned irq) +int xen_irq_from_pirq(unsigned pirq) { - return vector_from_irq(irq); -} + int irq; -int xen_gsi_from_irq(unsigned irq) -{ - return gsi_from_irq(irq); -} + struct irq_info *info; -int xen_irq_from_pirq(unsigned pirq) -{ - return pirq_to_irq[pirq]; + spin_lock(&irq_mapping_update_lock); + + list_for_each_entry(info, &xen_irq_list_head, list) { + if (info == NULL || info->type != IRQT_PIRQ) + continue; + irq = info->irq; + if (info->u.pirq.pirq == pirq) + goto out; + } + irq = -1; +out: + spin_unlock(&irq_mapping_update_lock); + + return irq; } int bind_evtchn_to_irq(unsigned int evtchn) @@ -745,14 +769,16 @@ int bind_evtchn_to_irq(unsigned int evtchn) if (irq == -1) { irq = xen_allocate_irq_dynamic(); + if (irq == -1) + goto out; set_irq_chip_and_handler_name(irq, &xen_dynamic_chip, handle_fasteoi_irq, "event"); - evtchn_to_irq[evtchn] = irq; - irq_info[irq] = mk_evtchn_info(evtchn); + xen_irq_info_evtchn_init(irq, evtchn); } +out: spin_unlock(&irq_mapping_update_lock); return irq; @@ -782,9 +808,7 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu) BUG(); evtchn = bind_ipi.port; - evtchn_to_irq[evtchn] = irq; - irq_info[irq] = mk_ipi_info(evtchn, ipi); - per_cpu(ipi_to_irq, cpu)[ipi] = irq; + xen_irq_info_ipi_init(cpu, irq, evtchn, ipi); bind_evtchn_to_cpu(evtchn, cpu); } @@ -821,6 +845,8 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu) if (irq == -1) { irq = xen_allocate_irq_dynamic(); + if (irq == -1) + goto out; set_irq_chip_and_handler_name(irq, &xen_percpu_chip, handle_percpu_irq, "virq"); @@ -832,14 +858,12 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu) BUG(); evtchn = bind_virq.port; - evtchn_to_irq[evtchn] = irq; - irq_info[irq] = mk_virq_info(evtchn, virq); - - per_cpu(virq_to_irq, cpu)[virq] = irq; + xen_irq_info_virq_init(cpu, irq, evtchn, virq); bind_evtchn_to_cpu(evtchn, cpu); } +out: spin_unlock(&irq_mapping_update_lock); return irq; @@ -876,11 +900,9 @@ static void unbind_from_irq(unsigned int irq) evtchn_to_irq[evtchn] = -1; } - if (irq_info[irq].type != IRQT_UNBOUND) { - irq_info[irq] = mk_unbound_info(); + BUG_ON(info_for_irq(irq)->type == IRQT_UNBOUND); - xen_free_irq(irq); - } + xen_free_irq(irq); spin_unlock(&irq_mapping_update_lock); } @@ -894,6 +916,8 @@ int bind_evtchn_to_irqhandler(unsigned int evtchn, int retval; irq = bind_evtchn_to_irq(evtchn); + if (irq < 0) + return irq; retval = request_irq(irq, handler, irqflags, devname, dev_id); if (retval != 0) { unbind_from_irq(irq); @@ -935,6 +959,8 @@ int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu, int retval; irq = bind_virq_to_irq(virq, cpu); + if (irq < 0) + return irq; retval = request_irq(irq, handler, irqflags, devname, dev_id); if (retval != 0) { unbind_from_irq(irq); @@ -986,7 +1012,7 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id) { struct shared_info *sh = HYPERVISOR_shared_info; 
int cpu = smp_processor_id(); - unsigned long *cpu_evtchn = cpu_evtchn_mask(cpu); + unsigned long *cpu_evtchn = per_cpu(cpu_evtchn_mask, cpu); int i; unsigned long flags; static DEFINE_SPINLOCK(debug_lock); @@ -1064,6 +1090,13 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id) } static DEFINE_PER_CPU(unsigned, xed_nesting_count); +static DEFINE_PER_CPU(unsigned int, current_word_idx); +static DEFINE_PER_CPU(unsigned int, current_bit_idx); + +/* + * Mask out the i least significant bits of w + */ +#define MASK_LSBS(w, i) (w & ((~0UL) << i)) /* * Search the CPUs pending events bitmasks. For each one found, map @@ -1076,6 +1109,9 @@ static DEFINE_PER_CPU(unsigned, xed_nesting_count); */ static void __xen_evtchn_do_upcall(void) { + int start_word_idx, start_bit_idx; + int word_idx, bit_idx; + int i; int cpu = get_cpu(); struct shared_info *s = HYPERVISOR_shared_info; struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu); @@ -1094,17 +1130,57 @@ static void __xen_evtchn_do_upcall(void) wmb(); #endif pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0); - while (pending_words != 0) { + + start_word_idx = __this_cpu_read(current_word_idx); + start_bit_idx = __this_cpu_read(current_bit_idx); + + word_idx = start_word_idx; + + for (i = 0; pending_words != 0; i++) { unsigned long pending_bits; - int word_idx = __ffs(pending_words); - pending_words &= ~(1UL << word_idx); + unsigned long words; - while ((pending_bits = active_evtchns(cpu, s, word_idx)) != 0) { - int bit_idx = __ffs(pending_bits); - int port = (word_idx * BITS_PER_LONG) + bit_idx; - int irq = evtchn_to_irq[port]; + words = MASK_LSBS(pending_words, word_idx); + + /* + * If we masked out all events, wrap to beginning. + */ + if (words == 0) { + word_idx = 0; + bit_idx = 0; + continue; + } + word_idx = __ffs(words); + + pending_bits = active_evtchns(cpu, s, word_idx); + bit_idx = 0; /* usually scan entire word from start */ + if (word_idx == start_word_idx) { + /* We scan the starting word in two parts */ + if (i == 0) + /* 1st time: start in the middle */ + bit_idx = start_bit_idx; + else + /* 2nd time: mask bits done already */ + bit_idx &= (1UL << start_bit_idx) - 1; + } + + do { + unsigned long bits; + int port, irq; struct irq_desc *desc; + bits = MASK_LSBS(pending_bits, bit_idx); + + /* If we masked out all events, move on. */ + if (bits == 0) + break; + + bit_idx = __ffs(bits); + + /* Process port. */ + port = (word_idx * BITS_PER_LONG) + bit_idx; + irq = evtchn_to_irq[port]; + mask_evtchn(port); clear_evtchn(port); @@ -1113,7 +1189,21 @@ static void __xen_evtchn_do_upcall(void) if (desc) generic_handle_irq_desc(irq, desc); } - } + + bit_idx = (bit_idx + 1) % BITS_PER_LONG; + + /* Next caller starts at last processed + 1 */ + __this_cpu_write(current_word_idx, + bit_idx ? word_idx : + (word_idx+1) % BITS_PER_LONG); + __this_cpu_write(current_bit_idx, bit_idx); + } while (bit_idx != 0); + + /* Scan start_l1i twice; all others once. 
*/ + if ((word_idx != start_word_idx) || (i != 0)) + pending_words &= ~(1UL << word_idx); + + word_idx = (word_idx + 1) % BITS_PER_LONG; } BUG_ON(!irqs_disabled()); @@ -1163,8 +1253,7 @@ void rebind_evtchn_irq(int evtchn, int irq) so there should be a proper type */ BUG_ON(info->type == IRQT_UNBOUND); - evtchn_to_irq[evtchn] = irq; - irq_info[irq] = mk_evtchn_info(evtchn); + xen_irq_info_evtchn_init(irq, evtchn); spin_unlock(&irq_mapping_update_lock); @@ -1181,10 +1270,14 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu) struct evtchn_bind_vcpu bind_vcpu; int evtchn = evtchn_from_irq(irq); - /* events delivered via platform PCI interrupts are always - * routed to vcpu 0 */ - if (!VALID_EVTCHN(evtchn) || - (xen_hvm_domain() && !xen_have_vector_callback)) + if (!VALID_EVTCHN(evtchn)) + return -1; + + /* + * Events delivered via platform PCI interrupts are always + * routed to vcpu 0 and hence cannot be rebound. + */ + if (xen_hvm_domain() && !xen_have_vector_callback) return -1; /* Send future instances of this interrupt to other vcpu. */ @@ -1271,19 +1364,22 @@ static int retrigger_dynirq(struct irq_data *data) return ret; } -static void restore_cpu_pirqs(void) +static void restore_pirqs(void) { int pirq, rc, irq, gsi; struct physdev_map_pirq map_irq; + struct irq_info *info; - for (pirq = 0; pirq < nr_irqs; pirq++) { - irq = pirq_to_irq[pirq]; - if (irq == -1) + list_for_each_entry(info, &xen_irq_list_head, list) { + if (info->type != IRQT_PIRQ) continue; + pirq = info->u.pirq.pirq; + gsi = info->u.pirq.gsi; + irq = info->irq; + /* save/restore of PT devices doesn't work, so at this point the * only devices present are GSI based emulated devices */ - gsi = gsi_from_irq(irq); if (!gsi) continue; @@ -1296,8 +1392,7 @@ static void restore_cpu_pirqs(void) if (rc) { printk(KERN_WARNING "xen map irq failed gsi=%d irq=%d pirq=%d rc=%d\n", gsi, irq, pirq, rc); - irq_info[irq] = mk_unbound_info(); - pirq_to_irq[pirq] = -1; + xen_free_irq(irq); continue; } @@ -1327,8 +1422,7 @@ static void restore_cpu_virqs(unsigned int cpu) evtchn = bind_virq.port; /* Record the new mapping. */ - evtchn_to_irq[evtchn] = irq; - irq_info[irq] = mk_virq_info(evtchn, virq); + xen_irq_info_virq_init(cpu, irq, evtchn, virq); bind_evtchn_to_cpu(evtchn, cpu); } } @@ -1352,8 +1446,7 @@ static void restore_cpu_ipis(unsigned int cpu) evtchn = bind_ipi.port; /* Record the new mapping. */ - evtchn_to_irq[evtchn] = irq; - irq_info[irq] = mk_ipi_info(evtchn, ipi); + xen_irq_info_ipi_init(cpu, irq, evtchn, ipi); bind_evtchn_to_cpu(evtchn, cpu); } } @@ -1413,7 +1506,8 @@ void xen_poll_irq(int irq) void xen_irq_resume(void) { - unsigned int cpu, irq, evtchn; + unsigned int cpu, evtchn; + struct irq_info *info; init_evtchn_cpu_bindings(); @@ -1422,8 +1516,8 @@ void xen_irq_resume(void) mask_evtchn(evtchn); /* No IRQ <-> event-channel mappings. 
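
The reworked __xen_evtchn_do_upcall() above no longer restarts every scan at bit 0 of word 0: it resumes from the per-cpu current_word_idx/current_bit_idx, masks already-serviced low bits with MASK_LSBS, and wraps modulo BITS_PER_LONG, so a chatty low-numbered port can no longer starve higher ones. A user-space sketch of the same fairness idea on a single 64-bit word (much simplified; the real code walks a two-level bitmap and re-reads pending state as it goes):

#include <stdio.h>

#define BITS 64
/* Mask out the i least significant bits of w (same trick as the driver). */
#define MASK_LSBS(w, i) ((w) & (~0ULL << (i)))

static unsigned long long pending;	/* one word of pending "ports" */
static unsigned int next_bit;		/* resume point, kept across calls */

static void show(int bit)
{
	printf("serviced bit %d\n", bit);
}

/* Service every pending bit exactly once, starting at the resume point
 * and wrapping, so frequently-firing low bits cannot starve high ones. */
static void service_word(void (*handle)(int))
{
	unsigned long long bits = MASK_LSBS(pending, next_bit);

	if (!bits)		/* nothing at or above the resume point */
		bits = pending;	/* wrap to the bottom of the word */

	while (bits) {
		int bit = __builtin_ctzll(bits);	/* like __ffs() */

		pending &= ~(1ULL << bit);
		handle(bit);
		next_bit = (bit + 1) % BITS;

		bits = MASK_LSBS(pending, next_bit);
		if (!bits && next_bit)
			bits = pending;	/* wrap once for the low part */
	}
}

int main(void)
{
	pending = (1ULL << 0) | (1ULL << 2);
	service_word(show);	/* bits 0 then 2; resume point is now 3 */

	pending = (1ULL << 1) | (1ULL << 40);
	service_word(show);	/* bit 40 first, then the wrapped-around 1 */
	return 0;
}
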
*/ - for (irq = 0; irq < nr_irqs; irq++) - irq_info[irq].evtchn = 0; /* zap event-channel binding */ + list_for_each_entry(info, &xen_irq_list_head, list) + info->evtchn = 0; /* zap event-channel binding */ for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++) evtchn_to_irq[evtchn] = -1; @@ -1433,7 +1527,7 @@ void xen_irq_resume(void) restore_cpu_ipis(cpu); } - restore_cpu_pirqs(); + restore_pirqs(); } static struct irq_chip xen_dynamic_chip __read_mostly = { @@ -1519,17 +1613,6 @@ void __init xen_init_IRQ(void) { int i; - cpu_evtchn_mask_p = kcalloc(nr_cpu_ids, sizeof(struct cpu_evtchn_s), - GFP_KERNEL); - irq_info = kcalloc(nr_irqs, sizeof(*irq_info), GFP_KERNEL); - - /* We are using nr_irqs as the maximum number of pirq available but - * that number is actually chosen by Xen and we don't know exactly - * what it is. Be careful choosing high pirq numbers. */ - pirq_to_irq = kcalloc(nr_irqs, sizeof(*pirq_to_irq), GFP_KERNEL); - for (i = 0; i < nr_irqs; i++) - pirq_to_irq[i] = -1; - evtchn_to_irq = kcalloc(NR_EVENT_CHANNELS, sizeof(*evtchn_to_irq), GFP_KERNEL); for (i = 0; i < NR_EVENT_CHANNELS; i++) diff --git a/drivers/xen/gntalloc.c b/drivers/xen/gntalloc.c new file mode 100644 index 000000000000..a7ffdfe19fc9 --- /dev/null +++ b/drivers/xen/gntalloc.c @@ -0,0 +1,545 @@ +/****************************************************************************** + * gntalloc.c + * + * Device for creating grant references (in user-space) that may be shared + * with other domains. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +/* + * This driver exists to allow userspace programs in Linux to allocate kernel + * memory that will later be shared with another domain. Without this device, + * Linux userspace programs cannot create grant references. + * + * How this stuff works: + * X -> granting a page to Y + * Y -> mapping the grant from X + * + * 1. X uses the gntalloc device to allocate a page of kernel memory, P. + * 2. X creates an entry in the grant table that says domid(Y) can access P. + * This is done without a hypercall unless the grant table needs expansion. + * 3. X gives the grant reference identifier, GREF, to Y. + * 4. Y maps the page, either directly into kernel memory for use in a backend + * driver, or via a the gntdev device to map into the address space of an + * application running in Y. This is the first point at which Xen does any + * tracking of the page. + * 5. A program in X mmap()s a segment of the gntalloc device that corresponds + * to the shared page, and can now communicate with Y over the shared page. + * + * + * NOTE TO USERSPACE LIBRARIES: + * The grant allocation and mmap()ing are, naturally, two separate operations. + * You set up the sharing by calling the create ioctl() and then the mmap(). + * Teardown requires munmap() and either close() or ioctl(). + * + * WARNING: Since Xen does not allow a guest to forcibly end the use of a grant + * reference, this device can be used to consume kernel memory by leaving grant + * references mapped by another domain when an application exits. 
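
From user space, the create-then-mmap() sequence described in that comment looks roughly like the sketch below. It assumes the ioctl structures from xen/gntalloc.h are visible to the application, that pages are 4 KiB, and that the peer is domain 1; all three are assumptions for illustration, not properties of the driver:

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <xen/gntalloc.h>

int main(void)
{
	int fd = open("/dev/xen/gntalloc", O_RDWR);
	if (fd < 0) { perror("open"); return 1; }

	struct ioctl_gntalloc_alloc_gref op = {
		.domid = 1,			/* assumed peer domain */
		.flags = GNTALLOC_FLAG_WRITABLE,
		.count = 1,
	};
	if (ioctl(fd, IOCTL_GNTALLOC_ALLOC_GREF, &op)) {
		perror("ioctl");
		return 1;
	}

	/* op.index is the file offset to hand to mmap(); op.gref_ids[0] is
	 * the grant reference to send to the mapping domain out of band. */
	void *page = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
			  fd, op.index);
	if (page == MAP_FAILED) { perror("mmap"); return 1; }

	printf("gref %u mapped at %p\n", (unsigned)op.gref_ids[0], page);
	munmap(page, 4096);
	close(fd);
	return 0;
}
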
Therefore, + * there is a global limit on the number of pages that can be allocated. When + * all references to the page are unmapped, it will be freed during the next + * grant operation. + */ + +#include <linux/atomic.h> +#include <linux/module.h> +#include <linux/miscdevice.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/fs.h> +#include <linux/device.h> +#include <linux/mm.h> +#include <linux/uaccess.h> +#include <linux/types.h> +#include <linux/list.h> +#include <linux/highmem.h> + +#include <xen/xen.h> +#include <xen/page.h> +#include <xen/grant_table.h> +#include <xen/gntalloc.h> +#include <xen/events.h> + +static int limit = 1024; +module_param(limit, int, 0644); +MODULE_PARM_DESC(limit, "Maximum number of grants that may be allocated by " + "the gntalloc device"); + +static LIST_HEAD(gref_list); +static DEFINE_SPINLOCK(gref_lock); +static int gref_size; + +struct notify_info { + uint16_t pgoff:12; /* Bits 0-11: Offset of the byte to clear */ + uint16_t flags:2; /* Bits 12-13: Unmap notification flags */ + int event; /* Port (event channel) to notify */ +}; + +/* Metadata on a grant reference. */ +struct gntalloc_gref { + struct list_head next_gref; /* list entry gref_list */ + struct list_head next_file; /* list entry file->list, if open */ + struct page *page; /* The shared page */ + uint64_t file_index; /* File offset for mmap() */ + unsigned int users; /* Use count - when zero, waiting on Xen */ + grant_ref_t gref_id; /* The grant reference number */ + struct notify_info notify; /* Unmap notification */ +}; + +struct gntalloc_file_private_data { + struct list_head list; + uint64_t index; +}; + +static void __del_gref(struct gntalloc_gref *gref); + +static void do_cleanup(void) +{ + struct gntalloc_gref *gref, *n; + list_for_each_entry_safe(gref, n, &gref_list, next_gref) { + if (!gref->users) + __del_gref(gref); + } +} + +static int add_grefs(struct ioctl_gntalloc_alloc_gref *op, + uint32_t *gref_ids, struct gntalloc_file_private_data *priv) +{ + int i, rc, readonly; + LIST_HEAD(queue_gref); + LIST_HEAD(queue_file); + struct gntalloc_gref *gref; + + readonly = !(op->flags & GNTALLOC_FLAG_WRITABLE); + rc = -ENOMEM; + for (i = 0; i < op->count; i++) { + gref = kzalloc(sizeof(*gref), GFP_KERNEL); + if (!gref) + goto undo; + list_add_tail(&gref->next_gref, &queue_gref); + list_add_tail(&gref->next_file, &queue_file); + gref->users = 1; + gref->file_index = op->index + i * PAGE_SIZE; + gref->page = alloc_page(GFP_KERNEL|__GFP_ZERO); + if (!gref->page) + goto undo; + + /* Grant foreign access to the page. */ + gref->gref_id = gnttab_grant_foreign_access(op->domid, + pfn_to_mfn(page_to_pfn(gref->page)), readonly); + if (gref->gref_id < 0) { + rc = gref->gref_id; + goto undo; + } + gref_ids[i] = gref->gref_id; + } + + /* Add to gref lists. */ + spin_lock(&gref_lock); + list_splice_tail(&queue_gref, &gref_list); + list_splice_tail(&queue_file, &priv->list); + spin_unlock(&gref_lock); + + return 0; + +undo: + spin_lock(&gref_lock); + gref_size -= (op->count - i); + + list_for_each_entry(gref, &queue_file, next_file) { + /* __del_gref does not remove from queue_file */ + __del_gref(gref); + } + + /* It's possible for the target domain to map the just-allocated grant + * references by blindly guessing their IDs; if this is done, then + * __del_gref will leave them in the queue_gref list. They need to be + * added to the global list so that we can free them when they are no + * longer referenced. 
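
The undo path above illustrates a rule the whole driver follows: a grant that may still be mapped by the remote domain must not be freed outright, so it is parked on the global list and retried by do_cleanup() during later grant operations. A generic sketch of that deferred-reclaim pattern, where still_shared() is a hypothetical stand-in for the gnttab_query_foreign_access()/gnttab_end_foreign_access_ref() checks done in __del_gref():

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct shared_obj {
	struct list_head list;
	unsigned int users;			/* local references */
	bool (*still_shared)(struct shared_obj *);
};

static LIST_HEAD(obj_list);
static DEFINE_SPINLOCK(obj_lock);

/* Free every object that has no local users and is provably no longer
 * shared with the remote side; anything still shared stays parked on
 * the list and is retried on the next call. Caller holds obj_lock. */
static void deferred_reclaim(void)
{
	struct shared_obj *obj, *n;

	list_for_each_entry_safe(obj, n, &obj_list, list) {
		if (obj->users)
			continue;	/* a local holder will retry later */
		if (obj->still_shared(obj))
			continue;	/* remote may still touch the page */
		list_del(&obj->list);
		kfree(obj);
	}
}
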
+ */ + if (unlikely(!list_empty(&queue_gref))) + list_splice_tail(&queue_gref, &gref_list); + spin_unlock(&gref_lock); + return rc; +} + +static void __del_gref(struct gntalloc_gref *gref) +{ + if (gref->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) { + uint8_t *tmp = kmap(gref->page); + tmp[gref->notify.pgoff] = 0; + kunmap(gref->page); + } + if (gref->notify.flags & UNMAP_NOTIFY_SEND_EVENT) + notify_remote_via_evtchn(gref->notify.event); + + gref->notify.flags = 0; + + if (gref->gref_id > 0) { + if (gnttab_query_foreign_access(gref->gref_id)) + return; + + if (!gnttab_end_foreign_access_ref(gref->gref_id, 0)) + return; + } + + gref_size--; + list_del(&gref->next_gref); + + if (gref->page) + __free_page(gref->page); + + kfree(gref); +} + +/* finds contiguous grant references in a file, returns the first */ +static struct gntalloc_gref *find_grefs(struct gntalloc_file_private_data *priv, + uint64_t index, uint32_t count) +{ + struct gntalloc_gref *rv = NULL, *gref; + list_for_each_entry(gref, &priv->list, next_file) { + if (gref->file_index == index && !rv) + rv = gref; + if (rv) { + if (gref->file_index != index) + return NULL; + index += PAGE_SIZE; + count--; + if (count == 0) + return rv; + } + } + return NULL; +} + +/* + * ------------------------------------- + * File operations. + * ------------------------------------- + */ +static int gntalloc_open(struct inode *inode, struct file *filp) +{ + struct gntalloc_file_private_data *priv; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + goto out_nomem; + INIT_LIST_HEAD(&priv->list); + + filp->private_data = priv; + + pr_debug("%s: priv %p\n", __func__, priv); + + return 0; + +out_nomem: + return -ENOMEM; +} + +static int gntalloc_release(struct inode *inode, struct file *filp) +{ + struct gntalloc_file_private_data *priv = filp->private_data; + struct gntalloc_gref *gref; + + pr_debug("%s: priv %p\n", __func__, priv); + + spin_lock(&gref_lock); + while (!list_empty(&priv->list)) { + gref = list_entry(priv->list.next, + struct gntalloc_gref, next_file); + list_del(&gref->next_file); + gref->users--; + if (gref->users == 0) + __del_gref(gref); + } + kfree(priv); + spin_unlock(&gref_lock); + + return 0; +} + +static long gntalloc_ioctl_alloc(struct gntalloc_file_private_data *priv, + struct ioctl_gntalloc_alloc_gref __user *arg) +{ + int rc = 0; + struct ioctl_gntalloc_alloc_gref op; + uint32_t *gref_ids; + + pr_debug("%s: priv %p\n", __func__, priv); + + if (copy_from_user(&op, arg, sizeof(op))) { + rc = -EFAULT; + goto out; + } + + gref_ids = kzalloc(sizeof(gref_ids[0]) * op.count, GFP_TEMPORARY); + if (!gref_ids) { + rc = -ENOMEM; + goto out; + } + + spin_lock(&gref_lock); + /* Clean up pages that were at zero (local) users but were still mapped + * by remote domains. Since those pages count towards the limit that we + * are about to enforce, removing them here is a good idea. + */ + do_cleanup(); + if (gref_size + op.count > limit) { + spin_unlock(&gref_lock); + rc = -ENOSPC; + goto out_free; + } + gref_size += op.count; + op.index = priv->index; + priv->index += op.count * PAGE_SIZE; + spin_unlock(&gref_lock); + + rc = add_grefs(&op, gref_ids, priv); + if (rc < 0) + goto out_free; + + /* Once we finish add_grefs, it is unsafe to touch the new reference, + * since it is possible for a concurrent ioctl to remove it (by guessing + * its index). 
If the userspace application doesn't provide valid memory + * to write the IDs to, then it will need to close the file in order to + * release - which it will do by segfaulting when it tries to access the + * IDs to close them. + */ + if (copy_to_user(arg, &op, sizeof(op))) { + rc = -EFAULT; + goto out_free; + } + if (copy_to_user(arg->gref_ids, gref_ids, + sizeof(gref_ids[0]) * op.count)) { + rc = -EFAULT; + goto out_free; + } + +out_free: + kfree(gref_ids); +out: + return rc; +} + +static long gntalloc_ioctl_dealloc(struct gntalloc_file_private_data *priv, + void __user *arg) +{ + int i, rc = 0; + struct ioctl_gntalloc_dealloc_gref op; + struct gntalloc_gref *gref, *n; + + pr_debug("%s: priv %p\n", __func__, priv); + + if (copy_from_user(&op, arg, sizeof(op))) { + rc = -EFAULT; + goto dealloc_grant_out; + } + + spin_lock(&gref_lock); + gref = find_grefs(priv, op.index, op.count); + if (gref) { + /* Remove from the file list only, and decrease reference count. + * The later call to do_cleanup() will remove from gref_list and + * free the memory if the pages aren't mapped anywhere. + */ + for (i = 0; i < op.count; i++) { + n = list_entry(gref->next_file.next, + struct gntalloc_gref, next_file); + list_del(&gref->next_file); + gref->users--; + gref = n; + } + } else { + rc = -EINVAL; + } + + do_cleanup(); + + spin_unlock(&gref_lock); +dealloc_grant_out: + return rc; +} + +static long gntalloc_ioctl_unmap_notify(struct gntalloc_file_private_data *priv, + void __user *arg) +{ + struct ioctl_gntalloc_unmap_notify op; + struct gntalloc_gref *gref; + uint64_t index; + int pgoff; + int rc; + + if (copy_from_user(&op, arg, sizeof(op))) + return -EFAULT; + + index = op.index & ~(PAGE_SIZE - 1); + pgoff = op.index & (PAGE_SIZE - 1); + + spin_lock(&gref_lock); + + gref = find_grefs(priv, index, 1); + if (!gref) { + rc = -ENOENT; + goto unlock_out; + } + + if (op.action & ~(UNMAP_NOTIFY_CLEAR_BYTE|UNMAP_NOTIFY_SEND_EVENT)) { + rc = -EINVAL; + goto unlock_out; + } + + gref->notify.flags = op.action; + gref->notify.pgoff = pgoff; + gref->notify.event = op.event_channel_port; + rc = 0; + unlock_out: + spin_unlock(&gref_lock); + return rc; +} + +static long gntalloc_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + struct gntalloc_file_private_data *priv = filp->private_data; + + switch (cmd) { + case IOCTL_GNTALLOC_ALLOC_GREF: + return gntalloc_ioctl_alloc(priv, (void __user *)arg); + + case IOCTL_GNTALLOC_DEALLOC_GREF: + return gntalloc_ioctl_dealloc(priv, (void __user *)arg); + + case IOCTL_GNTALLOC_SET_UNMAP_NOTIFY: + return gntalloc_ioctl_unmap_notify(priv, (void __user *)arg); + + default: + return -ENOIOCTLCMD; + } + + return 0; +} + +static void gntalloc_vma_close(struct vm_area_struct *vma) +{ + struct gntalloc_gref *gref = vma->vm_private_data; + if (!gref) + return; + + spin_lock(&gref_lock); + gref->users--; + if (gref->users == 0) + __del_gref(gref); + spin_unlock(&gref_lock); +} + +static struct vm_operations_struct gntalloc_vmops = { + .close = gntalloc_vma_close, +}; + +static int gntalloc_mmap(struct file *filp, struct vm_area_struct *vma) +{ + struct gntalloc_file_private_data *priv = filp->private_data; + struct gntalloc_gref *gref; + int count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; + int rv, i; + + pr_debug("%s: priv %p, page %lu+%d\n", __func__, + priv, vma->vm_pgoff, count); + + if (!(vma->vm_flags & VM_SHARED)) { + printk(KERN_ERR "%s: Mapping must be shared.\n", __func__); + return -EINVAL; + } + + spin_lock(&gref_lock); + gref = find_grefs(priv, 
vma->vm_pgoff << PAGE_SHIFT, count); + if (gref == NULL) { + rv = -ENOENT; + pr_debug("%s: Could not find grant reference", + __func__); + goto out_unlock; + } + + vma->vm_private_data = gref; + + vma->vm_flags |= VM_RESERVED; + vma->vm_flags |= VM_DONTCOPY; + vma->vm_flags |= VM_PFNMAP | VM_PFN_AT_MMAP; + + vma->vm_ops = &gntalloc_vmops; + + for (i = 0; i < count; i++) { + gref->users++; + rv = vm_insert_page(vma, vma->vm_start + i * PAGE_SIZE, + gref->page); + if (rv) + goto out_unlock; + + gref = list_entry(gref->next_file.next, + struct gntalloc_gref, next_file); + } + rv = 0; + +out_unlock: + spin_unlock(&gref_lock); + return rv; +} + +static const struct file_operations gntalloc_fops = { + .owner = THIS_MODULE, + .open = gntalloc_open, + .release = gntalloc_release, + .unlocked_ioctl = gntalloc_ioctl, + .mmap = gntalloc_mmap +}; + +/* + * ------------------------------------- + * Module creation/destruction. + * ------------------------------------- + */ +static struct miscdevice gntalloc_miscdev = { + .minor = MISC_DYNAMIC_MINOR, + .name = "xen/gntalloc", + .fops = &gntalloc_fops, +}; + +static int __init gntalloc_init(void) +{ + int err; + + if (!xen_domain()) + return -ENODEV; + + err = misc_register(&gntalloc_miscdev); + if (err != 0) { + printk(KERN_ERR "Could not register misc gntalloc device\n"); + return err; + } + + pr_debug("Created grant allocation device at %d,%d\n", + MISC_MAJOR, gntalloc_miscdev.minor); + + return 0; +} + +static void __exit gntalloc_exit(void) +{ + misc_deregister(&gntalloc_miscdev); +} + +module_init(gntalloc_init); +module_exit(gntalloc_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Carter Weatherly <carter.weatherly@jhuapl.edu>, " + "Daniel De Graaf <dgdegra@tycho.nsa.gov>"); +MODULE_DESCRIPTION("User-space grant reference allocator driver"); diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c index 1e31cdcdae1e..017ce600fbc6 100644 --- a/drivers/xen/gntdev.c +++ b/drivers/xen/gntdev.c @@ -32,10 +32,13 @@ #include <linux/sched.h> #include <linux/spinlock.h> #include <linux/slab.h> +#include <linux/highmem.h> #include <xen/xen.h> #include <xen/grant_table.h> +#include <xen/balloon.h> #include <xen/gntdev.h> +#include <xen/events.h> #include <asm/xen/hypervisor.h> #include <asm/xen/hypercall.h> #include <asm/xen/page.h> @@ -45,35 +48,46 @@ MODULE_AUTHOR("Derek G. 
Murray <Derek.Murray@cl.cam.ac.uk>, " "Gerd Hoffmann <kraxel@redhat.com>"); MODULE_DESCRIPTION("User-space granted page access driver"); -static int limit = 1024; +static int limit = 1024*1024; module_param(limit, int, 0644); -MODULE_PARM_DESC(limit, "Maximum number of grants that may be mapped at " - "once by a gntdev instance"); +MODULE_PARM_DESC(limit, "Maximum number of grants that may be mapped by " + "the gntdev device"); + +static atomic_t pages_mapped = ATOMIC_INIT(0); + +static int use_ptemod; struct gntdev_priv { struct list_head maps; - uint32_t used; - uint32_t limit; /* lock protects maps from concurrent changes */ spinlock_t lock; struct mm_struct *mm; struct mmu_notifier mn; }; +struct unmap_notify { + int flags; + /* Address relative to the start of the grant_map */ + int addr; + int event; +}; + struct grant_map { struct list_head next; - struct gntdev_priv *priv; struct vm_area_struct *vma; int index; int count; int flags; - int is_mapped; + atomic_t users; + struct unmap_notify notify; struct ioctl_gntdev_grant_ref *grants; struct gnttab_map_grant_ref *map_ops; struct gnttab_unmap_grant_ref *unmap_ops; struct page **pages; }; +static int unmap_grant_pages(struct grant_map *map, int offset, int pages); + /* ------------------------------------------------------------------ */ static void gntdev_print_maps(struct gntdev_priv *priv, @@ -82,9 +96,7 @@ static void gntdev_print_maps(struct gntdev_priv *priv, #ifdef DEBUG struct grant_map *map; - pr_debug("maps list (priv %p, usage %d/%d)\n", - priv, priv->used, priv->limit); - + pr_debug("%s: maps list (priv %p)\n", __func__, priv); list_for_each_entry(map, &priv->maps, next) pr_debug(" index %2d, count %2d %s\n", map->index, map->count, @@ -111,27 +123,21 @@ static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count) NULL == add->pages) goto err; + if (alloc_xenballooned_pages(count, add->pages)) + goto err; + for (i = 0; i < count; i++) { - add->pages[i] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM); - if (add->pages[i] == NULL) - goto err; + add->map_ops[i].handle = -1; + add->unmap_ops[i].handle = -1; } add->index = 0; add->count = count; - add->priv = priv; - - if (add->count + priv->used > priv->limit) - goto err; + atomic_set(&add->users, 1); return add; err: - if (add->pages) - for (i = 0; i < count; i++) { - if (add->pages[i]) - __free_page(add->pages[i]); - } kfree(add->pages); kfree(add->grants); kfree(add->map_ops); @@ -154,7 +160,6 @@ static void gntdev_add_map(struct gntdev_priv *priv, struct grant_map *add) list_add_tail(&add->next, &priv->maps); done: - priv->used += add->count; gntdev_print_maps(priv, "[new]", add->index); } @@ -166,57 +171,33 @@ static struct grant_map *gntdev_find_map_index(struct gntdev_priv *priv, list_for_each_entry(map, &priv->maps, next) { if (map->index != index) continue; - if (map->count != count) + if (count && map->count != count) continue; return map; } return NULL; } -static struct grant_map *gntdev_find_map_vaddr(struct gntdev_priv *priv, - unsigned long vaddr) +static void gntdev_put_map(struct grant_map *map) { - struct grant_map *map; - - list_for_each_entry(map, &priv->maps, next) { - if (!map->vma) - continue; - if (vaddr < map->vma->vm_start) - continue; - if (vaddr >= map->vma->vm_end) - continue; - return map; - } - return NULL; -} - -static int gntdev_del_map(struct grant_map *map) -{ - int i; + if (!map) + return; - if (map->vma) - return -EBUSY; - for (i = 0; i < map->count; i++) - if (map->unmap_ops[i].handle) - return -EBUSY; + if 
(!atomic_dec_and_test(&map->users)) + return; - map->priv->used -= map->count; - list_del(&map->next); - return 0; -} + atomic_sub(map->count, &pages_mapped); -static void gntdev_free_map(struct grant_map *map) -{ - int i; + if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) { + notify_remote_via_evtchn(map->notify.event); + } - if (!map) - return; + if (map->pages) { + if (!use_ptemod) + unmap_grant_pages(map, 0, map->count); - if (map->pages) - for (i = 0; i < map->count; i++) { - if (map->pages[i]) - __free_page(map->pages[i]); - } + free_xenballooned_pages(map->count, map->pages); + } kfree(map->pages); kfree(map->grants); kfree(map->map_ops); @@ -231,18 +212,17 @@ static int find_grant_ptes(pte_t *pte, pgtable_t token, { struct grant_map *map = data; unsigned int pgnr = (addr - map->vma->vm_start) >> PAGE_SHIFT; + int flags = map->flags | GNTMAP_application_map | GNTMAP_contains_pte; u64 pte_maddr; BUG_ON(pgnr >= map->count); pte_maddr = arbitrary_virt_to_machine(pte).maddr; - gnttab_set_map_op(&map->map_ops[pgnr], pte_maddr, - GNTMAP_contains_pte | map->flags, + gnttab_set_map_op(&map->map_ops[pgnr], pte_maddr, flags, map->grants[pgnr].ref, map->grants[pgnr].domid); - gnttab_set_unmap_op(&map->unmap_ops[pgnr], pte_maddr, - GNTMAP_contains_pte | map->flags, - 0 /* handle */); + gnttab_set_unmap_op(&map->unmap_ops[pgnr], pte_maddr, flags, + -1 /* handle */); return 0; } @@ -250,6 +230,21 @@ static int map_grant_pages(struct grant_map *map) { int i, err = 0; + if (!use_ptemod) { + /* Note: it could already be mapped */ + if (map->map_ops[0].handle != -1) + return 0; + for (i = 0; i < map->count; i++) { + unsigned long addr = (unsigned long) + pfn_to_kaddr(page_to_pfn(map->pages[i])); + gnttab_set_map_op(&map->map_ops[i], addr, map->flags, + map->grants[i].ref, + map->grants[i].domid); + gnttab_set_unmap_op(&map->unmap_ops[i], addr, + map->flags, -1 /* handle */); + } + } + pr_debug("map %d+%d\n", map->index, map->count); err = gnttab_map_refs(map->map_ops, map->pages, map->count); if (err) @@ -258,28 +253,81 @@ static int map_grant_pages(struct grant_map *map) for (i = 0; i < map->count; i++) { if (map->map_ops[i].status) err = -EINVAL; - map->unmap_ops[i].handle = map->map_ops[i].handle; + else { + BUG_ON(map->map_ops[i].handle == -1); + map->unmap_ops[i].handle = map->map_ops[i].handle; + pr_debug("map handle=%d\n", map->map_ops[i].handle); + } } return err; } -static int unmap_grant_pages(struct grant_map *map, int offset, int pages) +static int __unmap_grant_pages(struct grant_map *map, int offset, int pages) { int i, err = 0; - pr_debug("map %d+%d [%d+%d]\n", map->index, map->count, offset, pages); - err = gnttab_unmap_refs(map->unmap_ops + offset, map->pages, pages); + if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) { + int pgno = (map->notify.addr >> PAGE_SHIFT); + if (pgno >= offset && pgno < offset + pages && use_ptemod) { + void __user *tmp = (void __user *) + map->vma->vm_start + map->notify.addr; + err = copy_to_user(tmp, &err, 1); + if (err) + return err; + map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE; + } else if (pgno >= offset && pgno < offset + pages) { + uint8_t *tmp = kmap(map->pages[pgno]); + tmp[map->notify.addr & (PAGE_SIZE-1)] = 0; + kunmap(map->pages[pgno]); + map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE; + } + } + + err = gnttab_unmap_refs(map->unmap_ops + offset, map->pages + offset, pages); if (err) return err; for (i = 0; i < pages; i++) { if (map->unmap_ops[offset+i].status) err = -EINVAL; - map->unmap_ops[offset+i].handle = 0; + pr_debug("unmap handle=%d 
st=%d\n", + map->unmap_ops[offset+i].handle, + map->unmap_ops[offset+i].status); + map->unmap_ops[offset+i].handle = -1; } return err; } +static int unmap_grant_pages(struct grant_map *map, int offset, int pages) +{ + int range, err = 0; + + pr_debug("unmap %d+%d [%d+%d]\n", map->index, map->count, offset, pages); + + /* It is possible the requested range will have a "hole" where we + * already unmapped some of the grants. Only unmap valid ranges. + */ + while (pages && !err) { + while (pages && map->unmap_ops[offset].handle == -1) { + offset++; + pages--; + } + range = 0; + while (range < pages) { + if (map->unmap_ops[offset+range].handle == -1) { + range--; + break; + } + range++; + } + err = __unmap_grant_pages(map, offset, range); + offset += range; + pages -= range; + } + + return err; +} + /* ------------------------------------------------------------------ */ static void gntdev_vma_close(struct vm_area_struct *vma) @@ -287,22 +335,13 @@ static void gntdev_vma_close(struct vm_area_struct *vma) struct grant_map *map = vma->vm_private_data; pr_debug("close %p\n", vma); - map->is_mapped = 0; map->vma = NULL; vma->vm_private_data = NULL; -} - -static int gntdev_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf) -{ - pr_debug("vaddr %p, pgoff %ld (shouldn't happen)\n", - vmf->virtual_address, vmf->pgoff); - vmf->flags = VM_FAULT_ERROR; - return 0; + gntdev_put_map(map); } static struct vm_operations_struct gntdev_vmops = { .close = gntdev_vma_close, - .fault = gntdev_vma_fault, }; /* ------------------------------------------------------------------ */ @@ -320,8 +359,6 @@ static void mn_invl_range_start(struct mmu_notifier *mn, list_for_each_entry(map, &priv->maps, next) { if (!map->vma) continue; - if (!map->is_mapped) - continue; if (map->vma->vm_start >= end) continue; if (map->vma->vm_end <= start) @@ -386,16 +423,17 @@ static int gntdev_open(struct inode *inode, struct file *flip) INIT_LIST_HEAD(&priv->maps); spin_lock_init(&priv->lock); - priv->limit = limit; - priv->mm = get_task_mm(current); - if (!priv->mm) { - kfree(priv); - return -ENOMEM; + if (use_ptemod) { + priv->mm = get_task_mm(current); + if (!priv->mm) { + kfree(priv); + return -ENOMEM; + } + priv->mn.ops = &gntdev_mmu_ops; + ret = mmu_notifier_register(&priv->mn, priv->mm); + mmput(priv->mm); } - priv->mn.ops = &gntdev_mmu_ops; - ret = mmu_notifier_register(&priv->mn, priv->mm); - mmput(priv->mm); if (ret) { kfree(priv); @@ -412,21 +450,19 @@ static int gntdev_release(struct inode *inode, struct file *flip) { struct gntdev_priv *priv = flip->private_data; struct grant_map *map; - int err; pr_debug("priv %p\n", priv); spin_lock(&priv->lock); while (!list_empty(&priv->maps)) { map = list_entry(priv->maps.next, struct grant_map, next); - err = gntdev_del_map(map); - if (WARN_ON(err)) - gntdev_free_map(map); - + list_del(&map->next); + gntdev_put_map(map); } spin_unlock(&priv->lock); - mmu_notifier_unregister(&priv->mn, priv->mm); + if (use_ptemod) + mmu_notifier_unregister(&priv->mn, priv->mm); kfree(priv); return 0; } @@ -443,16 +479,21 @@ static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv, pr_debug("priv %p, add %d\n", priv, op.count); if (unlikely(op.count <= 0)) return -EINVAL; - if (unlikely(op.count > priv->limit)) - return -EINVAL; err = -ENOMEM; map = gntdev_alloc_map(priv, op.count); if (!map) return err; + + if (unlikely(atomic_add_return(op.count, &pages_mapped) > limit)) { + pr_debug("can't map: over limit\n"); + gntdev_put_map(map); + return err; + } + if (copy_from_user(map->grants, 
&u->refs, sizeof(map->grants[0]) * op.count) != 0) { - gntdev_free_map(map); + gntdev_put_map(map); return err; } @@ -461,13 +502,9 @@ static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv, op.index = map->index << PAGE_SHIFT; spin_unlock(&priv->lock); - if (copy_to_user(u, &op, sizeof(op)) != 0) { - spin_lock(&priv->lock); - gntdev_del_map(map); - spin_unlock(&priv->lock); - gntdev_free_map(map); - return err; - } + if (copy_to_user(u, &op, sizeof(op)) != 0) + return -EFAULT; + return 0; } @@ -484,11 +521,12 @@ static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv, spin_lock(&priv->lock); map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count); - if (map) - err = gntdev_del_map(map); + if (map) { + list_del(&map->next); + gntdev_put_map(map); + err = 0; + } spin_unlock(&priv->lock); - if (!err) - gntdev_free_map(map); return err; } @@ -496,43 +534,66 @@ static long gntdev_ioctl_get_offset_for_vaddr(struct gntdev_priv *priv, struct ioctl_gntdev_get_offset_for_vaddr __user *u) { struct ioctl_gntdev_get_offset_for_vaddr op; + struct vm_area_struct *vma; struct grant_map *map; if (copy_from_user(&op, u, sizeof(op)) != 0) return -EFAULT; pr_debug("priv %p, offset for vaddr %lx\n", priv, (unsigned long)op.vaddr); - spin_lock(&priv->lock); - map = gntdev_find_map_vaddr(priv, op.vaddr); - if (map == NULL || - map->vma->vm_start != op.vaddr) { - spin_unlock(&priv->lock); + vma = find_vma(current->mm, op.vaddr); + if (!vma || vma->vm_ops != &gntdev_vmops) return -EINVAL; - } + + map = vma->vm_private_data; + if (!map) + return -EINVAL; + op.offset = map->index << PAGE_SHIFT; op.count = map->count; - spin_unlock(&priv->lock); if (copy_to_user(u, &op, sizeof(op)) != 0) return -EFAULT; return 0; } -static long gntdev_ioctl_set_max_grants(struct gntdev_priv *priv, - struct ioctl_gntdev_set_max_grants __user *u) +static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u) { - struct ioctl_gntdev_set_max_grants op; + struct ioctl_gntdev_unmap_notify op; + struct grant_map *map; + int rc; - if (copy_from_user(&op, u, sizeof(op)) != 0) + if (copy_from_user(&op, u, sizeof(op))) return -EFAULT; - pr_debug("priv %p, limit %d\n", priv, op.count); - if (op.count > limit) - return -E2BIG; + + if (op.action & ~(UNMAP_NOTIFY_CLEAR_BYTE|UNMAP_NOTIFY_SEND_EVENT)) + return -EINVAL; spin_lock(&priv->lock); - priv->limit = op.count; + + list_for_each_entry(map, &priv->maps, next) { + uint64_t begin = map->index << PAGE_SHIFT; + uint64_t end = (map->index + map->count) << PAGE_SHIFT; + if (op.index >= begin && op.index < end) + goto found; + } + rc = -ENOENT; + goto unlock_out; + + found: + if ((op.action & UNMAP_NOTIFY_CLEAR_BYTE) && + (map->flags & GNTMAP_readonly)) { + rc = -EINVAL; + goto unlock_out; + } + + map->notify.flags = op.action; + map->notify.addr = op.index - (map->index << PAGE_SHIFT); + map->notify.event = op.event_channel_port; + rc = 0; + unlock_out: spin_unlock(&priv->lock); - return 0; + return rc; } static long gntdev_ioctl(struct file *flip, @@ -551,8 +612,8 @@ static long gntdev_ioctl(struct file *flip, case IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR: return gntdev_ioctl_get_offset_for_vaddr(priv, ptr); - case IOCTL_GNTDEV_SET_MAX_GRANTS: - return gntdev_ioctl_set_max_grants(priv, ptr); + case IOCTL_GNTDEV_SET_UNMAP_NOTIFY: + return gntdev_ioctl_notify(priv, ptr); default: pr_debug("priv %p, unknown cmd %x\n", priv, cmd); @@ -568,7 +629,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma) int index = vma->vm_pgoff; int count = 
(vma->vm_end - vma->vm_start) >> PAGE_SHIFT; struct grant_map *map; - int err = -EINVAL; + int i, err = -EINVAL; if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED)) return -EINVAL; @@ -580,47 +641,70 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma) map = gntdev_find_map_index(priv, index, count); if (!map) goto unlock_out; - if (map->vma) + if (use_ptemod && map->vma) goto unlock_out; - if (priv->mm != vma->vm_mm) { + if (use_ptemod && priv->mm != vma->vm_mm) { printk(KERN_WARNING "Huh? Other mm?\n"); goto unlock_out; } + atomic_inc(&map->users); + vma->vm_ops = &gntdev_vmops; vma->vm_flags |= VM_RESERVED|VM_DONTCOPY|VM_DONTEXPAND|VM_PFNMAP; vma->vm_private_data = map; - map->vma = vma; - map->flags = GNTMAP_host_map | GNTMAP_application_map; - if (!(vma->vm_flags & VM_WRITE)) - map->flags |= GNTMAP_readonly; + if (use_ptemod) + map->vma = vma; + + if (map->flags) { + if ((vma->vm_flags & VM_WRITE) && + (map->flags & GNTMAP_readonly)) + return -EINVAL; + } else { + map->flags = GNTMAP_host_map; + if (!(vma->vm_flags & VM_WRITE)) + map->flags |= GNTMAP_readonly; + } spin_unlock(&priv->lock); - err = apply_to_page_range(vma->vm_mm, vma->vm_start, - vma->vm_end - vma->vm_start, - find_grant_ptes, map); - if (err) { - printk(KERN_WARNING "find_grant_ptes() failure.\n"); - return err; + if (use_ptemod) { + err = apply_to_page_range(vma->vm_mm, vma->vm_start, + vma->vm_end - vma->vm_start, + find_grant_ptes, map); + if (err) { + printk(KERN_WARNING "find_grant_ptes() failure.\n"); + goto out_put_map; + } } err = map_grant_pages(map); - if (err) { - printk(KERN_WARNING "map_grant_pages() failure.\n"); - return err; - } + if (err) + goto out_put_map; - map->is_mapped = 1; + if (!use_ptemod) { + for (i = 0; i < count; i++) { + err = vm_insert_page(vma, vma->vm_start + i*PAGE_SIZE, + map->pages[i]); + if (err) + goto out_put_map; + } + } return 0; unlock_out: spin_unlock(&priv->lock); return err; + +out_put_map: + if (use_ptemod) + map->vma = NULL; + gntdev_put_map(map); + return err; } static const struct file_operations gntdev_fops = { @@ -646,6 +730,8 @@ static int __init gntdev_init(void) if (!xen_domain()) return -ENODEV; + use_ptemod = xen_pv_domain(); + err = misc_register(&gntdev_miscdev); if (err != 0) { printk(KERN_ERR "Could not register gntdev device\n"); diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c index 9ef54ebc1194..3745a318defc 100644 --- a/drivers/xen/grant-table.c +++ b/drivers/xen/grant-table.c @@ -458,7 +458,14 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops, if (ret) return ret; + if (xen_feature(XENFEAT_auto_translated_physmap)) + return ret; + for (i = 0; i < count; i++) { + /* Do not add to override if the map failed. 
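
The gntdev changes above split mapping into two paths selected once at init time (use_ptemod = xen_pv_domain()): PV guests rewrite user PTEs through find_grant_ptes()/apply_to_page_range(), while auto-translated guests grant-map into ballooned pages and then install those pages directly. A hypothetical driver sketch of the second style; demo_priv and its fields are assumptions, not gntdev's types:

#include <linux/fs.h>
#include <linux/mm.h>

struct demo_priv {			/* hypothetical per-open state */
	struct page **pages;		/* pre-allocated (e.g. ballooned) pages */
	unsigned long npages;
};

/* Back an mmap() with already-present struct pages, the way gntdev does
 * for auto-translated guests: the grant map fills the pages first, then
 * vm_insert_page() installs them in the user PTEs with no hypervisor
 * PTE rewriting involved. */
static int demo_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct demo_priv *priv = filp->private_data;
	unsigned long count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	unsigned long i;
	int err;

	if (count > priv->npages)
		return -EINVAL;

	for (i = 0; i < count; i++) {
		err = vm_insert_page(vma, vma->vm_start + i * PAGE_SIZE,
				     priv->pages[i]);
		if (err)
			return err;	/* caller sees a failed mmap() */
	}
	return 0;
}
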
*/ + if (map_ops[i].status) + continue; + /* m2p override only supported for GNTMAP_contains_pte mappings */ if (!(map_ops[i].flags & GNTMAP_contains_pte)) continue; @@ -483,6 +490,9 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops, if (ret) return ret; + if (xen_feature(XENFEAT_auto_translated_physmap)) + return ret; + for (i = 0; i < count; i++) { ret = m2p_remove_override(pages[i]); if (ret) diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c index ebb292859b59..95143dd6904d 100644 --- a/drivers/xen/manage.c +++ b/drivers/xen/manage.c @@ -61,7 +61,7 @@ static void xen_post_suspend(int cancelled) xen_mm_unpin_all(); } -#ifdef CONFIG_PM_SLEEP +#ifdef CONFIG_HIBERNATION static int xen_suspend(void *data) { struct suspend_info *si = data; @@ -69,7 +69,7 @@ static int xen_suspend(void *data) BUG_ON(!irqs_disabled()); - err = sysdev_suspend(PMSG_SUSPEND); + err = sysdev_suspend(PMSG_FREEZE); if (err) { printk(KERN_ERR "xen_suspend: sysdev_suspend failed: %d\n", err); @@ -118,7 +118,7 @@ static void do_suspend(void) } #endif - err = dpm_suspend_start(PMSG_SUSPEND); + err = dpm_suspend_start(PMSG_FREEZE); if (err) { printk(KERN_ERR "xen suspend: dpm_suspend_start %d\n", err); goto out_thaw; @@ -127,7 +127,7 @@ static void do_suspend(void) printk(KERN_DEBUG "suspending xenstore...\n"); xs_suspend(); - err = dpm_suspend_noirq(PMSG_SUSPEND); + err = dpm_suspend_noirq(PMSG_FREEZE); if (err) { printk(KERN_ERR "dpm_suspend_noirq failed: %d\n", err); goto out_resume; @@ -147,7 +147,7 @@ static void do_suspend(void) err = stop_machine(xen_suspend, &si, cpumask_of(0)); - dpm_resume_noirq(PMSG_RESUME); + dpm_resume_noirq(si.cancelled ? PMSG_THAW : PMSG_RESTORE); if (err) { printk(KERN_ERR "failed to start xen_suspend: %d\n", err); @@ -161,7 +161,7 @@ out_resume: } else xs_suspend_cancel(); - dpm_resume_end(PMSG_RESUME); + dpm_resume_end(si.cancelled ? PMSG_THAW : PMSG_RESTORE); /* Make sure timer events get retriggered on all CPUs */ clock_was_set(); @@ -173,7 +173,7 @@ out: #endif shutting_down = SHUTDOWN_INVALID; } -#endif /* CONFIG_PM_SLEEP */ +#endif /* CONFIG_HIBERNATION */ struct shutdown_handler { const char *command; @@ -202,7 +202,7 @@ static void shutdown_handler(struct xenbus_watch *watch, { "poweroff", do_poweroff }, { "halt", do_poweroff }, { "reboot", do_reboot }, -#ifdef CONFIG_PM_SLEEP +#ifdef CONFIG_HIBERNATION { "suspend", do_suspend }, #endif {NULL, NULL}, diff --git a/drivers/xen/xen-balloon.c b/drivers/xen/xen-balloon.c new file mode 100644 index 000000000000..a4ff225ee868 --- /dev/null +++ b/drivers/xen/xen-balloon.c @@ -0,0 +1,256 @@ +/****************************************************************************** + * Xen balloon driver - enables returning/claiming memory to/from Xen. + * + * Copyright (c) 2003, B Dragovic + * Copyright (c) 2003-2004, M Williamson, K Fraser + * Copyright (c) 2005 Dan M. 
Smith, IBM Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation; or, when distributed + * separately from the Linux kernel or incorporated into other + * software packages, subject to the following license: + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this source file (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, modify, + * merge, publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/sysdev.h> +#include <linux/capability.h> + +#include <xen/xen.h> +#include <xen/interface/xen.h> +#include <xen/balloon.h> +#include <xen/xenbus.h> +#include <xen/features.h> +#include <xen/page.h> + +#define PAGES2KB(_p) ((_p)<<(PAGE_SHIFT-10)) + +#define BALLOON_CLASS_NAME "xen_memory" + +static struct sys_device balloon_sysdev; + +static int register_balloon(struct sys_device *sysdev); + +static struct xenbus_watch target_watch = +{ + .node = "memory/target" +}; + +/* React to a change in the target key */ +static void watch_target(struct xenbus_watch *watch, + const char **vec, unsigned int len) +{ + unsigned long long new_target; + int err; + + err = xenbus_scanf(XBT_NIL, "memory", "target", "%llu", &new_target); + if (err != 1) { + /* This is ok (for domain0 at least) - so just return */ + return; + } + + /* The given memory/target value is in KiB, so it needs converting to + * pages. PAGE_SHIFT converts bytes to pages, hence PAGE_SHIFT - 10. + */ + balloon_set_new_target(new_target >> (PAGE_SHIFT - 10)); +} + +static int balloon_init_watcher(struct notifier_block *notifier, + unsigned long event, + void *data) +{ + int err; + + err = register_xenbus_watch(&target_watch); + if (err) + printk(KERN_ERR "Failed to set balloon watcher\n"); + + return NOTIFY_DONE; +} + +static struct notifier_block xenstore_notifier; + +static int __init balloon_init(void) +{ + if (!xen_domain()) + return -ENODEV; + + pr_info("xen-balloon: Initialising balloon driver.\n"); + + register_balloon(&balloon_sysdev); + + target_watch.callback = watch_target; + xenstore_notifier.notifier_call = balloon_init_watcher; + + register_xenstore_notifier(&xenstore_notifier); + + return 0; +} +subsys_initcall(balloon_init); + +static void balloon_exit(void) +{ + /* XXX - release balloon here */ + return; +} + +module_exit(balloon_exit); + +#define BALLOON_SHOW(name, format, args...) 
\ + static ssize_t show_##name(struct sys_device *dev, \ + struct sysdev_attribute *attr, \ + char *buf) \ + { \ + return sprintf(buf, format, ##args); \ + } \ + static SYSDEV_ATTR(name, S_IRUGO, show_##name, NULL) + +BALLOON_SHOW(current_kb, "%lu\n", PAGES2KB(balloon_stats.current_pages)); +BALLOON_SHOW(low_kb, "%lu\n", PAGES2KB(balloon_stats.balloon_low)); +BALLOON_SHOW(high_kb, "%lu\n", PAGES2KB(balloon_stats.balloon_high)); + +static SYSDEV_ULONG_ATTR(schedule_delay, 0444, balloon_stats.schedule_delay); +static SYSDEV_ULONG_ATTR(max_schedule_delay, 0644, balloon_stats.max_schedule_delay); +static SYSDEV_ULONG_ATTR(retry_count, 0444, balloon_stats.retry_count); +static SYSDEV_ULONG_ATTR(max_retry_count, 0644, balloon_stats.max_retry_count); + +static ssize_t show_target_kb(struct sys_device *dev, struct sysdev_attribute *attr, + char *buf) +{ + return sprintf(buf, "%lu\n", PAGES2KB(balloon_stats.target_pages)); +} + +static ssize_t store_target_kb(struct sys_device *dev, + struct sysdev_attribute *attr, + const char *buf, + size_t count) +{ + char *endchar; + unsigned long long target_bytes; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + target_bytes = simple_strtoull(buf, &endchar, 0) * 1024; + + balloon_set_new_target(target_bytes >> PAGE_SHIFT); + + return count; +} + +static SYSDEV_ATTR(target_kb, S_IRUGO | S_IWUSR, + show_target_kb, store_target_kb); + + +static ssize_t show_target(struct sys_device *dev, struct sysdev_attribute *attr, + char *buf) +{ + return sprintf(buf, "%llu\n", + (unsigned long long)balloon_stats.target_pages + << PAGE_SHIFT); +} + +static ssize_t store_target(struct sys_device *dev, + struct sysdev_attribute *attr, + const char *buf, + size_t count) +{ + char *endchar; + unsigned long long target_bytes; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + target_bytes = memparse(buf, &endchar); + + balloon_set_new_target(target_bytes >> PAGE_SHIFT); + + return count; +} + +static SYSDEV_ATTR(target, S_IRUGO | S_IWUSR, + show_target, store_target); + + +static struct sysdev_attribute *balloon_attrs[] = { + &attr_target_kb, + &attr_target, + &attr_schedule_delay.attr, + &attr_max_schedule_delay.attr, + &attr_retry_count.attr, + &attr_max_retry_count.attr +}; + +static struct attribute *balloon_info_attrs[] = { + &attr_current_kb.attr, + &attr_low_kb.attr, + &attr_high_kb.attr, + NULL +}; + +static struct attribute_group balloon_info_group = { + .name = "info", + .attrs = balloon_info_attrs +}; + +static struct sysdev_class balloon_sysdev_class = { + .name = BALLOON_CLASS_NAME +}; + +static int register_balloon(struct sys_device *sysdev) +{ + int i, error; + + error = sysdev_class_register(&balloon_sysdev_class); + if (error) + return error; + + sysdev->id = 0; + sysdev->cls = &balloon_sysdev_class; + + error = sysdev_register(sysdev); + if (error) { + sysdev_class_unregister(&balloon_sysdev_class); + return error; + } + + for (i = 0; i < ARRAY_SIZE(balloon_attrs); i++) { + error = sysdev_create_file(sysdev, balloon_attrs[i]); + if (error) + goto fail; + } + + error = sysfs_create_group(&sysdev->kobj, &balloon_info_group); + if (error) + goto fail; + + return 0; + + fail: + while (--i >= 0) + sysdev_remove_file(sysdev, balloon_attrs[i]); + sysdev_unregister(sysdev); + sysdev_class_unregister(&balloon_sysdev_class); + return error; +} + +MODULE_LICENSE("GPL"); diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c index baa65e7fbbc7..739769551e33 100644 --- a/drivers/xen/xenbus/xenbus_probe.c +++ 
b/drivers/xen/xenbus/xenbus_probe.c
@@ -577,7 +577,7 @@ void xenbus_dev_changed(const char *node, struct xen_bus_type *bus)
 }
 EXPORT_SYMBOL_GPL(xenbus_dev_changed);
 
-int xenbus_dev_suspend(struct device *dev, pm_message_t state)
+int xenbus_dev_suspend(struct device *dev)
 {
 	int err = 0;
 	struct xenbus_driver *drv;
@@ -590,7 +590,7 @@ int xenbus_dev_suspend(struct device *dev, pm_message_t state)
 		return 0;
 	drv = to_xenbus_driver(dev->driver);
 	if (drv->suspend)
-		err = drv->suspend(xdev, state);
+		err = drv->suspend(xdev);
 	if (err)
 		printk(KERN_WARNING
 		       "xenbus: suspend %s failed: %i\n", dev_name(dev), err);
@@ -642,6 +642,14 @@ int xenbus_dev_resume(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(xenbus_dev_resume);
 
+int xenbus_dev_cancel(struct device *dev)
+{
+	/* Do nothing */
+	DPRINTK("cancel");
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xenbus_dev_cancel);
+
 /* A flag to determine if xenstored is 'ready' (i.e. has started) */
 int xenstored_ready = 0;
diff --git a/drivers/xen/xenbus/xenbus_probe.h b/drivers/xen/xenbus/xenbus_probe.h
index 24665812316a..888b9900ca08 100644
--- a/drivers/xen/xenbus/xenbus_probe.h
+++ b/drivers/xen/xenbus/xenbus_probe.h
@@ -64,8 +64,9 @@ extern void xenbus_dev_changed(const char *node,
 			       struct xen_bus_type *bus);
 extern void xenbus_dev_shutdown(struct device *_dev);
 
-extern int xenbus_dev_suspend(struct device *dev, pm_message_t state);
+extern int xenbus_dev_suspend(struct device *dev);
 extern int xenbus_dev_resume(struct device *dev);
+extern int xenbus_dev_cancel(struct device *dev);
 
 extern void xenbus_otherend_changed(struct xenbus_watch *watch,
 				    const char **vec, unsigned int len,
diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c
index 5bcc2d6cf129..b6a2690c9d49 100644
--- a/drivers/xen/xenbus/xenbus_probe_frontend.c
+++ b/drivers/xen/xenbus/xenbus_probe_frontend.c
@@ -85,6 +85,14 @@ static struct device_attribute xenbus_frontend_dev_attrs[] = {
 	__ATTR_NULL
 };
 
+static const struct dev_pm_ops xenbus_pm_ops = {
+	.suspend = xenbus_dev_suspend,
+	.resume = xenbus_dev_resume,
+	.freeze = xenbus_dev_suspend,
+	.thaw = xenbus_dev_cancel,
+	.restore = xenbus_dev_resume,
+};
+
 static struct xen_bus_type xenbus_frontend = {
 	.root = "device",
 	.levels = 2,		/* device/type/<id> */
@@ -100,8 +108,7 @@ static struct xen_bus_type xenbus_frontend = {
 		.shutdown = xenbus_dev_shutdown,
 		.dev_attrs = xenbus_frontend_dev_attrs,
 
-		.suspend = xenbus_dev_suspend,
-		.resume = xenbus_dev_resume,
+		.pm = &xenbus_pm_ops,
 	},
 };
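
The xenbus_pm_ops table above replaces the legacy bus-level suspend/resume hooks with one dev_pm_ops and routes the hibernation phases explicitly: freeze quiesces exactly like suspend, thaw is a do-nothing cancel (the image was written, or writing was cancelled, in the same running kernel), and restore is a full resume. This matches manage.c now driving Xen suspend with PMSG_FREEZE/PMSG_THAW/PMSG_RESTORE. A minimal sketch of the same pattern for a hypothetical driver (the demo_* names are placeholders):

#include <linux/device.h>
#include <linux/pm.h>

static int demo_suspend(struct device *dev)
{
	/* Quiesce the device; used for both suspend-to-RAM and freeze. */
	return 0;
}

static int demo_resume(struct device *dev)
{
	/* Reinitialise after suspend, or after a hibernation image has
	 * been restored by the boot kernel. */
	return 0;
}

static int demo_cancel(struct device *dev)
{
	/* thaw: still the same kernel instance, nothing to reprogram. */
	return 0;
}

static const struct dev_pm_ops demo_pm_ops = {
	.suspend = demo_suspend,
	.resume  = demo_resume,
	.freeze  = demo_suspend,
	.thaw    = demo_cancel,
	.restore = demo_resume,
};
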